/*
 * libata-scsi.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from
 * - http://www.t10.org/
 * - http://www.t13.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_transport.h>
#include <linux/libata.h>
#include <linux/hdreg.h>
#include <asm/uaccess.h>

#include "libata.h"

#define SECTOR_SIZE	512

typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc);

static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap,
					const struct scsi_device *scsidev);
static struct ata_device * ata_scsi_find_dev(struct ata_port *ap,
					    const struct scsi_device *scsidev);
static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel,
			      unsigned int id, unsigned int lun);


#define RW_RECOVERY_MPAGE		0x1
#define RW_RECOVERY_MPAGE_LEN		12
#define CACHE_MPAGE			0x8
#define CACHE_MPAGE_LEN			20
#define CONTROL_MPAGE			0xa
#define CONTROL_MPAGE_LEN		12
#define ALL_MPAGES			0x3f
#define ALL_SUB_MPAGES			0xff


static const u8 def_rw_recovery_mpage[RW_RECOVERY_MPAGE_LEN] = {
	RW_RECOVERY_MPAGE,
	RW_RECOVERY_MPAGE_LEN - 2,
	(1 << 7),	/* AWRE */
	0,		/* read retry count */
	0, 0, 0, 0,
	0,		/* write retry count */
	0, 0, 0
};

static const u8 def_cache_mpage[CACHE_MPAGE_LEN] = {
	CACHE_MPAGE,
	CACHE_MPAGE_LEN - 2,
	0,		/* contains WCE, needs to be 0 for logic */
	0, 0, 0, 0, 0, 0, 0, 0, 0,
	0,		/* contains DRA, needs to be 0 for logic */
	0, 0, 0, 0, 0, 0, 0
};

static const u8 def_control_mpage[CONTROL_MPAGE_LEN] = {
	CONTROL_MPAGE,
	CONTROL_MPAGE_LEN - 2,
	2,	/* DSENSE=0, GLTSD=1 */
	0,	/* [QAM+QERR may be 1, see 05-359r1] */
	0, 0, 0, 0, 0xff, 0xff,
	0, 30	/* extended self test time, see 05-359r1 */
};

/*
 * libata transport template.  libata doesn't do real transport stuff.
 * It just needs the eh_timed_out hook.
 */
static struct scsi_transport_template ata_scsi_transport_template = {
	.eh_strategy_handler	= ata_scsi_error,
	.eh_timed_out		= ata_scsi_timed_out,
	.user_scan		= ata_scsi_user_scan,
};


static void ata_scsi_invalid_field(struct scsi_cmnd *cmd,
				   void (*done)(struct scsi_cmnd *))
{
	ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	done(cmd);
}

/**
 * ata_std_bios_param - generic bios head/sector/cylinder calculator used by sd.
 * @sdev: SCSI device for which BIOS geometry is to be determined
 * @bdev: block device associated with @sdev
 * @capacity: capacity of SCSI device
 * @geom: location to which geometry will be output
 *
 * Generic bios head/sector/cylinder calculator
 * used by sd.  Most BIOSes nowadays expect a XXX/255/16 (CHS)
 * mapping.  Some situations may arise where the disk is not
 * bootable if this is not used.
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero.
 */
int ata_std_bios_param(struct scsi_device *sdev, struct block_device *bdev,
		       sector_t capacity, int geom[])
{
	geom[0] = 255;
	geom[1] = 63;
	sector_div(capacity, 255*63);
	geom[2] = capacity;

	return 0;
}

/**
 * ata_get_identity - Handler for HDIO_GET_IDENTITY ioctl
 * @sdev: SCSI device to get identify data for
 * @arg: User buffer area for identify data
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
static int ata_get_identity(struct scsi_device *sdev, void __user *arg)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev = ata_scsi_find_dev(ap, sdev);
	u16 __user *dst = arg;
	char buf[40];

	if (!dev)
		return -ENOMSG;

	if (copy_to_user(dst, dev->id, ATA_ID_WORDS * sizeof(u16)))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_PROD, ATA_ID_PROD_LEN);
	if (copy_to_user(dst + ATA_ID_PROD, buf, ATA_ID_PROD_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_FW_REV, ATA_ID_FW_REV_LEN);
	if (copy_to_user(dst + ATA_ID_FW_REV, buf, ATA_ID_FW_REV_LEN))
		return -EFAULT;

	ata_id_string(dev->id, buf, ATA_ID_SERNO, ATA_ID_SERNO_LEN);
	if (copy_to_user(dst + ATA_ID_SERNO, buf, ATA_ID_SERNO_LEN))
		return -EFAULT;

	return 0;
}
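
/*
 * Illustrative sketch (not part of the driver): user space can fetch the raw
 * IDENTIFY data through the ioctl handled above roughly as follows, assuming
 * /dev/sda is a libata-managed disk:
 *
 *	unsigned short id[256];
 *	int fd = open("/dev/sda", O_RDONLY);
 *
 *	if (fd >= 0 && ioctl(fd, HDIO_GET_IDENTITY, id) == 0)
 *		printf("model: %.40s\n", (char *)&id[27]);
 *
 * ata_get_identity() byte-swaps the model, firmware revision and serial
 * number fields into readable ASCII before copying the buffer out, which is
 * why the string starting at word 27 prints directly.
 */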

/**
 * ata_cmd_ioctl - Handler for HDIO_DRIVE_CMD ioctl
 * @scsidev: Device to which we are issuing command
 * @arg: User provided data for issuing command
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg)
{
	int rc = 0;
	u8 scsi_cmd[MAX_COMMAND_SIZE];
	u8 args[4], *argbuf = NULL, *sensebuf = NULL;
	int argsize = 0;
	enum dma_data_direction data_dir;
	int cmd_result;

	if (arg == NULL)
		return -EINVAL;

	if (copy_from_user(args, arg, sizeof(args)))
		return -EFAULT;

	sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
	if (!sensebuf)
		return -ENOMEM;

	memset(scsi_cmd, 0, sizeof(scsi_cmd));

	if (args[3]) {
		argsize = SECTOR_SIZE * args[3];
		argbuf = kmalloc(argsize, GFP_KERNEL);
		if (argbuf == NULL) {
			rc = -ENOMEM;
			goto error;
		}

		scsi_cmd[1]  = (4 << 1);	/* PIO Data-in */
		scsi_cmd[2]  = 0x0e;		/* no off.line or cc, read from dev,
						   block count in sector count field */
		data_dir = DMA_FROM_DEVICE;
	} else {
		scsi_cmd[1]  = (3 << 1);	/* Non-data */
		scsi_cmd[2]  = 0x20;		/* cc but no off.line or data xfer */
		data_dir = DMA_NONE;
	}

	scsi_cmd[0] = ATA_16;

	scsi_cmd[4] = args[2];
	if (args[0] == WIN_SMART) {	/* hack -- ide driver does this too... */
		scsi_cmd[6]  = args[3];
		scsi_cmd[8]  = args[1];
		scsi_cmd[10] = 0x4f;
		scsi_cmd[12] = 0xc2;
	} else {
		scsi_cmd[6]  = args[1];
	}
	scsi_cmd[14] = args[0];

	/* Good values for timeout and retries?  Values below
	   from scsi_ioctl_send_command() for default case... */
	cmd_result = scsi_execute(scsidev, scsi_cmd, data_dir, argbuf, argsize,
				  sensebuf, (10*HZ), 5, 0);

	if (driver_byte(cmd_result) == DRIVER_SENSE) {	/* sense data available */
		u8 *desc = sensebuf + 8;
		cmd_result &= ~(0xFF<<24);	/* DRIVER_SENSE is not an error */

		/* If we set cc then ATA pass-through will cause a
		 * check condition even if no error.  Filter that. */
		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
			struct scsi_sense_hdr sshdr;
			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
					     &sshdr);
			if (sshdr.sense_key == 0 &&
			    sshdr.asc == 0 && sshdr.ascq == 0)
				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		}

		/* Send userspace a few ATA registers (same as drivers/ide) */
		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
		    desc[0] == 0x09) {		/* code is "ATA Descriptor" */
			args[0] = desc[13];	/* status */
			args[1] = desc[3];	/* error */
			args[2] = desc[5];	/* sector count (0:7) */
			if (copy_to_user(arg, args, sizeof(args)))
				rc = -EFAULT;
		}
	}


	if (cmd_result) {
		rc = -EIO;
		goto error;
	}

	if ((argbuf)
	 && copy_to_user(arg + sizeof(args), argbuf, argsize))
		rc = -EFAULT;
error:
	kfree(sensebuf);
	kfree(argbuf);
	return rc;
}
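
/*
 * For reference when reading the two ioctl handlers here: the ATA_16
 * (ATA PASS-THROUGH(16), opcode 0x85) CDB they build is laid out as
 *
 *	byte  1: protocol << 1		(3 = non-data, 4 = PIO data-in)
 *	byte  2: ck_cond/t_dir/byt_blok/t_length flags
 *		 (0x20 = ck_cond, 0x0e = read from device, transfer length
 *		  taken from the sector count field)
 *	byte  4: features	byte  6: sector count
 *	byte  8: LBA low	byte 10: LBA mid	byte 12: LBA high
 *	byte 13: device		byte 14: ATA command
 *
 * This is only a summary of the assignments made above, not a full
 * statement of the SAT specification.
 */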

/**
 * ata_task_ioctl - Handler for HDIO_DRIVE_TASK ioctl
 * @scsidev: Device to which we are issuing command
 * @arg: User provided data for issuing command
 *
 * LOCKING:
 * Defined by the SCSI layer.  We don't really care.
 *
 * RETURNS:
 * Zero on success, negative errno on error.
 */
int ata_task_ioctl(struct scsi_device *scsidev, void __user *arg)
{
	int rc = 0;
	u8 scsi_cmd[MAX_COMMAND_SIZE];
	u8 args[7], *sensebuf = NULL;
	int cmd_result;

	if (arg == NULL)
		return -EINVAL;

	if (copy_from_user(args, arg, sizeof(args)))
		return -EFAULT;

	sensebuf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
	if (!sensebuf)
		return -ENOMEM;

	memset(scsi_cmd, 0, sizeof(scsi_cmd));
	scsi_cmd[0]  = ATA_16;
	scsi_cmd[1]  = (3 << 1);	/* Non-data */
	scsi_cmd[2]  = 0x20;		/* cc but no off.line or data xfer */
	scsi_cmd[4]  = args[1];
	scsi_cmd[6]  = args[2];
	scsi_cmd[8]  = args[3];
	scsi_cmd[10] = args[4];
	scsi_cmd[12] = args[5];
	scsi_cmd[13] = args[6] & 0x4f;
	scsi_cmd[14] = args[0];

	/* Good values for timeout and retries?  Values below
	   from scsi_ioctl_send_command() for default case... */
	cmd_result = scsi_execute(scsidev, scsi_cmd, DMA_NONE, NULL, 0,
				  sensebuf, (10*HZ), 5, 0);

	if (driver_byte(cmd_result) == DRIVER_SENSE) {	/* sense data available */
		u8 *desc = sensebuf + 8;
		cmd_result &= ~(0xFF<<24);	/* DRIVER_SENSE is not an error */

		/* If we set cc then ATA pass-through will cause a
		 * check condition even if no error.  Filter that. */
		if (cmd_result & SAM_STAT_CHECK_CONDITION) {
			struct scsi_sense_hdr sshdr;
			scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE,
					     &sshdr);
			if (sshdr.sense_key == 0 &&
			    sshdr.asc == 0 && sshdr.ascq == 0)
				cmd_result &= ~SAM_STAT_CHECK_CONDITION;
		}

		/* Send userspace ATA registers */
		if (sensebuf[0] == 0x72 &&	/* format is "descriptor" */
		    desc[0] == 0x09) {		/* code is "ATA Descriptor" */
			args[0] = desc[13];	/* status */
			args[1] = desc[3];	/* error */
			args[2] = desc[5];	/* sector count (0:7) */
			args[3] = desc[7];	/* lbal */
			args[4] = desc[9];	/* lbam */
			args[5] = desc[11];	/* lbah */
			args[6] = desc[12];	/* select */
			if (copy_to_user(arg, args, sizeof(args)))
				rc = -EFAULT;
		}
	}

	if (cmd_result) {
		rc = -EIO;
		goto error;
	}

error:
	kfree(sensebuf);
	return rc;
}

int ata_scsi_ioctl(struct scsi_device *scsidev, int cmd, void __user *arg)
{
	int val = -EINVAL, rc = -EINVAL;

	switch (cmd) {
	case ATA_IOC_GET_IO32:
		val = 0;
		if (copy_to_user(arg, &val, 1))
			return -EFAULT;
		return 0;

	case ATA_IOC_SET_IO32:
		val = (unsigned long) arg;
		if (val != 0)
			return -EINVAL;
		return 0;

	case HDIO_GET_IDENTITY:
		return ata_get_identity(scsidev, arg);

	case HDIO_DRIVE_CMD:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_cmd_ioctl(scsidev, arg);

	case HDIO_DRIVE_TASK:
		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
			return -EACCES;
		return ata_task_ioctl(scsidev, arg);

	default:
		rc = -ENOTTY;
		break;
	}

	return rc;
}

/**
 * ata_scsi_qc_new - acquire new ata_queued_cmd reference
 * @dev: ATA device to which the new command is attached
 * @cmd: SCSI command that originated this ATA command
 * @done: SCSI command completion function
 *
 * Obtain a reference to an unused ata_queued_cmd structure,
 * which is the basic libata structure representing a single
 * ATA command sent to the hardware.
 *
 * If a command was available, fill in the SCSI-specific
 * portions of the structure with information on the
 * current command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Command allocated, or %NULL if none available.
 */
static struct ata_queued_cmd *ata_scsi_qc_new(struct ata_device *dev,
					      struct scsi_cmnd *cmd,
					      void (*done)(struct scsi_cmnd *))
{
	struct ata_queued_cmd *qc;

	qc = ata_qc_new_init(dev);
	if (qc) {
		qc->scsicmd = cmd;
		qc->scsidone = done;

		qc->__sg = scsi_sglist(cmd);
		qc->n_elem = scsi_sg_count(cmd);
	} else {
		cmd->result = (DID_OK << 16) | (QUEUE_FULL << 1);
		done(cmd);
	}

	return qc;
}

/**
 * ata_dump_status - user friendly display of error info
 * @id: id of the port in question
 * @tf: ptr to filled out taskfile
 *
 * Decode and dump the ATA error/status registers for the user so
 * that they have some idea what really happened at the non
 * make-believe layer.
 *
 * LOCKING:
 * inherited from caller
 */
static void ata_dump_status(unsigned id, struct ata_taskfile *tf)
{
	u8 stat = tf->command, err = tf->feature;

	printk(KERN_WARNING "ata%u: status=0x%02x { ", id, stat);
	if (stat & ATA_BUSY) {
		printk("Busy }\n");	/* Data is not valid in this case */
	} else {
		if (stat & 0x40)	printk("DriveReady ");
		if (stat & 0x20)	printk("DeviceFault ");
		if (stat & 0x10)	printk("SeekComplete ");
		if (stat & 0x08)	printk("DataRequest ");
		if (stat & 0x04)	printk("CorrectedError ");
		if (stat & 0x02)	printk("Index ");
		if (stat & 0x01)	printk("Error ");
		printk("}\n");

		if (err) {
			printk(KERN_WARNING "ata%u: error=0x%02x { ", id, err);
			if (err & 0x04)		printk("DriveStatusError ");
			if (err & 0x80) {
				if (err & 0x04)	printk("BadCRC ");
				else		printk("Sector ");
			}
			if (err & 0x40)		printk("UncorrectableError ");
			if (err & 0x10)		printk("SectorIdNotFound ");
			if (err & 0x02)		printk("TrackZeroNotFound ");
			if (err & 0x01)		printk("AddrMarkNotFound ");
			printk("}\n");
		}
	}
}
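
/*
 * Example of what ata_dump_status() above prints for a failed command
 * reporting status 0x51 and error 0x04:
 *
 *	ata1: status=0x51 { DriveReady SeekComplete Error }
 *	ata1: error=0x04 { DriveStatusError }
 *
 * (0x51 = 0x40 | 0x10 | 0x01; 0x04 matches the DriveStatusError test.)
 */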

/**
 * ata_to_sense_error - convert ATA error to SCSI error
 * @id: ATA device number
 * @drv_stat: value contained in ATA status register
 * @drv_err: value contained in ATA error register
 * @sk: the sense key we'll fill out
 * @asc: the additional sense code we'll fill out
 * @ascq: the additional sense code qualifier we'll fill out
 * @verbose: be verbose
 *
 * Converts an ATA error into a SCSI error.  Fill out pointers to
 * SK, ASC, and ASCQ bytes for later use in fixed or descriptor
 * format sense blocks.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
static void ata_to_sense_error(unsigned id, u8 drv_stat, u8 drv_err, u8 *sk,
			       u8 *asc, u8 *ascq, int verbose)
{
	int i;

	/* Based on the 3ware driver translation table */
	static const unsigned char sense_table[][4] = {
		/* BBD|ECC|ID|MAR */
		{0xd1, ABORTED_COMMAND, 0x00, 0x00},	// Device busy		Aborted command
		/* BBD|ECC|ID */
		{0xd0, ABORTED_COMMAND, 0x00, 0x00},	// Device busy		Aborted command
		/* ECC|MC|MARK */
		{0x61, HARDWARE_ERROR, 0x00, 0x00},	// Device fault		Hardware error
		/* ICRC|ABRT */		/* NB: ICRC & !ABRT is BBD */
		{0x84, ABORTED_COMMAND, 0x47, 0x00},	// Data CRC error	SCSI parity error
		/* MC|ID|ABRT|TRK0|MARK */
		{0x37, NOT_READY, 0x04, 0x00},		// Unit offline		Not ready
		/* MCR|MARK */
		{0x09, NOT_READY, 0x04, 0x00},		// Unrecovered disk error  Not ready
		/* Bad address mark */
		{0x01, MEDIUM_ERROR, 0x13, 0x00},	// Address mark not found  Address mark not found for data field
		/* TRK0 */
		{0x02, HARDWARE_ERROR, 0x00, 0x00},	// Track 0 not found	Hardware error
		/* Abort & !ICRC */
		{0x04, ABORTED_COMMAND, 0x00, 0x00},	// Aborted command	Aborted command
		/* Media change request */
		{0x08, NOT_READY, 0x04, 0x00},		// Media change request	FIXME: faking offline
		/* SRV */
		{0x10, ABORTED_COMMAND, 0x14, 0x00},	// ID not found		Recorded entity not found
		/* Media change */
		{0x08, NOT_READY, 0x04, 0x00},		// Media change		FIXME: faking offline
		/* ECC */
		{0x40, MEDIUM_ERROR, 0x11, 0x04},	// Uncorrectable ECC error  Unrecovered read error
		/* BBD - block marked bad */
		{0x80, MEDIUM_ERROR, 0x11, 0x04},	// Block marked bad	Medium error, unrecovered read error
		{0xFF, 0xFF, 0xFF, 0xFF},		// END mark
	};
	static const unsigned char stat_table[][4] = {
		/* Must be first because BUSY means no other bits valid */
		{0x80, ABORTED_COMMAND, 0x47, 0x00},	// Busy, fake parity for now
		{0x20, HARDWARE_ERROR,  0x00, 0x00},	// Device fault
		{0x08, ABORTED_COMMAND, 0x47, 0x00},	// Timed out in xfer, fake parity for now
		{0x04, RECOVERED_ERROR, 0x11, 0x00},	// Recovered ECC error	Medium error, recovered
		{0xFF, 0xFF, 0xFF, 0xFF},		// END mark
	};

	/*
	 * Is this an error we can process/parse
	 */
	if (drv_stat & ATA_BUSY) {
		drv_err = 0;	/* Ignore the err bits, they're invalid */
	}

	if (drv_err) {
		/* Look for drv_err */
		for (i = 0; sense_table[i][0] != 0xFF; i++) {
			/* Look for best matches first */
			if ((sense_table[i][0] & drv_err) ==
			    sense_table[i][0]) {
				*sk = sense_table[i][1];
				*asc = sense_table[i][2];
				*ascq = sense_table[i][3];
				goto translate_done;
			}
		}
		/* No immediate match */
		if (verbose)
			printk(KERN_WARNING "ata%u: no sense translation for "
			       "error 0x%02x\n", id, drv_err);
	}

	/* Fall back to interpreting status bits */
	for (i = 0; stat_table[i][0] != 0xFF; i++) {
		if (stat_table[i][0] & drv_stat) {
			*sk = stat_table[i][1];
			*asc = stat_table[i][2];
			*ascq = stat_table[i][3];
			goto translate_done;
		}
	}
	/* No error?  Undecoded? */
	if (verbose)
		printk(KERN_WARNING "ata%u: no sense translation for "
		       "status: 0x%02x\n", id, drv_stat);

	/* We need a sensible error return here, which is tricky, and one
	   that won't cause people to do things like return a disk wrongly */
	*sk = ABORTED_COMMAND;
	*asc = 0x00;
	*ascq = 0x00;

translate_done:
	if (verbose)
		printk(KERN_ERR "ata%u: translated ATA stat/err 0x%02x/%02x "
		       "to SCSI SK/ASC/ASCQ 0x%x/%02x/%02x\n",
		       id, drv_stat, drv_err, *sk, *asc, *ascq);
	return;
}
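
/*
 * Two worked examples of the mapping above: an uncorrectable read (ERR set,
 * error register 0x40 = UNC) translates to MEDIUM_ERROR/0x11/0x04
 * ("unrecovered read error"), while a plain command abort (error register
 * 0x04 = ABRT) becomes ABORTED_COMMAND/0x00/0x00.
 */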

/*
 * ata_gen_passthru_sense - Generate check condition sense block.
 * @qc: Command that completed.
 *
 * This function is specific to the ATA descriptor format sense
 * block specified for the ATA pass through commands.  Regardless
 * of whether the command errored or not, return a sense
 * block.  Copy all controller registers into the sense
 * block.  Clear sense key, ASC & ASCQ if there is no error.
 *
 * LOCKING:
 * None.
 */
static void ata_gen_passthru_sense(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	unsigned char *sb = cmd->sense_buffer;
	unsigned char *desc = sb + 8;
	int verbose = qc->ap->ops->error_handler == NULL;

	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	/*
	 * Use ata_to_sense_error() to map status register bits
	 * onto sense key, asc & ascq.
	 */
	if (qc->err_mask ||
	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
				   &sb[1], &sb[2], &sb[3], verbose);
		sb[1] &= 0x0f;
	}

	/*
	 * Sense data is current and format is descriptor.
	 */
	sb[0] = 0x72;

	desc[0] = 0x09;

	/* set length of additional sense data */
	sb[7] = 14;
	desc[1] = 12;

	/*
	 * Copy registers into sense buffer.
	 */
	desc[2] = 0x00;
	desc[3] = tf->feature;	/* == error reg */
	desc[5] = tf->nsect;
	desc[7] = tf->lbal;
	desc[9] = tf->lbam;
	desc[11] = tf->lbah;
	desc[12] = tf->device;
	desc[13] = tf->command;	/* == status reg */

	/*
	 * Fill in Extend bit, and the high order bytes
	 * if applicable.
	 */
	if (tf->flags & ATA_TFLAG_LBA48) {
		desc[2] |= 0x01;
		desc[4] = tf->hob_nsect;
		desc[6] = tf->hob_lbal;
		desc[8] = tf->hob_lbam;
		desc[10] = tf->hob_lbah;
	}
}
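
/*
 * For reference, the descriptor-format sense buffer built above looks like
 * this (offsets into cmd->sense_buffer):
 *
 *	[0]  0x72	response code: current, descriptor format
 *	[1]  sense key	[2] ASC		[3] ASCQ
 *	[7]  14		additional sense length
 *	[8]  0x09	ATA Status Return descriptor
 *	[9]  0x0c	descriptor length
 *	[10] flags (bit 0 = EXTEND)
 *	[11] error	[13] count	[15] LBA low	[17] LBA mid
 *	[19] LBA high	[20] device	[21] status
 *
 * This is a readback of the assignments above (desc == sb + 8), not an
 * independent statement of the SAT descriptor format.
 */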

/**
 * ata_gen_ata_sense - generate a SCSI fixed sense block
 * @qc: Command that we are erroring out
 *
 * Generate sense block for a failed ATA command @qc.  Descriptor
 * format is used to accommodate LBA48 block address.
 *
 * LOCKING:
 * None.
 */
static void ata_gen_ata_sense(struct ata_queued_cmd *qc)
{
	struct ata_device *dev = qc->dev;
	struct scsi_cmnd *cmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->result_tf;
	unsigned char *sb = cmd->sense_buffer;
	unsigned char *desc = sb + 8;
	int verbose = qc->ap->ops->error_handler == NULL;
	u64 block;

	memset(sb, 0, SCSI_SENSE_BUFFERSIZE);

	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	/* sense data is current and format is descriptor */
	sb[0] = 0x72;

	/* Use ata_to_sense_error() to map status register bits
	 * onto sense key, asc & ascq.
	 */
	if (qc->err_mask ||
	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
		ata_to_sense_error(qc->ap->print_id, tf->command, tf->feature,
				   &sb[1], &sb[2], &sb[3], verbose);
		sb[1] &= 0x0f;
	}

	block = ata_tf_read_block(&qc->result_tf, dev);

	/* information sense data descriptor */
	sb[7] = 12;
	desc[0] = 0x00;
	desc[1] = 10;

	desc[2] |= 0x80;	/* valid */
	desc[6] = block >> 40;
	desc[7] = block >> 32;
	desc[8] = block >> 24;
	desc[9] = block >> 16;
	desc[10] = block >> 8;
	desc[11] = block;
}

static void ata_scsi_sdev_config(struct scsi_device *sdev)
{
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;

	/* Schedule policy is determined by ->qc_defer() callback and
	 * it needs to see every deferred qc.  Set dev_blocked to 1 to
	 * prevent SCSI midlayer from automatically deferring
	 * requests.
	 */
	sdev->max_device_blocked = 1;
}

static void ata_scsi_dev_config(struct scsi_device *sdev,
				struct ata_device *dev)
{
	/* configure max sectors */
	blk_queue_max_sectors(sdev->request_queue, dev->max_sectors);

	/* SATA DMA transfers must be multiples of 4 byte, so
	 * we need to pad ATAPI transfers using an extra sg.
	 * Decrement max hw segments accordingly.
	 */
	if (dev->class == ATA_DEV_ATAPI) {
		struct request_queue *q = sdev->request_queue;
		blk_queue_max_hw_segments(q, q->max_hw_segments - 1);
	}

	if (dev->flags & ATA_DFLAG_NCQ) {
		int depth;

		depth = min(sdev->host->can_queue, ata_id_queue_depth(dev->id));
		depth = min(ATA_MAX_QUEUE - 1, depth);
		scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, depth);
	}
}

/**
 * ata_scsi_slave_config - Set SCSI device attributes
 * @sdev: SCSI device to examine
 *
 * This is called before we actually start reading
 * and writing to the device, to configure certain
 * SCSI mid-layer behaviors.
 *
 * LOCKING:
 * Defined by SCSI layer.  We don't really care.
 */

int ata_scsi_slave_config(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev = __ata_scsi_find_dev(ap, sdev);

	ata_scsi_sdev_config(sdev);

	blk_queue_max_phys_segments(sdev->request_queue, LIBATA_MAX_PRD);

	sdev->manage_start_stop = 1;

	if (dev)
		ata_scsi_dev_config(sdev, dev);

	return 0;	/* scsi layer doesn't check return value, sigh */
}

/**
 * ata_scsi_slave_destroy - SCSI device is about to be destroyed
 * @sdev: SCSI device to be destroyed
 *
 * @sdev is about to be destroyed for hot/warm unplugging.  If
 * this unplugging was initiated by libata as indicated by NULL
 * dev->sdev, this function doesn't have to do anything.
 * Otherwise, SCSI layer initiated warm-unplug is in progress.
 * Clear dev->sdev, schedule the device for ATA detach and invoke
 * EH.
 *
 * LOCKING:
 * Defined by SCSI layer.  We don't really care.
 */
void ata_scsi_slave_destroy(struct scsi_device *sdev)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	unsigned long flags;
	struct ata_device *dev;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	dev = __ata_scsi_find_dev(ap, sdev);
	if (dev && dev->sdev) {
		/* SCSI device already in CANCEL state, no need to offline it */
		dev->sdev = NULL;
		dev->flags |= ATA_DFLAG_DETACH;
		ata_port_schedule_eh(ap);
	}
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
 * @sdev: SCSI device to configure queue depth for
 * @queue_depth: new queue depth
 *
 * This is libata standard hostt->change_queue_depth callback.
 * SCSI will call into this callback when user tries to set queue
 * depth via sysfs.
 *
 * LOCKING:
 * SCSI layer (we don't care)
 *
 * RETURNS:
 * Newly configured queue depth.
 */
int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth)
{
	struct ata_port *ap = ata_shost_to_port(sdev->host);
	struct ata_device *dev;
	unsigned long flags;

	if (queue_depth < 1 || queue_depth == sdev->queue_depth)
		return sdev->queue_depth;

	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev || !ata_dev_enabled(dev))
		return sdev->queue_depth;

	/* NCQ enabled? */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_NCQ_OFF;
	if (queue_depth == 1 || !ata_ncq_enabled(dev)) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		queue_depth = 1;
	}
	spin_unlock_irqrestore(ap->lock, flags);

	/* limit and apply queue depth */
	queue_depth = min(queue_depth, sdev->host->can_queue);
	queue_depth = min(queue_depth, ata_id_queue_depth(dev->id));
	queue_depth = min(queue_depth, ATA_MAX_QUEUE - 1);

	if (sdev->queue_depth == queue_depth)
		return -EINVAL;

	scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, queue_depth);
	return queue_depth;
}

/* XXX: for spindown warning */
static void ata_delayed_done_timerfn(unsigned long arg)
{
	struct scsi_cmnd *scmd = (void *)arg;

	scmd->scsi_done(scmd);
}

/* XXX: for spindown warning */
static void ata_delayed_done(struct scsi_cmnd *scmd)
{
	static struct timer_list timer;

	setup_timer(&timer, ata_delayed_done_timerfn, (unsigned long)scmd);
	mod_timer(&timer, jiffies + 5 * HZ);
}
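
/*
 * For reference when reading the translation below: in the START STOP UNIT
 * CDB, byte 1 bit 0 is IMMED, byte 4 bit 0 is START, byte 4 bit 1 is LOEJ,
 * and byte 4 bits 7:4 carry the POWER CONDITION field.  That is how the
 * checks on cdb[1] and cdb[4] in ata_scsi_start_stop_xlat() should be read.
 */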

/**
 * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
 * @qc: Storage for translated ATA taskfile
 *
 * Sets up an ATA taskfile to issue STANDBY (to stop) or READ VERIFY
 * (to start).  Perhaps these commands should be preceded by
 * CHECK POWER MODE to see what power mode the device is already in.
 * [See SAT revision 5 at www.t10.org]
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_start_stop_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	const u8 *cdb = scmd->cmnd;

	if (scmd->cmd_len < 5)
		goto invalid_fld;

	tf->flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf->protocol = ATA_PROT_NODATA;
	if (cdb[1] & 0x1) {
		;	/* ignore IMMED bit, violates sat-r05 */
	}
	if (cdb[4] & 0x2)
		goto invalid_fld;	/* LOEJ bit set not supported */
	if (((cdb[4] >> 4) & 0xf) != 0)
		goto invalid_fld;	/* power conditions not supported */

	if (qc->dev->horkage & ATA_HORKAGE_SKIP_PM) {
		/* the device lacks PM support, finish without doing anything */
		scmd->result = SAM_STAT_GOOD;
		return 1;
	}

	if (cdb[4] & 0x1) {
		tf->nsect = 1;	/* 1 sector, lba=0 */

		if (qc->dev->flags & ATA_DFLAG_LBA) {
			tf->flags |= ATA_TFLAG_LBA;

			tf->lbah = 0x0;
			tf->lbam = 0x0;
			tf->lbal = 0x0;
			tf->device |= ATA_LBA;
		} else {
			/* CHS */
			tf->lbal = 0x1; /* sect */
			tf->lbam = 0x0; /* cyl low */
			tf->lbah = 0x0; /* cyl high */
		}

		tf->command = ATA_CMD_VERIFY;	/* READ VERIFY */
	} else {
		/* XXX: This is for backward compatibility, will be
		 * removed.  Read Documentation/feature-removal-schedule.txt
		 * for more info.
		 */
		if ((qc->dev->flags & ATA_DFLAG_SPUNDOWN) &&
		    (system_state == SYSTEM_HALT ||
		     system_state == SYSTEM_POWER_OFF)) {
			static unsigned long warned = 0;

			if (!test_and_set_bit(0, &warned)) {
				ata_dev_printk(qc->dev, KERN_WARNING,
					"DISK MIGHT NOT BE SPUN DOWN PROPERLY. "
					"UPDATE SHUTDOWN UTILITY\n");
				ata_dev_printk(qc->dev, KERN_WARNING,
					"For more info, visit "
					"http://linux-ata.org/shutdown.html\n");

				/* ->scsi_done is not used, use it for
				 * delayed completion.
				 */
				scmd->scsi_done = qc->scsidone;
				qc->scsidone = ata_delayed_done;
			}
			scmd->result = SAM_STAT_GOOD;
			return 1;
		}

		/* Issue ATA STANDBY IMMEDIATE command */
		tf->command = ATA_CMD_STANDBYNOW1;
	}

	/*
	 * Standby and Idle condition timers could be implemented but that
	 * would require libata to implement the Power condition mode page
	 * and allow the user to change it.  Changing mode pages requires
	 * MODE SELECT to be implemented.
	 */

	return 0;

invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	return 1;
}


/**
 * ata_scsi_flush_xlat - Translate SCSI SYNCHRONIZE CACHE command
 * @qc: Storage for translated ATA taskfile
 *
 * Sets up an ATA taskfile to issue FLUSH CACHE or
 * FLUSH CACHE EXT.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_flush_xlat(struct ata_queued_cmd *qc)
{
	struct ata_taskfile *tf = &qc->tf;

	tf->flags |= ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	if (qc->dev->flags & ATA_DFLAG_FLUSH_EXT)
		tf->command = ATA_CMD_FLUSH_EXT;
	else
		tf->command = ATA_CMD_FLUSH;

	return 0;
}

/**
 * scsi_6_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 6-byte commands.
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static void scsi_6_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	u64 lba = 0;
	u32 len;

	VPRINTK("six-byte command\n");

	lba |= ((u64)(cdb[1] & 0x1f)) << 16;
	lba |= ((u64)cdb[2]) << 8;
	lba |= ((u64)cdb[3]);

	len = cdb[4];

	*plba = lba;
	*plen = len;
}

/**
 * scsi_10_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 10-byte commands.
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static void scsi_10_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	u64 lba = 0;
	u32 len = 0;

	VPRINTK("ten-byte command\n");

	lba |= ((u64)cdb[2]) << 24;
	lba |= ((u64)cdb[3]) << 16;
	lba |= ((u64)cdb[4]) << 8;
	lba |= ((u64)cdb[5]);

	len |= ((u32)cdb[7]) << 8;
	len |= ((u32)cdb[8]);

	*plba = lba;
	*plen = len;
}

/**
 * scsi_16_lba_len - Get LBA and transfer length
 * @cdb: SCSI command to translate
 *
 * Calculate LBA and transfer length for 16-byte commands.
 *
 * RETURNS:
 * @plba: the LBA
 * @plen: the transfer length
 */
static void scsi_16_lba_len(const u8 *cdb, u64 *plba, u32 *plen)
{
	u64 lba = 0;
	u32 len = 0;

	VPRINTK("sixteen-byte command\n");

	lba |= ((u64)cdb[2]) << 56;
	lba |= ((u64)cdb[3]) << 48;
	lba |= ((u64)cdb[4]) << 40;
	lba |= ((u64)cdb[5]) << 32;
	lba |= ((u64)cdb[6]) << 24;
	lba |= ((u64)cdb[7]) << 16;
	lba |= ((u64)cdb[8]) << 8;
	lba |= ((u64)cdb[9]);

	len |= ((u32)cdb[10]) << 24;
	len |= ((u32)cdb[11]) << 16;
	len |= ((u32)cdb[12]) << 8;
	len |= ((u32)cdb[13]);

	*plba = lba;
	*plen = len;
}
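
/*
 * Worked example for the helpers above: the READ(10) CDB
 * 28 00 00 00 08 00 00 00 10 00 decodes to LBA 0x800 (2048) and a transfer
 * length of 0x10 (16) blocks; with 512-byte sectors that is an 8 KiB read
 * starting at the 1 MiB boundary.
 */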

/**
 * ata_scsi_verify_xlat - Translate SCSI VERIFY command into an ATA one
 * @qc: Storage for translated ATA taskfile
 *
 * Converts SCSI VERIFY command to an ATA READ VERIFY command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_verify_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	struct ata_taskfile *tf = &qc->tf;
	struct ata_device *dev = qc->dev;
	u64 dev_sectors = qc->dev->n_sectors;
	const u8 *cdb = scmd->cmnd;
	u64 block;
	u32 n_block;

	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->protocol = ATA_PROT_NODATA;

	if (cdb[0] == VERIFY) {
		if (scmd->cmd_len < 10)
			goto invalid_fld;
		scsi_10_lba_len(cdb, &block, &n_block);
	} else if (cdb[0] == VERIFY_16) {
		if (scmd->cmd_len < 16)
			goto invalid_fld;
		scsi_16_lba_len(cdb, &block, &n_block);
	} else
		goto invalid_fld;

	if (!n_block)
		goto nothing_to_do;
	if (block >= dev_sectors)
		goto out_of_range;
	if ((block + n_block) > dev_sectors)
		goto out_of_range;

	if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->command = ATA_CMD_VERIFY;
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				goto out_of_range;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;
			tf->command = ATA_CMD_VERIFY_EXT;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			goto out_of_range;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		if (!lba_28_ok(block, n_block))
			goto out_of_range;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			goto out_of_range;

		tf->command = ATA_CMD_VERIFY;
		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;

invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	return 1;

out_of_range:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}

/**
 * ata_scsi_rw_xlat - Translate SCSI r/w command into an ATA one
 * @qc: Storage for translated ATA taskfile
 *
 * Converts any of six SCSI read/write commands into the
 * ATA counterpart, including starting sector (LBA),
 * sector count, and taking into account the device's LBA48
 * support.
 *
 * Commands %READ_6, %READ_10, %READ_16, %WRITE_6, %WRITE_10, and
 * %WRITE_16 are currently supported.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */
static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	const u8 *cdb = scmd->cmnd;
	unsigned int tf_flags = 0;
	u64 block;
	u32 n_block;
	int rc;

	if (cdb[0] == WRITE_10 || cdb[0] == WRITE_6 || cdb[0] == WRITE_16)
		tf_flags |= ATA_TFLAG_WRITE;

	/* Calculate the SCSI LBA, transfer length and FUA. */
	switch (cdb[0]) {
	case READ_10:
	case WRITE_10:
		if (unlikely(scmd->cmd_len < 10))
			goto invalid_fld;
		scsi_10_lba_len(cdb, &block, &n_block);
		if (unlikely(cdb[1] & (1 << 3)))
			tf_flags |= ATA_TFLAG_FUA;
		break;
	case READ_6:
	case WRITE_6:
		if (unlikely(scmd->cmd_len < 6))
			goto invalid_fld;
		scsi_6_lba_len(cdb, &block, &n_block);

		/* for 6-byte r/w commands, transfer length 0
		 * means 256 blocks of data, not 0 block.
		 */
		if (!n_block)
			n_block = 256;
		break;
	case READ_16:
	case WRITE_16:
		if (unlikely(scmd->cmd_len < 16))
			goto invalid_fld;
		scsi_16_lba_len(cdb, &block, &n_block);
		if (unlikely(cdb[1] & (1 << 3)))
			tf_flags |= ATA_TFLAG_FUA;
		break;
	default:
		DPRINTK("no-byte command\n");
		goto invalid_fld;
	}

	/* Check and compose ATA command */
	if (!n_block)
		/* For 10-byte and 16-byte SCSI R/W commands, transfer
		 * length 0 means transfer 0 block of data.
		 * However, for ATA R/W commands, sector count 0 means
		 * 256 or 65536 sectors, not 0 sectors as in SCSI.
		 *
		 * WARNING: one or two older ATA drives treat 0 as 0...
		 */
		goto nothing_to_do;

	qc->flags |= ATA_QCFLAG_IO;
	qc->nbytes = n_block * ATA_SECT_SIZE;

	rc = ata_build_rw_tf(&qc->tf, qc->dev, block, n_block, tf_flags,
			     qc->tag);
	if (likely(rc == 0))
		return 0;

	if (rc == -ERANGE)
		goto out_of_range;
	/* treat all other errors as -EINVAL, fall through */
invalid_fld:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x0);
	/* "Invalid field in cdb" */
	return 1;

out_of_range:
	ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x21, 0x0);
	/* "Logical Block Address out of range" */
	return 1;

nothing_to_do:
	scmd->result = SAM_STAT_GOOD;
	return 1;
}
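
/*
 * Worked example for the translation above: the READ(6) CDB
 * 08 00 00 00 00 00 carries LBA 0 and transfer length 0, which the 6-byte
 * rule turns into a 256-block read; an equivalent READ(10) with a zero
 * transfer length would instead complete immediately with no data moved.
 */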

static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *cmd = qc->scsicmd;
	u8 *cdb = cmd->cmnd;
	int need_sense = (qc->err_mask != 0);

	/* We snoop the SET_FEATURES - Write Cache ON/OFF command, and
	 * schedule EH_REVALIDATE operation to update the IDENTIFY DEVICE
	 * cache
	 */
	if (ap->ops->error_handler && !need_sense) {
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if ((qc->tf.feature == SETFEATURES_WC_ON) ||
			    (qc->tf.feature == SETFEATURES_WC_OFF)) {
				ap->link.eh_info.action |= ATA_EH_REVALIDATE;
				ata_port_schedule_eh(ap);
			}
			break;

		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			ap->link.eh_info.action |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;
		}
	}

	/* For ATA pass thru (SAT) commands, generate a sense block if
	 * user mandated it or if there's an error.  Note that if we
	 * generate because the user forced us to, a check condition
	 * is generated and the ATA register values are returned
	 * whether the command completed successfully or not.  If there
	 * was no error, SK, ASC and ASCQ will all be zero.
	 */
	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
	    ((cdb[2] & 0x20) || need_sense)) {
		ata_gen_passthru_sense(qc);
	} else {
		if (!need_sense) {
			cmd->result = SAM_STAT_GOOD;
		} else {
			/* TODO: decide which descriptor format to use
			 * for 48b LBA devices and call that here
			 * instead of the fixed desc, which is only
			 * good for smaller LBA (and maybe CHS?)
			 * devices.
			 */
			ata_gen_ata_sense(qc);
		}
	}

	/* XXX: track spindown state for spindown skipping and warning */
	if (unlikely(qc->tf.command == ATA_CMD_STANDBY ||
		     qc->tf.command == ATA_CMD_STANDBYNOW1))
		qc->dev->flags |= ATA_DFLAG_SPUNDOWN;
	else if (likely(system_state != SYSTEM_HALT &&
			system_state != SYSTEM_POWER_OFF))
		qc->dev->flags &= ~ATA_DFLAG_SPUNDOWN;

	if (need_sense && !ap->ops->error_handler)
		ata_dump_status(ap->print_id, &qc->result_tf);

	qc->scsidone(cmd);

	ata_qc_free(qc);
}

/**
 * ata_scsi_translate - Translate then issue SCSI command to ATA device
 * @dev: ATA device to which the command is addressed
 * @cmd: SCSI command to execute
 * @done: SCSI command completion function
 * @xlat_func: Actor which translates @cmd to an ATA taskfile
 *
 * Our ->queuecommand() function has decided that the SCSI
 * command issued can be directly translated into an ATA
 * command, rather than handled internally.
 *
 * This function sets up an ata_queued_cmd structure for the
 * SCSI command, and sends that ata_queued_cmd to the hardware.
 *
 * The xlat_func argument (actor) returns 0 if ready to execute
 * ATA command, else 1 to finish translation.  If 1 is returned
 * then cmd->result (and possibly cmd->sense_buffer) are assumed
 * to be set reflecting an error condition or clean (early)
 * termination.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * 0 on success, SCSI_ML_QUEUE_DEVICE_BUSY if the command
 * needs to be deferred.
 */
static int ata_scsi_translate(struct ata_device *dev, struct scsi_cmnd *cmd,
			      void (*done)(struct scsi_cmnd *),
			      ata_xlat_func_t xlat_func)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_queued_cmd *qc;
	int rc;

	VPRINTK("ENTER\n");

	qc = ata_scsi_qc_new(dev, cmd, done);
	if (!qc)
		goto err_mem;

	/* data is present; dma-map it */
	if (cmd->sc_data_direction == DMA_FROM_DEVICE ||
	    cmd->sc_data_direction == DMA_TO_DEVICE) {
		if (unlikely(scsi_bufflen(cmd) < 1)) {
			ata_dev_printk(dev, KERN_WARNING,
				       "WARNING: zero len r/w req\n");
			goto err_did;
		}

		ata_sg_init(qc, scsi_sglist(cmd), scsi_sg_count(cmd));

		qc->dma_dir = cmd->sc_data_direction;
	}

	qc->complete_fn = ata_scsi_qc_complete;

	if (xlat_func(qc))
		goto early_finish;

	if (ap->ops->qc_defer) {
		if ((rc = ap->ops->qc_defer(qc)))
			goto defer;
	}

	/* select device, send command to hardware */
	ata_qc_issue(qc);

	VPRINTK("EXIT\n");
	return 0;

early_finish:
	ata_qc_free(qc);
	qc->scsidone(cmd);
	DPRINTK("EXIT - early finish (good or error)\n");
	return 0;

err_did:
	ata_qc_free(qc);
	cmd->result = (DID_ERROR << 16);
	qc->scsidone(cmd);
err_mem:
	DPRINTK("EXIT - internal\n");
	return 0;

defer:
	ata_qc_free(qc);
	DPRINTK("EXIT - defer\n");
	if (rc == ATA_DEFER_LINK)
		return SCSI_MLQUEUE_DEVICE_BUSY;
	else
		return SCSI_MLQUEUE_HOST_BUSY;
}

/**
 * ata_scsi_rbuf_get - Map response buffer.
 * @cmd: SCSI command containing buffer to be mapped.
 * @buf_out: Pointer to mapped area.
 *
 * Maps buffer contained within SCSI command @cmd.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Length of response buffer.
 */

static unsigned int ata_scsi_rbuf_get(struct scsi_cmnd *cmd, u8 **buf_out)
{
	u8 *buf;
	unsigned int buflen;

	struct scatterlist *sg = scsi_sglist(cmd);

	if (sg) {
		buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
		buflen = sg->length;
	} else {
		buf = NULL;
		buflen = 0;
	}

	*buf_out = buf;
	return buflen;
}

/**
 * ata_scsi_rbuf_put - Unmap response buffer.
 * @cmd: SCSI command containing buffer to be unmapped.
 * @buf: buffer to unmap
 *
 * Unmaps response buffer contained within @cmd.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

static inline void ata_scsi_rbuf_put(struct scsi_cmnd *cmd, u8 *buf)
{
	struct scatterlist *sg = scsi_sglist(cmd);
	if (sg)
		kunmap_atomic(buf - sg->offset, KM_IRQ0);
}

/**
 * ata_scsi_rbuf_fill - wrapper for SCSI command simulators
 * @args: device IDENTIFY data / SCSI command of interest.
 * @actor: Callback hook for desired SCSI command simulator
 *
 * Takes care of the hard work of simulating a SCSI command...
 * Mapping the response buffer, calling the command's handler,
 * and handling the handler's return value.  This return value
 * indicates whether the handler wishes the SCSI command to be
 * completed successfully (0), or not (in which case cmd->result
 * and sense buffer are assumed to be set).
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
			unsigned int (*actor) (struct ata_scsi_args *args,
					       u8 *rbuf, unsigned int buflen))
{
	u8 *rbuf;
	unsigned int buflen, rc;
	struct scsi_cmnd *cmd = args->cmd;

	buflen = ata_scsi_rbuf_get(cmd, &rbuf);
	memset(rbuf, 0, buflen);
	rc = actor(args, rbuf, buflen);
	ata_scsi_rbuf_put(cmd, rbuf);

	if (rc == 0)
		cmd->result = SAM_STAT_GOOD;
	args->done(cmd);
}

/**
 * ATA_SCSI_RBUF_SET - helper to set values in SCSI response buffer
 * @idx: byte index into SCSI response buffer
 * @val: value to set
 *
 * To be used by SCSI command simulator functions.  This macro
 * expects two local variables, u8 *rbuf and unsigned int buflen,
 * to be in scope.
 *
 * LOCKING:
 * None.
 */
#define ATA_SCSI_RBUF_SET(idx, val) do { \
		if ((idx) < buflen) rbuf[(idx)] = (u8)(val); \
	} while (0)

/**
 * ata_scsiop_inq_std - Simulate INQUIRY command
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 * @buflen: Response buffer length.
 *
 * Returns standard device identification data associated
 * with non-VPD INQUIRY command output.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
				unsigned int buflen)
{
	u8 hdr[] = {
		TYPE_DISK,
		0,
		0x5,	/* claim SPC-3 version compatibility */
		2,
		95 - 4
	};

	/* set scsi removable (RMB) bit per ata bit */
	if (ata_id_removeable(args->id))
		hdr[1] |= (1 << 7);

	VPRINTK("ENTER\n");

	memcpy(rbuf, hdr, sizeof(hdr));

	if (buflen > 35) {
		memcpy(&rbuf[8], "ATA     ", 8);
		ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
		ata_id_string(args->id, &rbuf[32], ATA_ID_FW_REV, 4);
		if (rbuf[32] == 0 || rbuf[32] == ' ')
			memcpy(&rbuf[32], "n/a ", 4);
	}

	if (buflen > 63) {
		const u8 versions[] = {
			0x60,	/* SAM-3 (no version claimed) */

			0x03,
			0x20,	/* SBC-2 (no version claimed) */

			0x02,
			0x60	/* SPC-3 (no version claimed) */
		};

		memcpy(rbuf + 59, versions, sizeof(versions));
	}

	return 0;
}

/**
 * ata_scsiop_inq_00 - Simulate INQUIRY VPD page 0, list of pages
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 * @buflen: Response buffer length.
 *
 * Returns list of inquiry VPD pages available.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

unsigned int ata_scsiop_inq_00(struct ata_scsi_args *args, u8 *rbuf,
			       unsigned int buflen)
{
	const u8 pages[] = {
		0x00,	/* page 0x00, this page */
		0x80,	/* page 0x80, unit serial no page */
		0x83	/* page 0x83, device ident page */
	};
	rbuf[3] = sizeof(pages);	/* number of supported VPD pages */

	if (buflen > 6)
		memcpy(rbuf + 4, pages, sizeof(pages));

	return 0;
}

/**
 * ata_scsiop_inq_80 - Simulate INQUIRY VPD page 80, device serial number
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 * @buflen: Response buffer length.
 *
 * Returns ATA device serial number.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

unsigned int ata_scsiop_inq_80(struct ata_scsi_args *args, u8 *rbuf,
			       unsigned int buflen)
{
	const u8 hdr[] = {
		0,
		0x80,			/* this page code */
		0,
		ATA_ID_SERNO_LEN,	/* page len */
	};
	memcpy(rbuf, hdr, sizeof(hdr));

	if (buflen > (ATA_ID_SERNO_LEN + 4 - 1))
		ata_id_string(args->id, (unsigned char *) &rbuf[4],
			      ATA_ID_SERNO, ATA_ID_SERNO_LEN);

	return 0;
}

/**
 * ata_scsiop_inq_83 - Simulate INQUIRY VPD page 83, device identity
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 * @buflen: Response buffer length.
 *
 * Yields two logical unit device identification designators:
 *  - vendor specific ASCII containing the ATA serial number
 *  - SAT defined "t10 vendor id based" containing ASCII vendor
 *    name ("ATA     "), model and serial numbers.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

unsigned int ata_scsiop_inq_83(struct ata_scsi_args *args, u8 *rbuf,
			       unsigned int buflen)
{
	int num;
	const int sat_model_serial_desc_len = 68;

	rbuf[1] = 0x83;			/* this page code */
	num = 4;

	if (buflen > (ATA_ID_SERNO_LEN + num + 3)) {
		/* piv=0, assoc=lu, code_set=ASCII, designator=vendor */
		rbuf[num + 0] = 2;
		rbuf[num + 3] = ATA_ID_SERNO_LEN;
		num += 4;
		ata_id_string(args->id, (unsigned char *) rbuf + num,
			      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
		num += ATA_ID_SERNO_LEN;
	}
	if (buflen > (sat_model_serial_desc_len + num + 3)) {
		/* SAT defined lu model and serial numbers descriptor */
		/* piv=0, assoc=lu, code_set=ASCII, designator=t10 vendor id */
		rbuf[num + 0] = 2;
		rbuf[num + 1] = 1;
		rbuf[num + 3] = sat_model_serial_desc_len;
		num += 4;
		memcpy(rbuf + num, "ATA     ", 8);
		num += 8;
		ata_id_string(args->id, (unsigned char *) rbuf + num,
			      ATA_ID_PROD, ATA_ID_PROD_LEN);
		num += ATA_ID_PROD_LEN;
		ata_id_string(args->id, (unsigned char *) rbuf + num,
			      ATA_ID_SERNO, ATA_ID_SERNO_LEN);
		num += ATA_ID_SERNO_LEN;
	}
	rbuf[3] = num - 4;	/* page len (assume less than 256 bytes) */
	return 0;
}

/**
 * ata_scsiop_inq_89 - Simulate INQUIRY VPD page 89, ATA info
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 * @buflen: Response buffer length.
 *
 * Yields SAT-specified ATA VPD page.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

unsigned int ata_scsiop_inq_89(struct ata_scsi_args *args, u8 *rbuf,
			       unsigned int buflen)
{
	u8 pbuf[60];
	struct ata_taskfile tf;
	unsigned int i;

	if (!buflen)
		return 0;

	memset(&pbuf, 0, sizeof(pbuf));
	memset(&tf, 0, sizeof(tf));

	pbuf[1] = 0x89;			/* our page code */
	pbuf[2] = (0x238 >> 8);		/* page size fixed at 238h */
	pbuf[3] = (0x238 & 0xff);

	memcpy(&pbuf[8], "linux   ", 8);
	memcpy(&pbuf[16], "libata          ", 16);
	memcpy(&pbuf[32], DRV_VERSION, 4);
	ata_id_string(args->id, &pbuf[32], ATA_ID_FW_REV, 4);

	/* we don't store the ATA device signature, so we fake it */

	tf.command = ATA_DRDY;		/* really, this is Status reg */
	tf.lbal = 0x1;
	tf.nsect = 0x1;

	ata_tf_to_fis(&tf, 0, 1, &pbuf[36]);	/* TODO: PMP? */
	pbuf[36] = 0x34;		/* force D2H Reg FIS (34h) */

	pbuf[56] = ATA_CMD_ID_ATA;

	i = min(buflen, 60U);
	memcpy(rbuf, &pbuf[0], i);
	buflen -= i;

	if (!buflen)
		return 0;

	memcpy(&rbuf[60], &args->id[0], min(buflen, 512U));
	return 0;
}

/**
 * ata_scsiop_noop - Command handler that simply returns success.
 * @args: device IDENTIFY data / SCSI command of interest.
 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent.
 * @buflen: Response buffer length.
 *
 * No operation.  Simply returns success to caller, to indicate
 * that the caller should successfully complete this SCSI command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

unsigned int ata_scsiop_noop(struct ata_scsi_args *args, u8 *rbuf,
			     unsigned int buflen)
{
	VPRINTK("ENTER\n");
	return 0;
}

/**
 * ata_msense_push - Push data onto MODE SENSE data output buffer
 * @ptr_io: (input/output) Location to store more output data
 * @last: End of output data buffer
 * @buf: Pointer to BLOB being added to output buffer
 * @buflen: Length of BLOB
 *
 * Store MODE SENSE data on an output buffer.
 *
 * LOCKING:
 * None.
 */

static void ata_msense_push(u8 **ptr_io, const u8 *last,
			    const u8 *buf, unsigned int buflen)
{
	u8 *ptr = *ptr_io;

	if ((ptr + buflen - 1) > last)
		return;

	memcpy(ptr, buf, buflen);

	ptr += buflen;

	*ptr_io = ptr;
}

/**
 * ata_msense_caching - Simulate MODE SENSE caching info page
 * @id: device IDENTIFY data
 * @ptr_io: (input/output) Location to store more output data
 * @last: End of output data buffer
 *
 * Generate a caching info page, which conditionally indicates
 * write caching to the SCSI layer, depending on device
 * capabilities.
 *
 * LOCKING:
 * None.
 */

static unsigned int ata_msense_caching(u16 *id, u8 **ptr_io,
				       const u8 *last)
{
	u8 page[CACHE_MPAGE_LEN];

	memcpy(page, def_cache_mpage, sizeof(page));
	if (ata_id_wcache_enabled(id))
		page[2] |= (1 << 2);	/* write cache enable */
	if (!ata_id_rahead_enabled(id))
		page[12] |= (1 << 5);	/* disable read ahead */

	ata_msense_push(ptr_io, last, page, sizeof(page));
	return sizeof(page);
}

/**
 * ata_msense_ctl_mode - Simulate MODE SENSE control mode page
 * @dev: Device associated with this MODE SENSE command
 * @ptr_io: (input/output) Location to store more output data
 * @last: End of output data buffer
 *
 * Generate a generic MODE SENSE control mode page.
 *
 * LOCKING:
 * None.
 */

static unsigned int ata_msense_ctl_mode(u8 **ptr_io, const u8 *last)
{
	ata_msense_push(ptr_io, last, def_control_mpage,
			sizeof(def_control_mpage));
	return sizeof(def_control_mpage);
}

/**
 * ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
 * @dev: Device associated with this MODE SENSE command
 * @ptr_io: (input/output) Location to store more output data
 * @last: End of output data buffer
 *
 * Generate a generic MODE SENSE r/w error recovery page.
 *
 * LOCKING:
 * None.
 */

static unsigned int ata_msense_rw_recovery(u8 **ptr_io, const u8 *last)
{

	ata_msense_push(ptr_io, last, def_rw_recovery_mpage,
			sizeof(def_rw_recovery_mpage));
	return sizeof(def_rw_recovery_mpage);
}

/*
 * We can turn this into a real blacklist if it's needed, for now just
 * blacklist any Maxtor BANC1G10 revision firmware
 */
static int ata_dev_supports_fua(u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1], fw[ATA_ID_FW_REV_LEN + 1];

	if (!libata_fua)
		return 0;
	if (!ata_id_has_fua(id))
		return 0;

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	ata_id_c_string(id, fw, ATA_ID_FW_REV, sizeof(fw));

	if (strcmp(model, "Maxtor"))
		return 1;
	if (strcmp(fw, "BANC1G10"))
		return 1;

	return 0;	/* blacklisted */
}
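
/*
 * For reference when reading ata_scsiop_mode_sense() below: a MODE SENSE(6)
 * reply starts with a 4-byte header (mode data length, medium type,
 * device-specific byte carrying DPOFUA in bit 4, block descriptor length),
 * while MODE SENSE(10) uses an 8-byte header with a 2-byte mode data length
 * and the block descriptor length in bytes 6-7.  The mode pages follow the
 * optional block descriptor.
 */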
2000 * 2001 * LOCKING: 2002 * spin_lock_irqsave(host lock) 2003 */ 2004 2005 unsigned int ata_scsiop_mode_sense(struct ata_scsi_args *args, u8 *rbuf, 2006 unsigned int buflen) 2007 { 2008 struct ata_device *dev = args->dev; 2009 u8 *scsicmd = args->cmd->cmnd, *p, *last; 2010 const u8 sat_blk_desc[] = { 2011 0, 0, 0, 0, /* number of blocks: sat unspecified */ 2012 0, 2013 0, 0x2, 0x0 /* block length: 512 bytes */ 2014 }; 2015 u8 pg, spg; 2016 unsigned int ebd, page_control, six_byte, output_len, alloc_len, minlen; 2017 u8 dpofua; 2018 2019 VPRINTK("ENTER\n"); 2020 2021 six_byte = (scsicmd[0] == MODE_SENSE); 2022 ebd = !(scsicmd[1] & 0x8); /* dbd bit inverted == edb */ 2023 /* 2024 * LLBA bit in msense(10) ignored (compliant) 2025 */ 2026 2027 page_control = scsicmd[2] >> 6; 2028 switch (page_control) { 2029 case 0: /* current */ 2030 break; /* supported */ 2031 case 3: /* saved */ 2032 goto saving_not_supp; 2033 case 1: /* changeable */ 2034 case 2: /* defaults */ 2035 default: 2036 goto invalid_fld; 2037 } 2038 2039 if (six_byte) { 2040 output_len = 4 + (ebd ? 8 : 0); 2041 alloc_len = scsicmd[4]; 2042 } else { 2043 output_len = 8 + (ebd ? 8 : 0); 2044 alloc_len = (scsicmd[7] << 8) + scsicmd[8]; 2045 } 2046 minlen = (alloc_len < buflen) ? alloc_len : buflen; 2047 2048 p = rbuf + output_len; 2049 last = rbuf + minlen - 1; 2050 2051 pg = scsicmd[2] & 0x3f; 2052 spg = scsicmd[3]; 2053 /* 2054 * No mode subpages supported (yet) but asking for _all_ 2055 * subpages may be valid 2056 */ 2057 if (spg && (spg != ALL_SUB_MPAGES)) 2058 goto invalid_fld; 2059 2060 switch(pg) { 2061 case RW_RECOVERY_MPAGE: 2062 output_len += ata_msense_rw_recovery(&p, last); 2063 break; 2064 2065 case CACHE_MPAGE: 2066 output_len += ata_msense_caching(args->id, &p, last); 2067 break; 2068 2069 case CONTROL_MPAGE: { 2070 output_len += ata_msense_ctl_mode(&p, last); 2071 break; 2072 } 2073 2074 case ALL_MPAGES: 2075 output_len += ata_msense_rw_recovery(&p, last); 2076 output_len += ata_msense_caching(args->id, &p, last); 2077 output_len += ata_msense_ctl_mode(&p, last); 2078 break; 2079 2080 default: /* invalid page code */ 2081 goto invalid_fld; 2082 } 2083 2084 if (minlen < 1) 2085 return 0; 2086 2087 dpofua = 0; 2088 if (ata_dev_supports_fua(args->id) && (dev->flags & ATA_DFLAG_LBA48) && 2089 (!(dev->flags & ATA_DFLAG_PIO) || dev->multi_count)) 2090 dpofua = 1 << 4; 2091 2092 if (six_byte) { 2093 output_len--; 2094 rbuf[0] = output_len; 2095 if (minlen > 2) 2096 rbuf[2] |= dpofua; 2097 if (ebd) { 2098 if (minlen > 3) 2099 rbuf[3] = sizeof(sat_blk_desc); 2100 if (minlen > 11) 2101 memcpy(rbuf + 4, sat_blk_desc, 2102 sizeof(sat_blk_desc)); 2103 } 2104 } else { 2105 output_len -= 2; 2106 rbuf[0] = output_len >> 8; 2107 if (minlen > 1) 2108 rbuf[1] = output_len; 2109 if (minlen > 3) 2110 rbuf[3] |= dpofua; 2111 if (ebd) { 2112 if (minlen > 7) 2113 rbuf[7] = sizeof(sat_blk_desc); 2114 if (minlen > 15) 2115 memcpy(rbuf + 8, sat_blk_desc, 2116 sizeof(sat_blk_desc)); 2117 } 2118 } 2119 return 0; 2120 2121 invalid_fld: 2122 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x24, 0x0); 2123 /* "Invalid field in cbd" */ 2124 return 1; 2125 2126 saving_not_supp: 2127 ata_scsi_set_sense(args->cmd, ILLEGAL_REQUEST, 0x39, 0x0); 2128 /* "Saving parameters not supported" */ 2129 return 1; 2130 } 2131 2132 /** 2133 * ata_scsiop_read_cap - Simulate READ CAPACITY[ 16] commands 2134 * @args: device IDENTIFY data / SCSI command of interest. 2135 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 
2136 * @buflen: Response buffer length. 2137 * 2138 * Simulate READ CAPACITY commands. 2139 * 2140 * LOCKING: 2141 * None. 2142 */ 2143 unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf, 2144 unsigned int buflen) 2145 { 2146 u64 last_lba = args->dev->n_sectors - 1; /* LBA of the last block */ 2147 2148 VPRINTK("ENTER\n"); 2149 2150 if (args->cmd->cmnd[0] == READ_CAPACITY) { 2151 if (last_lba >= 0xffffffffULL) 2152 last_lba = 0xffffffff; 2153 2154 /* sector count, 32-bit */ 2155 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 3)); 2156 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 2)); 2157 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 1)); 2158 ATA_SCSI_RBUF_SET(3, last_lba); 2159 2160 /* sector size */ 2161 ATA_SCSI_RBUF_SET(6, ATA_SECT_SIZE >> 8); 2162 ATA_SCSI_RBUF_SET(7, ATA_SECT_SIZE); 2163 } else { 2164 /* sector count, 64-bit */ 2165 ATA_SCSI_RBUF_SET(0, last_lba >> (8 * 7)); 2166 ATA_SCSI_RBUF_SET(1, last_lba >> (8 * 6)); 2167 ATA_SCSI_RBUF_SET(2, last_lba >> (8 * 5)); 2168 ATA_SCSI_RBUF_SET(3, last_lba >> (8 * 4)); 2169 ATA_SCSI_RBUF_SET(4, last_lba >> (8 * 3)); 2170 ATA_SCSI_RBUF_SET(5, last_lba >> (8 * 2)); 2171 ATA_SCSI_RBUF_SET(6, last_lba >> (8 * 1)); 2172 ATA_SCSI_RBUF_SET(7, last_lba); 2173 2174 /* sector size */ 2175 ATA_SCSI_RBUF_SET(10, ATA_SECT_SIZE >> 8); 2176 ATA_SCSI_RBUF_SET(11, ATA_SECT_SIZE); 2177 } 2178 2179 return 0; 2180 } 2181 2182 /** 2183 * ata_scsiop_report_luns - Simulate REPORT LUNS command 2184 * @args: device IDENTIFY data / SCSI command of interest. 2185 * @rbuf: Response buffer, to which simulated SCSI cmd output is sent. 2186 * @buflen: Response buffer length. 2187 * 2188 * Simulate REPORT LUNS command. 2189 * 2190 * LOCKING: 2191 * spin_lock_irqsave(host lock) 2192 */ 2193 2194 unsigned int ata_scsiop_report_luns(struct ata_scsi_args *args, u8 *rbuf, 2195 unsigned int buflen) 2196 { 2197 VPRINTK("ENTER\n"); 2198 rbuf[3] = 8; /* just one lun, LUN 0, size 8 bytes */ 2199 2200 return 0; 2201 } 2202 2203 /** 2204 * ata_scsi_set_sense - Set SCSI sense data and status 2205 * @cmd: SCSI request to be handled 2206 * @sk: SCSI-defined sense key 2207 * @asc: SCSI-defined additional sense code 2208 * @ascq: SCSI-defined additional sense code qualifier 2209 * 2210 * Helper function that builds a valid fixed format, current 2211 * response code and the given sense key (sk), additional sense 2212 * code (asc) and additional sense code qualifier (ascq) with 2213 * a SCSI command status of %SAM_STAT_CHECK_CONDITION and 2214 * DRIVER_SENSE set in the upper bits of scsi_cmnd::result . 2215 * 2216 * LOCKING: 2217 * Not required 2218 */ 2219 2220 void ata_scsi_set_sense(struct scsi_cmnd *cmd, u8 sk, u8 asc, u8 ascq) 2221 { 2222 cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION; 2223 2224 cmd->sense_buffer[0] = 0x70; /* fixed format, current */ 2225 cmd->sense_buffer[2] = sk; 2226 cmd->sense_buffer[7] = 18 - 8; /* additional sense length */ 2227 cmd->sense_buffer[12] = asc; 2228 cmd->sense_buffer[13] = ascq; 2229 } 2230 2231 /** 2232 * ata_scsi_badcmd - End a SCSI request with an error 2233 * @cmd: SCSI request to be handled 2234 * @done: SCSI command completion function 2235 * @asc: SCSI-defined additional sense code 2236 * @ascq: SCSI-defined additional sense code qualifier 2237 * 2238 * Helper function that completes a SCSI command with 2239 * %SAM_STAT_CHECK_CONDITION, with a sense key %ILLEGAL_REQUEST 2240 * and the specified additional sense codes. 
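 *
 * Hypothetical use, following the sense-code conventions used elsewhere
 * in this file (20h/00h is "invalid command operation code"):
 *
 *	ata_scsi_badcmd(cmd, done, 0x20, 0x00);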
2241 * 2242 * LOCKING: 2243 * spin_lock_irqsave(host lock) 2244 */ 2245 2246 void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8 asc, u8 ascq) 2247 { 2248 DPRINTK("ENTER\n"); 2249 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, asc, ascq); 2250 2251 done(cmd); 2252 } 2253 2254 static void atapi_sense_complete(struct ata_queued_cmd *qc) 2255 { 2256 if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) { 2257 /* FIXME: not quite right; we don't want the 2258 * translation of taskfile registers into 2259 * a sense descriptors, since that's only 2260 * correct for ATA, not ATAPI 2261 */ 2262 ata_gen_passthru_sense(qc); 2263 } 2264 2265 qc->scsidone(qc->scsicmd); 2266 ata_qc_free(qc); 2267 } 2268 2269 /* is it pointless to prefer PIO for "safety reasons"? */ 2270 static inline int ata_pio_use_silly(struct ata_port *ap) 2271 { 2272 return (ap->flags & ATA_FLAG_PIO_DMA); 2273 } 2274 2275 static void atapi_request_sense(struct ata_queued_cmd *qc) 2276 { 2277 struct ata_port *ap = qc->ap; 2278 struct scsi_cmnd *cmd = qc->scsicmd; 2279 2280 DPRINTK("ATAPI request sense\n"); 2281 2282 /* FIXME: is this needed? */ 2283 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer)); 2284 2285 ap->ops->tf_read(ap, &qc->tf); 2286 2287 /* fill these in, for the case where they are -not- overwritten */ 2288 cmd->sense_buffer[0] = 0x70; 2289 cmd->sense_buffer[2] = qc->tf.feature >> 4; 2290 2291 ata_qc_reinit(qc); 2292 2293 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer)); 2294 qc->dma_dir = DMA_FROM_DEVICE; 2295 2296 memset(&qc->cdb, 0, qc->dev->cdb_len); 2297 qc->cdb[0] = REQUEST_SENSE; 2298 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE; 2299 2300 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2301 qc->tf.command = ATA_CMD_PACKET; 2302 2303 if (ata_pio_use_silly(ap)) { 2304 qc->tf.protocol = ATA_PROT_ATAPI_DMA; 2305 qc->tf.feature |= ATAPI_PKT_DMA; 2306 } else { 2307 qc->tf.protocol = ATA_PROT_ATAPI; 2308 qc->tf.lbam = SCSI_SENSE_BUFFERSIZE; 2309 qc->tf.lbah = 0; 2310 } 2311 qc->nbytes = SCSI_SENSE_BUFFERSIZE; 2312 2313 qc->complete_fn = atapi_sense_complete; 2314 2315 ata_qc_issue(qc); 2316 2317 DPRINTK("EXIT\n"); 2318 } 2319 2320 static void atapi_qc_complete(struct ata_queued_cmd *qc) 2321 { 2322 struct scsi_cmnd *cmd = qc->scsicmd; 2323 unsigned int err_mask = qc->err_mask; 2324 2325 VPRINTK("ENTER, err_mask 0x%X\n", err_mask); 2326 2327 /* handle completion from new EH */ 2328 if (unlikely(qc->ap->ops->error_handler && 2329 (err_mask || qc->flags & ATA_QCFLAG_SENSE_VALID))) { 2330 2331 if (!(qc->flags & ATA_QCFLAG_SENSE_VALID)) { 2332 /* FIXME: not quite right; we don't want the 2333 * translation of taskfile registers into a 2334 * sense descriptors, since that's only 2335 * correct for ATA, not ATAPI 2336 */ 2337 ata_gen_passthru_sense(qc); 2338 } 2339 2340 /* SCSI EH automatically locks door if sdev->locked is 2341 * set. Sometimes door lock request continues to 2342 * fail, for example, when no media is present. This 2343 * creates a loop - SCSI EH issues door lock which 2344 * fails and gets invoked again to acquire sense data 2345 * for the failed command. 2346 * 2347 * If door lock fails, always clear sdev->locked to 2348 * avoid this infinite loop. 
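	 * (Clearing sdev->locked below, when the failed command was
	 * ALLOW_MEDIUM_REMOVAL, is what breaks that loop.)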
2349 */ 2350 if (qc->cdb[0] == ALLOW_MEDIUM_REMOVAL) 2351 qc->dev->sdev->locked = 0; 2352 2353 qc->scsicmd->result = SAM_STAT_CHECK_CONDITION; 2354 qc->scsidone(cmd); 2355 ata_qc_free(qc); 2356 return; 2357 } 2358 2359 /* successful completion or old EH failure path */ 2360 if (unlikely(err_mask & AC_ERR_DEV)) { 2361 cmd->result = SAM_STAT_CHECK_CONDITION; 2362 atapi_request_sense(qc); 2363 return; 2364 } else if (unlikely(err_mask)) { 2365 /* FIXME: not quite right; we don't want the 2366 * translation of taskfile registers into 2367 * a sense descriptors, since that's only 2368 * correct for ATA, not ATAPI 2369 */ 2370 ata_gen_passthru_sense(qc); 2371 } else { 2372 u8 *scsicmd = cmd->cmnd; 2373 2374 if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) { 2375 u8 *buf = NULL; 2376 unsigned int buflen; 2377 2378 buflen = ata_scsi_rbuf_get(cmd, &buf); 2379 2380 /* ATAPI devices typically report zero for their SCSI version, 2381 * and sometimes deviate from the spec WRT response data 2382 * format. If SCSI version is reported as zero like normal, 2383 * then we make the following fixups: 1) Fake MMC-5 version, 2384 * to indicate to the Linux scsi midlayer this is a modern 2385 * device. 2) Ensure response data format / ATAPI information 2386 * are always correct. 2387 */ 2388 if (buf[2] == 0) { 2389 buf[2] = 0x5; 2390 buf[3] = 0x32; 2391 } 2392 2393 ata_scsi_rbuf_put(cmd, buf); 2394 } 2395 2396 cmd->result = SAM_STAT_GOOD; 2397 } 2398 2399 qc->scsidone(cmd); 2400 ata_qc_free(qc); 2401 } 2402 /** 2403 * atapi_xlat - Initialize PACKET taskfile 2404 * @qc: command structure to be initialized 2405 * 2406 * LOCKING: 2407 * spin_lock_irqsave(host lock) 2408 * 2409 * RETURNS: 2410 * Zero on success, non-zero on failure. 2411 */ 2412 static unsigned int atapi_xlat(struct ata_queued_cmd *qc) 2413 { 2414 struct scsi_cmnd *scmd = qc->scsicmd; 2415 struct ata_device *dev = qc->dev; 2416 int using_pio = (dev->flags & ATA_DFLAG_PIO); 2417 int nodata = (scmd->sc_data_direction == DMA_NONE); 2418 unsigned int nbytes; 2419 2420 memset(qc->cdb, 0, dev->cdb_len); 2421 memcpy(qc->cdb, scmd->cmnd, scmd->cmd_len); 2422 2423 qc->complete_fn = atapi_qc_complete; 2424 2425 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2426 if (scmd->sc_data_direction == DMA_TO_DEVICE) { 2427 qc->tf.flags |= ATA_TFLAG_WRITE; 2428 DPRINTK("direction: write\n"); 2429 } 2430 2431 qc->tf.command = ATA_CMD_PACKET; 2432 qc->nbytes = scsi_bufflen(scmd); 2433 2434 /* check whether ATAPI DMA is safe */ 2435 if (!using_pio && ata_check_atapi_dma(qc)) 2436 using_pio = 1; 2437 2438 /* Some controller variants snoop this value for Packet transfers 2439 to do state machine and FIFO management. 
Thus we want to set it 2440 properly, and for DMA where it is effectively meaningless */ 2441 nbytes = min(qc->nbytes, (unsigned int)63 * 1024); 2442 2443 qc->tf.lbam = (nbytes & 0xFF); 2444 qc->tf.lbah = (nbytes >> 8); 2445 2446 if (using_pio || nodata) { 2447 /* no data, or PIO data xfer */ 2448 if (nodata) 2449 qc->tf.protocol = ATA_PROT_ATAPI_NODATA; 2450 else 2451 qc->tf.protocol = ATA_PROT_ATAPI; 2452 } else { 2453 /* DMA data xfer */ 2454 qc->tf.protocol = ATA_PROT_ATAPI_DMA; 2455 qc->tf.feature |= ATAPI_PKT_DMA; 2456 2457 if (atapi_dmadir && (scmd->sc_data_direction != DMA_TO_DEVICE)) 2458 /* some SATA bridges need us to indicate data xfer direction */ 2459 qc->tf.feature |= ATAPI_DMADIR; 2460 } 2461 2462 2463 /* FIXME: We need to translate 0x05 READ_BLOCK_LIMITS to a MODE_SENSE 2464 as ATAPI tape drives don't get this right otherwise */ 2465 return 0; 2466 } 2467 2468 static struct ata_device * ata_find_dev(struct ata_port *ap, int devno) 2469 { 2470 if (ap->nr_pmp_links == 0) { 2471 if (likely(devno < ata_link_max_devices(&ap->link))) 2472 return &ap->link.device[devno]; 2473 } else { 2474 if (likely(devno < ap->nr_pmp_links)) 2475 return &ap->pmp_link[devno].device[0]; 2476 } 2477 2478 return NULL; 2479 } 2480 2481 static struct ata_device * __ata_scsi_find_dev(struct ata_port *ap, 2482 const struct scsi_device *scsidev) 2483 { 2484 int devno; 2485 2486 /* skip commands not addressed to targets we simulate */ 2487 if (ap->nr_pmp_links == 0) { 2488 if (unlikely(scsidev->channel || scsidev->lun)) 2489 return NULL; 2490 devno = scsidev->id; 2491 } else { 2492 if (unlikely(scsidev->id || scsidev->lun)) 2493 return NULL; 2494 devno = scsidev->channel; 2495 } 2496 2497 return ata_find_dev(ap, devno); 2498 } 2499 2500 /** 2501 * ata_scsi_dev_enabled - determine if device is enabled 2502 * @dev: ATA device 2503 * 2504 * Determine if commands should be sent to the specified device. 2505 * 2506 * LOCKING: 2507 * spin_lock_irqsave(host lock) 2508 * 2509 * RETURNS: 2510 * 0 if commands are not allowed / 1 if commands are allowed 2511 */ 2512 2513 static int ata_scsi_dev_enabled(struct ata_device *dev) 2514 { 2515 if (unlikely(!ata_dev_enabled(dev))) 2516 return 0; 2517 2518 if (!atapi_enabled || (dev->link->ap->flags & ATA_FLAG_NO_ATAPI)) { 2519 if (unlikely(dev->class == ATA_DEV_ATAPI)) { 2520 ata_dev_printk(dev, KERN_WARNING, 2521 "WARNING: ATAPI is %s, device ignored.\n", 2522 atapi_enabled ? "not supported with this driver" : "disabled"); 2523 return 0; 2524 } 2525 } 2526 2527 return 1; 2528 } 2529 2530 /** 2531 * ata_scsi_find_dev - lookup ata_device from scsi_cmnd 2532 * @ap: ATA port to which the device is attached 2533 * @scsidev: SCSI device from which we derive the ATA device 2534 * 2535 * Given various information provided in struct scsi_cmnd, 2536 * map that onto an ATA bus, and using that mapping 2537 * determine which ata_device is associated with the 2538 * SCSI command to be sent. 2539 * 2540 * LOCKING: 2541 * spin_lock_irqsave(host lock) 2542 * 2543 * RETURNS: 2544 * Associated ATA device, or %NULL if not found. 2545 */ 2546 static struct ata_device * 2547 ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev) 2548 { 2549 struct ata_device *dev = __ata_scsi_find_dev(ap, scsidev); 2550 2551 if (unlikely(!dev || !ata_scsi_dev_enabled(dev))) 2552 return NULL; 2553 2554 return dev; 2555 } 2556 2557 /* 2558 * ata_scsi_map_proto - Map pass-thru protocol value to taskfile value. 2559 * @byte1: Byte 1 from pass-thru CDB. 
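 *
 * The protocol value is carried in bits 4:1 of @byte1. A few examples
 * of the mapping implemented below:
 *
 *	(byte1 & 0x1e) >> 1 == 3  ->  ATA_PROT_NODATA  (non-data)
 *	(byte1 & 0x1e) >> 1 == 4  ->  ATA_PROT_PIO     (PIO data-in)
 *	(byte1 & 0x1e) >> 1 == 6  ->  ATA_PROT_DMA     (DMA)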
2560 * 2561 * RETURNS: 2562 * ATA_PROT_UNKNOWN if mapping failed/unimplemented, protocol otherwise. 2563 */ 2564 static u8 2565 ata_scsi_map_proto(u8 byte1) 2566 { 2567 switch((byte1 & 0x1e) >> 1) { 2568 case 3: /* Non-data */ 2569 return ATA_PROT_NODATA; 2570 2571 case 6: /* DMA */ 2572 case 10: /* UDMA Data-in */ 2573 case 11: /* UDMA Data-Out */ 2574 return ATA_PROT_DMA; 2575 2576 case 4: /* PIO Data-in */ 2577 case 5: /* PIO Data-out */ 2578 return ATA_PROT_PIO; 2579 2580 case 0: /* Hard Reset */ 2581 case 1: /* SRST */ 2582 case 8: /* Device Diagnostic */ 2583 case 9: /* Device Reset */ 2584 case 7: /* DMA Queued */ 2585 case 12: /* FPDMA */ 2586 case 15: /* Return Response Info */ 2587 default: /* Reserved */ 2588 break; 2589 } 2590 2591 return ATA_PROT_UNKNOWN; 2592 } 2593 2594 /** 2595 * ata_scsi_pass_thru - convert ATA pass-thru CDB to taskfile 2596 * @qc: command structure to be initialized 2597 * 2598 * Handles either 12 or 16-byte versions of the CDB. 2599 * 2600 * RETURNS: 2601 * Zero on success, non-zero on failure. 2602 */ 2603 static unsigned int ata_scsi_pass_thru(struct ata_queued_cmd *qc) 2604 { 2605 struct ata_taskfile *tf = &(qc->tf); 2606 struct scsi_cmnd *scmd = qc->scsicmd; 2607 struct ata_device *dev = qc->dev; 2608 const u8 *cdb = scmd->cmnd; 2609 2610 if ((tf->protocol = ata_scsi_map_proto(cdb[1])) == ATA_PROT_UNKNOWN) 2611 goto invalid_fld; 2612 2613 /* We may not issue DMA commands if no DMA mode is set */ 2614 if (tf->protocol == ATA_PROT_DMA && dev->dma_mode == 0) 2615 goto invalid_fld; 2616 2617 /* 2618 * 12 and 16 byte CDBs use different offsets to 2619 * provide the various register values. 2620 */ 2621 if (cdb[0] == ATA_16) { 2622 /* 2623 * 16-byte CDB - may contain extended commands. 2624 * 2625 * If that is the case, copy the upper byte register values. 2626 */ 2627 if (cdb[1] & 0x01) { 2628 tf->hob_feature = cdb[3]; 2629 tf->hob_nsect = cdb[5]; 2630 tf->hob_lbal = cdb[7]; 2631 tf->hob_lbam = cdb[9]; 2632 tf->hob_lbah = cdb[11]; 2633 tf->flags |= ATA_TFLAG_LBA48; 2634 } else 2635 tf->flags &= ~ATA_TFLAG_LBA48; 2636 2637 /* 2638 * Always copy low byte, device and command registers. 2639 */ 2640 tf->feature = cdb[4]; 2641 tf->nsect = cdb[6]; 2642 tf->lbal = cdb[8]; 2643 tf->lbam = cdb[10]; 2644 tf->lbah = cdb[12]; 2645 tf->device = cdb[13]; 2646 tf->command = cdb[14]; 2647 } else { 2648 /* 2649 * 12-byte CDB - incapable of extended commands. 2650 */ 2651 tf->flags &= ~ATA_TFLAG_LBA48; 2652 2653 tf->feature = cdb[3]; 2654 tf->nsect = cdb[4]; 2655 tf->lbal = cdb[5]; 2656 tf->lbam = cdb[6]; 2657 tf->lbah = cdb[7]; 2658 tf->device = cdb[8]; 2659 tf->command = cdb[9]; 2660 } 2661 2662 /* enforce correct master/slave bit */ 2663 tf->device = dev->devno ? 
2664 tf->device | ATA_DEV1 : tf->device & ~ATA_DEV1; 2665 2666 /* sanity check for pio multi commands */ 2667 if ((cdb[1] & 0xe0) && !is_multi_taskfile(tf)) 2668 goto invalid_fld; 2669 2670 if (is_multi_taskfile(tf)) { 2671 unsigned int multi_count = 1 << (cdb[1] >> 5); 2672 2673 /* compare the passed through multi_count 2674 * with the cached multi_count of libata 2675 */ 2676 if (multi_count != dev->multi_count) 2677 ata_dev_printk(dev, KERN_WARNING, 2678 "invalid multi_count %u ignored\n", 2679 multi_count); 2680 } 2681 2682 /* READ/WRITE LONG use a non-standard sect_size */ 2683 qc->sect_size = ATA_SECT_SIZE; 2684 switch (tf->command) { 2685 case ATA_CMD_READ_LONG: 2686 case ATA_CMD_READ_LONG_ONCE: 2687 case ATA_CMD_WRITE_LONG: 2688 case ATA_CMD_WRITE_LONG_ONCE: 2689 if (tf->protocol != ATA_PROT_PIO || tf->nsect != 1) 2690 goto invalid_fld; 2691 qc->sect_size = scsi_bufflen(scmd); 2692 } 2693 2694 /* 2695 * Filter SET_FEATURES - XFER MODE command -- otherwise, 2696 * SET_FEATURES - XFER MODE must be preceded/succeeded 2697 * by an update to hardware-specific registers for each 2698 * controller (i.e. the reason for ->set_piomode(), 2699 * ->set_dmamode(), and ->post_set_mode() hooks). 2700 */ 2701 if ((tf->command == ATA_CMD_SET_FEATURES) 2702 && (tf->feature == SETFEATURES_XFER)) 2703 goto invalid_fld; 2704 2705 /* 2706 * Set flags so that all registers will be written, 2707 * and pass on write indication (used for PIO/DMA 2708 * setup.) 2709 */ 2710 tf->flags |= (ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE); 2711 2712 if (scmd->sc_data_direction == DMA_TO_DEVICE) 2713 tf->flags |= ATA_TFLAG_WRITE; 2714 2715 /* 2716 * Set transfer length. 2717 * 2718 * TODO: find out if we need to do more here to 2719 * cover scatter/gather case. 2720 */ 2721 qc->nbytes = scsi_bufflen(scmd); 2722 2723 /* request result TF */ 2724 qc->flags |= ATA_QCFLAG_RESULT_TF; 2725 2726 return 0; 2727 2728 invalid_fld: 2729 ata_scsi_set_sense(scmd, ILLEGAL_REQUEST, 0x24, 0x00); 2730 /* "Invalid field in cdb" */ 2731 return 1; 2732 } 2733 2734 /** 2735 * ata_get_xlat_func - check if SCSI to ATA translation is possible 2736 * @dev: ATA device 2737 * @cmd: SCSI command opcode to consider 2738 * 2739 * Look up the SCSI command given, and determine whether the 2740 * SCSI command is to be translated or simulated. 2741 * 2742 * RETURNS: 2743 * Pointer to translation function if possible, %NULL if not. 2744 */ 2745 2746 static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd) 2747 { 2748 switch (cmd) { 2749 case READ_6: 2750 case READ_10: 2751 case READ_16: 2752 2753 case WRITE_6: 2754 case WRITE_10: 2755 case WRITE_16: 2756 return ata_scsi_rw_xlat; 2757 2758 case SYNCHRONIZE_CACHE: 2759 if (ata_try_flush_cache(dev)) 2760 return ata_scsi_flush_xlat; 2761 break; 2762 2763 case VERIFY: 2764 case VERIFY_16: 2765 return ata_scsi_verify_xlat; 2766 2767 case ATA_12: 2768 case ATA_16: 2769 return ata_scsi_pass_thru; 2770 2771 case START_STOP: 2772 return ata_scsi_start_stop_xlat; 2773 } 2774 2775 return NULL; 2776 } 2777 2778 /** 2779 * ata_scsi_dump_cdb - dump SCSI command contents to dmesg 2780 * @ap: ATA port to which the command was being sent 2781 * @cmd: SCSI command to dump 2782 * 2783 * Prints the contents of a SCSI command via printk(). 
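 *
 * Only produces output when ATA_DEBUG is defined; the line looks
 * roughly like this (hypothetical READ(10) CDB shown):
 *
 *	CDB (1:0,0,0) 28 00 00 00 00 20 00 00 08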
2784 */ 2785 2786 static inline void ata_scsi_dump_cdb(struct ata_port *ap, 2787 struct scsi_cmnd *cmd) 2788 { 2789 #ifdef ATA_DEBUG 2790 struct scsi_device *scsidev = cmd->device; 2791 u8 *scsicmd = cmd->cmnd; 2792 2793 DPRINTK("CDB (%u:%d,%d,%d) %02x %02x %02x %02x %02x %02x %02x %02x %02x\n", 2794 ap->print_id, 2795 scsidev->channel, scsidev->id, scsidev->lun, 2796 scsicmd[0], scsicmd[1], scsicmd[2], scsicmd[3], 2797 scsicmd[4], scsicmd[5], scsicmd[6], scsicmd[7], 2798 scsicmd[8]); 2799 #endif 2800 } 2801 2802 static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, 2803 void (*done)(struct scsi_cmnd *), 2804 struct ata_device *dev) 2805 { 2806 u8 scsi_op = scmd->cmnd[0]; 2807 ata_xlat_func_t xlat_func; 2808 int rc = 0; 2809 2810 if (dev->class == ATA_DEV_ATA) { 2811 if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) 2812 goto bad_cdb_len; 2813 2814 xlat_func = ata_get_xlat_func(dev, scsi_op); 2815 } else { 2816 if (unlikely(!scmd->cmd_len)) 2817 goto bad_cdb_len; 2818 2819 xlat_func = NULL; 2820 if (likely((scsi_op != ATA_16) || !atapi_passthru16)) { 2821 /* relay SCSI command to ATAPI device */ 2822 if (unlikely(scmd->cmd_len > dev->cdb_len)) 2823 goto bad_cdb_len; 2824 2825 xlat_func = atapi_xlat; 2826 } else { 2827 /* ATA_16 passthru, treat as an ATA command */ 2828 if (unlikely(scmd->cmd_len > 16)) 2829 goto bad_cdb_len; 2830 2831 xlat_func = ata_get_xlat_func(dev, scsi_op); 2832 } 2833 } 2834 2835 if (xlat_func) 2836 rc = ata_scsi_translate(dev, scmd, done, xlat_func); 2837 else 2838 ata_scsi_simulate(dev, scmd, done); 2839 2840 return rc; 2841 2842 bad_cdb_len: 2843 DPRINTK("bad CDB len=%u, scsi_op=0x%02x, max=%u\n", 2844 scmd->cmd_len, scsi_op, dev->cdb_len); 2845 scmd->result = DID_ERROR << 16; 2846 done(scmd); 2847 return 0; 2848 } 2849 2850 /** 2851 * ata_scsi_queuecmd - Issue SCSI cdb to libata-managed device 2852 * @cmd: SCSI command to be sent 2853 * @done: Completion function, called when command is complete 2854 * 2855 * In some cases, this function translates SCSI commands into 2856 * ATA taskfiles, and queues the taskfiles to be sent to 2857 * hardware. In other cases, this function simulates a 2858 * SCSI device by evaluating and responding to certain 2859 * SCSI commands. This creates the overall effect of 2860 * ATA and ATAPI devices appearing as SCSI devices. 2861 * 2862 * LOCKING: 2863 * Releases scsi-layer-held lock, and obtains host lock. 2864 * 2865 * RETURNS: 2866 * Return value from __ata_scsi_queuecmd() if @cmd can be queued, 2867 * 0 otherwise. 2868 */ 2869 int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) 2870 { 2871 struct ata_port *ap; 2872 struct ata_device *dev; 2873 struct scsi_device *scsidev = cmd->device; 2874 struct Scsi_Host *shost = scsidev->host; 2875 int rc = 0; 2876 2877 ap = ata_shost_to_port(shost); 2878 2879 spin_unlock(shost->host_lock); 2880 spin_lock(ap->lock); 2881 2882 ata_scsi_dump_cdb(ap, cmd); 2883 2884 dev = ata_scsi_find_dev(ap, scsidev); 2885 if (likely(dev)) 2886 rc = __ata_scsi_queuecmd(cmd, done, dev); 2887 else { 2888 cmd->result = (DID_BAD_TARGET << 16); 2889 done(cmd); 2890 } 2891 2892 spin_unlock(ap->lock); 2893 spin_lock(shost->host_lock); 2894 return rc; 2895 } 2896 2897 /** 2898 * ata_scsi_simulate - simulate SCSI command on ATA device 2899 * @dev: the target device 2900 * @cmd: SCSI command being sent to device. 2901 * @done: SCSI command completion function. 2902 * 2903 * Interprets and directly executes a select list of SCSI commands 2904 * that can be handled internally. 
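 *
 * For example, INQUIRY (and several of its VPD pages), MODE SENSE,
 * READ CAPACITY, REPORT LUNS, REQUEST SENSE and a few no-op commands
 * such as TEST UNIT READY are answered locally, largely from cached
 * IDENTIFY data; anything else is failed with ILLEGAL REQUEST,
 * "invalid command operation code" (20h/00h).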
2905 * 2906 * LOCKING: 2907 * spin_lock_irqsave(host lock) 2908 */ 2909 2910 void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd, 2911 void (*done)(struct scsi_cmnd *)) 2912 { 2913 struct ata_scsi_args args; 2914 const u8 *scsicmd = cmd->cmnd; 2915 u8 tmp8; 2916 2917 args.dev = dev; 2918 args.id = dev->id; 2919 args.cmd = cmd; 2920 args.done = done; 2921 2922 switch(scsicmd[0]) { 2923 /* TODO: worth improving? */ 2924 case FORMAT_UNIT: 2925 ata_scsi_invalid_field(cmd, done); 2926 break; 2927 2928 case INQUIRY: 2929 if (scsicmd[1] & 2) /* is CmdDt set? */ 2930 ata_scsi_invalid_field(cmd, done); 2931 else if ((scsicmd[1] & 1) == 0) /* is EVPD clear? */ 2932 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_std); 2933 else switch (scsicmd[2]) { 2934 case 0x00: 2935 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_00); 2936 break; 2937 case 0x80: 2938 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_80); 2939 break; 2940 case 0x83: 2941 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_83); 2942 break; 2943 case 0x89: 2944 ata_scsi_rbuf_fill(&args, ata_scsiop_inq_89); 2945 break; 2946 default: 2947 ata_scsi_invalid_field(cmd, done); 2948 break; 2949 } 2950 break; 2951 2952 case MODE_SENSE: 2953 case MODE_SENSE_10: 2954 ata_scsi_rbuf_fill(&args, ata_scsiop_mode_sense); 2955 break; 2956 2957 case MODE_SELECT: /* unconditionally return */ 2958 case MODE_SELECT_10: /* bad-field-in-cdb */ 2959 ata_scsi_invalid_field(cmd, done); 2960 break; 2961 2962 case READ_CAPACITY: 2963 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 2964 break; 2965 2966 case SERVICE_ACTION_IN: 2967 if ((scsicmd[1] & 0x1f) == SAI_READ_CAPACITY_16) 2968 ata_scsi_rbuf_fill(&args, ata_scsiop_read_cap); 2969 else 2970 ata_scsi_invalid_field(cmd, done); 2971 break; 2972 2973 case REPORT_LUNS: 2974 ata_scsi_rbuf_fill(&args, ata_scsiop_report_luns); 2975 break; 2976 2977 case REQUEST_SENSE: 2978 ata_scsi_set_sense(cmd, 0, 0, 0); 2979 cmd->result = (DRIVER_SENSE << 24); 2980 done(cmd); 2981 break; 2982 2983 /* if we reach this, then writeback caching is disabled, 2984 * turning this into a no-op. 2985 */ 2986 case SYNCHRONIZE_CACHE: 2987 /* fall through */ 2988 2989 /* no-op's, complete with success */ 2990 case REZERO_UNIT: 2991 case SEEK_6: 2992 case SEEK_10: 2993 case TEST_UNIT_READY: 2994 ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 2995 break; 2996 2997 case SEND_DIAGNOSTIC: 2998 tmp8 = scsicmd[1] & ~(1 << 3); 2999 if ((tmp8 == 0x4) && (!scsicmd[3]) && (!scsicmd[4])) 3000 ata_scsi_rbuf_fill(&args, ata_scsiop_noop); 3001 else 3002 ata_scsi_invalid_field(cmd, done); 3003 break; 3004 3005 /* all other commands */ 3006 default: 3007 ata_scsi_set_sense(cmd, ILLEGAL_REQUEST, 0x20, 0x0); 3008 /* "Invalid command operation code" */ 3009 done(cmd); 3010 break; 3011 } 3012 } 3013 3014 int ata_scsi_add_hosts(struct ata_host *host, struct scsi_host_template *sht) 3015 { 3016 int i, rc; 3017 3018 for (i = 0; i < host->n_ports; i++) { 3019 struct ata_port *ap = host->ports[i]; 3020 struct Scsi_Host *shost; 3021 3022 rc = -ENOMEM; 3023 shost = scsi_host_alloc(sht, sizeof(struct ata_port *)); 3024 if (!shost) 3025 goto err_alloc; 3026 3027 *(struct ata_port **)&shost->hostdata[0] = ap; 3028 ap->scsi_host = shost; 3029 3030 shost->transportt = &ata_scsi_transport_template; 3031 shost->unique_id = ap->print_id; 3032 shost->max_id = 16; 3033 shost->max_lun = 1; 3034 shost->max_channel = 1; 3035 shost->max_cmd_len = 16; 3036 3037 /* Schedule policy is determined by ->qc_defer() 3038 * callback and it needs to see every deferred qc. 
3039 * Set host_blocked to 1 to prevent SCSI midlayer from 3040 * automatically deferring requests. 3041 */ 3042 shost->max_host_blocked = 1; 3043 3044 rc = scsi_add_host(ap->scsi_host, ap->host->dev); 3045 if (rc) 3046 goto err_add; 3047 } 3048 3049 return 0; 3050 3051 err_add: 3052 scsi_host_put(host->ports[i]->scsi_host); 3053 err_alloc: 3054 while (--i >= 0) { 3055 struct Scsi_Host *shost = host->ports[i]->scsi_host; 3056 3057 scsi_remove_host(shost); 3058 scsi_host_put(shost); 3059 } 3060 return rc; 3061 } 3062 3063 void ata_scsi_scan_host(struct ata_port *ap, int sync) 3064 { 3065 int tries = 5; 3066 struct ata_device *last_failed_dev = NULL; 3067 struct ata_link *link; 3068 struct ata_device *dev; 3069 3070 if (ap->flags & ATA_FLAG_DISABLED) 3071 return; 3072 3073 repeat: 3074 ata_port_for_each_link(link, ap) { 3075 ata_link_for_each_dev(dev, link) { 3076 struct scsi_device *sdev; 3077 int channel = 0, id = 0; 3078 3079 if (!ata_dev_enabled(dev) || dev->sdev) 3080 continue; 3081 3082 if (ata_is_host_link(link)) 3083 id = dev->devno; 3084 else 3085 channel = link->pmp; 3086 3087 sdev = __scsi_add_device(ap->scsi_host, channel, id, 0, 3088 NULL); 3089 if (!IS_ERR(sdev)) { 3090 dev->sdev = sdev; 3091 scsi_device_put(sdev); 3092 } 3093 } 3094 } 3095 3096 /* If we scanned while EH was in progress or allocation 3097 * failure occurred, scan would have failed silently. Check 3098 * whether all devices are attached. 3099 */ 3100 ata_port_for_each_link(link, ap) { 3101 ata_link_for_each_dev(dev, link) { 3102 if (ata_dev_enabled(dev) && !dev->sdev) 3103 goto exit_loop; 3104 } 3105 } 3106 exit_loop: 3107 if (!link) 3108 return; 3109 3110 /* we're missing some SCSI devices */ 3111 if (sync) { 3112 /* If caller requested synchrnous scan && we've made 3113 * any progress, sleep briefly and repeat. 3114 */ 3115 if (dev != last_failed_dev) { 3116 msleep(100); 3117 last_failed_dev = dev; 3118 goto repeat; 3119 } 3120 3121 /* We might be failing to detect boot device, give it 3122 * a few more chances. 3123 */ 3124 if (--tries) { 3125 msleep(100); 3126 goto repeat; 3127 } 3128 3129 ata_port_printk(ap, KERN_ERR, "WARNING: synchronous SCSI scan " 3130 "failed without making any progress,\n" 3131 " switching to async\n"); 3132 } 3133 3134 queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 3135 round_jiffies_relative(HZ)); 3136 } 3137 3138 /** 3139 * ata_scsi_offline_dev - offline attached SCSI device 3140 * @dev: ATA device to offline attached SCSI device for 3141 * 3142 * This function is called from ata_eh_hotplug() and responsible 3143 * for taking the SCSI device attached to @dev offline. This 3144 * function is called with host lock which protects dev->sdev 3145 * against clearing. 3146 * 3147 * LOCKING: 3148 * spin_lock_irqsave(host lock) 3149 * 3150 * RETURNS: 3151 * 1 if attached SCSI device exists, 0 otherwise. 3152 */ 3153 int ata_scsi_offline_dev(struct ata_device *dev) 3154 { 3155 if (dev->sdev) { 3156 scsi_device_set_state(dev->sdev, SDEV_OFFLINE); 3157 return 1; 3158 } 3159 return 0; 3160 } 3161 3162 /** 3163 * ata_scsi_remove_dev - remove attached SCSI device 3164 * @dev: ATA device to remove attached SCSI device for 3165 * 3166 * This function is called from ata_eh_scsi_hotplug() and 3167 * responsible for removing the SCSI device attached to @dev. 3168 * 3169 * LOCKING: 3170 * Kernel thread context (may sleep). 
3171 */ 3172 static void ata_scsi_remove_dev(struct ata_device *dev) 3173 { 3174 struct ata_port *ap = dev->link->ap; 3175 struct scsi_device *sdev; 3176 unsigned long flags; 3177 3178 /* Alas, we need to grab scan_mutex to ensure SCSI device 3179 * state doesn't change underneath us and thus 3180 * scsi_device_get() always succeeds. The mutex locking can 3181 * be removed if there is __scsi_device_get() interface which 3182 * increments reference counts regardless of device state. 3183 */ 3184 mutex_lock(&ap->scsi_host->scan_mutex); 3185 spin_lock_irqsave(ap->lock, flags); 3186 3187 /* clearing dev->sdev is protected by host lock */ 3188 sdev = dev->sdev; 3189 dev->sdev = NULL; 3190 3191 if (sdev) { 3192 /* If user initiated unplug races with us, sdev can go 3193 * away underneath us after the host lock and 3194 * scan_mutex are released. Hold onto it. 3195 */ 3196 if (scsi_device_get(sdev) == 0) { 3197 /* The following ensures the attached sdev is 3198 * offline on return from ata_scsi_offline_dev() 3199 * regardless it wins or loses the race 3200 * against this function. 3201 */ 3202 scsi_device_set_state(sdev, SDEV_OFFLINE); 3203 } else { 3204 WARN_ON(1); 3205 sdev = NULL; 3206 } 3207 } 3208 3209 spin_unlock_irqrestore(ap->lock, flags); 3210 mutex_unlock(&ap->scsi_host->scan_mutex); 3211 3212 if (sdev) { 3213 ata_dev_printk(dev, KERN_INFO, "detaching (SCSI %s)\n", 3214 sdev->sdev_gendev.bus_id); 3215 3216 scsi_remove_device(sdev); 3217 scsi_device_put(sdev); 3218 } 3219 } 3220 3221 static void ata_scsi_handle_link_detach(struct ata_link *link) 3222 { 3223 struct ata_port *ap = link->ap; 3224 struct ata_device *dev; 3225 3226 ata_link_for_each_dev(dev, link) { 3227 unsigned long flags; 3228 3229 if (!(dev->flags & ATA_DFLAG_DETACHED)) 3230 continue; 3231 3232 spin_lock_irqsave(ap->lock, flags); 3233 dev->flags &= ~ATA_DFLAG_DETACHED; 3234 spin_unlock_irqrestore(ap->lock, flags); 3235 3236 ata_scsi_remove_dev(dev); 3237 } 3238 } 3239 3240 /** 3241 * ata_scsi_media_change_notify - send media change event 3242 * @atadev: Pointer to the disk device with media change event 3243 * 3244 * Tell the block layer to send a media change notification 3245 * event. 3246 * 3247 * LOCKING: 3248 * spin_lock_irqsave(host lock) 3249 */ 3250 void ata_scsi_media_change_notify(struct ata_device *dev) 3251 { 3252 #ifdef OTHER_AN_PATCHES_HAVE_BEEN_APPLIED 3253 if (dev->sdev) 3254 scsi_device_event_notify(dev->sdev, SDEV_MEDIA_CHANGE); 3255 #endif 3256 } 3257 3258 /** 3259 * ata_scsi_hotplug - SCSI part of hotplug 3260 * @work: Pointer to ATA port to perform SCSI hotplug on 3261 * 3262 * Perform SCSI part of hotplug. It's executed from a separate 3263 * workqueue after EH completes. This is necessary because SCSI 3264 * hot plugging requires working EH and hot unplugging is 3265 * synchronized with hot plugging with a mutex. 3266 * 3267 * LOCKING: 3268 * Kernel thread context (may sleep). 3269 */ 3270 void ata_scsi_hotplug(struct work_struct *work) 3271 { 3272 struct ata_port *ap = 3273 container_of(work, struct ata_port, hotplug_task.work); 3274 int i; 3275 3276 if (ap->pflags & ATA_PFLAG_UNLOADING) { 3277 DPRINTK("ENTER/EXIT - unloading\n"); 3278 return; 3279 } 3280 3281 DPRINTK("ENTER\n"); 3282 3283 /* Unplug detached devices. We cannot use link iterator here 3284 * because PMP links have to be scanned even if PMP is 3285 * currently not attached. Iterate manually. 
3286 */ 3287 ata_scsi_handle_link_detach(&ap->link); 3288 if (ap->pmp_link) 3289 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 3290 ata_scsi_handle_link_detach(&ap->pmp_link[i]); 3291 3292 /* scan for new ones */ 3293 ata_scsi_scan_host(ap, 0); 3294 3295 DPRINTK("EXIT\n"); 3296 } 3297 3298 /** 3299 * ata_scsi_user_scan - indication for user-initiated bus scan 3300 * @shost: SCSI host to scan 3301 * @channel: Channel to scan 3302 * @id: ID to scan 3303 * @lun: LUN to scan 3304 * 3305 * This function is called when user explicitly requests bus 3306 * scan. Set probe pending flag and invoke EH. 3307 * 3308 * LOCKING: 3309 * SCSI layer (we don't care) 3310 * 3311 * RETURNS: 3312 * Zero. 3313 */ 3314 static int ata_scsi_user_scan(struct Scsi_Host *shost, unsigned int channel, 3315 unsigned int id, unsigned int lun) 3316 { 3317 struct ata_port *ap = ata_shost_to_port(shost); 3318 unsigned long flags; 3319 int devno, rc = 0; 3320 3321 if (!ap->ops->error_handler) 3322 return -EOPNOTSUPP; 3323 3324 if (lun != SCAN_WILD_CARD && lun) 3325 return -EINVAL; 3326 3327 if (ap->nr_pmp_links == 0) { 3328 if (channel != SCAN_WILD_CARD && channel) 3329 return -EINVAL; 3330 devno = id; 3331 } else { 3332 if (id != SCAN_WILD_CARD && id) 3333 return -EINVAL; 3334 devno = channel; 3335 } 3336 3337 spin_lock_irqsave(ap->lock, flags); 3338 3339 if (devno == SCAN_WILD_CARD) { 3340 struct ata_link *link; 3341 3342 ata_port_for_each_link(link, ap) { 3343 struct ata_eh_info *ehi = &link->eh_info; 3344 ehi->probe_mask |= (1 << ata_link_max_devices(link)) - 1; 3345 ehi->action |= ATA_EH_SOFTRESET; 3346 } 3347 } else { 3348 struct ata_device *dev = ata_find_dev(ap, devno); 3349 3350 if (dev) { 3351 struct ata_eh_info *ehi = &dev->link->eh_info; 3352 ehi->probe_mask |= 1 << dev->devno; 3353 ehi->action |= ATA_EH_SOFTRESET; 3354 ehi->flags |= ATA_EHI_RESUME_LINK; 3355 } else 3356 rc = -EINVAL; 3357 } 3358 3359 if (rc == 0) { 3360 ata_port_schedule_eh(ap); 3361 spin_unlock_irqrestore(ap->lock, flags); 3362 ata_port_wait_eh(ap); 3363 } else 3364 spin_unlock_irqrestore(ap->lock, flags); 3365 3366 return rc; 3367 } 3368 3369 /** 3370 * ata_scsi_dev_rescan - initiate scsi_rescan_device() 3371 * @work: Pointer to ATA port to perform scsi_rescan_device() 3372 * 3373 * After ATA pass thru (SAT) commands are executed successfully, 3374 * libata need to propagate the changes to SCSI layer. This 3375 * function must be executed from ata_aux_wq such that sdev 3376 * attach/detach don't race with rescan. 3377 * 3378 * LOCKING: 3379 * Kernel thread context (may sleep). 
3380 */ 3381 void ata_scsi_dev_rescan(struct work_struct *work) 3382 { 3383 struct ata_port *ap = 3384 container_of(work, struct ata_port, scsi_rescan_task); 3385 struct ata_link *link; 3386 struct ata_device *dev; 3387 unsigned long flags; 3388 3389 spin_lock_irqsave(ap->lock, flags); 3390 3391 ata_port_for_each_link(link, ap) { 3392 ata_link_for_each_dev(dev, link) { 3393 struct scsi_device *sdev = dev->sdev; 3394 3395 if (!ata_dev_enabled(dev) || !sdev) 3396 continue; 3397 if (scsi_device_get(sdev)) 3398 continue; 3399 3400 spin_unlock_irqrestore(ap->lock, flags); 3401 scsi_rescan_device(&(sdev->sdev_gendev)); 3402 scsi_device_put(sdev); 3403 spin_lock_irqsave(ap->lock, flags); 3404 } 3405 } 3406 3407 spin_unlock_irqrestore(ap->lock, flags); 3408 } 3409 3410 /** 3411 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device 3412 * @host: ATA host container for all SAS ports 3413 * @port_info: Information from low-level host driver 3414 * @shost: SCSI host that the scsi device is attached to 3415 * 3416 * LOCKING: 3417 * PCI/etc. bus probe sem. 3418 * 3419 * RETURNS: 3420 * ata_port pointer on success / NULL on failure. 3421 */ 3422 3423 struct ata_port *ata_sas_port_alloc(struct ata_host *host, 3424 struct ata_port_info *port_info, 3425 struct Scsi_Host *shost) 3426 { 3427 struct ata_port *ap; 3428 3429 ap = ata_port_alloc(host); 3430 if (!ap) 3431 return NULL; 3432 3433 ap->port_no = 0; 3434 ap->lock = shost->host_lock; 3435 ap->pio_mask = port_info->pio_mask; 3436 ap->mwdma_mask = port_info->mwdma_mask; 3437 ap->udma_mask = port_info->udma_mask; 3438 ap->flags |= port_info->flags; 3439 ap->ops = port_info->port_ops; 3440 ap->cbl = ATA_CBL_SATA; 3441 3442 return ap; 3443 } 3444 EXPORT_SYMBOL_GPL(ata_sas_port_alloc); 3445 3446 /** 3447 * ata_sas_port_start - Set port up for dma. 3448 * @ap: Port to initialize 3449 * 3450 * Called just after data structures for each port are 3451 * initialized. Allocates DMA pad. 3452 * 3453 * May be used as the port_start() entry in ata_port_operations. 3454 * 3455 * LOCKING: 3456 * Inherited from caller. 3457 */ 3458 int ata_sas_port_start(struct ata_port *ap) 3459 { 3460 return ata_pad_alloc(ap, ap->dev); 3461 } 3462 EXPORT_SYMBOL_GPL(ata_sas_port_start); 3463 3464 /** 3465 * ata_port_stop - Undo ata_sas_port_start() 3466 * @ap: Port to shut down 3467 * 3468 * Frees the DMA pad. 3469 * 3470 * May be used as the port_stop() entry in ata_port_operations. 3471 * 3472 * LOCKING: 3473 * Inherited from caller. 3474 */ 3475 3476 void ata_sas_port_stop(struct ata_port *ap) 3477 { 3478 ata_pad_free(ap, ap->dev); 3479 } 3480 EXPORT_SYMBOL_GPL(ata_sas_port_stop); 3481 3482 /** 3483 * ata_sas_port_init - Initialize a SATA device 3484 * @ap: SATA port to initialize 3485 * 3486 * LOCKING: 3487 * PCI/etc. bus probe sem. 3488 * 3489 * RETURNS: 3490 * Zero on success, non-zero on error. 
3491 */ 3492 3493 int ata_sas_port_init(struct ata_port *ap) 3494 { 3495 int rc = ap->ops->port_start(ap); 3496 3497 if (!rc) { 3498 ap->print_id = ata_print_id++; 3499 rc = ata_bus_probe(ap); 3500 } 3501 3502 return rc; 3503 } 3504 EXPORT_SYMBOL_GPL(ata_sas_port_init); 3505 3506 /** 3507 * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc 3508 * @ap: SATA port to destroy 3509 * 3510 */ 3511 3512 void ata_sas_port_destroy(struct ata_port *ap) 3513 { 3514 if (ap->ops->port_stop) 3515 ap->ops->port_stop(ap); 3516 kfree(ap); 3517 } 3518 EXPORT_SYMBOL_GPL(ata_sas_port_destroy); 3519 3520 /** 3521 * ata_sas_slave_configure - Default slave_config routine for libata devices 3522 * @sdev: SCSI device to configure 3523 * @ap: ATA port to which SCSI device is attached 3524 * 3525 * RETURNS: 3526 * Zero. 3527 */ 3528 3529 int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap) 3530 { 3531 ata_scsi_sdev_config(sdev); 3532 ata_scsi_dev_config(sdev, ap->link.device); 3533 return 0; 3534 } 3535 EXPORT_SYMBOL_GPL(ata_sas_slave_configure); 3536 3537 /** 3538 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device 3539 * @cmd: SCSI command to be sent 3540 * @done: Completion function, called when command is complete 3541 * @ap: ATA port to which the command is being sent 3542 * 3543 * RETURNS: 3544 * Return value from __ata_scsi_queuecmd() if @cmd can be queued, 3545 * 0 otherwise. 3546 */ 3547 3548 int ata_sas_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), 3549 struct ata_port *ap) 3550 { 3551 int rc = 0; 3552 3553 ata_scsi_dump_cdb(ap, cmd); 3554 3555 if (likely(ata_scsi_dev_enabled(ap->link.device))) 3556 rc = __ata_scsi_queuecmd(cmd, done, ap->link.device); 3557 else { 3558 cmd->result = (DID_BAD_TARGET << 16); 3559 done(cmd); 3560 } 3561 return rc; 3562 } 3563 EXPORT_SYMBOL_GPL(ata_sas_queuecmd); 3564
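
/*
 * Illustrative sketch (not built): roughly how a hypothetical SAS LLDD
 * might bring up a SATA device using the ata_sas_*() helpers exported
 * above. The surrounding driver structures (ata_host, ata_port_info,
 * Scsi_Host) are assumed to be supplied by that driver.
 */
#if 0
static struct ata_port *example_sas_ata_attach(struct ata_host *host,
					       struct ata_port_info *pi,
					       struct Scsi_Host *shost)
{
	struct ata_port *ap;

	/* allocate and set up the port from the driver's port_info */
	ap = ata_sas_port_alloc(host, pi, shost);
	if (!ap)
		return NULL;

	/* start the port and probe the attached device */
	if (ata_sas_port_init(ap)) {
		ata_sas_port_destroy(ap);
		return NULL;
	}

	/* SCSI commands for the device are later fed to ata_sas_queuecmd() */
	return ap;
}
#endif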