/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *      http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *      http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *      http://www.sata-io.org (SATA)
 *      http://www.compactflash.org (CF)
 *      http://www.qic.org (QIC157 - Tape and DSC)
 *      http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>

#include "libata.h"


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
                                        u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

struct ata_force_param {
        const char *name;
        unsigned int cbl;
        int spd_limit;
        unsigned long xfer_mask;
        unsigned int horkage_on;
        unsigned int horkage_off;
};

struct ata_force_ent {
        int port;
        int device;
        struct ata_force_param param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 * ata_force_cbl - force cable type according to libata.force
 * @ap: ATA port of interest
 *
 * Force cable type according to libata.force and whine about it.
 * The last entry which has matching port number is used, so it
 * can be specified as part of device force parameters.  For
 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 * same effect.
 *
 * LOCKING:
 * EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
        int i;

        for (i = ata_force_tbl_size - 1; i >= 0; i--) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];

                if (fe->port != -1 && fe->port != ap->print_id)
                        continue;

                if (fe->param.cbl == ATA_CBL_NONE)
                        continue;

                ap->cbl = fe->param.cbl;
                ata_port_printk(ap, KERN_NOTICE,
                                "FORCE: cable set to %s\n", fe->param.name);
                return;
        }
}
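
/*
 * Illustrative libata.force strings handled by the ata_force_* helpers
 * in this file (syntax per Documentation/kernel-parameters.txt; the
 * concrete values below are hypothetical examples, not a complete list):
 *
 *      libata.force=1.00:udma4         cap the first device on port 1
 *                                      at UDMA/66
 *      libata.force=a:40c,1.00:udma4   combine cable type and transfer
 *                                      mode, as in the kernel-doc above
 *
 * For cable type and spd limit the table is scanned backwards, so the
 * last matching entry wins; horkage entries are applied in order.
 */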

/**
 * ata_force_spd_limit - force SATA spd limit according to libata.force
 * @link: ATA link of interest
 *
 * Force SATA spd limit according to libata.force and whine about
 * it.  When only the port part is specified (e.g. 1:), the limit
 * applies to all links connected to both the host link and all
 * fan-out ports connected via PMP.  If the device part is
 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
 * link, not the host link.  Device number 15 always points to the
 * host link whether PMP is attached or not.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_spd_limit(struct ata_link *link)
{
        int linkno, i;

        if (ata_is_host_link(link))
                linkno = 15;
        else
                linkno = link->pmp;

        for (i = ata_force_tbl_size - 1; i >= 0; i--) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];

                if (fe->port != -1 && fe->port != link->ap->print_id)
                        continue;

                if (fe->device != -1 && fe->device != linkno)
                        continue;

                if (!fe->param.spd_limit)
                        continue;

                link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
                ata_link_printk(link, KERN_NOTICE,
                        "FORCE: PHY spd limit set to %s\n", fe->param.name);
                return;
        }
}

/**
 * ata_force_xfermask - force xfermask according to libata.force
 * @dev: ATA device of interest
 *
 * Force xfer_mask according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
        int devno = dev->link->pmp + dev->devno;
        int alt_devno = devno;
        int i;

        /* allow n.15 for the first device attached to host port */
        if (ata_is_host_link(dev->link) && devno == 0)
                alt_devno = 15;

        for (i = ata_force_tbl_size - 1; i >= 0; i--) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];
                unsigned long pio_mask, mwdma_mask, udma_mask;

                if (fe->port != -1 && fe->port != dev->link->ap->print_id)
                        continue;

                if (fe->device != -1 && fe->device != devno &&
                    fe->device != alt_devno)
                        continue;

                if (!fe->param.xfer_mask)
                        continue;

                ata_unpack_xfermask(fe->param.xfer_mask,
                                    &pio_mask, &mwdma_mask, &udma_mask);
                if (udma_mask)
                        dev->udma_mask = udma_mask;
                else if (mwdma_mask) {
                        dev->udma_mask = 0;
                        dev->mwdma_mask = mwdma_mask;
                } else {
                        dev->udma_mask = 0;
                        dev->mwdma_mask = 0;
                        dev->pio_mask = pio_mask;
                }

                ata_dev_printk(dev, KERN_NOTICE,
                        "FORCE: xfer_mask set to %s\n", fe->param.name);
                return;
        }
}

/**
 * ata_force_horkage - force horkage according to libata.force
 * @dev: ATA device of interest
 *
 * Force horkage according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
        int devno = dev->link->pmp + dev->devno;
        int alt_devno = devno;
        int i;

        /* allow n.15 for the first device attached to host port */
        if (ata_is_host_link(dev->link) && devno == 0)
                alt_devno = 15;

        for (i = 0; i < ata_force_tbl_size; i++) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];

                if (fe->port != -1 && fe->port != dev->link->ap->print_id)
                        continue;

                if (fe->device != -1 && fe->device != devno &&
                    fe->device != alt_devno)
                        continue;

                if (!(~dev->horkage & fe->param.horkage_on) &&
                    !(dev->horkage & fe->param.horkage_off))
                        continue;

                dev->horkage |= fe->param.horkage_on;
                dev->horkage &= ~fe->param.horkage_off;

                ata_dev_printk(dev, KERN_NOTICE,
                        "FORCE: horkage modified (%s)\n", fe->param.name);
        }
}

/**
 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 * @opcode: SCSI opcode
 *
 * Determine ATAPI command type from @opcode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
        switch (opcode) {
        case GPCMD_READ_10:
        case GPCMD_READ_12:
                return ATAPI_READ;

        case GPCMD_WRITE_10:
        case GPCMD_WRITE_12:
        case GPCMD_WRITE_AND_VERIFY_10:
                return ATAPI_WRITE;

        case GPCMD_READ_CD:
        case GPCMD_READ_CD_MSF:
                return ATAPI_READ_CD;

        case ATA_16:
        case ATA_12:
                if (atapi_passthru16)
                        return ATAPI_PASS_THRU;
                /* fall thru */
        default:
                return ATAPI_MISC;
        }
}

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will be output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
        fis[0] = 0x27;                  /* Register - Host to Device FIS */
        fis[1] = pmp & 0xf;             /* Port multiplier number */
        if (is_cmd)
                fis[1] |= (1 << 7);     /* bit 7 indicates Command FIS */

        fis[2] = tf->command;
        fis[3] = tf->feature;

        fis[4] = tf->lbal;
        fis[5] = tf->lbam;
        fis[6] = tf->lbah;
        fis[7] = tf->device;

        fis[8] = tf->hob_lbal;
        fis[9] = tf->hob_lbam;
        fis[10] = tf->hob_lbah;
        fis[11] = tf->hob_feature;

        fis[12] = tf->nsect;
        fis[13] = tf->hob_nsect;
        fis[14] = 0;
        fis[15] = tf->ctl;

        fis[16] = 0;
        fis[17] = 0;
        fis[18] = 0;
        fis[19] = 0;
}
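
/*
 * Sketch of a typical ata_tf_to_fis() call as made from an SFF/AHCI-style
 * issue path (the 20-byte buffer and qc usage here are illustrative):
 *
 *      u8 fis[20];
 *
 *      ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *      // fis[0] == 0x27 (Register - Host to Device FIS);
 *      // bit 7 of fis[1] is set because is_cmd == 1
 *
 * ata_tf_from_fis() below performs the inverse for a received
 * Register - Device to Host FIS.
 */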

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        tf->command = fis[2];   /* status */
        tf->feature = fis[3];   /* error */

        tf->lbal = fis[4];
        tf->lbam = fis[5];
        tf->lbah = fis[6];
        tf->device = fis[7];

        tf->hob_lbal = fis[8];
        tf->hob_lbam = fis[9];
        tf->hob_lbah = fis[10];

        tf->nsect = fis[12];
        tf->hob_nsect = fis[13];
}

static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
        u8 cmd;

        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (cmd) {
                tf->command = cmd;
                return 0;
        }
        return -1;
}

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
        u64 block = 0;

        if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
                        block |= tf->hob_lbal << 24;
                } else
                        block |= (tf->device & 0xf) << 24;

                block |= tf->lbah << 16;
                block |= tf->lbam << 8;
                block |= tf->lbal;
        } else {
                u32 cyl, head, sect;

                cyl = tf->lbam | (tf->lbah << 8);
                head = tf->device & 0xf;
                sect = tf->lbal;

                block = (cyl * dev->heads + head) * dev->sectors + sect;
        }

        return block;
}
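
/*
 * Worked example for the CHS branch of ata_tf_read_block() above,
 * using a hypothetical geometry of heads=16, sectors=63:
 *
 *      cyl = 2, head = 5, sect = 1
 *      block = (2 * 16 + 5) * 63 + 1 = 2332
 *
 * matching block = (cyl * dev->heads + head) * dev->sectors + sect.
 */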

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                    u64 block, u32 n_block, unsigned int tf_flags,
                    unsigned int tag)
{
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf->flags |= tf_flags;

        if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
                /* yay, NCQ */
                if (!lba_48_ok(block, n_block))
                        return -ERANGE;

                tf->protocol = ATA_PROT_NCQ;
                tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

                if (tf->flags & ATA_TFLAG_WRITE)
                        tf->command = ATA_CMD_FPDMA_WRITE;
                else
                        tf->command = ATA_CMD_FPDMA_READ;

                tf->nsect = tag << 3;
                tf->hob_feature = (n_block >> 8) & 0xff;
                tf->feature = n_block & 0xff;

                tf->hob_lbah = (block >> 40) & 0xff;
                tf->hob_lbam = (block >> 32) & 0xff;
                tf->hob_lbal = (block >> 24) & 0xff;
                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device = 1 << 6;
                if (tf->flags & ATA_TFLAG_FUA)
                        tf->device |= 1 << 7;
        } else if (dev->flags & ATA_DFLAG_LBA) {
                tf->flags |= ATA_TFLAG_LBA;

                if (lba_28_ok(block, n_block)) {
                        /* use LBA28 */
                        tf->device |= (block >> 24) & 0xf;
                } else if (lba_48_ok(block, n_block)) {
                        if (!(dev->flags & ATA_DFLAG_LBA48))
                                return -ERANGE;

                        /* use LBA48 */
                        tf->flags |= ATA_TFLAG_LBA48;

                        tf->hob_nsect = (n_block >> 8) & 0xff;

                        tf->hob_lbah = (block >> 40) & 0xff;
                        tf->hob_lbam = (block >> 32) & 0xff;
                        tf->hob_lbal = (block >> 24) & 0xff;
                } else
                        /* request too large even for LBA48 */
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                tf->nsect = n_block & 0xff;

                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device |= ATA_LBA;
        } else {
                /* CHS */
                u32 sect, head, cyl, track;

                /* The request -may- be too large for CHS addressing. */
                if (!lba_28_ok(block, n_block))
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                /* Convert LBA to CHS */
                track = (u32)block / dev->sectors;
                cyl = track / dev->heads;
                head = track % dev->heads;
                sect = (u32)block % dev->sectors + 1;

                DPRINTK("block %u track %u cyl %u head %u sect %u\n",
                        (u32)block, track, cyl, head, sect);

                /* Check whether the converted CHS can fit.
                   Cylinder: 0-65535
                   Head: 0-15
                   Sector: 1-255 */
                if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
                        return -ERANGE;

                tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
                tf->lbal = sect;
                tf->lbam = cyl;
                tf->lbah = cyl >> 8;
                tf->device |= head;
        }

        return 0;
}
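
/*
 * Minimal sketch of building a 16-sector LBA read with ata_build_rw_tf()
 * (values illustrative; dev comes from the caller's context and error
 * handling is elided):
 *
 *      struct ata_taskfile tf;
 *
 *      ata_tf_init(dev, &tf);
 *      if (ata_build_rw_tf(&tf, dev, 1024, 16, 0, ATA_TAG_INTERNAL))
 *              return;         // -ERANGE or -EINVAL, per the kernel-doc
 *
 * With tf_flags == 0 this is a read; passing ATA_TAG_INTERNAL keeps the
 * non-NCQ path even when NCQ is enabled, as the code above shows.
 */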

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
                                unsigned long mwdma_mask,
                                unsigned long udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
                ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
                ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
                         unsigned long *mwdma_mask, unsigned long *udma_mask)
{
        if (pio_mask)
                *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
        if (mwdma_mask)
                *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        if (udma_mask)
                *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
        { -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0xff;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
                                & ~((1 << ent->shift) - 1);
        return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}
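
/*
 * Round-trip sketch for the xfer-mask helpers above (the numeric masks
 * are illustrative):
 *
 *      unsigned long mask = ata_pack_xfermask(0x1f, 0x07, 0);
 *                                      // PIO0-4, MWDMA0-2, no UDMA
 *      u8 mode = ata_xfer_mask2mode(mask);     // -> XFER_MW_DMA_2
 *      unsigned long mwdma = ata_xfer_mode2mask(mode);
 *                                      // -> the MWDMA0-2 bits only
 *
 * ata_xfer_mode2mask() returns all modes up to and including @xfer_mode
 * within its class, per the (2 << shift) - 1 arithmetic above.
 */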

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "PIO5",
                "PIO6",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "MWDMA3",
                "MWDMA4",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
        static const char * const spd_str[] = {
                "1.5 Gbps",
                "3.0 Gbps",
        };

        if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
                return "<unknown>";
        return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
        if (ata_dev_enabled(dev)) {
                if (ata_msg_drv(dev->link->ap))
                        ata_dev_printk(dev, KERN_WARNING, "disabled\n");
                ata_acpi_on_disable(dev);
                ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
                                        ATA_DNXFER_QUIET);
                dev->class++;
        }
}
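
/*
 * Background for ata_dev_set_dipm() below: bits 11:8 of SControl form
 * the IPM field, where bit 8 set disallows transitions to PARTIAL and
 * bit 9 set disallows transitions to SLUMBER (SATA spec behaviour,
 * restated here for reference).  The three policy branches reduce to:
 *
 *      scontrol &= ~(0x3 << 8);        // allow PARTIAL and SLUMBER
 *      scontrol |= (0x2 << 8);         // allow PARTIAL only
 *      scontrol |= (0x3 << 8);         // disallow both
 */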

static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
        struct ata_link *link = dev->link;
        struct ata_port *ap = link->ap;
        u32 scontrol;
        unsigned int err_mask;
        int rc;

        /*
         * disallow DIPM for drivers which haven't set
         * ATA_FLAG_IPM.  This is because when DIPM is enabled,
         * phy ready will be set in the interrupt status on
         * state changes, which will cause some drivers to
         * think there are errors - additionally drivers will
         * need to disable hot plug.
         */
        if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
                ap->pm_policy = NOT_AVAILABLE;
                return -EINVAL;
        }

        /*
         * For DIPM, we will only enable it for the
         * min_power setting.
         *
         * Why?  Because Disks are too stupid to know that
         * If the host rejects a request to go to SLUMBER
         * they should retry at PARTIAL, and instead it
         * just would give up.  So, for medium_power to
         * work at all, we need to only allow HIPM.
         */
        rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
        if (rc)
                return rc;

        switch (policy) {
        case MIN_POWER:
                /* no restrictions on IPM transitions */
                scontrol &= ~(0x3 << 8);
                rc = sata_scr_write(link, SCR_CONTROL, scontrol);
                if (rc)
                        return rc;

                /* enable DIPM */
                if (dev->flags & ATA_DFLAG_DIPM)
                        err_mask = ata_dev_set_feature(dev,
                                        SETFEATURES_SATA_ENABLE, SATA_DIPM);
                break;
        case MEDIUM_POWER:
                /* allow IPM to PARTIAL */
                scontrol &= ~(0x1 << 8);
                scontrol |= (0x2 << 8);
                rc = sata_scr_write(link, SCR_CONTROL, scontrol);
                if (rc)
                        return rc;

                /*
                 * we don't have to disable DIPM since IPM flags
                 * disallow transitions to SLUMBER, which effectively
                 * disable DIPM if it does not support PARTIAL
                 */
                break;
        case NOT_AVAILABLE:
        case MAX_PERFORMANCE:
                /* disable all IPM transitions */
                scontrol |= (0x3 << 8);
                rc = sata_scr_write(link, SCR_CONTROL, scontrol);
                if (rc)
                        return rc;

                /*
                 * we don't have to disable DIPM since IPM flags
                 * disallow all transitions which effectively
                 * disable DIPM anyway.
                 */
                break;
        }

        /* FIXME: handle SET FEATURES failure */
        (void) err_mask;

        return 0;
}

/**
 * ata_dev_enable_pm - enable SATA interface power management
 * @dev: device to enable power management
 * @policy: the link power management policy
 *
 * Enable SATA Interface power management.  This will enable
 * Device Interface Power Management (DIPM) for min_power
 * policy, and then call driver specific callbacks for
 * enabling Host Initiated Power management.
 *
 * Locking: Caller.
 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
        int rc = 0;
        struct ata_port *ap = dev->link->ap;

        /* set HIPM first, then DIPM */
        if (ap->ops->enable_pm)
                rc = ap->ops->enable_pm(ap, policy);
        if (rc)
                goto enable_pm_out;
        rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
        if (rc)
                ap->pm_policy = MAX_PERFORMANCE;
        else
                ap->pm_policy = policy;
        return /* rc */;        /* hopefully we can use 'rc' eventually */
}

#ifdef CONFIG_PM
/**
 * ata_dev_disable_pm - disable SATA interface power management
 * @dev: device to disable power management
 *
 * Disable SATA Interface power management.  This will disable
 * Device Interface Power Management (DIPM) without changing
 * policy, call driver specific callbacks for disabling Host
 * Initiated Power management.
 *
 * Locking: Caller.
 * Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;

        ata_dev_set_dipm(dev, MAX_PERFORMANCE);
        if (ap->ops->disable_pm)
                ap->ops->disable_pm(ap);
}
#endif /* CONFIG_PM */

void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
        ap->pm_policy = policy;
        ap->link.eh_info.action |= ATA_EH_LPM;
        ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
        ata_port_schedule_eh(ap);
}

#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
        struct ata_link *link;
        struct ata_port *ap;
        struct ata_device *dev;
        int i;

        for (i = 0; i < host->n_ports; i++) {
                ap = host->ports[i];
                ata_port_for_each_link(link, ap) {
                        ata_link_for_each_dev(dev, link)
                                ata_dev_disable_pm(dev);
                }
        }
}

static void ata_lpm_disable(struct ata_host *host)
{
        int i;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                ata_lpm_schedule(ap, ap->pm_policy);
        }
}
#endif /* CONFIG_PM */


/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        iowrite8(0xaa, ioaddr->nsect_addr);
        iowrite8(0x55, ioaddr->lbal_addr);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        nsect = ioread8(ioaddr->nsect_addr);
        lbal = ioread8(ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 * %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * so we only check those.  It's sufficient for uniqueness.
         *
         * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
         * signatures for ATA and ATAPI devices attached on SerialATA,
         * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
         * spec has never mentioned using different signatures
         * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
         * Multiplier specification began to use 0x69/0x96 to identify
         * port multipliers and 0x3c/0xc3 to identify SEMB device.
         * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
         * 0x69/0x96 shortly after and described them as reserved for
         * SerialATA.
         *
         * We follow the current spec and consider that 0x69/0x96
         * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
         */
        if ((tf->lbam == 0) && (tf->lbah == 0)) {
                DPRINTK("found ATA device by sig\n");
                return ATA_DEV_ATA;
        }

        if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
                DPRINTK("found ATAPI device by sig\n");
                return ATA_DEV_ATAPI;
        }

        if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
                DPRINTK("found PMP device by sig\n");
                return ATA_DEV_PMP;
        }

        if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
                printk(KERN_INFO "ata: SEMB device ignored\n");
                return ATA_DEV_SEMB_UNSUP; /* not yet */
        }

        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
}
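
/*
 * Signature values checked by ata_dev_classify() above, summarized
 * for quick reference (LBAm/LBAh as left by the device after reset):
 *
 *      0x00/0x00  ATA          0x14/0xeb  ATAPI
 *      0x69/0x96  PMP          0x3c/0xc3  SEMB (rejected for now)
 */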

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @dev: ATA device to classify (starting at zero)
 * @present: device seems present
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
                                  u8 *r_err)
{
        struct ata_port *ap = dev->link->ap;
        struct ata_taskfile tf;
        unsigned int class;
        u8 err;

        ap->ops->dev_select(ap, dev->devno);

        memset(&tf, 0, sizeof(tf));

        ap->ops->tf_read(ap, &tf);
        err = tf.feature;
        if (r_err)
                *r_err = err;

        /* see if device passed diags: continue and warn later */
        if (err == 0)
                /* diagnostic fail : do nothing _YET_ */
                dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
        else if (err == 1)
                /* do nothing */ ;
        else if ((dev->devno == 0) && (err == 0x81))
                /* do nothing */ ;
        else
                return ATA_DEV_NONE;

        /* determine if device is ATA or ATAPI */
        class = ata_dev_classify(&tf);

        if (class == ATA_DEV_UNKNOWN) {
                /* If the device failed diagnostic, it's likely to
                 * have reported incorrect device signature too.
                 * Assume ATA device if the device seems present but
                 * device signature is invalid with diagnostic
                 * failure.
                 */
                if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
                        class = ATA_DEV_ATA;
                else
                        class = ATA_DEV_NONE;
        } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
                class = ATA_DEV_NONE;

        return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}
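
/*
 * Sketch of pulling the model string out of IDENTIFY data with
 * ata_id_string() above and ata_id_c_string() below (buffer sized
 * ATA_ID_PROD_LEN + 1 to leave room for the NUL that ata_id_c_string()
 * appends):
 *
 *      unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *      ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 *
 * Note the odd-length requirement: sizeof(model) here is 41.
 */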

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        WARN_ON(!(len & 1));

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
                        return ata_id_u64(id, 100);
                else
                        return ata_id_u32(id, 60);
        } else {
                if (ata_id_current_chs_valid(id))
                        return ata_id_u32(id, 57);
                else
                        return id[1] * id[3] * id[6];
        }
}

static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
        u64 sectors = 0;

        sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
        sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
        sectors |= (tf->hob_lbal & 0xff) << 24;
        sectors |= (tf->lbah & 0xff) << 16;
        sectors |= (tf->lbam & 0xff) << 8;
        sectors |= (tf->lbal & 0xff);

        return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
        u64 sectors = 0;

        sectors |= (tf->device & 0x0f) << 24;
        sectors |= (tf->lbah & 0xff) << 16;
        sectors |= (tf->lbam & 0xff) << 8;
        sectors |= (tf->lbal & 0xff);

        return ++sectors;
}

/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
        unsigned int err_mask;
        struct ata_taskfile tf;
        int lba48 = ata_id_has_lba48(dev->id);

        ata_tf_init(dev, &tf);

        /* always clear all address registers */
        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

        if (lba48) {
                tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
                tf.flags |= ATA_TFLAG_LBA48;
        } else
                tf.command = ATA_CMD_READ_NATIVE_MAX;

        tf.protocol |= ATA_PROT_NODATA;
        tf.device |= ATA_LBA;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
        if (err_mask) {
                ata_dev_printk(dev, KERN_WARNING, "failed to read native "
                               "max address (err_mask=0x%x)\n", err_mask);
                if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
                        return -EACCES;
                return -EIO;
        }

        if (lba48)
                *max_sectors = ata_tf_to_lba48(&tf);
        else
                *max_sectors = ata_tf_to_lba(&tf);
        if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
                (*max_sectors)--;
        return 0;
}
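
/*
 * Illustrative flow for the HPA helpers above and below (error handling
 * elided; dev assumed valid and HPA-capable):
 *
 *      u64 native_sectors;
 *
 *      if (ata_read_native_max_address(dev, &native_sectors) == 0 &&
 *          native_sectors > ata_id_n_sectors(dev->id))
 *              ata_set_max_sectors(dev, native_sectors);
 *
 * This is essentially what ata_hpa_resize() does, plus the horkage and
 * re-IDENTIFY bookkeeping shown there.
 */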

/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive.  -EIO on other
 * errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
        unsigned int err_mask;
        struct ata_taskfile tf;
        int lba48 = ata_id_has_lba48(dev->id);

        new_sectors--;

        ata_tf_init(dev, &tf);

        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

        if (lba48) {
                tf.command = ATA_CMD_SET_MAX_EXT;
                tf.flags |= ATA_TFLAG_LBA48;

                tf.hob_lbal = (new_sectors >> 24) & 0xff;
                tf.hob_lbam = (new_sectors >> 32) & 0xff;
                tf.hob_lbah = (new_sectors >> 40) & 0xff;
        } else {
                tf.command = ATA_CMD_SET_MAX;

                tf.device |= (new_sectors >> 24) & 0xf;
        }

        tf.protocol |= ATA_PROT_NODATA;
        tf.device |= ATA_LBA;

        tf.lbal = (new_sectors >> 0) & 0xff;
        tf.lbam = (new_sectors >> 8) & 0xff;
        tf.lbah = (new_sectors >> 16) & 0xff;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
        if (err_mask) {
                ata_dev_printk(dev, KERN_WARNING, "failed to set "
                               "max address (err_mask=0x%x)\n", err_mask);
                if (err_mask == AC_ERR_DEV &&
                    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
                        return -EACCES;
                return -EIO;
        }

        return 0;
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media.  The caller must check
 * the drive has the HPA feature set enabled.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
        u64 sectors = ata_id_n_sectors(dev->id);
        u64 native_sectors;
        int rc;

        /* do we need to do it? */
        if (dev->class != ATA_DEV_ATA ||
            !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
            (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
                return 0;

        /* read native max address */
        rc = ata_read_native_max_address(dev, &native_sectors);
        if (rc) {
                /* If device aborted the command or HPA isn't going to
                 * be unlocked, skip HPA resizing.
                 */
                if (rc == -EACCES || !ata_ignore_hpa) {
                        ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
                                       "broken, skipping HPA handling\n");
                        dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

                        /* we can continue if device aborted the command */
                        if (rc == -EACCES)
                                rc = 0;
                }

                return rc;
        }

        /* nothing to do? */
        if (native_sectors <= sectors || !ata_ignore_hpa) {
                if (!print_info || native_sectors == sectors)
                        return 0;

                if (native_sectors > sectors)
                        ata_dev_printk(dev, KERN_INFO,
                                "HPA detected: current %llu, native %llu\n",
                                (unsigned long long)sectors,
                                (unsigned long long)native_sectors);
                else if (native_sectors < sectors)
                        ata_dev_printk(dev, KERN_WARNING,
                                "native sectors (%llu) is smaller than "
                                "sectors (%llu)\n",
                                (unsigned long long)native_sectors,
                                (unsigned long long)sectors);
                return 0;
        }

        /* let's unlock HPA */
        rc = ata_set_max_sectors(dev, native_sectors);
        if (rc == -EACCES) {
                /* if device aborted the command, skip HPA resizing */
                ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
                               "(%llu -> %llu), skipping HPA handling\n",
                               (unsigned long long)sectors,
                               (unsigned long long)native_sectors);
                dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
                return 0;
        } else if (rc)
                return rc;

        /* re-read IDENTIFY data */
        rc = ata_dev_reread_id(dev, 0);
        if (rc) {
                ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
                               "data after HPA resizing\n");
                return rc;
        }

        if (print_info) {
                u64 new_sectors = ata_id_n_sectors(dev->id);
                ata_dev_printk(dev, KERN_INFO,
                        "HPA unlocked: %llu -> %llu, native %llu\n",
                        (unsigned long long)sectors,
                        (unsigned long long)new_sectors,
                        (unsigned long long)native_sectors);
        }

        return 0;
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}


/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
        u8 tmp;

        if (device == 0)
                tmp = ATA_DEVICE_OBS;
        else
                tmp = ATA_DEVICE_OBS | ATA_DEV1;

        iowrite8(tmp, ap->ioaddr.device_addr);
        ata_pause(ap);  /* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
                    unsigned int wait, unsigned int can_sleep)
{
        if (ata_msg_probe(ap))
                ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
                                "device %u, wait %u\n", device, wait);

        if (wait)
                ata_wait_idle(ap);

        ap->ops->dev_select(ap, device);

        if (wait) {
                if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
                        msleep(150);
                ata_wait_idle(ap);
        }
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
        DPRINTK("49==0x%04x  "
                "53==0x%04x  "
                "63==0x%04x  "
                "64==0x%04x  "
                "75==0x%04x\n",
                id[49],
                id[53],
                id[63],
                id[64],
                id[75]);
        DPRINTK("80==0x%04x  "
                "81==0x%04x  "
                "82==0x%04x  "
                "83==0x%04x  "
                "84==0x%04x\n",
                id[80],
                id[81],
                id[82],
                id[83],
                id[84]);
        DPRINTK("88==0x%04x  "
                "93==0x%04x\n",
                id[88],
                id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
        unsigned long pio_mask, mwdma_mask, udma_mask;

        /* Usual case.  Word 53 indicates word 64 is valid */
        if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
                pio_mask <<= 3;
                pio_mask |= 0x7;
        } else {
                /* If word 64 isn't valid then Word 51 high byte holds
                 * the PIO timing number for the maximum.  Turn it into
                 * a mask.
                 */
                u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
                if (mode < 5)   /* Valid PIO range */
                        pio_mask = (2 << mode) - 1;
                else
                        pio_mask = 1;

                /* But wait.. there's more.  Design your standards by
                 * committee and you too can get a free iordy field to
                 * process.  However it's the speeds not the modes that
                 * are supported...  Note drivers using the timing API
                 * will get this right anyway
                 */
        }

        mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

        if (ata_id_is_cfa(id)) {
                /*
                 * Process compact flash extended modes
                 */
                int pio = id[163] & 0x7;
                int dma = (id[163] >> 3) & 7;

                if (pio)
                        pio_mask |= (1 << 5);
                if (pio > 1)
                        pio_mask |= (1 << 6);
                if (dma)
                        mwdma_mask |= (1 << 3);
                if (dma > 1)
                        mwdma_mask |= (1 << 4);
        }

        udma_mask = 0;
        if (id[ATA_ID_FIELD_VALID] & (1 << 2))
                udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
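
/*
 * Sketch of how the mask from ata_id_xfermask() combines with
 * ata_mode_string() for reporting (all names are from this file; dev
 * assumed to hold valid IDENTIFY data):
 *
 *      unsigned long xfer_mask = ata_id_xfermask(dev->id);
 *      const char *s = ata_mode_string(xfer_mask);   // e.g. "UDMA/100"
 *
 * Only the highest set bit determines the string, per ata_mode_string().
 */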

/**
 * ata_pio_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user's (low level driver's) responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_pio_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_pio_queue_task(struct ata_port *ap, void *data,
                               unsigned long delay)
{
        ap->port_task_data = data;

        /* may fail if ata_port_flush_task() in progress */
        queue_delayed_work(ata_wq, &ap->port_task, delay);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
        DPRINTK("ENTER\n");

        cancel_rearming_delayed_work(&ap->port_task);

        if (ata_msg_ctl(ap))
                ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
        struct completion *waiting = qc->private_data;

        complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's the caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
                              struct ata_taskfile *tf, const u8 *cdb,
                              int dma_dir, struct scatterlist *sgl,
                              unsigned int n_elem, unsigned long timeout)
{
        struct ata_link *link = dev->link;
        struct ata_port *ap = link->ap;
        u8 command = tf->command;
        struct ata_queued_cmd *qc;
        unsigned int tag, preempted_tag;
        u32 preempted_sactive, preempted_qc_active;
        int preempted_nr_active_links;
        DECLARE_COMPLETION_ONSTACK(wait);
        unsigned long flags;
        unsigned int err_mask;
        int rc;

        spin_lock_irqsave(ap->lock, flags);

        /* no internal command while frozen */
        if (ap->pflags & ATA_PFLAG_FROZEN) {
                spin_unlock_irqrestore(ap->lock, flags);
                return AC_ERR_SYSTEM;
        }

        /* initialize internal qc */

        /* XXX: Tag 0 is used for drivers with legacy EH as some
         * drivers choke if any other tag is given.  This breaks
         * ata_tag_internal() test for those drivers.  Don't use new
         * EH stuff without converting to it.
         */
        if (ap->ops->error_handler)
                tag = ATA_TAG_INTERNAL;
        else
                tag = 0;

        if (test_and_set_bit(tag, &ap->qc_allocated))
                BUG();
        qc = __ata_qc_from_tag(ap, tag);

        qc->tag = tag;
        qc->scsicmd = NULL;
        qc->ap = ap;
        qc->dev = dev;
        ata_qc_reinit(qc);

        preempted_tag = link->active_tag;
        preempted_sactive = link->sactive;
        preempted_qc_active = ap->qc_active;
        preempted_nr_active_links = ap->nr_active_links;
        link->active_tag = ATA_TAG_POISON;
        link->sactive = 0;
        ap->qc_active = 0;
        ap->nr_active_links = 0;

        /* prepare & issue qc */
        qc->tf = *tf;
        if (cdb)
                memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
        qc->flags |= ATA_QCFLAG_RESULT_TF;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                unsigned int i, buflen = 0;
                struct scatterlist *sg;

                for_each_sg(sgl, sg, n_elem, i)
                        buflen += sg->length;

                ata_sg_init(qc, sgl, n_elem);
                qc->nbytes = buflen;
        }

        qc->private_data = &wait;
        qc->complete_fn = ata_qc_complete_internal;

        ata_qc_issue(qc);

        spin_unlock_irqrestore(ap->lock, flags);

        if (!timeout)
                timeout = ata_probe_timeout * 1000 / HZ;

        rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

        ata_port_flush_task(ap);

        if (!rc) {
                spin_lock_irqsave(ap->lock, flags);

                /* We're racing with irq here.  If we lose, the
                 * following test prevents us from completing the qc
                 * twice.  If we win, the port is frozen and will be
                 * cleaned up by ->post_internal_cmd().
                 */
                if (qc->flags & ATA_QCFLAG_ACTIVE) {
                        qc->err_mask |= AC_ERR_TIMEOUT;

                        if (ap->ops->error_handler)
                                ata_port_freeze(ap);
                        else
                                ata_qc_complete(qc);

                        if (ata_msg_warn(ap))
                                ata_dev_printk(dev, KERN_WARNING,
                                        "qc timeout (cmd 0x%x)\n", command);
                }

                spin_unlock_irqrestore(ap->lock, flags);
        }

        /* do post_internal_cmd */
        if (ap->ops->post_internal_cmd)
                ap->ops->post_internal_cmd(qc);

        /* perform minimal error analysis */
        if (qc->flags & ATA_QCFLAG_FAILED) {
                if (qc->result_tf.command & (ATA_ERR | ATA_DF))
                        qc->err_mask |= AC_ERR_DEV;

                if (!qc->err_mask)
                        qc->err_mask |= AC_ERR_OTHER;

                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;
        }

        /* finish up */
        spin_lock_irqsave(ap->lock, flags);

        *tf = qc->result_tf;
        err_mask = qc->err_mask;

        ata_qc_free(qc);
        link->active_tag = preempted_tag;
        link->sactive = preempted_sactive;
        ap->qc_active = preempted_qc_active;
        ap->nr_active_links = preempted_nr_active_links;

        /* XXX - Some LLDDs (sata_mv) disable port on command failure.
         * Until those drivers are fixed, we detect the condition
         * here, fail the command with AC_ERR_SYSTEM and reenable the
         * port.
         *
         * Note that this doesn't change any behavior as internal
         * command failure results in disabling the device in the
         * higher layer for LLDDs without new reset/EH callbacks.
         *
         * Kill the following code as soon as those drivers are fixed.
         */
        if (ap->flags & ATA_FLAG_DISABLED) {
                err_mask |= AC_ERR_SYSTEM;
                ata_port_probe(ap);
        }

        spin_unlock_irqrestore(ap->lock, flags);

        return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 * @timeout: Timeout in msecs (0 for default)
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
                           struct ata_taskfile *tf, const u8 *cdb,
                           int dma_dir, void *buf, unsigned int buflen,
                           unsigned long timeout)
{
        struct scatterlist *psg = NULL, sg;
        unsigned int n_elem = 0;

        if (dma_dir != DMA_NONE) {
                WARN_ON(!buf);
                sg_init_one(&sg, buf, buflen);
                psg = &sg;
                n_elem++;
        }

        return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
                                    timeout);
}

/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);

        tf.command = cmd;
        tf.flags |= ATA_TFLAG_DEVICE;
        tf.protocol = ATA_PROT_NODATA;

        return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}
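
/*
 * Sketch of issuing a non-data command through the helpers above
 * (IDLE IMMEDIATE chosen as an illustrative opcode; error handling is
 * only a warning here):
 *
 *      unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_IDLEIMMEDIATE);
 *
 *      if (err_mask)
 *              ata_dev_printk(dev, KERN_WARNING,
 *                             "command failed (err_mask=0x%x)\n", err_mask);
 *
 * Commands needing parameters or data call ata_exec_internal()
 * directly, as ata_read_native_max_address() above does.
 */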
*/ 2057 if (pio > 240) /* PIO2 is 240nS per cycle */ 2058 return 3 << ATA_SHIFT_PIO; 2059 return 7 << ATA_SHIFT_PIO; 2060 } 2061 } 2062 return 3 << ATA_SHIFT_PIO; 2063 } 2064 2065 /** 2066 * ata_dev_read_id - Read ID data from the specified device 2067 * @dev: target device 2068 * @p_class: pointer to class of the target device (may be changed) 2069 * @flags: ATA_READID_* flags 2070 * @id: buffer to read IDENTIFY data into 2071 * 2072 * Read ID data from the specified device. ATA_CMD_ID_ATA is 2073 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI 2074 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS 2075 * for pre-ATA4 drives. 2076 * 2077 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right 2078 * now we abort if we hit that case. 2079 * 2080 * LOCKING: 2081 * Kernel thread context (may sleep) 2082 * 2083 * RETURNS: 2084 * 0 on success, -errno otherwise. 2085 */ 2086 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 2087 unsigned int flags, u16 *id) 2088 { 2089 struct ata_port *ap = dev->link->ap; 2090 unsigned int class = *p_class; 2091 struct ata_taskfile tf; 2092 unsigned int err_mask = 0; 2093 const char *reason; 2094 int may_fallback = 1, tried_spinup = 0; 2095 int rc; 2096 2097 if (ata_msg_ctl(ap)) 2098 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); 2099 2100 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */ 2101 retry: 2102 ata_tf_init(dev, &tf); 2103 2104 switch (class) { 2105 case ATA_DEV_ATA: 2106 tf.command = ATA_CMD_ID_ATA; 2107 break; 2108 case ATA_DEV_ATAPI: 2109 tf.command = ATA_CMD_ID_ATAPI; 2110 break; 2111 default: 2112 rc = -ENODEV; 2113 reason = "unsupported class"; 2114 goto err_out; 2115 } 2116 2117 tf.protocol = ATA_PROT_PIO; 2118 2119 /* Some devices choke if TF registers contain garbage. Make 2120 * sure those are properly initialized. 2121 */ 2122 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2123 2124 /* Device presence detection is unreliable on some 2125 * controllers. Always poll IDENTIFY if available. 2126 */ 2127 tf.flags |= ATA_TFLAG_POLLING; 2128 2129 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE, 2130 id, sizeof(id[0]) * ATA_ID_WORDS, 0); 2131 if (err_mask) { 2132 if (err_mask & AC_ERR_NODEV_HINT) { 2133 ata_dev_printk(dev, KERN_DEBUG, 2134 "NODEV after polling detection\n"); 2135 return -ENOENT; 2136 } 2137 2138 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { 2139 /* Device or controller might have reported 2140 * the wrong device class. Give a shot at the 2141 * other IDENTIFY if the current one is 2142 * aborted by the device. 2143 */ 2144 if (may_fallback) { 2145 may_fallback = 0; 2146 2147 if (class == ATA_DEV_ATA) 2148 class = ATA_DEV_ATAPI; 2149 else 2150 class = ATA_DEV_ATA; 2151 goto retry; 2152 } 2153 2154 /* Control reaches here iff the device aborted 2155 * both flavors of IDENTIFYs which happens 2156 * sometimes with phantom devices. 2157 */ 2158 ata_dev_printk(dev, KERN_DEBUG, 2159 "both IDENTIFYs aborted, assuming NODEV\n"); 2160 return -ENOENT; 2161 } 2162 2163 rc = -EIO; 2164 reason = "I/O error"; 2165 goto err_out; 2166 } 2167 2168 /* Falling back doesn't make sense if ID data was read 2169 * successfully at least once. 
2170 */ 2171 may_fallback = 0; 2172 2173 swap_buf_le16(id, ATA_ID_WORDS); 2174 2175 /* sanity check */ 2176 rc = -EINVAL; 2177 reason = "device reports invalid type"; 2178 2179 if (class == ATA_DEV_ATA) { 2180 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) 2181 goto err_out; 2182 } else { 2183 if (ata_id_is_ata(id)) 2184 goto err_out; 2185 } 2186 2187 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) { 2188 tried_spinup = 1; 2189 /* 2190 * Drive powered-up in standby mode, and requires a specific 2191 * SET_FEATURES spin-up subcommand before it will accept 2192 * anything other than the original IDENTIFY command. 2193 */ 2194 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); 2195 if (err_mask && id[2] != 0x738c) { 2196 rc = -EIO; 2197 reason = "SPINUP failed"; 2198 goto err_out; 2199 } 2200 /* 2201 * If the drive initially returned incomplete IDENTIFY info, 2202 * we now must reissue the IDENTIFY command. 2203 */ 2204 if (id[2] == 0x37c8) 2205 goto retry; 2206 } 2207 2208 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { 2209 /* 2210 * The exact sequence expected by certain pre-ATA4 drives is: 2211 * SRST RESET 2212 * IDENTIFY (optional in early ATA) 2213 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) 2214 * anything else.. 2215 * Some drives were very specific about that exact sequence. 2216 * 2217 * Note that ATA4 says LBA is mandatory, so the second check 2218 * should never trigger. 2219 */ 2220 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 2221 err_mask = ata_dev_init_params(dev, id[3], id[6]); 2222 if (err_mask) { 2223 rc = -EIO; 2224 reason = "INIT_DEV_PARAMS failed"; 2225 goto err_out; 2226 } 2227 2228 /* current CHS translation info (id[53-58]) might be 2229 * changed. Reread the IDENTIFY device info. 2230 */ 2231 flags &= ~ATA_READID_POSTRESET; 2232 goto retry; 2233 } 2234 } 2235 2236 *p_class = class; 2237 2238 return 0; 2239 2240 err_out: 2241 if (ata_msg_warn(ap)) 2242 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY " 2243 "(%s, err_mask=0x%x)\n", reason, err_mask); 2244 return rc; 2245 } 2246 2247 static inline u8 ata_dev_knobble(struct ata_device *dev) 2248 { 2249 struct ata_port *ap = dev->link->ap; 2250 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2251 } 2252 2253 static void ata_dev_config_ncq(struct ata_device *dev, 2254 char *desc, size_t desc_sz) 2255 { 2256 struct ata_port *ap = dev->link->ap; 2257 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2258 2259 if (!ata_id_has_ncq(dev->id)) { 2260 desc[0] = '\0'; 2261 return; 2262 } 2263 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2264 snprintf(desc, desc_sz, "NCQ (not used)"); 2265 return; 2266 } 2267 if (ap->flags & ATA_FLAG_NCQ) { 2268 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2269 dev->flags |= ATA_DFLAG_NCQ; 2270 } 2271 2272 if (hdepth >= ddepth) 2273 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth); 2274 else 2275 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); 2276 } 2277 2278 /** 2279 * ata_dev_configure - Configure the specified ATA/ATAPI device 2280 * @dev: Target device to configure 2281 * 2282 * Configure @dev according to @dev->id. Generic and low-level 2283 * driver specific fixups are also applied.
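* For example, the blacklist consulted through ata_dev_blacklisted() can force NCQ off for known-bad drives, and ap->ops->dev_config() gives the controller driver a final chance to apply its own limits.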
2284 * 2285 * LOCKING: 2286 * Kernel thread context (may sleep) 2287 * 2288 * RETURNS: 2289 * 0 on success, -errno otherwise 2290 */ 2291 int ata_dev_configure(struct ata_device *dev) 2292 { 2293 struct ata_port *ap = dev->link->ap; 2294 struct ata_eh_context *ehc = &dev->link->eh_context; 2295 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2296 const u16 *id = dev->id; 2297 unsigned long xfer_mask; 2298 char revbuf[7]; /* XYZ-99\0 */ 2299 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2300 char modelbuf[ATA_ID_PROD_LEN+1]; 2301 int rc; 2302 2303 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2304 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n", 2305 __func__); 2306 return 0; 2307 } 2308 2309 if (ata_msg_probe(ap)) 2310 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); 2311 2312 /* set horkage */ 2313 dev->horkage |= ata_dev_blacklisted(dev); 2314 ata_force_horkage(dev); 2315 2316 /* let ACPI work its magic */ 2317 rc = ata_acpi_on_devcfg(dev); 2318 if (rc) 2319 return rc; 2320 2321 /* massage HPA, do it early as it might change IDENTIFY data */ 2322 rc = ata_hpa_resize(dev); 2323 if (rc) 2324 return rc; 2325 2326 /* print device capabilities */ 2327 if (ata_msg_probe(ap)) 2328 ata_dev_printk(dev, KERN_DEBUG, 2329 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2330 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2331 __func__, 2332 id[49], id[82], id[83], id[84], 2333 id[85], id[86], id[87], id[88]); 2334 2335 /* initialize to-be-configured parameters */ 2336 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2337 dev->max_sectors = 0; 2338 dev->cdb_len = 0; 2339 dev->n_sectors = 0; 2340 dev->cylinders = 0; 2341 dev->heads = 0; 2342 dev->sectors = 0; 2343 2344 /* 2345 * common ATA, ATAPI feature tests 2346 */ 2347 2348 /* find max transfer mode; for printk only */ 2349 xfer_mask = ata_id_xfermask(id); 2350 2351 if (ata_msg_probe(ap)) 2352 ata_dump_id(id); 2353 2354 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2355 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2356 sizeof(fwrevbuf)); 2357 2358 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2359 sizeof(modelbuf)); 2360 2361 /* ATA-specific feature tests */ 2362 if (dev->class == ATA_DEV_ATA) { 2363 if (ata_id_is_cfa(id)) { 2364 if (id[162] & 1) /* CPRM may make this media unusable */ 2365 ata_dev_printk(dev, KERN_WARNING, 2366 "supports DRM functions and may " 2367 "not be fully accessable.\n"); 2368 snprintf(revbuf, 7, "CFA"); 2369 } else { 2370 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2371 /* Warn the user if the device has TPM extensions */ 2372 if (ata_id_has_tpm(id)) 2373 ata_dev_printk(dev, KERN_WARNING, 2374 "supports DRM functions and may " 2375 "not be fully accessable.\n"); 2376 } 2377 2378 dev->n_sectors = ata_id_n_sectors(id); 2379 2380 if (dev->id[59] & 0x100) 2381 dev->multi_count = dev->id[59] & 0xff; 2382 2383 if (ata_id_has_lba(id)) { 2384 const char *lba_desc; 2385 char ncq_desc[20]; 2386 2387 lba_desc = "LBA"; 2388 dev->flags |= ATA_DFLAG_LBA; 2389 if (ata_id_has_lba48(id)) { 2390 dev->flags |= ATA_DFLAG_LBA48; 2391 lba_desc = "LBA48"; 2392 2393 if (dev->n_sectors >= (1UL << 28) && 2394 ata_id_has_flush_ext(id)) 2395 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2396 } 2397 2398 /* config NCQ */ 2399 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2400 2401 /* print device info to dmesg */ 2402 if (ata_msg_drv(ap) && print_info) { 2403 ata_dev_printk(dev, KERN_INFO, 2404 "%s: %s, %s, max %s\n", 2405 revbuf, modelbuf, fwrevbuf, 2406 ata_mode_string(xfer_mask)); 2407 ata_dev_printk(dev, KERN_INFO, 2408 
"%Lu sectors, multi %u: %s %s\n", 2409 (unsigned long long)dev->n_sectors, 2410 dev->multi_count, lba_desc, ncq_desc); 2411 } 2412 } else { 2413 /* CHS */ 2414 2415 /* Default translation */ 2416 dev->cylinders = id[1]; 2417 dev->heads = id[3]; 2418 dev->sectors = id[6]; 2419 2420 if (ata_id_current_chs_valid(id)) { 2421 /* Current CHS translation is valid. */ 2422 dev->cylinders = id[54]; 2423 dev->heads = id[55]; 2424 dev->sectors = id[56]; 2425 } 2426 2427 /* print device info to dmesg */ 2428 if (ata_msg_drv(ap) && print_info) { 2429 ata_dev_printk(dev, KERN_INFO, 2430 "%s: %s, %s, max %s\n", 2431 revbuf, modelbuf, fwrevbuf, 2432 ata_mode_string(xfer_mask)); 2433 ata_dev_printk(dev, KERN_INFO, 2434 "%Lu sectors, multi %u, CHS %u/%u/%u\n", 2435 (unsigned long long)dev->n_sectors, 2436 dev->multi_count, dev->cylinders, 2437 dev->heads, dev->sectors); 2438 } 2439 } 2440 2441 dev->cdb_len = 16; 2442 } 2443 2444 /* ATAPI-specific feature tests */ 2445 else if (dev->class == ATA_DEV_ATAPI) { 2446 const char *cdb_intr_string = ""; 2447 const char *atapi_an_string = ""; 2448 const char *dma_dir_string = ""; 2449 u32 sntf; 2450 2451 rc = atapi_cdb_len(id); 2452 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2453 if (ata_msg_warn(ap)) 2454 ata_dev_printk(dev, KERN_WARNING, 2455 "unsupported CDB len\n"); 2456 rc = -EINVAL; 2457 goto err_out_nosup; 2458 } 2459 dev->cdb_len = (unsigned int) rc; 2460 2461 /* Enable ATAPI AN if both the host and device have 2462 * the support. If PMP is attached, SNTF is required 2463 * to enable ATAPI AN to discern between PHY status 2464 * changed notifications and ATAPI ANs. 2465 */ 2466 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2467 (!ap->nr_pmp_links || 2468 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2469 unsigned int err_mask; 2470 2471 /* issue SET feature command to turn this on */ 2472 err_mask = ata_dev_set_feature(dev, 2473 SETFEATURES_SATA_ENABLE, SATA_AN); 2474 if (err_mask) 2475 ata_dev_printk(dev, KERN_ERR, 2476 "failed to enable ATAPI AN " 2477 "(err_mask=0x%x)\n", err_mask); 2478 else { 2479 dev->flags |= ATA_DFLAG_AN; 2480 atapi_an_string = ", ATAPI AN"; 2481 } 2482 } 2483 2484 if (ata_id_cdb_intr(dev->id)) { 2485 dev->flags |= ATA_DFLAG_CDB_INTR; 2486 cdb_intr_string = ", CDB intr"; 2487 } 2488 2489 if (atapi_dmadir || atapi_id_dmadir(dev->id)) { 2490 dev->flags |= ATA_DFLAG_DMADIR; 2491 dma_dir_string = ", DMADIR"; 2492 } 2493 2494 /* print device info to dmesg */ 2495 if (ata_msg_drv(ap) && print_info) 2496 ata_dev_printk(dev, KERN_INFO, 2497 "ATAPI: %s, %s, max %s%s%s%s\n", 2498 modelbuf, fwrevbuf, 2499 ata_mode_string(xfer_mask), 2500 cdb_intr_string, atapi_an_string, 2501 dma_dir_string); 2502 } 2503 2504 /* determine max_sectors */ 2505 dev->max_sectors = ATA_MAX_SECTORS; 2506 if (dev->flags & ATA_DFLAG_LBA48) 2507 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2508 2509 if (!(dev->horkage & ATA_HORKAGE_IPM)) { 2510 if (ata_id_has_hipm(dev->id)) 2511 dev->flags |= ATA_DFLAG_HIPM; 2512 if (ata_id_has_dipm(dev->id)) 2513 dev->flags |= ATA_DFLAG_DIPM; 2514 } 2515 2516 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2517 200 sectors */ 2518 if (ata_dev_knobble(dev)) { 2519 if (ata_msg_drv(ap) && print_info) 2520 ata_dev_printk(dev, KERN_INFO, 2521 "applying bridge limits\n"); 2522 dev->udma_mask &= ATA_UDMA5; 2523 dev->max_sectors = ATA_MAX_SECTORS; 2524 } 2525 2526 if ((dev->class == ATA_DEV_ATAPI) && 2527 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2528 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2529 
dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2530 } 2531 2532 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2533 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2534 dev->max_sectors); 2535 2536 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) { 2537 dev->horkage |= ATA_HORKAGE_IPM; 2538 2539 /* reset link pm_policy for this port to no pm */ 2540 ap->pm_policy = MAX_PERFORMANCE; 2541 } 2542 2543 if (ap->ops->dev_config) 2544 ap->ops->dev_config(dev); 2545 2546 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2547 /* Let the user know. We don't want to disallow opens for 2548 rescue purposes, or in case the vendor is just a blithering 2549 idiot. Do this after the dev_config call as some controllers 2550 with buggy firmware may want to avoid reporting false device 2551 bugs */ 2552 2553 if (print_info) { 2554 ata_dev_printk(dev, KERN_WARNING, 2555 "Drive reports diagnostics failure. This may indicate a drive\n"); 2556 ata_dev_printk(dev, KERN_WARNING, 2557 "fault or invalid emulation. Contact drive vendor for information.\n"); 2558 } 2559 } 2560 2561 if (ata_msg_probe(ap)) 2562 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n", 2563 __func__, ata_chk_status(ap)); 2564 return 0; 2565 2566 err_out_nosup: 2567 if (ata_msg_probe(ap)) 2568 ata_dev_printk(dev, KERN_DEBUG, 2569 "%s: EXIT, err\n", __func__); 2570 return rc; 2571 } 2572 2573 /** 2574 * ata_cable_40wire - return 40 wire cable type 2575 * @ap: port 2576 * 2577 * Helper method for drivers which want to hardwire 40 wire cable 2578 * detection. 2579 */ 2580 2581 int ata_cable_40wire(struct ata_port *ap) 2582 { 2583 return ATA_CBL_PATA40; 2584 } 2585 2586 /** 2587 * ata_cable_80wire - return 80 wire cable type 2588 * @ap: port 2589 * 2590 * Helper method for drivers which want to hardwire 80 wire cable 2591 * detection. 2592 */ 2593 2594 int ata_cable_80wire(struct ata_port *ap) 2595 { 2596 return ATA_CBL_PATA80; 2597 } 2598 2599 /** 2600 * ata_cable_unknown - return unknown PATA cable. 2601 * @ap: port 2602 * 2603 * Helper method for drivers which have no PATA cable detection. 2604 */ 2605 2606 int ata_cable_unknown(struct ata_port *ap) 2607 { 2608 return ATA_CBL_PATA_UNK; 2609 } 2610 2611 /** 2612 * ata_cable_ignore - return ignored PATA cable. 2613 * @ap: port 2614 * 2615 * Helper method for drivers which don't use cable type to limit 2616 * transfer mode. 2617 */ 2618 int ata_cable_ignore(struct ata_port *ap) 2619 { 2620 return ATA_CBL_PATA_IGN; 2621 } 2622 2623 /** 2624 * ata_cable_sata - return SATA cable type 2625 * @ap: port 2626 * 2627 * Helper method for drivers which have SATA cables 2628 */ 2629 2630 int ata_cable_sata(struct ata_port *ap) 2631 { 2632 return ATA_CBL_SATA; 2633 } 2634 2635 /** 2636 * ata_bus_probe - Reset and probe ATA bus 2637 * @ap: Bus to probe 2638 * 2639 * Master ATA bus probing function. Initiates a hardware-dependent 2640 * bus reset, then attempts to identify any devices found on 2641 * the bus. 2642 * 2643 * LOCKING: 2644 * PCI/etc. bus probe sem. 2645 * 2646 * RETURNS: 2647 * Zero on success, negative errno otherwise. 2648 */ 2649 2650 int ata_bus_probe(struct ata_port *ap) 2651 { 2652 unsigned int classes[ATA_MAX_DEVICES]; 2653 int tries[ATA_MAX_DEVICES]; 2654 int rc; 2655 struct ata_device *dev; 2656 2657 ata_port_probe(ap); 2658 2659 ata_link_for_each_dev(dev, &ap->link) 2660 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2661 2662 retry: 2663 ata_link_for_each_dev(dev, &ap->link) { 2664 /* If we issue an SRST then an ATA drive (not ATAPI) 2665 * may change configuration and be in PIO0 timing. 
If 2666 * we do a hard reset (or are coming from power on) 2667 * this is true for ATA or ATAPI. Until we've set a 2668 * suitable controller mode we should not touch the 2669 * bus as we may be talking too fast. 2670 */ 2671 dev->pio_mode = XFER_PIO_0; 2672 2673 /* If the controller has a pio mode setup function 2674 * then use it to set the chipset to rights. Don't 2675 * touch the DMA setup as that will be dealt with when 2676 * configuring devices. 2677 */ 2678 if (ap->ops->set_piomode) 2679 ap->ops->set_piomode(ap, dev); 2680 } 2681 2682 /* reset and determine device classes */ 2683 ap->ops->phy_reset(ap); 2684 2685 ata_link_for_each_dev(dev, &ap->link) { 2686 if (!(ap->flags & ATA_FLAG_DISABLED) && 2687 dev->class != ATA_DEV_UNKNOWN) 2688 classes[dev->devno] = dev->class; 2689 else 2690 classes[dev->devno] = ATA_DEV_NONE; 2691 2692 dev->class = ATA_DEV_UNKNOWN; 2693 } 2694 2695 ata_port_probe(ap); 2696 2697 /* read IDENTIFY page and configure devices. We have to do the identify 2698 specific sequence bass-ackwards so that PDIAG- is released by 2699 the slave device */ 2700 2701 ata_link_for_each_dev_reverse(dev, &ap->link) { 2702 if (tries[dev->devno]) 2703 dev->class = classes[dev->devno]; 2704 2705 if (!ata_dev_enabled(dev)) 2706 continue; 2707 2708 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2709 dev->id); 2710 if (rc) 2711 goto fail; 2712 } 2713 2714 /* Now ask for the cable type as PDIAG- should have been released */ 2715 if (ap->ops->cable_detect) 2716 ap->cbl = ap->ops->cable_detect(ap); 2717 2718 /* We may have SATA bridge glue hiding here irrespective of the 2719 reported cable types and sensed types */ 2720 ata_link_for_each_dev(dev, &ap->link) { 2721 if (!ata_dev_enabled(dev)) 2722 continue; 2723 /* SATA drives indicate we have a bridge. We don't know which 2724 end of the link the bridge is on, which is a problem */ 2725 if (ata_id_is_sata(dev->id)) 2726 ap->cbl = ATA_CBL_SATA; 2727 } 2728 2729 /* After the identify sequence we can now set up the devices. We do 2730 this in the normal order so that the user doesn't get confused */ 2731 2732 ata_link_for_each_dev(dev, &ap->link) { 2733 if (!ata_dev_enabled(dev)) 2734 continue; 2735 2736 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2737 rc = ata_dev_configure(dev); 2738 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2739 if (rc) 2740 goto fail; 2741 } 2742 2743 /* configure transfer mode */ 2744 rc = ata_set_mode(&ap->link, &dev); 2745 if (rc) 2746 goto fail; 2747 2748 ata_link_for_each_dev(dev, &ap->link) 2749 if (ata_dev_enabled(dev)) 2750 return 0; 2751 2752 /* no device present, disable port */ 2753 ata_port_disable(ap); 2754 return -ENODEV; 2755 2756 fail: 2757 tries[dev->devno]--; 2758 2759 switch (rc) { 2760 case -EINVAL: 2761 /* eeek, something went very wrong, give up */ 2762 tries[dev->devno] = 0; 2763 break; 2764 2765 case -ENODEV: 2766 /* give it just one more chance */ 2767 tries[dev->devno] = min(tries[dev->devno], 1); /* fall through */ 2768 case -EIO: 2769 if (tries[dev->devno] == 1) { 2770 /* This is the last chance, better to slow 2771 * down than lose it. 2772 */ 2773 sata_down_spd_limit(&ap->link); 2774 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2775 } 2776 } 2777 2778 if (!tries[dev->devno]) 2779 ata_dev_disable(dev); 2780 2781 goto retry; 2782 } 2783 2784 /** 2785 * ata_port_probe - Mark port as enabled 2786 * @ap: Port for which we indicate enablement 2787 * 2788 * Modify @ap data structure such that the system 2789 * thinks that the entire port is enabled.
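* In practice this simply clears ATA_FLAG_DISABLED; ata_port_disable() below is its counterpart.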
2790 * 2791 * LOCKING: host lock, or some other form of 2792 * serialization. 2793 */ 2794 2795 void ata_port_probe(struct ata_port *ap) 2796 { 2797 ap->flags &= ~ATA_FLAG_DISABLED; 2798 } 2799 2800 /** 2801 * sata_print_link_status - Print SATA link status 2802 * @link: SATA link to printk link status about 2803 * 2804 * This function prints link speed and status of a SATA link. 2805 * 2806 * LOCKING: 2807 * None. 2808 */ 2809 void sata_print_link_status(struct ata_link *link) 2810 { 2811 u32 sstatus, scontrol, tmp; 2812 2813 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2814 return; 2815 sata_scr_read(link, SCR_CONTROL, &scontrol); 2816 2817 if (ata_link_online(link)) { 2818 tmp = (sstatus >> 4) & 0xf; 2819 ata_link_printk(link, KERN_INFO, 2820 "SATA link up %s (SStatus %X SControl %X)\n", 2821 sata_spd_string(tmp), sstatus, scontrol); 2822 } else { 2823 ata_link_printk(link, KERN_INFO, 2824 "SATA link down (SStatus %X SControl %X)\n", 2825 sstatus, scontrol); 2826 } 2827 } 2828 2829 /** 2830 * ata_dev_pair - return other device on cable 2831 * @adev: device 2832 * 2833 * Obtain the other device on the same cable, or if none is 2834 * present NULL is returned 2835 */ 2836 2837 struct ata_device *ata_dev_pair(struct ata_device *adev) 2838 { 2839 struct ata_link *link = adev->link; 2840 struct ata_device *pair = &link->device[1 - adev->devno]; 2841 if (!ata_dev_enabled(pair)) 2842 return NULL; 2843 return pair; 2844 } 2845 2846 /** 2847 * ata_port_disable - Disable port. 2848 * @ap: Port to be disabled. 2849 * 2850 * Modify @ap data structure such that the system 2851 * thinks that the entire port is disabled, and should 2852 * never attempt to probe or communicate with devices 2853 * on this port. 2854 * 2855 * LOCKING: host lock, or some other form of 2856 * serialization. 2857 */ 2858 2859 void ata_port_disable(struct ata_port *ap) 2860 { 2861 ap->link.device[0].class = ATA_DEV_NONE; 2862 ap->link.device[1].class = ATA_DEV_NONE; 2863 ap->flags |= ATA_FLAG_DISABLED; 2864 } 2865 2866 /** 2867 * sata_down_spd_limit - adjust SATA spd limit downward 2868 * @link: Link to adjust SATA spd limit for 2869 * 2870 * Adjust SATA spd limit of @link downward. Note that this 2871 * function only adjusts the limit. The change must be applied 2872 * using sata_set_spd(). 2873 * 2874 * LOCKING: 2875 * Inherited from caller. 2876 * 2877 * RETURNS: 2878 * 0 on success, negative errno on failure 2879 */ 2880 int sata_down_spd_limit(struct ata_link *link) 2881 { 2882 u32 sstatus, spd, mask; 2883 int rc, highbit; 2884 2885 if (!sata_scr_valid(link)) 2886 return -EOPNOTSUPP; 2887 2888 /* If SCR can be read, use it to determine the current SPD. 2889 * If not, use cached value in link->sata_spd. 2890 */ 2891 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2892 if (rc == 0) 2893 spd = (sstatus >> 4) & 0xf; 2894 else 2895 spd = link->sata_spd; 2896 2897 mask = link->sata_spd_limit; 2898 if (mask <= 1) 2899 return -EINVAL; 2900 2901 /* unconditionally mask off the highest bit */ 2902 highbit = fls(mask) - 1; 2903 mask &= ~(1 << highbit); 2904 2905 /* Mask off all speeds higher than or equal to the current 2906 * one. Force 1.5Gbps if current SPD is not available. 2907 */ 2908 if (spd > 1) 2909 mask &= (1 << (spd - 1)) - 1; 2910 else 2911 mask &= 1; 2912 2913 /* were we already at the bottom? 
*/ 2914 if (!mask) 2915 return -EINVAL; 2916 2917 link->sata_spd_limit = mask; 2918 2919 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n", 2920 sata_spd_string(fls(mask))); 2921 2922 return 0; 2923 } 2924 2925 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2926 { 2927 struct ata_link *host_link = &link->ap->link; 2928 u32 limit, target, spd; 2929 2930 limit = link->sata_spd_limit; 2931 2932 /* Don't configure downstream link faster than upstream link. 2933 * It doesn't speed up anything and some PMPs choke on such 2934 * configuration. 2935 */ 2936 if (!ata_is_host_link(link) && host_link->sata_spd) 2937 limit &= (1 << host_link->sata_spd) - 1; 2938 2939 if (limit == UINT_MAX) 2940 target = 0; 2941 else 2942 target = fls(limit); 2943 2944 spd = (*scontrol >> 4) & 0xf; 2945 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 2946 2947 return spd != target; 2948 } 2949 2950 /** 2951 * sata_set_spd_needed - is SATA spd configuration needed 2952 * @link: Link in question 2953 * 2954 * Test whether the spd limit in SControl matches 2955 * @link->sata_spd_limit. This function is used to determine 2956 * whether hardreset is necessary to apply SATA spd 2957 * configuration. 2958 * 2959 * LOCKING: 2960 * Inherited from caller. 2961 * 2962 * RETURNS: 2963 * 1 if SATA spd configuration is needed, 0 otherwise. 2964 */ 2965 int sata_set_spd_needed(struct ata_link *link) 2966 { 2967 u32 scontrol; 2968 2969 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2970 return 1; 2971 2972 return __sata_set_spd_needed(link, &scontrol); 2973 } 2974 2975 /** 2976 * sata_set_spd - set SATA spd according to spd limit 2977 * @link: Link to set SATA spd for 2978 * 2979 * Set SATA spd of @link according to sata_spd_limit. 2980 * 2981 * LOCKING: 2982 * Inherited from caller. 2983 * 2984 * RETURNS: 2985 * 0 if spd doesn't need to be changed, 1 if spd has been 2986 * changed. Negative errno if SCR registers are inaccessible. 2987 */ 2988 int sata_set_spd(struct ata_link *link) 2989 { 2990 u32 scontrol; 2991 int rc; 2992 2993 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 2994 return rc; 2995 2996 if (!__sata_set_spd_needed(link, &scontrol)) 2997 return 0; 2998 2999 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3000 return rc; 3001 3002 return 1; 3003 } 3004 3005 /* 3006 * This mode timing computation functionality is ported over from 3007 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 3008 */ 3009 /* 3010 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 3011 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 3012 * for UDMA6, which is currently supported only by Maxtor drives. 3013 * 3014 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
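* * The columns after each mode are, in order, setup, act8b, rec8b, cyc8b, active, recover, cycle and udma, i.e. the fields quantized by ata_timing_quantize() below; all values are in nanoseconds.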
3015 */ 3016 3017 static const struct ata_timing ata_timing[] = { 3018 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */ 3019 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 }, 3020 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 }, 3021 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 }, 3022 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 }, 3023 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 }, 3024 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 }, 3025 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 }, 3026 3027 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 }, 3028 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 }, 3029 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 }, 3030 3031 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 }, 3032 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 }, 3033 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 }, 3034 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 }, 3035 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 }, 3036 3037 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 3038 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 }, 3039 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 }, 3040 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 }, 3041 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 }, 3042 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 }, 3043 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 }, 3044 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 }, 3045 3046 { 0xFF } 3047 }; 3048 3049 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 3050 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 3051 3052 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 3053 { 3054 q->setup = EZ(t->setup * 1000, T); 3055 q->act8b = EZ(t->act8b * 1000, T); 3056 q->rec8b = EZ(t->rec8b * 1000, T); 3057 q->cyc8b = EZ(t->cyc8b * 1000, T); 3058 q->active = EZ(t->active * 1000, T); 3059 q->recover = EZ(t->recover * 1000, T); 3060 q->cycle = EZ(t->cycle * 1000, T); 3061 q->udma = EZ(t->udma * 1000, UT); 3062 } 3063 3064 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 3065 struct ata_timing *m, unsigned int what) 3066 { 3067 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 3068 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 3069 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 3070 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 3071 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 3072 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 3073 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 3074 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 3075 } 3076 3077 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 3078 { 3079 const struct ata_timing *t = ata_timing; 3080 3081 while (xfer_mode > t->mode) 3082 t++; 3083 3084 if (xfer_mode == t->mode) 3085 return t; 3086 return NULL; 3087 } 3088 3089 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 3090 struct ata_timing *t, int T, int UT) 3091 { 3092 const struct ata_timing *s; 3093 struct ata_timing p; 3094 3095 /* 3096 * Find the mode. 3097 */ 3098 3099 if (!(s = ata_timing_find_mode(speed))) 3100 return -EINVAL; 3101 3102 memcpy(t, s, sizeof(*s)); 3103 3104 /* 3105 * If the drive is an EIDE drive, it can tell us it needs extended 3106 * PIO/MW_DMA cycle timing. 
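* For example, a drive may report 383 ns in id[ATA_ID_EIDE_PIO] even though PIO2's nominal cycle is only 240 ns; the merge below then stretches t->cycle and t->cyc8b to honour the drive's slower requirement.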
3107 */ 3108 3109 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 3110 memset(&p, 0, sizeof(p)); 3111 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 3112 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; 3113 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; 3114 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { 3115 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; 3116 } 3117 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3118 } 3119 3120 /* 3121 * Convert the timing to bus clock counts. 3122 */ 3123 3124 ata_timing_quantize(t, t, T, UT); 3125 3126 /* 3127 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3128 * S.M.A.R.T. and some other commands. We have to ensure that the 3129 * DMA cycle timing is slower than or equal to the fastest PIO timing. 3130 */ 3131 3132 if (speed > XFER_PIO_6) { 3133 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3134 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3135 } 3136 3137 /* 3138 * Lengthen active & recovery time so that cycle time is correct. 3139 */ 3140 3141 if (t->act8b + t->rec8b < t->cyc8b) { 3142 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3143 t->rec8b = t->cyc8b - t->act8b; 3144 } 3145 3146 if (t->active + t->recover < t->cycle) { 3147 t->active += (t->cycle - (t->active + t->recover)) / 2; 3148 t->recover = t->cycle - t->active; 3149 } 3150 3151 /* In a few cases quantisation may produce enough errors to 3152 leave t->cycle too low for the sum of active and recovery; 3153 if so we must correct this */ 3154 if (t->active + t->recover > t->cycle) 3155 t->cycle = t->active + t->recover; 3156 3157 return 0; 3158 } 3159 3160 /** 3161 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3162 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3163 * @cycle: cycle duration in ns 3164 * 3165 * Return matching xfer mode for @cycle. The returned mode is of 3166 * the transfer type specified by @xfer_shift. If @cycle is too 3167 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3168 * than the fastest known mode, the fastest mode is returned. 3169 * 3170 * LOCKING: 3171 * None. 3172 * 3173 * RETURNS: 3174 * Matching xfer_mode, 0xff if no match found. 3175 */ 3176 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3177 { 3178 u8 base_mode = 0xff, last_mode = 0xff; 3179 const struct ata_xfer_ent *ent; 3180 const struct ata_timing *t; 3181 3182 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3183 if (ent->shift == xfer_shift) 3184 base_mode = ent->base; 3185 3186 for (t = ata_timing_find_mode(base_mode); 3187 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3188 unsigned short this_cycle; 3189 3190 switch (xfer_shift) { 3191 case ATA_SHIFT_PIO: 3192 case ATA_SHIFT_MWDMA: 3193 this_cycle = t->cycle; 3194 break; 3195 case ATA_SHIFT_UDMA: 3196 this_cycle = t->udma; 3197 break; 3198 default: 3199 return 0xff; 3200 } 3201 3202 if (cycle > this_cycle) 3203 break; 3204 3205 last_mode = t->mode; 3206 } 3207 3208 return last_mode; 3209 } 3210 3211 /** 3212 * ata_down_xfermask_limit - adjust dev xfer masks downward 3213 * @dev: Device to adjust xfer masks 3214 * @sel: ATA_DNXFER_* selector 3215 * 3216 * Adjust xfer masks of @dev downward. Note that this function 3217 * does not apply the change. Invoking ata_set_mode() afterwards 3218 * will apply the limit. 3219 * 3220 * LOCKING: 3221 * Inherited from caller.
3222 * 3223 * RETURNS: 3224 * 0 on success, negative errno on failure 3225 */ 3226 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3227 { 3228 char buf[32]; 3229 unsigned long orig_mask, xfer_mask; 3230 unsigned long pio_mask, mwdma_mask, udma_mask; 3231 int quiet, highbit; 3232 3233 quiet = !!(sel & ATA_DNXFER_QUIET); 3234 sel &= ~ATA_DNXFER_QUIET; 3235 3236 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3237 dev->mwdma_mask, 3238 dev->udma_mask); 3239 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3240 3241 switch (sel) { 3242 case ATA_DNXFER_PIO: 3243 highbit = fls(pio_mask) - 1; 3244 pio_mask &= ~(1 << highbit); 3245 break; 3246 3247 case ATA_DNXFER_DMA: 3248 if (udma_mask) { 3249 highbit = fls(udma_mask) - 1; 3250 udma_mask &= ~(1 << highbit); 3251 if (!udma_mask) 3252 return -ENOENT; 3253 } else if (mwdma_mask) { 3254 highbit = fls(mwdma_mask) - 1; 3255 mwdma_mask &= ~(1 << highbit); 3256 if (!mwdma_mask) 3257 return -ENOENT; 3258 } 3259 break; 3260 3261 case ATA_DNXFER_40C: 3262 udma_mask &= ATA_UDMA_MASK_40C; 3263 break; 3264 3265 case ATA_DNXFER_FORCE_PIO0: 3266 pio_mask &= 1; 3267 case ATA_DNXFER_FORCE_PIO: 3268 mwdma_mask = 0; 3269 udma_mask = 0; 3270 break; 3271 3272 default: 3273 BUG(); 3274 } 3275 3276 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3277 3278 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3279 return -ENOENT; 3280 3281 if (!quiet) { 3282 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3283 snprintf(buf, sizeof(buf), "%s:%s", 3284 ata_mode_string(xfer_mask), 3285 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3286 else 3287 snprintf(buf, sizeof(buf), "%s", 3288 ata_mode_string(xfer_mask)); 3289 3290 ata_dev_printk(dev, KERN_WARNING, 3291 "limiting speed to %s\n", buf); 3292 } 3293 3294 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3295 &dev->udma_mask); 3296 3297 return 0; 3298 } 3299 3300 static int ata_dev_set_mode(struct ata_device *dev) 3301 { 3302 struct ata_eh_context *ehc = &dev->link->eh_context; 3303 const char *dev_err_whine = ""; 3304 int ign_dev_err = 0; 3305 unsigned int err_mask; 3306 int rc; 3307 3308 dev->flags &= ~ATA_DFLAG_PIO; 3309 if (dev->xfer_shift == ATA_SHIFT_PIO) 3310 dev->flags |= ATA_DFLAG_PIO; 3311 3312 err_mask = ata_dev_set_xfermode(dev); 3313 3314 if (err_mask & ~AC_ERR_DEV) 3315 goto fail; 3316 3317 /* revalidate */ 3318 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3319 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3320 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3321 if (rc) 3322 return rc; 3323 3324 /* Old CFA may refuse this command, which is just fine */ 3325 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id)) 3326 ign_dev_err = 1; 3327 3328 /* Some very old devices and some bad newer ones fail any kind of 3329 SET_XFERMODE request but support PIO0-2 timings and no IORDY */ 3330 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) && 3331 dev->pio_mode <= XFER_PIO_2) 3332 ign_dev_err = 1; 3333 3334 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 
3335 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3336 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3337 dev->dma_mode == XFER_MW_DMA_0 && 3338 (dev->id[63] >> 8) & 1) 3339 ign_dev_err = 1; 3340 3341 /* if the device is actually configured correctly, ignore dev err */ 3342 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3343 ign_dev_err = 1; 3344 3345 if (err_mask & AC_ERR_DEV) { 3346 if (!ign_dev_err) 3347 goto fail; 3348 else 3349 dev_err_whine = " (device error ignored)"; 3350 } 3351 3352 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3353 dev->xfer_shift, (int)dev->xfer_mode); 3354 3355 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n", 3356 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3357 dev_err_whine); 3358 3359 return 0; 3360 3361 fail: 3362 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 3363 "(err_mask=0x%x)\n", err_mask); 3364 return -EIO; 3365 } 3366 3367 /** 3368 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3369 * @link: link on which timings will be programmed 3370 * @r_failed_dev: out parameter for failed device 3371 * 3372 * Standard implementation of the function used to tune and set 3373 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3374 * ata_dev_set_mode() fails, pointer to the failing device is 3375 * returned in @r_failed_dev. 3376 * 3377 * LOCKING: 3378 * PCI/etc. bus probe sem. 3379 * 3380 * RETURNS: 3381 * 0 on success, negative errno otherwise 3382 */ 3383 3384 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3385 { 3386 struct ata_port *ap = link->ap; 3387 struct ata_device *dev; 3388 int rc = 0, used_dma = 0, found = 0; 3389 3390 /* step 1: calculate xfer_mask */ 3391 ata_link_for_each_dev(dev, link) { 3392 unsigned long pio_mask, dma_mask; 3393 unsigned int mode_mask; 3394 3395 if (!ata_dev_enabled(dev)) 3396 continue; 3397 3398 mode_mask = ATA_DMA_MASK_ATA; 3399 if (dev->class == ATA_DEV_ATAPI) 3400 mode_mask = ATA_DMA_MASK_ATAPI; 3401 else if (ata_id_is_cfa(dev->id)) 3402 mode_mask = ATA_DMA_MASK_CFA; 3403 3404 ata_dev_xfermask(dev); 3405 ata_force_xfermask(dev); 3406 3407 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3408 3409 3410 if (libata_dma_mask & mode_mask) 3411 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 3412 else 3413 dma_mask = 0; 3414 3415 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3416 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3417 3418 found = 1; 3419 if (dev->dma_mode != 0xff) 3420 used_dma = 1; 3421 } 3422 if (!found) 3423 goto out; 3424 3425 /* step 2: always set host PIO timings */ 3426 ata_link_for_each_dev(dev, link) { 3427 if (!ata_dev_enabled(dev)) 3428 continue; 3429 3430 if (dev->pio_mode == 0xff) { 3431 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n"); 3432 rc = -EINVAL; 3433 goto out; 3434 } 3435 3436 dev->xfer_mode = dev->pio_mode; 3437 dev->xfer_shift = ATA_SHIFT_PIO; 3438 if (ap->ops->set_piomode) 3439 ap->ops->set_piomode(ap, dev); 3440 } 3441 3442 /* step 3: set host DMA timings */ 3443 ata_link_for_each_dev(dev, link) { 3444 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff) 3445 continue; 3446 3447 dev->xfer_mode = dev->dma_mode; 3448 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3449 if (ap->ops->set_dmamode) 3450 ap->ops->set_dmamode(ap, dev); 3451 } 3452 3453 /* step 4: update devices' xfer mode */ 3454 ata_link_for_each_dev(dev, link) { 3455 /* don't update suspended devices' xfer mode */ 3456
if (!ata_dev_enabled(dev)) 3457 continue; 3458 3459 rc = ata_dev_set_mode(dev); 3460 if (rc) 3461 goto out; 3462 } 3463 3464 /* Record simplex status. If we selected DMA then the other 3465 * host channels are not permitted to do so. 3466 */ 3467 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3468 ap->host->simplex_claimed = ap; 3469 3470 out: 3471 if (rc) 3472 *r_failed_dev = dev; 3473 return rc; 3474 } 3475 3476 /** 3477 * ata_tf_to_host - issue ATA taskfile to host controller 3478 * @ap: port to which command is being issued 3479 * @tf: ATA taskfile register set 3480 * 3481 * Issues ATA taskfile register set to ATA host controller, 3482 * with proper synchronization with interrupt handler and 3483 * other threads. 3484 * 3485 * LOCKING: 3486 * spin_lock_irqsave(host lock) 3487 */ 3488 3489 static inline void ata_tf_to_host(struct ata_port *ap, 3490 const struct ata_taskfile *tf) 3491 { 3492 ap->ops->tf_load(ap, tf); 3493 ap->ops->exec_command(ap, tf); 3494 } 3495 3496 /** 3497 * ata_busy_sleep - sleep until BSY clears, or timeout 3498 * @ap: port containing status register to be polled 3499 * @tmout_pat: impatience timeout 3500 * @tmout: overall timeout 3501 * 3502 * Sleep until ATA Status register bit BSY clears, 3503 * or a timeout occurs. 3504 * 3505 * LOCKING: 3506 * Kernel thread context (may sleep). 3507 * 3508 * RETURNS: 3509 * 0 on success, -errno otherwise. 3510 */ 3511 int ata_busy_sleep(struct ata_port *ap, 3512 unsigned long tmout_pat, unsigned long tmout) 3513 { 3514 unsigned long timer_start, timeout; 3515 u8 status; 3516 3517 status = ata_busy_wait(ap, ATA_BUSY, 300); 3518 timer_start = jiffies; 3519 timeout = timer_start + tmout_pat; 3520 while (status != 0xff && (status & ATA_BUSY) && 3521 time_before(jiffies, timeout)) { 3522 msleep(50); 3523 status = ata_busy_wait(ap, ATA_BUSY, 3); 3524 } 3525 3526 if (status != 0xff && (status & ATA_BUSY)) 3527 ata_port_printk(ap, KERN_WARNING, 3528 "port is slow to respond, please be patient " 3529 "(Status 0x%x)\n", status); 3530 3531 timeout = timer_start + tmout; 3532 while (status != 0xff && (status & ATA_BUSY) && 3533 time_before(jiffies, timeout)) { 3534 msleep(50); 3535 status = ata_chk_status(ap); 3536 } 3537 3538 if (status == 0xff) 3539 return -ENODEV; 3540 3541 if (status & ATA_BUSY) { 3542 ata_port_printk(ap, KERN_ERR, "port failed to respond " 3543 "(%lu secs, Status 0x%x)\n", 3544 tmout / HZ, status); 3545 return -EBUSY; 3546 } 3547 3548 return 0; 3549 } 3550 3551 /** 3552 * ata_wait_after_reset - wait before checking status after reset 3553 * @ap: port containing status register to be polled 3554 * @deadline: deadline jiffies for the operation 3555 * 3556 * After reset, we need to pause a while before reading status. 3557 * Also, certain combination of controller and device report 0xff 3558 * for some duration (e.g. until SATA PHY is up and running) 3559 * which is interpreted as empty port in ATA world. This 3560 * function also waits for such devices to get out of 0xff 3561 * status. 3562 * 3563 * LOCKING: 3564 * Kernel thread context (may sleep). 3565 */ 3566 void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline) 3567 { 3568 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT; 3569 3570 if (time_before(until, deadline)) 3571 deadline = until; 3572 3573 /* Spec mandates ">= 2ms" before checking status. 
We wait 3574 * 150ms, because that was the magic delay used for ATAPI 3575 * devices in Hale Landis's ATADRVR, for the period of time 3576 * between when the ATA command register is written, and then 3577 * status is checked. Because waiting for "a while" before 3578 * checking status is fine, post SRST, we perform this magic 3579 * delay here as well. 3580 * 3581 * Old drivers/ide uses the 2ms rule and then waits for ready. 3582 */ 3583 msleep(150); 3584 3585 /* Wait for 0xff to clear. Some SATA devices take a long time 3586 * to clear 0xff after reset. For example, HHD424020F7SV00 3587 * iVDR needs >= 800ms while Quantum GoVault needs even more 3588 * than that. 3589 * 3590 * Note that some PATA controllers (pata_ali) explode if 3591 * status register is read more than once when there's no 3592 * device attached. 3593 */ 3594 if (ap->flags & ATA_FLAG_SATA) { 3595 while (1) { 3596 u8 status = ata_chk_status(ap); 3597 3598 if (status != 0xff || time_after(jiffies, deadline)) 3599 return; 3600 3601 msleep(50); 3602 } 3603 } 3604 } 3605 3606 /** 3607 * ata_wait_ready - sleep until BSY clears, or timeout 3608 * @ap: port containing status register to be polled 3609 * @deadline: deadline jiffies for the operation 3610 * 3611 * Sleep until ATA Status register bit BSY clears, or timeout 3612 * occurs. 3613 * 3614 * LOCKING: 3615 * Kernel thread context (may sleep). 3616 * 3617 * RETURNS: 3618 * 0 on success, -errno otherwise. 3619 */ 3620 int ata_wait_ready(struct ata_port *ap, unsigned long deadline) 3621 { 3622 unsigned long start = jiffies; 3623 int warned = 0; 3624 3625 while (1) { 3626 u8 status = ata_chk_status(ap); 3627 unsigned long now = jiffies; 3628 3629 if (!(status & ATA_BUSY)) 3630 return 0; 3631 if (!ata_link_online(&ap->link) && status == 0xff) 3632 return -ENODEV; 3633 if (time_after(now, deadline)) 3634 return -EBUSY; 3635 3636 if (!warned && time_after(now, start + 5 * HZ) && 3637 (deadline - now > 3 * HZ)) { 3638 ata_port_printk(ap, KERN_WARNING, 3639 "port is slow to respond, please be patient " 3640 "(Status 0x%x)\n", status); 3641 warned = 1; 3642 } 3643 3644 msleep(50); 3645 } 3646 } 3647 3648 static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask, 3649 unsigned long deadline) 3650 { 3651 struct ata_ioports *ioaddr = &ap->ioaddr; 3652 unsigned int dev0 = devmask & (1 << 0); 3653 unsigned int dev1 = devmask & (1 << 1); 3654 int rc, ret = 0; 3655 3656 /* if device 0 was found in ata_devchk, wait for its 3657 * BSY bit to clear 3658 */ 3659 if (dev0) { 3660 rc = ata_wait_ready(ap, deadline); 3661 if (rc) { 3662 if (rc != -ENODEV) 3663 return rc; 3664 ret = rc; 3665 } 3666 } 3667 3668 /* if device 1 was found in ata_devchk, wait for register 3669 * access briefly, then wait for BSY to clear. 3670 */ 3671 if (dev1) { 3672 int i; 3673 3674 ap->ops->dev_select(ap, 1); 3675 3676 /* Wait for register access. Some ATAPI devices fail 3677 * to set nsect/lbal after reset, so don't waste too 3678 * much time on it. We're gonna wait for !BSY anyway. 3679 */ 3680 for (i = 0; i < 2; i++) { 3681 u8 nsect, lbal; 3682 3683 nsect = ioread8(ioaddr->nsect_addr); 3684 lbal = ioread8(ioaddr->lbal_addr); 3685 if ((nsect == 1) && (lbal == 1)) 3686 break; 3687 msleep(50); /* give drive a breather */ 3688 } 3689 3690 rc = ata_wait_ready(ap, deadline); 3691 if (rc) { 3692 if (rc != -ENODEV) 3693 return rc; 3694 ret = rc; 3695 } 3696 } 3697 3698 /* is all this really necessary?
*/ 3699 ap->ops->dev_select(ap, 0); 3700 if (dev1) 3701 ap->ops->dev_select(ap, 1); 3702 if (dev0) 3703 ap->ops->dev_select(ap, 0); 3704 3705 return ret; 3706 } 3707 3708 static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask, 3709 unsigned long deadline) 3710 { 3711 struct ata_ioports *ioaddr = &ap->ioaddr; 3712 3713 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id); 3714 3715 /* software reset. causes dev0 to be selected */ 3716 iowrite8(ap->ctl, ioaddr->ctl_addr); 3717 udelay(20); /* FIXME: flush */ 3718 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr); 3719 udelay(20); /* FIXME: flush */ 3720 iowrite8(ap->ctl, ioaddr->ctl_addr); 3721 3722 /* wait a while before checking status */ 3723 ata_wait_after_reset(ap, deadline); 3724 3725 /* Before we perform post reset processing we want to see if 3726 * the bus shows 0xFF because the odd clown forgets the D7 3727 * pulldown resistor. 3728 */ 3729 if (ata_chk_status(ap) == 0xFF) 3730 return -ENODEV; 3731 3732 return ata_bus_post_reset(ap, devmask, deadline); 3733 } 3734 3735 /** 3736 * ata_bus_reset - reset host port and associated ATA channel 3737 * @ap: port to reset 3738 * 3739 * This is typically the first time we actually start issuing 3740 * commands to the ATA channel. We wait for BSY to clear, then 3741 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its 3742 * result. Determine what devices, if any, are on the channel 3743 * by looking at the device 0/1 error register. Look at the signature 3744 * stored in each device's taskfile registers, to determine if 3745 * the device is ATA or ATAPI. 3746 * 3747 * LOCKING: 3748 * PCI/etc. bus probe sem. 3749 * Obtains host lock. 3750 * 3751 * SIDE EFFECTS: 3752 * Sets ATA_FLAG_DISABLED if bus reset fails. 3753 */ 3754 3755 void ata_bus_reset(struct ata_port *ap) 3756 { 3757 struct ata_device *device = ap->link.device; 3758 struct ata_ioports *ioaddr = &ap->ioaddr; 3759 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 3760 u8 err; 3761 unsigned int dev0, dev1 = 0, devmask = 0; 3762 int rc; 3763 3764 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no); 3765 3766 /* determine if device 0/1 are present */ 3767 if (ap->flags & ATA_FLAG_SATA_RESET) 3768 dev0 = 1; 3769 else { 3770 dev0 = ata_devchk(ap, 0); 3771 if (slave_possible) 3772 dev1 = ata_devchk(ap, 1); 3773 } 3774 3775 if (dev0) 3776 devmask |= (1 << 0); 3777 if (dev1) 3778 devmask |= (1 << 1); 3779 3780 /* select device 0 again */ 3781 ap->ops->dev_select(ap, 0); 3782 3783 /* issue bus reset */ 3784 if (ap->flags & ATA_FLAG_SRST) { 3785 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ); 3786 if (rc && rc != -ENODEV) 3787 goto err_out; 3788 } 3789 3790 /* 3791 * determine by signature whether we have ATA or ATAPI devices 3792 */ 3793 device[0].class = ata_dev_try_classify(&device[0], dev0, &err); 3794 if ((slave_possible) && (err != 0x81)) 3795 device[1].class = ata_dev_try_classify(&device[1], dev1, &err); 3796 3797 /* is double-select really necessary? 
*/ 3798 if (device[1].class != ATA_DEV_NONE) 3799 ap->ops->dev_select(ap, 1); 3800 if (device[0].class != ATA_DEV_NONE) 3801 ap->ops->dev_select(ap, 0); 3802 3803 /* if no devices were detected, disable this port */ 3804 if ((device[0].class == ATA_DEV_NONE) && 3805 (device[1].class == ATA_DEV_NONE)) 3806 goto err_out; 3807 3808 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) { 3809 /* set up device control for ATA_FLAG_SATA_RESET */ 3810 iowrite8(ap->ctl, ioaddr->ctl_addr); 3811 } 3812 3813 DPRINTK("EXIT\n"); 3814 return; 3815 3816 err_out: 3817 ata_port_printk(ap, KERN_ERR, "disabling port\n"); 3818 ata_port_disable(ap); 3819 3820 DPRINTK("EXIT\n"); 3821 } 3822 3823 /** 3824 * sata_link_debounce - debounce SATA phy status 3825 * @link: ATA link to debounce SATA phy status for 3826 * @params: timing parameters { interval, duration, timeout } in msec 3827 * @deadline: deadline jiffies for the operation 3828 * 3829 * Make sure SStatus of @link reaches stable state, determined by 3830 * holding the same value where DET is not 1 for @duration polled 3831 * every @interval, before @timeout. @timeout constrains the 3832 * beginning of the stable state. Because DET gets stuck at 1 on 3833 * some controllers after hot unplugging, this function waits 3834 * until timeout and then returns 0 if DET is stable at 1. 3835 * 3836 * @timeout is further limited by @deadline. The sooner of the 3837 * two is used. 3838 * 3839 * LOCKING: 3840 * Kernel thread context (may sleep) 3841 * 3842 * RETURNS: 3843 * 0 on success, -errno on failure. 3844 */ 3845 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3846 unsigned long deadline) 3847 { 3848 unsigned long interval_msec = params[0]; 3849 unsigned long duration = msecs_to_jiffies(params[1]); 3850 unsigned long last_jiffies, t; 3851 u32 last, cur; 3852 int rc; 3853 3854 t = jiffies + msecs_to_jiffies(params[2]); 3855 if (time_before(t, deadline)) 3856 deadline = t; 3857 3858 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3859 return rc; 3860 cur &= 0xf; 3861 3862 last = cur; 3863 last_jiffies = jiffies; 3864 3865 while (1) { 3866 msleep(interval_msec); 3867 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3868 return rc; 3869 cur &= 0xf; 3870 3871 /* DET stable? */ 3872 if (cur == last) { 3873 if (cur == 1 && time_before(jiffies, deadline)) 3874 continue; 3875 if (time_after(jiffies, last_jiffies + duration)) 3876 return 0; 3877 continue; 3878 } 3879 3880 /* unstable, start over */ 3881 last = cur; 3882 last_jiffies = jiffies; 3883 3884 /* Check deadline. If debouncing failed, return 3885 * -EPIPE to tell upper layer to lower link speed. 3886 */ 3887 if (time_after(jiffies, deadline)) 3888 return -EPIPE; 3889 } 3890 } 3891 3892 /** 3893 * sata_link_resume - resume SATA link 3894 * @link: ATA link to resume SATA 3895 * @params: timing parameters { interval, duration, timeout } in msec 3896 * @deadline: deadline jiffies for the operation 3897 * 3898 * Resume SATA phy @link and debounce it. 3899 * 3900 * LOCKING: 3901 * Kernel thread context (may sleep) 3902 * 3903 * RETURNS: 3904 * 0 on success, -errno on failure.
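* * Illustrative call, not taken from this file: a reset path might use sata_link_resume(link, sata_deb_timing_normal, jiffies + msecs_to_jiffies(5000)); note that the function itself already sleeps 200ms before debouncing starts.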
3905 */ 3906 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3907 unsigned long deadline) 3908 { 3909 u32 scontrol; 3910 int rc; 3911 3912 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3913 return rc; 3914 3915 scontrol = (scontrol & 0x0f0) | 0x300; 3916 3917 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3918 return rc; 3919 3920 /* Some PHYs react badly if SStatus is pounded immediately 3921 * after resuming. Delay 200ms before debouncing. 3922 */ 3923 msleep(200); 3924 3925 return sata_link_debounce(link, params, deadline); 3926 } 3927 3928 /** 3929 * ata_std_prereset - prepare for reset 3930 * @link: ATA link to be reset 3931 * @deadline: deadline jiffies for the operation 3932 * 3933 * @link is about to be reset. Initialize it. Failure from 3934 * prereset makes libata abort whole reset sequence and give up 3935 * that port, so prereset should be best-effort. It does its 3936 * best to prepare for reset sequence but if things go wrong, it 3937 * should just whine, not fail. 3938 * 3939 * LOCKING: 3940 * Kernel thread context (may sleep) 3941 * 3942 * RETURNS: 3943 * 0 on success, -errno otherwise. 3944 */ 3945 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 3946 { 3947 struct ata_port *ap = link->ap; 3948 struct ata_eh_context *ehc = &link->eh_context; 3949 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3950 int rc; 3951 3952 /* handle link resume */ 3953 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) && 3954 (link->flags & ATA_LFLAG_HRST_TO_RESUME)) 3955 ehc->i.action |= ATA_EH_HARDRESET; 3956 3957 /* Some PMPs don't work with only SRST, force hardreset if PMP 3958 * is supported. 3959 */ 3960 if (ap->flags & ATA_FLAG_PMP) 3961 ehc->i.action |= ATA_EH_HARDRESET; 3962 3963 /* if we're about to do hardreset, nothing more to do */ 3964 if (ehc->i.action & ATA_EH_HARDRESET) 3965 return 0; 3966 3967 /* if SATA, resume link */ 3968 if (ap->flags & ATA_FLAG_SATA) { 3969 rc = sata_link_resume(link, timing, deadline); 3970 /* whine about phy resume failure but proceed */ 3971 if (rc && rc != -EOPNOTSUPP) 3972 ata_link_printk(link, KERN_WARNING, "failed to resume " 3973 "link for reset (errno=%d)\n", rc); 3974 } 3975 3976 /* Wait for !BSY if the controller can wait for the first D2H 3977 * Reg FIS and we don't know that no device is attached. 3978 */ 3979 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) { 3980 rc = ata_wait_ready(ap, deadline); 3981 if (rc && rc != -ENODEV) { 3982 ata_link_printk(link, KERN_WARNING, "device not ready " 3983 "(errno=%d), forcing hardreset\n", rc); 3984 ehc->i.action |= ATA_EH_HARDRESET; 3985 } 3986 } 3987 3988 return 0; 3989 } 3990 3991 /** 3992 * ata_std_softreset - reset host port via ATA SRST 3993 * @link: ATA link to reset 3994 * @classes: resulting classes of attached devices 3995 * @deadline: deadline jiffies for the operation 3996 * 3997 * Reset host port using ATA SRST. 3998 * 3999 * LOCKING: 4000 * Kernel thread context (may sleep) 4001 * 4002 * RETURNS: 4003 * 0 on success, -errno otherwise. 
4004 */ 4005 int ata_std_softreset(struct ata_link *link, unsigned int *classes, 4006 unsigned long deadline) 4007 { 4008 struct ata_port *ap = link->ap; 4009 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; 4010 unsigned int devmask = 0; 4011 int rc; 4012 u8 err; 4013 4014 DPRINTK("ENTER\n"); 4015 4016 if (ata_link_offline(link)) { 4017 classes[0] = ATA_DEV_NONE; 4018 goto out; 4019 } 4020 4021 /* determine if device 0/1 are present */ 4022 if (ata_devchk(ap, 0)) 4023 devmask |= (1 << 0); 4024 if (slave_possible && ata_devchk(ap, 1)) 4025 devmask |= (1 << 1); 4026 4027 /* select device 0 again */ 4028 ap->ops->dev_select(ap, 0); 4029 4030 /* issue bus reset */ 4031 DPRINTK("about to softreset, devmask=%x\n", devmask); 4032 rc = ata_bus_softreset(ap, devmask, deadline); 4033 /* if link is occupied, -ENODEV too is an error */ 4034 if (rc && (rc != -ENODEV || sata_scr_valid(link))) { 4035 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc); 4036 return rc; 4037 } 4038 4039 /* determine by signature whether we have ATA or ATAPI devices */ 4040 classes[0] = ata_dev_try_classify(&link->device[0], 4041 devmask & (1 << 0), &err); 4042 if (slave_possible && err != 0x81) 4043 classes[1] = ata_dev_try_classify(&link->device[1], 4044 devmask & (1 << 1), &err); 4045 4046 out: 4047 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]); 4048 return 0; 4049 } 4050 4051 /** 4052 * sata_link_hardreset - reset link via SATA phy reset 4053 * @link: link to reset 4054 * @timing: timing parameters { interval, duratinon, timeout } in msec 4055 * @deadline: deadline jiffies for the operation 4056 * 4057 * SATA phy-reset @link using DET bits of SControl register. 4058 * 4059 * LOCKING: 4060 * Kernel thread context (may sleep) 4061 * 4062 * RETURNS: 4063 * 0 on success, -errno otherwise. 4064 */ 4065 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 4066 unsigned long deadline) 4067 { 4068 u32 scontrol; 4069 int rc; 4070 4071 DPRINTK("ENTER\n"); 4072 4073 if (sata_set_spd_needed(link)) { 4074 /* SATA spec says nothing about how to reconfigure 4075 * spd. To be on the safe side, turn off phy during 4076 * reconfiguration. This works for at least ICH7 AHCI 4077 * and Sil3124. 4078 */ 4079 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 4080 goto out; 4081 4082 scontrol = (scontrol & 0x0f0) | 0x304; 4083 4084 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 4085 goto out; 4086 4087 sata_set_spd(link); 4088 } 4089 4090 /* issue phy wake/reset */ 4091 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 4092 goto out; 4093 4094 scontrol = (scontrol & 0x0f0) | 0x301; 4095 4096 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 4097 goto out; 4098 4099 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 4100 * 10.4.2 says at least 1 ms. 4101 */ 4102 msleep(1); 4103 4104 /* bring link back */ 4105 rc = sata_link_resume(link, timing, deadline); 4106 out: 4107 DPRINTK("EXIT, rc=%d\n", rc); 4108 return rc; 4109 } 4110 4111 /** 4112 * sata_std_hardreset - reset host port via SATA phy reset 4113 * @link: link to reset 4114 * @class: resulting class of attached device 4115 * @deadline: deadline jiffies for the operation 4116 * 4117 * SATA phy-reset host port using DET bits of SControl register, 4118 * wait for !BSY and classify the attached device. 4119 * 4120 * LOCKING: 4121 * Kernel thread context (may sleep) 4122 * 4123 * RETURNS: 4124 * 0 on success, -errno otherwise. 
4125 */ 4126 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 4127 unsigned long deadline) 4128 { 4129 struct ata_port *ap = link->ap; 4130 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 4131 int rc; 4132 4133 DPRINTK("ENTER\n"); 4134 4135 /* do hardreset */ 4136 rc = sata_link_hardreset(link, timing, deadline); 4137 if (rc) { 4138 ata_link_printk(link, KERN_ERR, 4139 "COMRESET failed (errno=%d)\n", rc); 4140 return rc; 4141 } 4142 4143 /* TODO: phy layer with polling, timeouts, etc. */ 4144 if (ata_link_offline(link)) { 4145 *class = ATA_DEV_NONE; 4146 DPRINTK("EXIT, link offline\n"); 4147 return 0; 4148 } 4149 4150 /* wait a while before checking status */ 4151 ata_wait_after_reset(ap, deadline); 4152 4153 /* If PMP is supported, we have to do follow-up SRST. Note 4154 * that some PMPs don't send D2H Reg FIS after hardreset at 4155 * all if the first port is empty. Wait for it just for a 4156 * second and request follow-up SRST. 4157 */ 4158 if (ap->flags & ATA_FLAG_PMP) { 4159 ata_wait_ready(ap, jiffies + HZ); 4160 return -EAGAIN; 4161 } 4162 4163 rc = ata_wait_ready(ap, deadline); 4164 /* link occupied, -ENODEV too is an error */ 4165 if (rc) { 4166 ata_link_printk(link, KERN_ERR, 4167 "COMRESET failed (errno=%d)\n", rc); 4168 return rc; 4169 } 4170 4171 ap->ops->dev_select(ap, 0); /* probably unnecessary */ 4172 4173 *class = ata_dev_try_classify(link->device, 1, NULL); 4174 4175 DPRINTK("EXIT, class=%u\n", *class); 4176 return 0; 4177 } 4178 4179 /** 4180 * ata_std_postreset - standard postreset callback 4181 * @link: the target ata_link 4182 * @classes: classes of attached devices 4183 * 4184 * This function is invoked after a successful reset. Note that 4185 * the device might have been reset more than once using 4186 * different reset methods before postreset is invoked. 4187 * 4188 * LOCKING: 4189 * Kernel thread context (may sleep) 4190 */ 4191 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 4192 { 4193 struct ata_port *ap = link->ap; 4194 u32 serror; 4195 4196 DPRINTK("ENTER\n"); 4197 4198 /* print link status */ 4199 sata_print_link_status(link); 4200 4201 /* clear SError */ 4202 if (sata_scr_read(link, SCR_ERROR, &serror) == 0) 4203 sata_scr_write(link, SCR_ERROR, serror); 4204 link->eh_info.serror = 0; 4205 4206 /* is double-select really necessary? */ 4207 if (classes[0] != ATA_DEV_NONE) 4208 ap->ops->dev_select(ap, 1); 4209 if (classes[1] != ATA_DEV_NONE) 4210 ap->ops->dev_select(ap, 0); 4211 4212 /* bail out if no device is present */ 4213 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) { 4214 DPRINTK("EXIT, no device\n"); 4215 return; 4216 } 4217 4218 /* set up device control */ 4219 if (ap->ioaddr.ctl_addr) 4220 iowrite8(ap->ctl, ap->ioaddr.ctl_addr); 4221 4222 DPRINTK("EXIT\n"); 4223 } 4224 4225 /** 4226 * ata_dev_same_device - Determine whether new ID matches configured device 4227 * @dev: device to compare against 4228 * @new_class: class of the new device 4229 * @new_id: IDENTIFY page of the new device 4230 * 4231 * Compare @new_class and @new_id against @dev and determine 4232 * whether @dev is the device indicated by @new_class and 4233 * @new_id. 4234 * 4235 * LOCKING: 4236 * None. 4237 * 4238 * RETURNS: 4239 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 
4240 */ 4241 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 4242 const u16 *new_id) 4243 { 4244 const u16 *old_id = dev->id; 4245 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 4246 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 4247 4248 if (dev->class != new_class) { 4249 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n", 4250 dev->class, new_class); 4251 return 0; 4252 } 4253 4254 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 4255 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 4256 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 4257 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 4258 4259 if (strcmp(model[0], model[1])) { 4260 ata_dev_printk(dev, KERN_INFO, "model number mismatch " 4261 "'%s' != '%s'\n", model[0], model[1]); 4262 return 0; 4263 } 4264 4265 if (strcmp(serial[0], serial[1])) { 4266 ata_dev_printk(dev, KERN_INFO, "serial number mismatch " 4267 "'%s' != '%s'\n", serial[0], serial[1]); 4268 return 0; 4269 } 4270 4271 return 1; 4272 } 4273 4274 /** 4275 * ata_dev_reread_id - Re-read IDENTIFY data 4276 * @dev: target ATA device 4277 * @readid_flags: read ID flags 4278 * 4279 * Re-read IDENTIFY page and make sure @dev is still attached to 4280 * the port. 4281 * 4282 * LOCKING: 4283 * Kernel thread context (may sleep) 4284 * 4285 * RETURNS: 4286 * 0 on success, negative errno otherwise 4287 */ 4288 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 4289 { 4290 unsigned int class = dev->class; 4291 u16 *id = (void *)dev->link->ap->sector_buf; 4292 int rc; 4293 4294 /* read ID data */ 4295 rc = ata_dev_read_id(dev, &class, readid_flags, id); 4296 if (rc) 4297 return rc; 4298 4299 /* is the device still there? */ 4300 if (!ata_dev_same_device(dev, class, id)) 4301 return -ENODEV; 4302 4303 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 4304 return 0; 4305 } 4306 4307 /** 4308 * ata_dev_revalidate - Revalidate ATA device 4309 * @dev: device to revalidate 4310 * @new_class: new class code 4311 * @readid_flags: read ID flags 4312 * 4313 * Re-read IDENTIFY page, make sure @dev is still attached to the 4314 * port and reconfigure it according to the new IDENTIFY page. 
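 *
 * Sketch of the sequence implemented below:
 *
 *	ata_dev_reread_id(dev, readid_flags);	re-issue IDENTIFY and
 *						check via ata_dev_same_device()
 *						that class/model/serial match
 *	ata_dev_configure(dev);			apply the new ID data
 *	n_sectors changed?			restore and fail with -ENODEV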
4315 * 4316 * LOCKING: 4317 * Kernel thread context (may sleep) 4318 * 4319 * RETURNS: 4320 * 0 on success, negative errno otherwise 4321 */ 4322 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 4323 unsigned int readid_flags) 4324 { 4325 u64 n_sectors = dev->n_sectors; 4326 int rc; 4327 4328 if (!ata_dev_enabled(dev)) 4329 return -ENODEV; 4330 4331 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 4332 if (ata_class_enabled(new_class) && 4333 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) { 4334 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n", 4335 dev->class, new_class); 4336 rc = -ENODEV; 4337 goto fail; 4338 } 4339 4340 /* re-read ID */ 4341 rc = ata_dev_reread_id(dev, readid_flags); 4342 if (rc) 4343 goto fail; 4344 4345 /* configure device according to the new ID */ 4346 rc = ata_dev_configure(dev); 4347 if (rc) 4348 goto fail; 4349 4350 /* verify n_sectors hasn't changed */ 4351 if (dev->class == ATA_DEV_ATA && n_sectors && 4352 dev->n_sectors != n_sectors) { 4353 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch " 4354 "%llu != %llu\n", 4355 (unsigned long long)n_sectors, 4356 (unsigned long long)dev->n_sectors); 4357 4358 /* restore original n_sectors */ 4359 dev->n_sectors = n_sectors; 4360 4361 rc = -ENODEV; 4362 goto fail; 4363 } 4364 4365 return 0; 4366 4367 fail: 4368 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); 4369 return rc; 4370 } 4371 4372 struct ata_blacklist_entry { 4373 const char *model_num; 4374 const char *model_rev; 4375 unsigned long horkage; 4376 }; 4377 4378 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4379 /* Devices with DMA related problems under Linux */ 4380 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4381 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4382 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4383 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4384 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4385 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4386 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4387 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4388 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4389 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA }, 4390 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA }, 4391 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4392 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4393 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4394 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4395 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4396 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA }, 4397 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA }, 4398 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4399 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4400 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4401 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4402 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4403 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4404 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4405 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4406 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4407 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4408 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4409 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4410 /* Odd clown on sil3726/4726 PMPs */ 4411 { "Config Disk", NULL, ATA_HORKAGE_NODMA | 4412 ATA_HORKAGE_SKIP_PM }, 4413 4414 /* Weird ATAPI devices */ 4415 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4416 4417 
/* Devices we expect to fail diagnostics */ 4418 4419 /* Devices where NCQ should be avoided */ 4420 /* NCQ is slow */ 4421 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4422 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4423 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4424 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4425 /* NCQ is broken */ 4426 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4427 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4428 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4429 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4430 4431 /* Blacklist entries taken from Silicon Image 3124/3132 4432 Windows driver .inf file - also several Linux problem reports */ 4433 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4434 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4435 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4436 4437 /* devices which puke on READ_NATIVE_MAX */ 4438 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4439 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4440 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4441 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4442 4443 /* Devices which report 1 sector over size HPA */ 4444 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4445 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4446 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4447 4448 /* Devices which get the IVB wrong */ 4449 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4450 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, }, 4451 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, }, 4452 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, }, 4453 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, }, 4454 4455 /* End Marker */ 4456 { } 4457 }; 4458 4459 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar) 4460 { 4461 const char *p; 4462 int len; 4463 4464 /* 4465 * check for trailing wildcard: *\0 4466 */ 4467 p = strchr(patt, wildchar); 4468 if (p && ((*(p + 1)) == 0)) 4469 len = p - patt; 4470 else { 4471 len = strlen(name); 4472 if (!len) { 4473 if (!*patt) 4474 return 0; 4475 return -1; 4476 } 4477 } 4478 4479 return strncmp(patt, name, len); 4480 } 4481 4482 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4483 { 4484 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4485 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4486 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4487 4488 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4489 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4490 4491 while (ad->model_num) { 4492 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) { 4493 if (ad->model_rev == NULL) 4494 return ad->horkage; 4495 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*')) 4496 return ad->horkage; 4497 } 4498 ad++; 4499 } 4500 return 0; 4501 } 4502 4503 static int ata_dma_blacklisted(const struct ata_device *dev) 4504 { 4505 /* We don't support polling DMA. 4506 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4507 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4508 */ 4509 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4510 (dev->flags & ATA_DFLAG_CDB_INTR)) 4511 return 1; 4512 return (dev->horkage & ATA_HORKAGE_NODMA) ? 
1 : 0; 4513 } 4514 4515 /** 4516 * ata_is_40wire - check drive side detection 4517 * @dev: device 4518 * 4519 * Perform drive side detection decoding, allowing for device vendors 4520 * who can't follow the documentation. 4521 */ 4522 4523 static int ata_is_40wire(struct ata_device *dev) 4524 { 4525 if (dev->horkage & ATA_HORKAGE_IVB) 4526 return ata_drive_40wire_relaxed(dev->id); 4527 return ata_drive_40wire(dev->id); 4528 } 4529 4530 /** 4531 * ata_dev_xfermask - Compute supported xfermask of the given device 4532 * @dev: Device to compute xfermask for 4533 * 4534 * Compute supported xfermask of @dev and store it in 4535 * dev->*_mask. This function is responsible for applying all 4536 * known limits including host controller limits, device 4537 * blacklist, etc... 4538 * 4539 * LOCKING: 4540 * None. 4541 */ 4542 static void ata_dev_xfermask(struct ata_device *dev) 4543 { 4544 struct ata_link *link = dev->link; 4545 struct ata_port *ap = link->ap; 4546 struct ata_host *host = ap->host; 4547 unsigned long xfer_mask; 4548 4549 /* controller modes available */ 4550 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4551 ap->mwdma_mask, ap->udma_mask); 4552 4553 /* drive modes available */ 4554 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4555 dev->mwdma_mask, dev->udma_mask); 4556 xfer_mask &= ata_id_xfermask(dev->id); 4557 4558 /* 4559 * CFA Advanced TrueIDE timings are not allowed on a shared 4560 * cable 4561 */ 4562 if (ata_dev_pair(dev)) { 4563 /* No PIO5 or PIO6 */ 4564 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4565 /* No MWDMA3 or MWDMA 4 */ 4566 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4567 } 4568 4569 if (ata_dma_blacklisted(dev)) { 4570 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4571 ata_dev_printk(dev, KERN_WARNING, 4572 "device is on DMA blacklist, disabling DMA\n"); 4573 } 4574 4575 if ((host->flags & ATA_HOST_SIMPLEX) && 4576 host->simplex_claimed && host->simplex_claimed != ap) { 4577 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4578 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 4579 "other device, disabling DMA\n"); 4580 } 4581 4582 if (ap->flags & ATA_FLAG_NO_IORDY) 4583 xfer_mask &= ata_pio_mask_no_iordy(dev); 4584 4585 if (ap->ops->mode_filter) 4586 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4587 4588 /* Apply cable rule here. Don't apply it early because when 4589 * we handle hot plug the cable type can itself change. 4590 * Check this last so that we know if the transfer rate was 4591 * solely limited by the cable. 4592 * Unknown or 80 wire cables reported host side are checked 4593 * drive side as well. Cases where we know a 40wire cable 4594 * is used safely for 80 are not checked here. 4595 */ 4596 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4597 /* UDMA/44 or higher would be available */ 4598 if ((ap->cbl == ATA_CBL_PATA40) || 4599 (ata_is_40wire(dev) && 4600 (ap->cbl == ATA_CBL_PATA_UNK || 4601 ap->cbl == ATA_CBL_PATA80))) { 4602 ata_dev_printk(dev, KERN_WARNING, 4603 "limited to UDMA/33 due to 40-wire cable\n"); 4604 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4605 } 4606 4607 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4608 &dev->mwdma_mask, &dev->udma_mask); 4609 } 4610 4611 /** 4612 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4613 * @dev: Device to which command will be sent 4614 * 4615 * Issue SET FEATURES - XFER MODE command to device @dev 4616 * on port @ap. 4617 * 4618 * LOCKING: 4619 * PCI/etc. bus probe sem. 4620 * 4621 * RETURNS: 4622 * 0 on success, AC_ERR_* mask otherwise. 
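 *
 * For reference, the taskfile built below follows the ATA SET
 * FEATURES definition; for a drive running UDMA/5 it would carry
 * (illustrative values):
 *
 *	tf.command = ATA_CMD_SET_FEATURES;	0xef
 *	tf.feature = SETFEATURES_XFER;		0x03, "set transfer mode"
 *	tf.nsect   = dev->xfer_mode;		e.g. 0x45 (XFER_UDMA_5)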
4623 */
4624
4625 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4626 {
4627 struct ata_taskfile tf;
4628 unsigned int err_mask;
4629
4630 /* set up set-features taskfile */
4631 DPRINTK("set features - xfer mode\n");
4632
4633 /* Some controllers and ATAPI devices show flaky interrupt
4634 * behavior after setting xfer mode. Use polling instead.
4635 */
4636 ata_tf_init(dev, &tf);
4637 tf.command = ATA_CMD_SET_FEATURES;
4638 tf.feature = SETFEATURES_XFER;
4639 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4640 tf.protocol = ATA_PROT_NODATA;
4641 /* If we are using IORDY we must send the mode setting command */
4642 if (ata_pio_need_iordy(dev))
4643 tf.nsect = dev->xfer_mode;
4644 /* If the device has IORDY and the controller does not - turn it off */
4645 else if (ata_id_has_iordy(dev->id))
4646 tf.nsect = 0x01;
4647 else /* In the ancient relic department - skip all of this */
4648 return 0;
4649
4650 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4651
4652 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4653 return err_mask;
4654 }
4655 /**
4656 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4657 * @dev: Device to which command will be sent
4658 * @enable: Whether to enable or disable the feature
4659 * @feature: The feature value, passed in the sector count field
4660 *
4661 * Issue SET FEATURES - SATA FEATURES command to device @dev
4662 * on port @ap with the sector count set to @feature.
4663 *
4664 * LOCKING:
4665 * PCI/etc. bus probe sem.
4666 *
4667 * RETURNS:
4668 * 0 on success, AC_ERR_* mask otherwise.
4669 */
4670 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4671 u8 feature)
4672 {
4673 struct ata_taskfile tf;
4674 unsigned int err_mask;
4675
4676 /* set up set-features taskfile */
4677 DPRINTK("set features - SATA features\n");
4678
4679 ata_tf_init(dev, &tf);
4680 tf.command = ATA_CMD_SET_FEATURES;
4681 tf.feature = enable;
4682 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4683 tf.protocol = ATA_PROT_NODATA;
4684 tf.nsect = feature;
4685
4686 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4687
4688 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4689 return err_mask;
4690 }
4691
4692 /**
4693 * ata_dev_init_params - Issue INIT DEV PARAMS command
4694 * @dev: Device to which command will be sent
4695 * @heads: Number of heads (taskfile parameter)
4696 * @sectors: Number of sectors (taskfile parameter)
4697 *
4698 * LOCKING:
4699 * Kernel thread context (may sleep)
4700 *
4701 * RETURNS:
4702 * 0 on success, AC_ERR_* mask otherwise.
4703 */
4704 static unsigned int ata_dev_init_params(struct ata_device *dev,
4705 u16 heads, u16 sectors)
4706 {
4707 struct ata_taskfile tf;
4708 unsigned int err_mask;
4709
4710 /* Number of sectors per track 1-255. Number of heads 1-16 */
4711 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
4712 return AC_ERR_INVALID;
4713
4714 /* set up init dev params taskfile */
4715 DPRINTK("init dev params \n");
4716
4717 ata_tf_init(dev, &tf);
4718 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4719 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4720 tf.protocol = ATA_PROT_NODATA;
4721 tf.nsect = sectors;
4722 tf.device |= (heads - 1) & 0x0f; /* max head = num.
of heads - 1 */
4723
4724 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4725 /* A clean abort indicates an original or just-out-of-spec drive
4726 and we should continue as we issue the setup based on the
4727 drive-reported working geometry */
4728 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4729 err_mask = 0;
4730
4731 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4732 return err_mask;
4733 }
4734
4735 /**
4736 * ata_sg_clean - Unmap DMA memory associated with command
4737 * @qc: Command containing DMA memory to be released
4738 *
4739 * Unmap all mapped DMA memory associated with this command.
4740 *
4741 * LOCKING:
4742 * spin_lock_irqsave(host lock)
4743 */
4744 void ata_sg_clean(struct ata_queued_cmd *qc)
4745 {
4746 struct ata_port *ap = qc->ap;
4747 struct scatterlist *sg = qc->sg;
4748 int dir = qc->dma_dir;
4749
4750 WARN_ON(sg == NULL);
4751
4752 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
4753
4754 if (qc->n_elem)
4755 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
4756
4757 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4758 qc->sg = NULL;
4759 }
4760
4761 /**
4762 * ata_fill_sg - Fill PCI IDE PRD table
4763 * @qc: Metadata associated with taskfile to be transferred
4764 *
4765 * Fill PCI IDE PRD (scatter-gather) table with segments
4766 * associated with the current disk command.
4767 *
4768 * LOCKING:
4769 * spin_lock_irqsave(host lock)
4770 *
4771 */
4772 static void ata_fill_sg(struct ata_queued_cmd *qc)
4773 {
4774 struct ata_port *ap = qc->ap;
4775 struct scatterlist *sg;
4776 unsigned int si, pi;
4777
4778 pi = 0;
4779 for_each_sg(qc->sg, sg, qc->n_elem, si) {
4780 u32 addr, offset;
4781 u32 sg_len, len;
4782
4783 /* determine if physical DMA addr spans 64K boundary.
4784 * Note h/w doesn't support 64-bit, so we unconditionally
4785 * truncate dma_addr_t to u32.
4786 */
4787 addr = (u32) sg_dma_address(sg);
4788 sg_len = sg_dma_len(sg);
4789
4790 while (sg_len) {
4791 offset = addr & 0xffff;
4792 len = sg_len;
4793 if ((offset + sg_len) > 0x10000)
4794 len = 0x10000 - offset;
4795
4796 ap->prd[pi].addr = cpu_to_le32(addr);
4797 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
4798 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4799
4800 pi++;
4801 sg_len -= len;
4802 addr += len;
4803 }
4804 }
4805
4806 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4807 }
4808
4809 /**
4810 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4811 * @qc: Metadata associated with taskfile to be transferred
4812 *
4813 * Fill PCI IDE PRD (scatter-gather) table with segments
4814 * associated with the current disk command. Perform the fill
4815 * so that we avoid writing any 64K-length records (length field
4816 * 0x0000) for controllers that don't follow the spec.
4817 *
4818 * LOCKING:
4819 * spin_lock_irqsave(host lock)
4820 *
4821 */
4822 static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4823 {
4824 struct ata_port *ap = qc->ap;
4825 struct scatterlist *sg;
4826 unsigned int si, pi;
4827
4828 pi = 0;
4829 for_each_sg(qc->sg, sg, qc->n_elem, si) {
4830 u32 addr, offset;
4831 u32 sg_len, len, blen;
4832
4833 /* determine if physical DMA addr spans 64K boundary.
4834 * Note h/w doesn't support 64-bit, so we unconditionally
4835 * truncate dma_addr_t to u32.
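 *
 * Worked example (illustrative): addr = 0x1fffe00 and
 * sg_len = 0x400 give offset = 0xfe00; the first PRD entry
 * is clamped to 0x10000 - 0xfe00 = 0x200 bytes and the
 * remaining 0x200 bytes start a new entry at 0x2000000, so
 * no single entry ever crosses a 64K boundary.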
4836 */
4837 addr = (u32) sg_dma_address(sg);
4838 sg_len = sg_dma_len(sg);
4839
4840 while (sg_len) {
4841 offset = addr & 0xffff;
4842 len = sg_len;
4843 if ((offset + sg_len) > 0x10000)
4844 len = 0x10000 - offset;
4845
4846 blen = len & 0xffff;
4847 ap->prd[pi].addr = cpu_to_le32(addr);
4848 if (blen == 0) {
4849 /* Some PATA chipsets like the CS5530 can't
4850 cope with 0x0000 meaning 64K as the spec says */
4851 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
4852 blen = 0x8000;
4853 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
4854 }
4855 ap->prd[pi].flags_len = cpu_to_le32(blen);
4856 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
4857
4858 pi++;
4859 sg_len -= len;
4860 addr += len;
4861 }
4862 }
4863
4864 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4865 }
4866
4867 /**
4868 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4869 * @qc: Metadata associated with taskfile to check
4870 *
4871 * Allow low-level driver to filter ATA PACKET commands, returning
4872 * a status indicating whether or not it is OK to use DMA for the
4873 * supplied PACKET command.
4874 *
4875 * LOCKING:
4876 * spin_lock_irqsave(host lock)
4877 *
4878 * RETURNS: 0 when ATAPI DMA can be used
4879 * nonzero otherwise
4880 */
4881 int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4882 {
4883 struct ata_port *ap = qc->ap;
4884
4885 /* Don't allow DMA if it isn't a multiple of 16 bytes. Quite a
4886 * few ATAPI devices choke on such DMA requests.
4887 */
4888 if (unlikely(qc->nbytes & 15))
4889 return 1;
4890
4891 if (ap->ops->check_atapi_dma)
4892 return ap->ops->check_atapi_dma(qc);
4893
4894 return 0;
4895 }
4896
4897 /**
4898 * ata_std_qc_defer - Check whether a qc needs to be deferred
4899 * @qc: ATA command in question
4900 *
4901 * Non-NCQ commands cannot run with any other command, NCQ or
4902 * not. As the upper layer only knows the queue depth, we are
4903 * responsible for maintaining exclusion. This function checks
4904 * whether a new command @qc can be issued.
4905 *
4906 * LOCKING:
4907 * spin_lock_irqsave(host lock)
4908 *
4909 * RETURNS:
4910 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4911 */
4912 int ata_std_qc_defer(struct ata_queued_cmd *qc)
4913 {
4914 struct ata_link *link = qc->dev->link;
4915
4916 if (qc->tf.protocol == ATA_PROT_NCQ) {
4917 if (!ata_tag_valid(link->active_tag))
4918 return 0;
4919 } else {
4920 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4921 return 0;
4922 }
4923
4924 return ATA_DEFER_LINK;
4925 }
4926
4927 /**
4928 * ata_qc_prep - Prepare taskfile for submission
4929 * @qc: Metadata associated with taskfile to be prepared
4930 *
4931 * Prepare ATA taskfile for submission.
4932 *
4933 * LOCKING:
4934 * spin_lock_irqsave(host lock)
4935 */
4936 void ata_qc_prep(struct ata_queued_cmd *qc)
4937 {
4938 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4939 return;
4940
4941 ata_fill_sg(qc);
4942 }
4943
4944 /**
4945 * ata_dumb_qc_prep - Prepare taskfile for submission
4946 * @qc: Metadata associated with taskfile to be prepared
4947 *
4948 * Prepare ATA taskfile for submission.
4949 *
4950 * LOCKING:
4951 * spin_lock_irqsave(host lock)
4952 */
4953 void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4954 {
4955 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4956 return;
4957
4958 ata_fill_sg_dumb(qc);
4959 }
4960
4961 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4962
4963 /**
4964 * ata_sg_init - Associate command with scatter-gather table.
4965 * @qc: Command to be associated
4966 * @sg: Scatter-gather table.
4967 * @n_elem: Number of elements in s/g table. 4968 * 4969 * Initialize the data-related elements of queued_cmd @qc 4970 * to point to a scatter-gather table @sg, containing @n_elem 4971 * elements. 4972 * 4973 * LOCKING: 4974 * spin_lock_irqsave(host lock) 4975 */ 4976 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4977 unsigned int n_elem) 4978 { 4979 qc->sg = sg; 4980 qc->n_elem = n_elem; 4981 qc->cursg = qc->sg; 4982 } 4983 4984 /** 4985 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4986 * @qc: Command with scatter-gather table to be mapped. 4987 * 4988 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4989 * 4990 * LOCKING: 4991 * spin_lock_irqsave(host lock) 4992 * 4993 * RETURNS: 4994 * Zero on success, negative on error. 4995 * 4996 */ 4997 static int ata_sg_setup(struct ata_queued_cmd *qc) 4998 { 4999 struct ata_port *ap = qc->ap; 5000 unsigned int n_elem; 5001 5002 VPRINTK("ENTER, ata%u\n", ap->print_id); 5003 5004 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 5005 if (n_elem < 1) 5006 return -1; 5007 5008 DPRINTK("%d sg elements mapped\n", n_elem); 5009 5010 qc->n_elem = n_elem; 5011 qc->flags |= ATA_QCFLAG_DMAMAP; 5012 5013 return 0; 5014 } 5015 5016 /** 5017 * swap_buf_le16 - swap halves of 16-bit words in place 5018 * @buf: Buffer to swap 5019 * @buf_words: Number of 16-bit words in buffer. 5020 * 5021 * Swap halves of 16-bit words if needed to convert from 5022 * little-endian byte order to native cpu byte order, or 5023 * vice-versa. 5024 * 5025 * LOCKING: 5026 * Inherited from caller. 5027 */ 5028 void swap_buf_le16(u16 *buf, unsigned int buf_words) 5029 { 5030 #ifdef __BIG_ENDIAN 5031 unsigned int i; 5032 5033 for (i = 0; i < buf_words; i++) 5034 buf[i] = le16_to_cpu(buf[i]); 5035 #endif /* __BIG_ENDIAN */ 5036 } 5037 5038 /** 5039 * ata_data_xfer - Transfer data by PIO 5040 * @dev: device to target 5041 * @buf: data buffer 5042 * @buflen: buffer length 5043 * @rw: read/write 5044 * 5045 * Transfer data from/to the device data register by PIO. 5046 * 5047 * LOCKING: 5048 * Inherited from caller. 5049 * 5050 * RETURNS: 5051 * Bytes consumed. 5052 */ 5053 unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf, 5054 unsigned int buflen, int rw) 5055 { 5056 struct ata_port *ap = dev->link->ap; 5057 void __iomem *data_addr = ap->ioaddr.data_addr; 5058 unsigned int words = buflen >> 1; 5059 5060 /* Transfer multiple of 2 bytes */ 5061 if (rw == READ) 5062 ioread16_rep(data_addr, buf, words); 5063 else 5064 iowrite16_rep(data_addr, buf, words); 5065 5066 /* Transfer trailing 1 byte, if any. */ 5067 if (unlikely(buflen & 0x01)) { 5068 __le16 align_buf[1] = { 0 }; 5069 unsigned char *trailing_buf = buf + buflen - 1; 5070 5071 if (rw == READ) { 5072 align_buf[0] = cpu_to_le16(ioread16(data_addr)); 5073 memcpy(trailing_buf, align_buf, 1); 5074 } else { 5075 memcpy(align_buf, trailing_buf, 1); 5076 iowrite16(le16_to_cpu(align_buf[0]), data_addr); 5077 } 5078 words++; 5079 } 5080 5081 return words << 1; 5082 } 5083 5084 /** 5085 * ata_data_xfer_noirq - Transfer data by PIO 5086 * @dev: device to target 5087 * @buf: data buffer 5088 * @buflen: buffer length 5089 * @rw: read/write 5090 * 5091 * Transfer data from/to the device data register by PIO. Do the 5092 * transfer with interrupts disabled. 5093 * 5094 * LOCKING: 5095 * Inherited from caller. 5096 * 5097 * RETURNS: 5098 * Bytes consumed. 
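 *
 * Worked example for the odd-length path of ata_data_xfer()
 * (illustrative): buflen = 7 moves bytes 0..5 as three 16-bit
 * words, passes the trailing byte through a zero-padded bounce
 * word and returns 8, i.e. the consumed byte count is rounded
 * up to the next even value.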
5099 */ 5100 unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf, 5101 unsigned int buflen, int rw) 5102 { 5103 unsigned long flags; 5104 unsigned int consumed; 5105 5106 local_irq_save(flags); 5107 consumed = ata_data_xfer(dev, buf, buflen, rw); 5108 local_irq_restore(flags); 5109 5110 return consumed; 5111 } 5112 5113 5114 /** 5115 * ata_pio_sector - Transfer a sector of data. 5116 * @qc: Command on going 5117 * 5118 * Transfer qc->sect_size bytes of data from/to the ATA device. 5119 * 5120 * LOCKING: 5121 * Inherited from caller. 5122 */ 5123 5124 static void ata_pio_sector(struct ata_queued_cmd *qc) 5125 { 5126 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE); 5127 struct ata_port *ap = qc->ap; 5128 struct page *page; 5129 unsigned int offset; 5130 unsigned char *buf; 5131 5132 if (qc->curbytes == qc->nbytes - qc->sect_size) 5133 ap->hsm_task_state = HSM_ST_LAST; 5134 5135 page = sg_page(qc->cursg); 5136 offset = qc->cursg->offset + qc->cursg_ofs; 5137 5138 /* get the current page and offset */ 5139 page = nth_page(page, (offset >> PAGE_SHIFT)); 5140 offset %= PAGE_SIZE; 5141 5142 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read"); 5143 5144 if (PageHighMem(page)) { 5145 unsigned long flags; 5146 5147 /* FIXME: use a bounce buffer */ 5148 local_irq_save(flags); 5149 buf = kmap_atomic(page, KM_IRQ0); 5150 5151 /* do the actual data transfer */ 5152 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write); 5153 5154 kunmap_atomic(buf, KM_IRQ0); 5155 local_irq_restore(flags); 5156 } else { 5157 buf = page_address(page); 5158 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write); 5159 } 5160 5161 qc->curbytes += qc->sect_size; 5162 qc->cursg_ofs += qc->sect_size; 5163 5164 if (qc->cursg_ofs == qc->cursg->length) { 5165 qc->cursg = sg_next(qc->cursg); 5166 qc->cursg_ofs = 0; 5167 } 5168 } 5169 5170 /** 5171 * ata_pio_sectors - Transfer one or many sectors. 5172 * @qc: Command on going 5173 * 5174 * Transfer one or many sectors of data from/to the 5175 * ATA device for the DRQ request. 5176 * 5177 * LOCKING: 5178 * Inherited from caller. 5179 */ 5180 5181 static void ata_pio_sectors(struct ata_queued_cmd *qc) 5182 { 5183 if (is_multi_taskfile(&qc->tf)) { 5184 /* READ/WRITE MULTIPLE */ 5185 unsigned int nsect; 5186 5187 WARN_ON(qc->dev->multi_count == 0); 5188 5189 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size, 5190 qc->dev->multi_count); 5191 while (nsect--) 5192 ata_pio_sector(qc); 5193 } else 5194 ata_pio_sector(qc); 5195 5196 ata_altstatus(qc->ap); /* flush */ 5197 } 5198 5199 /** 5200 * atapi_send_cdb - Write CDB bytes to hardware 5201 * @ap: Port to which ATAPI device is attached. 5202 * @qc: Taskfile currently active 5203 * 5204 * When device has indicated its readiness to accept 5205 * a CDB, this function is called. Send the CDB. 5206 * 5207 * LOCKING: 5208 * caller. 
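 *
 * Sketch of the flow implemented below: a 12-byte CDB is pushed
 * through ap->ops->data_xfer() as six 16-bit words, then the HSM
 * advances to HSM_ST for ATAPI_PROT_PIO, to HSM_ST_LAST for
 * ATAPI_PROT_NODATA, or to HSM_ST_LAST with bmdma_start() for
 * ATAPI_PROT_DMA.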
5209 */
5210
5211 static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5212 {
5213 /* send SCSI cdb */
5214 DPRINTK("send cdb\n");
5215 WARN_ON(qc->dev->cdb_len < 12);
5216
5217 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
5218 ata_altstatus(ap); /* flush */
5219
5220 switch (qc->tf.protocol) {
5221 case ATAPI_PROT_PIO:
5222 ap->hsm_task_state = HSM_ST;
5223 break;
5224 case ATAPI_PROT_NODATA:
5225 ap->hsm_task_state = HSM_ST_LAST;
5226 break;
5227 case ATAPI_PROT_DMA:
5228 ap->hsm_task_state = HSM_ST_LAST;
5229 /* initiate bmdma */
5230 ap->ops->bmdma_start(qc);
5231 break;
5232 }
5233 }
5234
5235 /**
5236 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
5237 * @qc: Command on going
5238 * @bytes: number of bytes
5239 *
5240 * Transfer data from/to the ATAPI device.
5241 *
5242 * LOCKING:
5243 * Inherited from caller.
5244 *
5245 */
5246 static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
5247 {
5248 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
5249 struct ata_port *ap = qc->ap;
5250 struct ata_device *dev = qc->dev;
5251 struct ata_eh_info *ehi = &dev->link->eh_info;
5252 struct scatterlist *sg;
5253 struct page *page;
5254 unsigned char *buf;
5255 unsigned int offset, count, consumed;
5256
5257 next_sg:
5258 sg = qc->cursg;
5259 if (unlikely(!sg)) {
5260 ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
5261 "buf=%u cur=%u bytes=%u",
5262 qc->nbytes, qc->curbytes, bytes);
5263 return -1;
5264 }
5265
5266 page = sg_page(sg);
5267 offset = sg->offset + qc->cursg_ofs;
5268
5269 /* get the current page and offset */
5270 page = nth_page(page, (offset >> PAGE_SHIFT));
5271 offset %= PAGE_SIZE;
5272
5273 /* don't overrun current sg */
5274 count = min(sg->length - qc->cursg_ofs, bytes);
5275
5276 /* don't cross page boundaries */
5277 count = min(count, (unsigned int)PAGE_SIZE - offset);
5278
5279 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5280
5281 if (PageHighMem(page)) {
5282 unsigned long flags;
5283
5284 /* FIXME: use bounce buffer */
5285 local_irq_save(flags);
5286 buf = kmap_atomic(page, KM_IRQ0);
5287
5288 /* do the actual data transfer */
5289 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
5290
5291 kunmap_atomic(buf, KM_IRQ0);
5292 local_irq_restore(flags);
5293 } else {
5294 buf = page_address(page);
5295 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
5296 }
5297
5298 bytes -= min(bytes, consumed);
5299 qc->curbytes += count;
5300 qc->cursg_ofs += count;
5301
5302 if (qc->cursg_ofs == sg->length) {
5303 qc->cursg = sg_next(qc->cursg);
5304 qc->cursg_ofs = 0;
5305 }
5306
5307 /* consumed can be larger than count only for the last transfer */
5308 WARN_ON(qc->cursg && count != consumed);
5309
5310 if (bytes)
5311 goto next_sg;
5312 return 0;
5313 }
5314
5315 /**
5316 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5317 * @qc: Command on going
5318 *
5319 * Transfer data from/to the ATAPI device.
5320 *
5321 * LOCKING:
5322 * Inherited from caller.
5323 */
5324
5325 static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5326 {
5327 struct ata_port *ap = qc->ap;
5328 struct ata_device *dev = qc->dev;
5329 struct ata_eh_info *ehi = &dev->link->eh_info;
5330 unsigned int ireason, bc_lo, bc_hi, bytes;
5331 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5332
5333 /* Abuse qc->result_tf for temp storage of intermediate TF
5334 * here to save some kernel stack usage.
5335 * For normal completion, qc->result_tf is not relevant. For 5336 * error, qc->result_tf is later overwritten by ata_qc_complete(). 5337 * So, the correctness of qc->result_tf is not affected. 5338 */ 5339 ap->ops->tf_read(ap, &qc->result_tf); 5340 ireason = qc->result_tf.nsect; 5341 bc_lo = qc->result_tf.lbam; 5342 bc_hi = qc->result_tf.lbah; 5343 bytes = (bc_hi << 8) | bc_lo; 5344 5345 /* shall be cleared to zero, indicating xfer of data */ 5346 if (unlikely(ireason & (1 << 0))) 5347 goto atapi_check; 5348 5349 /* make sure transfer direction matches expected */ 5350 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0; 5351 if (unlikely(do_write != i_write)) 5352 goto atapi_check; 5353 5354 if (unlikely(!bytes)) 5355 goto atapi_check; 5356 5357 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes); 5358 5359 if (unlikely(__atapi_pio_bytes(qc, bytes))) 5360 goto err_out; 5361 ata_altstatus(ap); /* flush */ 5362 5363 return; 5364 5365 atapi_check: 5366 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)", 5367 ireason, bytes); 5368 err_out: 5369 qc->err_mask |= AC_ERR_HSM; 5370 ap->hsm_task_state = HSM_ST_ERR; 5371 } 5372 5373 /** 5374 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue. 5375 * @ap: the target ata_port 5376 * @qc: qc on going 5377 * 5378 * RETURNS: 5379 * 1 if ok in workqueue, 0 otherwise. 5380 */ 5381 5382 static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc) 5383 { 5384 if (qc->tf.flags & ATA_TFLAG_POLLING) 5385 return 1; 5386 5387 if (ap->hsm_task_state == HSM_ST_FIRST) { 5388 if (qc->tf.protocol == ATA_PROT_PIO && 5389 (qc->tf.flags & ATA_TFLAG_WRITE)) 5390 return 1; 5391 5392 if (ata_is_atapi(qc->tf.protocol) && 5393 !(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 5394 return 1; 5395 } 5396 5397 return 0; 5398 } 5399 5400 /** 5401 * ata_hsm_qc_complete - finish a qc running on standard HSM 5402 * @qc: Command to complete 5403 * @in_wq: 1 if called from workqueue, 0 otherwise 5404 * 5405 * Finish @qc which is running on standard HSM. 5406 * 5407 * LOCKING: 5408 * If @in_wq is zero, spin_lock_irqsave(host lock). 5409 * Otherwise, none on entry and grabs host lock. 5410 */ 5411 static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq) 5412 { 5413 struct ata_port *ap = qc->ap; 5414 unsigned long flags; 5415 5416 if (ap->ops->error_handler) { 5417 if (in_wq) { 5418 spin_lock_irqsave(ap->lock, flags); 5419 5420 /* EH might have kicked in while host lock is 5421 * released. 5422 */ 5423 qc = ata_qc_from_tag(ap, qc->tag); 5424 if (qc) { 5425 if (likely(!(qc->err_mask & AC_ERR_HSM))) { 5426 ap->ops->irq_on(ap); 5427 ata_qc_complete(qc); 5428 } else 5429 ata_port_freeze(ap); 5430 } 5431 5432 spin_unlock_irqrestore(ap->lock, flags); 5433 } else { 5434 if (likely(!(qc->err_mask & AC_ERR_HSM))) 5435 ata_qc_complete(qc); 5436 else 5437 ata_port_freeze(ap); 5438 } 5439 } else { 5440 if (in_wq) { 5441 spin_lock_irqsave(ap->lock, flags); 5442 ap->ops->irq_on(ap); 5443 ata_qc_complete(qc); 5444 spin_unlock_irqrestore(ap->lock, flags); 5445 } else 5446 ata_qc_complete(qc); 5447 } 5448 } 5449 5450 /** 5451 * ata_hsm_move - move the HSM to the next state. 5452 * @ap: the target ata_port 5453 * @qc: qc on going 5454 * @status: current device status 5455 * @in_wq: 1 if called from workqueue, 0 otherwise 5456 * 5457 * RETURNS: 5458 * 1 when poll next status needed, 0 otherwise. 
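 *
 * Illustrative state flow for a polled PIO write, assuming no
 * errors (a sketch of the switch below):
 *
 *	issue:		taskfile written, state = HSM_ST_FIRST
 *	HSM_ST_FIRST:	first data block sent, state = HSM_ST
 *	HSM_ST:		one block transferred per DRQ assertion
 *	HSM_ST_LAST:	final status checked, state = HSM_ST_IDLE,
 *			qc finished via ata_hsm_qc_complete()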
5459 */ 5460 int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, 5461 u8 status, int in_wq) 5462 { 5463 unsigned long flags = 0; 5464 int poll_next; 5465 5466 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0); 5467 5468 /* Make sure ata_qc_issue_prot() does not throw things 5469 * like DMA polling into the workqueue. Notice that 5470 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING). 5471 */ 5472 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc)); 5473 5474 fsm_start: 5475 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n", 5476 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status); 5477 5478 switch (ap->hsm_task_state) { 5479 case HSM_ST_FIRST: 5480 /* Send first data block or PACKET CDB */ 5481 5482 /* If polling, we will stay in the work queue after 5483 * sending the data. Otherwise, interrupt handler 5484 * takes over after sending the data. 5485 */ 5486 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING); 5487 5488 /* check device status */ 5489 if (unlikely((status & ATA_DRQ) == 0)) { 5490 /* handle BSY=0, DRQ=0 as error */ 5491 if (likely(status & (ATA_ERR | ATA_DF))) 5492 /* device stops HSM for abort/error */ 5493 qc->err_mask |= AC_ERR_DEV; 5494 else 5495 /* HSM violation. Let EH handle this */ 5496 qc->err_mask |= AC_ERR_HSM; 5497 5498 ap->hsm_task_state = HSM_ST_ERR; 5499 goto fsm_start; 5500 } 5501 5502 /* Device should not ask for data transfer (DRQ=1) 5503 * when it finds something wrong. 5504 * We ignore DRQ here and stop the HSM by 5505 * changing hsm_task_state to HSM_ST_ERR and 5506 * let the EH abort the command or reset the device. 5507 */ 5508 if (unlikely(status & (ATA_ERR | ATA_DF))) { 5509 /* Some ATAPI tape drives forget to clear the ERR bit 5510 * when doing the next command (mostly request sense). 5511 * We ignore ERR here to workaround and proceed sending 5512 * the CDB. 5513 */ 5514 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) { 5515 ata_port_printk(ap, KERN_WARNING, 5516 "DRQ=1 with device error, " 5517 "dev_stat 0x%X\n", status); 5518 qc->err_mask |= AC_ERR_HSM; 5519 ap->hsm_task_state = HSM_ST_ERR; 5520 goto fsm_start; 5521 } 5522 } 5523 5524 /* Send the CDB (atapi) or the first data block (ata pio out). 5525 * During the state transition, interrupt handler shouldn't 5526 * be invoked before the data transfer is complete and 5527 * hsm_task_state is changed. Hence, the following locking. 5528 */ 5529 if (in_wq) 5530 spin_lock_irqsave(ap->lock, flags); 5531 5532 if (qc->tf.protocol == ATA_PROT_PIO) { 5533 /* PIO data out protocol. 5534 * send first data block. 5535 */ 5536 5537 /* ata_pio_sectors() might change the state 5538 * to HSM_ST_LAST. so, the state is changed here 5539 * before ata_pio_sectors(). 5540 */ 5541 ap->hsm_task_state = HSM_ST; 5542 ata_pio_sectors(qc); 5543 } else 5544 /* send CDB */ 5545 atapi_send_cdb(ap, qc); 5546 5547 if (in_wq) 5548 spin_unlock_irqrestore(ap->lock, flags); 5549 5550 /* if polling, ata_pio_task() handles the rest. 5551 * otherwise, interrupt handler takes over from here. 5552 */ 5553 break; 5554 5555 case HSM_ST: 5556 /* complete command or read/write the data register */ 5557 if (qc->tf.protocol == ATAPI_PROT_PIO) { 5558 /* ATAPI PIO protocol */ 5559 if ((status & ATA_DRQ) == 0) { 5560 /* No more data to transfer or device error. 5561 * Device error will be tagged in HSM_ST_LAST. 5562 */ 5563 ap->hsm_task_state = HSM_ST_LAST; 5564 goto fsm_start; 5565 } 5566 5567 /* Device should not ask for data transfer (DRQ=1) 5568 * when it finds something wrong. 
5569 * We ignore DRQ here and stop the HSM by
5570 * changing hsm_task_state to HSM_ST_ERR and
5571 * let the EH abort the command or reset the device.
5572 */
5573 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5574 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5575 "device error, dev_stat 0x%X\n",
5576 status);
5577 qc->err_mask |= AC_ERR_HSM;
5578 ap->hsm_task_state = HSM_ST_ERR;
5579 goto fsm_start;
5580 }
5581
5582 atapi_pio_bytes(qc);
5583
5584 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5585 /* bad ireason reported by device */
5586 goto fsm_start;
5587
5588 } else {
5589 /* ATA PIO protocol */
5590 if (unlikely((status & ATA_DRQ) == 0)) {
5591 /* handle BSY=0, DRQ=0 as error */
5592 if (likely(status & (ATA_ERR | ATA_DF)))
5593 /* device stops HSM for abort/error */
5594 qc->err_mask |= AC_ERR_DEV;
5595 else
5596 /* HSM violation. Let EH handle this.
5597 * Phantom devices also trigger this
5598 * condition. Mark hint.
5599 */
5600 qc->err_mask |= AC_ERR_HSM |
5601 AC_ERR_NODEV_HINT;
5602
5603 ap->hsm_task_state = HSM_ST_ERR;
5604 goto fsm_start;
5605 }
5606
5607 /* For PIO reads, some devices may ask for
5608 * data transfer (DRQ=1) along with ERR=1.
5609 * We respect DRQ here and transfer one
5610 * block of junk data before changing the
5611 * hsm_task_state to HSM_ST_ERR.
5612 *
5613 * For PIO writes, ERR=1 DRQ=1 doesn't make
5614 * sense since the data block has been
5615 * transferred to the device.
5616 */
5617 if (unlikely(status & (ATA_ERR | ATA_DF))) {
5618 /* data might be corrupted */
5619 qc->err_mask |= AC_ERR_DEV;
5620
5621 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5622 ata_pio_sectors(qc);
5623 status = ata_wait_idle(ap);
5624 }
5625
5626 if (status & (ATA_BUSY | ATA_DRQ))
5627 qc->err_mask |= AC_ERR_HSM;
5628
5629 /* ata_pio_sectors() might change the
5630 * state to HSM_ST_LAST. so, the state
5631 * is changed after ata_pio_sectors().
5632 */
5633 ap->hsm_task_state = HSM_ST_ERR;
5634 goto fsm_start;
5635 }
5636
5637 ata_pio_sectors(qc);
5638
5639 if (ap->hsm_task_state == HSM_ST_LAST &&
5640 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5641 /* all data read */
5642 status = ata_wait_idle(ap);
5643 goto fsm_start;
5644 }
5645 }
5646
5647 poll_next = 1;
5648 break;
5649
5650 case HSM_ST_LAST:
5651 if (unlikely(!ata_ok(status))) {
5652 qc->err_mask |= __ac_err_mask(status);
5653 ap->hsm_task_state = HSM_ST_ERR;
5654 goto fsm_start;
5655 }
5656
5657 /* no more data to transfer */
5658 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
5659 ap->print_id, qc->dev->devno, status);
5660
5661 WARN_ON(qc->err_mask);
5662
5663 ap->hsm_task_state = HSM_ST_IDLE;
5664
5665 /* complete taskfile transaction */
5666 ata_hsm_qc_complete(qc, in_wq);
5667
5668 poll_next = 0;
5669 break;
5670
5671 case HSM_ST_ERR:
5672 /* make sure qc->err_mask is available to
5673 * know what's wrong and recover
5674 */
5675 WARN_ON(qc->err_mask == 0);
5676
5677 ap->hsm_task_state = HSM_ST_IDLE;
5678
5679 /* complete taskfile transaction */
5680 ata_hsm_qc_complete(qc, in_wq);
5681
5682 poll_next = 0;
5683 break;
5684 default:
5685 poll_next = 0;
5686 BUG();
5687 }
5688
5689 return poll_next;
5690 }
5691
5692 static void ata_pio_task(struct work_struct *work)
5693 {
5694 struct ata_port *ap =
5695 container_of(work, struct ata_port, port_task.work);
5696 struct ata_queued_cmd *qc = ap->port_task_data;
5697 u8 status;
5698 int poll_next;
5699
5700 fsm_start:
5701 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
5702
5703 /*
5704 * This is purely heuristic. This is a fast path.
5705 * Sometimes when we enter, BSY will be cleared in
5706 * a chk-status or two. If not, the drive is probably seeking
5707 * or something. Snooze for a couple msecs, then
5708 * chk-status again. If still busy, queue delayed work.
5709 */
5710 status = ata_busy_wait(ap, ATA_BUSY, 5);
5711 if (status & ATA_BUSY) {
5712 msleep(2);
5713 status = ata_busy_wait(ap, ATA_BUSY, 10);
5714 if (status & ATA_BUSY) {
5715 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
5716 return;
5717 }
5718 }
5719
5720 /* move the HSM */
5721 poll_next = ata_hsm_move(ap, qc, status, 1);
5722
5723 /* another command or interrupt handler
5724 * may be running at this point.
5725 */
5726 if (poll_next)
5727 goto fsm_start;
5728 }
5729
5730 /**
5731 * ata_qc_new - Request an available ATA command, for queueing
5732 * @ap: Port from which to request an available command structure
5733 * (command tags are allocated port-wide, not per device)
5734 *
5735 * LOCKING:
5736 * None.
5737 */
5738
5739 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5740 {
5741 struct ata_queued_cmd *qc = NULL;
5742 unsigned int i;
5743
5744 /* no command while frozen */
5745 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
5746 return NULL;
5747
5748 /* the last tag is reserved for the internal command. */
5749 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
5750 if (!test_and_set_bit(i, &ap->qc_allocated)) {
5751 qc = __ata_qc_from_tag(ap, i);
5752 break;
5753 }
5754
5755 if (qc)
5756 qc->tag = i;
5757
5758 return qc;
5759 }
5760
5761 /**
5762 * ata_qc_new_init - Request an available ATA command, and initialize it
5763 * @dev: Device from which to request an available command structure
5764 *
5765 * LOCKING:
5766 * None.
5767 */
5768
5769 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
5770 {
5771 struct ata_port *ap = dev->link->ap;
5772 struct ata_queued_cmd *qc;
5773
5774 qc = ata_qc_new(ap);
5775 if (qc) {
5776 qc->scsicmd = NULL;
5777 qc->ap = ap;
5778 qc->dev = dev;
5779
5780 ata_qc_reinit(qc);
5781 }
5782
5783 return qc;
5784 }
5785
5786 /**
5787 * ata_qc_free - free unused ata_queued_cmd
5788 * @qc: Command to free
5789 *
5790 * Designed to free unused ata_queued_cmd object
5791 * in case something prevents using it.
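 *
 * Illustrative pairing (sketch): a caller that obtained a qc via
 * ata_qc_new_init() but fails during setup returns the tag with
 * ata_qc_free(); a qc that was actually issued is instead torn
 * down by __ata_qc_complete() and freed by its owner from the
 * ->complete_fn() callback path.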
5792 *
5793 * LOCKING:
5794 * spin_lock_irqsave(host lock)
5795 */
5796 void ata_qc_free(struct ata_queued_cmd *qc)
5797 {
5798 struct ata_port *ap = qc->ap;
5799 unsigned int tag;
5800
5801 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5802
5803 qc->flags = 0;
5804 tag = qc->tag;
5805 if (likely(ata_tag_valid(tag))) {
5806 qc->tag = ATA_TAG_POISON;
5807 clear_bit(tag, &ap->qc_allocated);
5808 }
5809 }
5810
5811 void __ata_qc_complete(struct ata_queued_cmd *qc)
5812 {
5813 struct ata_port *ap = qc->ap;
5814 struct ata_link *link = qc->dev->link;
5815
5816 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5817 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
5818
5819 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5820 ata_sg_clean(qc);
5821
5822 /* command should be marked inactive atomically with qc completion */
5823 if (qc->tf.protocol == ATA_PROT_NCQ) {
5824 link->sactive &= ~(1 << qc->tag);
5825 if (!link->sactive)
5826 ap->nr_active_links--;
5827 } else {
5828 link->active_tag = ATA_TAG_POISON;
5829 ap->nr_active_links--;
5830 }
5831
5832 /* clear exclusive status */
5833 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5834 ap->excl_link == link))
5835 ap->excl_link = NULL;
5836
5837 /* atapi: mark qc as inactive to prevent the interrupt handler
5838 * from completing the command twice later, before the error handler
5839 * is called. (when rc != 0 and atapi request sense is needed)
5840 */
5841 qc->flags &= ~ATA_QCFLAG_ACTIVE;
5842 ap->qc_active &= ~(1 << qc->tag);
5843
5844 /* call completion callback */
5845 qc->complete_fn(qc);
5846 }
5847
5848 static void fill_result_tf(struct ata_queued_cmd *qc)
5849 {
5850 struct ata_port *ap = qc->ap;
5851
5852 qc->result_tf.flags = qc->tf.flags;
5853 ap->ops->tf_read(ap, &qc->result_tf);
5854 }
5855
5856 static void ata_verify_xfer(struct ata_queued_cmd *qc)
5857 {
5858 struct ata_device *dev = qc->dev;
5859
5860 if (ata_tag_internal(qc->tag))
5861 return;
5862
5863 if (ata_is_nodata(qc->tf.protocol))
5864 return;
5865
5866 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5867 return;
5868
5869 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5870 }
5871
5872 /**
5873 * ata_qc_complete - Complete an active ATA command
5874 * @qc: Command to complete
5875 * (error status, if any, is carried in qc->err_mask)
5876 *
5877 * Indicate to the mid and upper layers that an ATA
5878 * command has completed, with either an ok or not-ok status.
5879 *
5880 * LOCKING:
5881 * spin_lock_irqsave(host lock)
5882 */
5883 void ata_qc_complete(struct ata_queued_cmd *qc)
5884 {
5885 struct ata_port *ap = qc->ap;
5886
5887 /* XXX: New EH and old EH use different mechanisms to
5888 * synchronize EH with regular execution path.
5889 *
5890 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5891 * Normal execution path is responsible for not accessing a
5892 * failed qc. libata core enforces the rule by returning NULL
5893 * from ata_qc_from_tag() for failed qcs.
5894 *
5895 * Old EH depends on ata_qc_complete() nullifying completion
5896 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5897 * not synchronize with interrupt handler. Only PIO task is
5898 * taken care of.
5899 */
5900 if (ap->ops->error_handler) {
5901 struct ata_device *dev = qc->dev;
5902 struct ata_eh_info *ehi = &dev->link->eh_info;
5903
5904 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
5905
5906 if (unlikely(qc->err_mask))
5907 qc->flags |= ATA_QCFLAG_FAILED;
5908
5909 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5910 if (!ata_tag_internal(qc->tag)) {
5911 /* always fill result TF for failed qc */
5912 fill_result_tf(qc);
5913 ata_qc_schedule_eh(qc);
5914 return;
5915 }
5916 }
5917
5918 /* read result TF if requested */
5919 if (qc->flags & ATA_QCFLAG_RESULT_TF)
5920 fill_result_tf(qc);
5921
5922 /* Some commands need post-processing after successful
5923 * completion.
5924 */
5925 switch (qc->tf.command) {
5926 case ATA_CMD_SET_FEATURES:
5927 if (qc->tf.feature != SETFEATURES_WC_ON &&
5928 qc->tf.feature != SETFEATURES_WC_OFF)
5929 break;
5930 /* fall through */
5931 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
5932 case ATA_CMD_SET_MULTI: /* multi_count changed */
5933 /* revalidate device */
5934 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
5935 ata_port_schedule_eh(ap);
5936 break;
5937
5938 case ATA_CMD_SLEEP:
5939 dev->flags |= ATA_DFLAG_SLEEPING;
5940 break;
5941 }
5942
5943 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
5944 ata_verify_xfer(qc);
5945
5946 __ata_qc_complete(qc);
5947 } else {
5948 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5949 return;
5950
5951 /* read result TF if failed or requested */
5952 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
5953 fill_result_tf(qc);
5954
5955 __ata_qc_complete(qc);
5956 }
5957 }
5958
5959 /**
5960 * ata_qc_complete_multiple - Complete multiple qcs successfully
5961 * @ap: port in question
5962 * @qc_active: new qc_active mask
5963 * @finish_qc: LLDD callback invoked before completing a qc
5964 *
5965 * Complete in-flight commands. This function is meant to be
5966 * called from the low-level driver's interrupt routine to complete
5967 * requests normally. ap->qc_active and @qc_active are compared
5968 * and commands are completed accordingly.
5969 *
5970 * LOCKING:
5971 * spin_lock_irqsave(host lock)
5972 *
5973 * RETURNS:
5974 * Number of completed commands on success, -errno otherwise.
5975 */
5976 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5977 void (*finish_qc)(struct ata_queued_cmd *))
5978 {
5979 int nr_done = 0;
5980 u32 done_mask;
5981 int i;
5982
5983 done_mask = ap->qc_active ^ qc_active;
5984
5985 if (unlikely(done_mask & qc_active)) {
5986 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5987 "(%08x->%08x)\n", ap->qc_active, qc_active);
5988 return -EINVAL;
5989 }
5990
5991 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5992 struct ata_queued_cmd *qc;
5993
5994 if (!(done_mask & (1 << i)))
5995 continue;
5996
5997 if ((qc = ata_qc_from_tag(ap, i))) {
5998 if (finish_qc)
5999 finish_qc(qc);
6000 ata_qc_complete(qc);
6001 nr_done++;
6002 }
6003 }
6004
6005 return nr_done;
6006 }
6007
6008 /**
6009 * ata_qc_issue - issue taskfile to device
6010 * @qc: command to issue to device
6011 *
6012 * Prepare an ATA command for submission to the device.
6013 * This includes mapping the data into a DMA-able
6014 * area, filling in the S/G table, and finally
6015 * writing the taskfile to hardware, starting the command.
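 *
 * Illustrative caller sketch (hypothetical names; this mirrors
 * what the SCSI translation layer does):
 *
 *	qc = ata_qc_new_init(dev);		NULL means defer
 *	... fill qc->tf, qc->tf.protocol, data direction ...
 *	ata_sg_init(qc, sgl, n_elem);
 *	qc->complete_fn = mydrv_qc_done;	hypothetical callback
 *	ata_qc_issue(qc);			on error the qc is
 *						completed internally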
6016 * 6017 * LOCKING: 6018 * spin_lock_irqsave(host lock) 6019 */ 6020 void ata_qc_issue(struct ata_queued_cmd *qc) 6021 { 6022 struct ata_port *ap = qc->ap; 6023 struct ata_link *link = qc->dev->link; 6024 u8 prot = qc->tf.protocol; 6025 6026 /* Make sure only one non-NCQ command is outstanding. The 6027 * check is skipped for old EH because it reuses active qc to 6028 * request ATAPI sense. 6029 */ 6030 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 6031 6032 if (ata_is_ncq(prot)) { 6033 WARN_ON(link->sactive & (1 << qc->tag)); 6034 6035 if (!link->sactive) 6036 ap->nr_active_links++; 6037 link->sactive |= 1 << qc->tag; 6038 } else { 6039 WARN_ON(link->sactive); 6040 6041 ap->nr_active_links++; 6042 link->active_tag = qc->tag; 6043 } 6044 6045 qc->flags |= ATA_QCFLAG_ACTIVE; 6046 ap->qc_active |= 1 << qc->tag; 6047 6048 /* We guarantee to LLDs that they will have at least one 6049 * non-zero sg if the command is a data command. 6050 */ 6051 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); 6052 6053 if (ata_is_dma(prot) || (ata_is_pio(prot) && 6054 (ap->flags & ATA_FLAG_PIO_DMA))) 6055 if (ata_sg_setup(qc)) 6056 goto sg_err; 6057 6058 /* if device is sleeping, schedule softreset and abort the link */ 6059 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 6060 link->eh_info.action |= ATA_EH_SOFTRESET; 6061 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 6062 ata_link_abort(link); 6063 return; 6064 } 6065 6066 ap->ops->qc_prep(qc); 6067 6068 qc->err_mask |= ap->ops->qc_issue(qc); 6069 if (unlikely(qc->err_mask)) 6070 goto err; 6071 return; 6072 6073 sg_err: 6074 qc->err_mask |= AC_ERR_SYSTEM; 6075 err: 6076 ata_qc_complete(qc); 6077 } 6078 6079 /** 6080 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner 6081 * @qc: command to issue to device 6082 * 6083 * Using various libata functions and hooks, this function 6084 * starts an ATA command. ATA commands are grouped into 6085 * classes called "protocols", and issuing each type of protocol 6086 * is slightly different. 6087 * 6088 * May be used as the qc_issue() entry in ata_port_operations. 6089 * 6090 * LOCKING: 6091 * spin_lock_irqsave(host lock) 6092 * 6093 * RETURNS: 6094 * Zero on success, AC_ERR_* mask on failure 6095 */ 6096 6097 unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) 6098 { 6099 struct ata_port *ap = qc->ap; 6100 6101 /* Use polling pio if the LLD doesn't handle 6102 * interrupt driven pio and atapi CDB interrupt. 
6103 */ 6104 if (ap->flags & ATA_FLAG_PIO_POLLING) { 6105 switch (qc->tf.protocol) { 6106 case ATA_PROT_PIO: 6107 case ATA_PROT_NODATA: 6108 case ATAPI_PROT_PIO: 6109 case ATAPI_PROT_NODATA: 6110 qc->tf.flags |= ATA_TFLAG_POLLING; 6111 break; 6112 case ATAPI_PROT_DMA: 6113 if (qc->dev->flags & ATA_DFLAG_CDB_INTR) 6114 /* see ata_dma_blacklisted() */ 6115 BUG(); 6116 break; 6117 default: 6118 break; 6119 } 6120 } 6121 6122 /* select the device */ 6123 ata_dev_select(ap, qc->dev->devno, 1, 0); 6124 6125 /* start the command */ 6126 switch (qc->tf.protocol) { 6127 case ATA_PROT_NODATA: 6128 if (qc->tf.flags & ATA_TFLAG_POLLING) 6129 ata_qc_set_polling(qc); 6130 6131 ata_tf_to_host(ap, &qc->tf); 6132 ap->hsm_task_state = HSM_ST_LAST; 6133 6134 if (qc->tf.flags & ATA_TFLAG_POLLING) 6135 ata_pio_queue_task(ap, qc, 0); 6136 6137 break; 6138 6139 case ATA_PROT_DMA: 6140 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); 6141 6142 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 6143 ap->ops->bmdma_setup(qc); /* set up bmdma */ 6144 ap->ops->bmdma_start(qc); /* initiate bmdma */ 6145 ap->hsm_task_state = HSM_ST_LAST; 6146 break; 6147 6148 case ATA_PROT_PIO: 6149 if (qc->tf.flags & ATA_TFLAG_POLLING) 6150 ata_qc_set_polling(qc); 6151 6152 ata_tf_to_host(ap, &qc->tf); 6153 6154 if (qc->tf.flags & ATA_TFLAG_WRITE) { 6155 /* PIO data out protocol */ 6156 ap->hsm_task_state = HSM_ST_FIRST; 6157 ata_pio_queue_task(ap, qc, 0); 6158 6159 /* always send first data block using 6160 * the ata_pio_task() codepath. 6161 */ 6162 } else { 6163 /* PIO data in protocol */ 6164 ap->hsm_task_state = HSM_ST; 6165 6166 if (qc->tf.flags & ATA_TFLAG_POLLING) 6167 ata_pio_queue_task(ap, qc, 0); 6168 6169 /* if polling, ata_pio_task() handles the rest. 6170 * otherwise, interrupt handler takes over from here. 6171 */ 6172 } 6173 6174 break; 6175 6176 case ATAPI_PROT_PIO: 6177 case ATAPI_PROT_NODATA: 6178 if (qc->tf.flags & ATA_TFLAG_POLLING) 6179 ata_qc_set_polling(qc); 6180 6181 ata_tf_to_host(ap, &qc->tf); 6182 6183 ap->hsm_task_state = HSM_ST_FIRST; 6184 6185 /* send cdb by polling if no cdb interrupt */ 6186 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) || 6187 (qc->tf.flags & ATA_TFLAG_POLLING)) 6188 ata_pio_queue_task(ap, qc, 0); 6189 break; 6190 6191 case ATAPI_PROT_DMA: 6192 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING); 6193 6194 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ 6195 ap->ops->bmdma_setup(qc); /* set up bmdma */ 6196 ap->hsm_task_state = HSM_ST_FIRST; 6197 6198 /* send cdb by polling if no cdb interrupt */ 6199 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 6200 ata_pio_queue_task(ap, qc, 0); 6201 break; 6202 6203 default: 6204 WARN_ON(1); 6205 return AC_ERR_SYSTEM; 6206 } 6207 6208 return 0; 6209 } 6210 6211 /** 6212 * ata_host_intr - Handle host interrupt for given (port, task) 6213 * @ap: Port on which interrupt arrived (possibly...) 6214 * @qc: Taskfile currently active in engine 6215 * 6216 * Handle host interrupt for given queued command. Currently, 6217 * only DMA interrupts are handled. All other commands are 6218 * handled via polling with interrupts disabled (nIEN bit). 6219 * 6220 * LOCKING: 6221 * spin_lock_irqsave(host lock) 6222 * 6223 * RETURNS: 6224 * One if interrupt was handled, zero if not (shared irq). 
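 *
 * A hypothetical LLD with its own interrupt handler would call it
 * roughly like this (sketch):
 *
 *      qc = ata_qc_from_tag(ap, ap->link.active_tag);
 *      if (qc && !(qc->tf.flags & ATA_TFLAG_POLLING))
 *              handled |= ata_host_intr(ap, qc);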
6225 */ 6226 6227 inline unsigned int ata_host_intr(struct ata_port *ap, 6228 struct ata_queued_cmd *qc) 6229 { 6230 struct ata_eh_info *ehi = &ap->link.eh_info; 6231 u8 status, host_stat = 0; 6232 6233 VPRINTK("ata%u: protocol %d task_state %d\n", 6234 ap->print_id, qc->tf.protocol, ap->hsm_task_state); 6235 6236 /* Check whether we are expecting interrupt in this state */ 6237 switch (ap->hsm_task_state) { 6238 case HSM_ST_FIRST: 6239 /* Some pre-ATAPI-4 devices assert INTRQ 6240 * in this state when ready to receive CDB. 6241 */ 6242 6243 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here. 6244 * The flag was turned on only for atapi devices. No 6245 * need to check ata_is_atapi(qc->tf.protocol) again. 6246 */ 6247 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) 6248 goto idle_irq; 6249 break; 6250 case HSM_ST_LAST: 6251 if (qc->tf.protocol == ATA_PROT_DMA || 6252 qc->tf.protocol == ATAPI_PROT_DMA) { 6253 /* check status of DMA engine */ 6254 host_stat = ap->ops->bmdma_status(ap); 6255 VPRINTK("ata%u: host_stat 0x%X\n", 6256 ap->print_id, host_stat); 6257 6258 /* if it's not our irq... */ 6259 if (!(host_stat & ATA_DMA_INTR)) 6260 goto idle_irq; 6261 6262 /* before we do anything else, clear DMA-Start bit */ 6263 ap->ops->bmdma_stop(qc); 6264 6265 if (unlikely(host_stat & ATA_DMA_ERR)) { 6266 /* error when transferring data to/from memory */ 6267 qc->err_mask |= AC_ERR_HOST_BUS; 6268 ap->hsm_task_state = HSM_ST_ERR; 6269 } 6270 } 6271 break; 6272 case HSM_ST: 6273 break; 6274 default: 6275 goto idle_irq; 6276 } 6277 6278 /* check altstatus */ 6279 status = ata_altstatus(ap); 6280 if (status & ATA_BUSY) 6281 goto idle_irq; 6282 6283 /* check main status, clearing INTRQ */ 6284 status = ata_chk_status(ap); 6285 if (unlikely(status & ATA_BUSY)) 6286 goto idle_irq; 6287 6288 /* ack bmdma irq events */ 6289 ap->ops->irq_clear(ap); 6290 6291 ata_hsm_move(ap, qc, status, 0); 6292 6293 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA || 6294 qc->tf.protocol == ATAPI_PROT_DMA)) 6295 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat); 6296 6297 return 1; /* irq handled */ 6298 6299 idle_irq: 6300 ap->stats.idle_irq++; 6301 6302 #ifdef ATA_IRQ_TRAP 6303 if ((ap->stats.idle_irq % 1000) == 0) { 6304 ata_chk_status(ap); 6305 ap->ops->irq_clear(ap); 6306 ata_port_printk(ap, KERN_WARNING, "irq trap\n"); 6307 return 1; 6308 } 6309 #endif 6310 return 0; /* irq not handled */ 6311 } 6312 6313 /** 6314 * ata_interrupt - Default ATA host interrupt handler 6315 * @irq: irq line (unused) 6316 * @dev_instance: pointer to our ata_host information structure 6317 * 6318 * Default interrupt handler for PCI IDE devices. Calls 6319 * ata_host_intr() for each port that is not disabled. 6320 * 6321 * LOCKING: 6322 * Obtains host lock during operation. 6323 * 6324 * RETURNS: 6325 * IRQ_NONE or IRQ_HANDLED.
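 *
 * A typical LLD hands this routine to the IRQ registration helper,
 * e.g. (sketch; DRV_NAME is a driver-local macro):
 *
 *      rc = devm_request_irq(host->dev, irq, ata_interrupt,
 *                            IRQF_SHARED, DRV_NAME, host);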
6326 */ 6327 6328 irqreturn_t ata_interrupt(int irq, void *dev_instance) 6329 { 6330 struct ata_host *host = dev_instance; 6331 unsigned int i; 6332 unsigned int handled = 0; 6333 unsigned long flags; 6334 6335 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */ 6336 spin_lock_irqsave(&host->lock, flags); 6337 6338 for (i = 0; i < host->n_ports; i++) { 6339 struct ata_port *ap; 6340 6341 ap = host->ports[i]; 6342 if (ap && 6343 !(ap->flags & ATA_FLAG_DISABLED)) { 6344 struct ata_queued_cmd *qc; 6345 6346 qc = ata_qc_from_tag(ap, ap->link.active_tag); 6347 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) && 6348 (qc->flags & ATA_QCFLAG_ACTIVE)) 6349 handled |= ata_host_intr(ap, qc); 6350 } 6351 } 6352 6353 spin_unlock_irqrestore(&host->lock, flags); 6354 6355 return IRQ_RETVAL(handled); 6356 } 6357 6358 /** 6359 * sata_scr_valid - test whether SCRs are accessible 6360 * @link: ATA link to test SCR accessibility for 6361 * 6362 * Test whether SCRs are accessible for @link. 6363 * 6364 * LOCKING: 6365 * None. 6366 * 6367 * RETURNS: 6368 * 1 if SCRs are accessible, 0 otherwise. 6369 */ 6370 int sata_scr_valid(struct ata_link *link) 6371 { 6372 struct ata_port *ap = link->ap; 6373 6374 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 6375 } 6376 6377 /** 6378 * sata_scr_read - read SCR register of the specified port 6379 * @link: ATA link to read SCR for 6380 * @reg: SCR to read 6381 * @val: Place to store read value 6382 * 6383 * Read SCR register @reg of @link into *@val. This function is 6384 * guaranteed to succeed if @link is ap->link, the cable type of 6385 * the port is SATA and the port implements ->scr_read. 6386 * 6387 * LOCKING: 6388 * None if @link is ap->link. Kernel thread context otherwise. 6389 * 6390 * RETURNS: 6391 * 0 on success, negative errno on failure. 6392 */ 6393 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 6394 { 6395 if (ata_is_host_link(link)) { 6396 struct ata_port *ap = link->ap; 6397 6398 if (sata_scr_valid(link)) 6399 return ap->ops->scr_read(ap, reg, val); 6400 return -EOPNOTSUPP; 6401 } 6402 6403 return sata_pmp_scr_read(link, reg, val); 6404 } 6405 6406 /** 6407 * sata_scr_write - write SCR register of the specified port 6408 * @link: ATA link to write SCR for 6409 * @reg: SCR to write 6410 * @val: value to write 6411 * 6412 * Write @val to SCR register @reg of @link. This function is 6413 * guaranteed to succeed if @link is ap->link, the cable type of 6414 * the port is SATA and the port implements ->scr_write. 6415 * 6416 * LOCKING: 6417 * None if @link is ap->link. Kernel thread context otherwise. 6418 * 6419 * RETURNS: 6420 * 0 on success, negative errno on failure. 6421 */ 6422 int sata_scr_write(struct ata_link *link, int reg, u32 val) 6423 { 6424 if (ata_is_host_link(link)) { 6425 struct ata_port *ap = link->ap; 6426 6427 if (sata_scr_valid(link)) 6428 return ap->ops->scr_write(ap, reg, val); 6429 return -EOPNOTSUPP; 6430 } 6431 6432 return sata_pmp_scr_write(link, reg, val); 6433 } 6434 6435 /** 6436 * sata_scr_write_flush - write SCR register of the specified port and flush 6437 * @link: ATA link to write SCR for 6438 * @reg: SCR to write 6439 * @val: value to write 6440 * 6441 * This function is identical to sata_scr_write() except that it 6442 * performs a flush after writing to the register. 6443 * 6444 * LOCKING: 6445 * None if @link is ap->link. Kernel thread context otherwise. 6446 * 6447 * RETURNS: 6448 * 0 on success, negative errno on failure.
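 *
 * Example (sketch): limiting the link to 1.5Gbps by updating the
 * SPD field of SControl and flushing the write:
 *
 *      if (sata_scr_read(link, SCR_CONTROL, &scontrol) == 0) {
 *              scontrol = (scontrol & ~0xf0) | 0x10;
 *              sata_scr_write_flush(link, SCR_CONTROL, scontrol);
 *      }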
6449 */ 6450 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 6451 { 6452 if (ata_is_host_link(link)) { 6453 struct ata_port *ap = link->ap; 6454 int rc; 6455 6456 if (sata_scr_valid(link)) { 6457 rc = ap->ops->scr_write(ap, reg, val); 6458 if (rc == 0) 6459 rc = ap->ops->scr_read(ap, reg, &val); 6460 return rc; 6461 } 6462 return -EOPNOTSUPP; 6463 } 6464 6465 return sata_pmp_scr_write(link, reg, val); 6466 } 6467 6468 /** 6469 * ata_link_online - test whether the given link is online 6470 * @link: ATA link to test 6471 * 6472 * Test whether @link is online. Note that this function returns 6473 * 0 if online status of @link cannot be obtained, so 6474 * ata_link_online(link) != !ata_link_offline(link). 6475 * 6476 * LOCKING: 6477 * None. 6478 * 6479 * RETURNS: 6480 * 1 if the port online status is available and online. 6481 */ 6482 int ata_link_online(struct ata_link *link) 6483 { 6484 u32 sstatus; 6485 6486 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 6487 (sstatus & 0xf) == 0x3) 6488 return 1; 6489 return 0; 6490 } 6491 6492 /** 6493 * ata_link_offline - test whether the given link is offline 6494 * @link: ATA link to test 6495 * 6496 * Test whether @link is offline. Note that this function 6497 * returns 0 if offline status of @link cannot be obtained, so 6498 * ata_link_online(link) != !ata_link_offline(link). 6499 * 6500 * LOCKING: 6501 * None. 6502 * 6503 * RETURNS: 6504 * 1 if the port offline status is available and offline. 6505 */ 6506 int ata_link_offline(struct ata_link *link) 6507 { 6508 u32 sstatus; 6509 6510 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 6511 (sstatus & 0xf) != 0x3) 6512 return 1; 6513 return 0; 6514 } 6515 6516 int ata_flush_cache(struct ata_device *dev) 6517 { 6518 unsigned int err_mask; 6519 u8 cmd; 6520 6521 if (!ata_try_flush_cache(dev)) 6522 return 0; 6523 6524 if (dev->flags & ATA_DFLAG_FLUSH_EXT) 6525 cmd = ATA_CMD_FLUSH_EXT; 6526 else 6527 cmd = ATA_CMD_FLUSH; 6528 6529 /* This is wrong. On a failed flush we get back the LBA of the lost 6530 sector and we should (assuming it wasn't aborted as unknown) issue 6531 a further flush command to continue the writeback until it 6532 does not error */ 6533 err_mask = ata_do_simple_cmd(dev, cmd); 6534 if (err_mask) { 6535 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n"); 6536 return -EIO; 6537 } 6538 6539 return 0; 6540 } 6541 6542 #ifdef CONFIG_PM 6543 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, 6544 unsigned int action, unsigned int ehi_flags, 6545 int wait) 6546 { 6547 unsigned long flags; 6548 int i, rc; 6549 6550 for (i = 0; i < host->n_ports; i++) { 6551 struct ata_port *ap = host->ports[i]; 6552 struct ata_link *link; 6553 6554 /* Previous resume operation might still be in 6555 * progress. Wait for PM_PENDING to clear. 
6556 */ 6557 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 6558 ata_port_wait_eh(ap); 6559 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 6560 } 6561 6562 /* request PM ops to EH */ 6563 spin_lock_irqsave(ap->lock, flags); 6564 6565 ap->pm_mesg = mesg; 6566 if (wait) { 6567 rc = 0; 6568 ap->pm_result = &rc; 6569 } 6570 6571 ap->pflags |= ATA_PFLAG_PM_PENDING; 6572 __ata_port_for_each_link(link, ap) { 6573 link->eh_info.action |= action; 6574 link->eh_info.flags |= ehi_flags; 6575 } 6576 6577 ata_port_schedule_eh(ap); 6578 6579 spin_unlock_irqrestore(ap->lock, flags); 6580 6581 /* wait and check result */ 6582 if (wait) { 6583 ata_port_wait_eh(ap); 6584 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 6585 if (rc) 6586 return rc; 6587 } 6588 } 6589 6590 return 0; 6591 } 6592 6593 /** 6594 * ata_host_suspend - suspend host 6595 * @host: host to suspend 6596 * @mesg: PM message 6597 * 6598 * Suspend @host. Actual operation is performed by EH. This 6599 * function requests EH to perform PM operations and waits for EH 6600 * to finish. 6601 * 6602 * LOCKING: 6603 * Kernel thread context (may sleep). 6604 * 6605 * RETURNS: 6606 * 0 on success, -errno on failure. 6607 */ 6608 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 6609 { 6610 int rc; 6611 6612 /* 6613 * disable link pm on all ports before requesting 6614 * any pm activity 6615 */ 6616 ata_lpm_enable(host); 6617 6618 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 6619 if (rc == 0) 6620 host->dev->power.power_state = mesg; 6621 return rc; 6622 } 6623 6624 /** 6625 * ata_host_resume - resume host 6626 * @host: host to resume 6627 * 6628 * Resume @host. Actual operation is performed by EH. This 6629 * function requests EH to perform PM operations and returns. 6630 * Note that all resume operations are performed in parallel. 6631 * 6632 * LOCKING: 6633 * Kernel thread context (may sleep). 6634 */ 6635 void ata_host_resume(struct ata_host *host) 6636 { 6637 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET, 6638 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 6639 host->dev->power.power_state = PMSG_ON; 6640 6641 /* reenable link pm */ 6642 ata_lpm_disable(host); 6643 } 6644 #endif 6645 6646 /** 6647 * ata_port_start - Set port up for dma. 6648 * @ap: Port to initialize 6649 * 6650 * Called just after data structures for each port are 6651 * initialized. Allocates space for PRD table. 6652 * 6653 * May be used as the port_start() entry in ata_port_operations. 6654 * 6655 * LOCKING: 6656 * Inherited from caller. 6657 */ 6658 int ata_port_start(struct ata_port *ap) 6659 { 6660 struct device *dev = ap->dev; 6661 6662 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, 6663 GFP_KERNEL); 6664 if (!ap->prd) 6665 return -ENOMEM; 6666 6667 return 0; 6668 } 6669 6670 /** 6671 * ata_dev_init - Initialize an ata_device structure 6672 * @dev: Device structure to initialize 6673 * 6674 * Initialize @dev in preparation for probing. 6675 * 6676 * LOCKING: 6677 * Inherited from caller. 6678 */ 6679 void ata_dev_init(struct ata_device *dev) 6680 { 6681 struct ata_link *link = dev->link; 6682 struct ata_port *ap = link->ap; 6683 unsigned long flags; 6684 6685 /* SATA spd limit is bound to the first device */ 6686 link->sata_spd_limit = link->hw_sata_spd_limit; 6687 link->sata_spd = 0; 6688 6689 /* High bits of dev->flags are used to record warm plug 6690 * requests which occur asynchronously. Synchronize using 6691 * host lock.
6692 */ 6693 spin_lock_irqsave(ap->lock, flags); 6694 dev->flags &= ~ATA_DFLAG_INIT_MASK; 6695 dev->horkage = 0; 6696 spin_unlock_irqrestore(ap->lock, flags); 6697 6698 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0, 6699 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET); 6700 dev->pio_mask = UINT_MAX; 6701 dev->mwdma_mask = UINT_MAX; 6702 dev->udma_mask = UINT_MAX; 6703 } 6704 6705 /** 6706 * ata_link_init - Initialize an ata_link structure 6707 * @ap: ATA port link is attached to 6708 * @link: Link structure to initialize 6709 * @pmp: Port multiplier port number 6710 * 6711 * Initialize @link. 6712 * 6713 * LOCKING: 6714 * Kernel thread context (may sleep) 6715 */ 6716 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 6717 { 6718 int i; 6719 6720 /* clear everything except for devices */ 6721 memset(link, 0, offsetof(struct ata_link, device[0])); 6722 6723 link->ap = ap; 6724 link->pmp = pmp; 6725 link->active_tag = ATA_TAG_POISON; 6726 link->hw_sata_spd_limit = UINT_MAX; 6727 6728 /* can't use iterator, ap isn't initialized yet */ 6729 for (i = 0; i < ATA_MAX_DEVICES; i++) { 6730 struct ata_device *dev = &link->device[i]; 6731 6732 dev->link = link; 6733 dev->devno = dev - link->device; 6734 ata_dev_init(dev); 6735 } 6736 } 6737 6738 /** 6739 * sata_link_init_spd - Initialize link->sata_spd_limit 6740 * @link: Link to configure sata_spd_limit for 6741 * 6742 * Initialize @link->[hw_]sata_spd_limit to the currently 6743 * configured value. 6744 * 6745 * LOCKING: 6746 * Kernel thread context (may sleep). 6747 * 6748 * RETURNS: 6749 * 0 on success, -errno on failure. 6750 */ 6751 int sata_link_init_spd(struct ata_link *link) 6752 { 6753 u32 scontrol; 6754 u8 spd; 6755 int rc; 6756 6757 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 6758 if (rc) 6759 return rc; 6760 6761 spd = (scontrol >> 4) & 0xf; 6762 if (spd) 6763 link->hw_sata_spd_limit &= (1 << spd) - 1; 6764 6765 ata_force_spd_limit(link); 6766 6767 link->sata_spd_limit = link->hw_sata_spd_limit; 6768 6769 return 0; 6770 } 6771 6772 /** 6773 * ata_port_alloc - allocate and initialize basic ATA port resources 6774 * @host: ATA host this allocated port belongs to 6775 * 6776 * Allocate and initialize basic ATA port resources. 6777 * 6778 * RETURNS: 6779 * Allocated ATA port on success, NULL on failure. 6780 * 6781 * LOCKING: 6782 * Inherited from calling layer (may sleep).
6783 */ 6784 struct ata_port *ata_port_alloc(struct ata_host *host) 6785 { 6786 struct ata_port *ap; 6787 6788 DPRINTK("ENTER\n"); 6789 6790 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 6791 if (!ap) 6792 return NULL; 6793 6794 ap->pflags |= ATA_PFLAG_INITIALIZING; 6795 ap->lock = &host->lock; 6796 ap->flags = ATA_FLAG_DISABLED; 6797 ap->print_id = -1; 6798 ap->ctl = ATA_DEVCTL_OBS; 6799 ap->host = host; 6800 ap->dev = host->dev; 6801 ap->last_ctl = 0xFF; 6802 6803 #if defined(ATA_VERBOSE_DEBUG) 6804 /* turn on all debugging levels */ 6805 ap->msg_enable = 0x00FF; 6806 #elif defined(ATA_DEBUG) 6807 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 6808 #else 6809 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 6810 #endif 6811 6812 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); 6813 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 6814 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 6815 INIT_LIST_HEAD(&ap->eh_done_q); 6816 init_waitqueue_head(&ap->eh_wait_q); 6817 init_timer_deferrable(&ap->fastdrain_timer); 6818 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 6819 ap->fastdrain_timer.data = (unsigned long)ap; 6820 6821 ap->cbl = ATA_CBL_NONE; 6822 6823 ata_link_init(ap, &ap->link, 0); 6824 6825 #ifdef ATA_IRQ_TRAP 6826 ap->stats.unhandled_irq = 1; 6827 ap->stats.idle_irq = 1; 6828 #endif 6829 return ap; 6830 } 6831 6832 static void ata_host_release(struct device *gendev, void *res) 6833 { 6834 struct ata_host *host = dev_get_drvdata(gendev); 6835 int i; 6836 6837 for (i = 0; i < host->n_ports; i++) { 6838 struct ata_port *ap = host->ports[i]; 6839 6840 if (!ap) 6841 continue; 6842 6843 if (ap->scsi_host) 6844 scsi_host_put(ap->scsi_host); 6845 6846 kfree(ap->pmp_link); 6847 kfree(ap); 6848 host->ports[i] = NULL; 6849 } 6850 6851 dev_set_drvdata(gendev, NULL); 6852 } 6853 6854 /** 6855 * ata_host_alloc - allocate and init basic ATA host resources 6856 * @dev: generic device this host is associated with 6857 * @max_ports: maximum number of ATA ports associated with this host 6858 * 6859 * Allocate and initialize basic ATA host resources. An LLD calls 6860 * this function to allocate a host, fully initializes it and 6861 * attaches it using ata_host_register(). 6862 * 6863 * @max_ports ports are allocated and host->n_ports is 6864 * initialized to @max_ports. The caller is allowed to decrease 6865 * host->n_ports before calling ata_host_register(). The unused 6866 * ports will be automatically freed on registration. 6867 * 6868 * RETURNS: 6869 * Allocated ATA host on success, NULL on failure. 6870 * 6871 * LOCKING: 6872 * Inherited from calling layer (may sleep).
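 *
 * A typical LLD probe path is sketched below (error unwinding and
 * per-port setup elided; @sht is the driver's scsi_host_template):
 *
 *      host = ata_host_alloc(dev, n_ports);
 *      if (!host)
 *              return -ENOMEM;
 *      rc = ata_host_start(host);
 *      if (rc == 0)
 *              rc = ata_host_register(host, &sht);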
6873 */ 6874 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 6875 { 6876 struct ata_host *host; 6877 size_t sz; 6878 int i; 6879 6880 DPRINTK("ENTER\n"); 6881 6882 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 6883 return NULL; 6884 6885 /* alloc a container for our list of ATA ports (buses) */ 6886 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 6888 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 6889 if (!host) 6890 goto err_out; 6891 6892 devres_add(dev, host); 6893 dev_set_drvdata(dev, host); 6894 6895 spin_lock_init(&host->lock); 6896 host->dev = dev; 6897 host->n_ports = max_ports; 6898 6899 /* allocate ports bound to this host */ 6900 for (i = 0; i < max_ports; i++) { 6901 struct ata_port *ap; 6902 6903 ap = ata_port_alloc(host); 6904 if (!ap) 6905 goto err_out; 6906 6907 ap->port_no = i; 6908 host->ports[i] = ap; 6909 } 6910 6911 devres_remove_group(dev, NULL); 6912 return host; 6913 6914 err_out: 6915 devres_release_group(dev, NULL); 6916 return NULL; 6917 } 6918 6919 /** 6920 * ata_host_alloc_pinfo - alloc host and init with port_info array 6921 * @dev: generic device this host is associated with 6922 * @ppi: array of ATA port_info to initialize host with 6923 * @n_ports: number of ATA ports attached to this host 6924 * 6925 * Allocate ATA host and initialize with info from @ppi. If NULL 6926 * terminated, @ppi may contain fewer entries than @n_ports. The 6927 * last entry will be used for the remaining ports. 6928 * 6929 * RETURNS: 6930 * Allocated ATA host on success, NULL on failure. 6931 * 6932 * LOCKING: 6933 * Inherited from calling layer (may sleep). 6934 */ 6935 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 6936 const struct ata_port_info * const * ppi, 6937 int n_ports) 6938 { 6939 const struct ata_port_info *pi; 6940 struct ata_host *host; 6941 int i, j; 6942 6943 host = ata_host_alloc(dev, n_ports); 6944 if (!host) 6945 return NULL; 6946 6947 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 6948 struct ata_port *ap = host->ports[i]; 6949 6950 if (ppi[j]) 6951 pi = ppi[j++]; 6952 6953 ap->pio_mask = pi->pio_mask; 6954 ap->mwdma_mask = pi->mwdma_mask; 6955 ap->udma_mask = pi->udma_mask; 6956 ap->flags |= pi->flags; 6957 ap->link.flags |= pi->link_flags; 6958 ap->ops = pi->port_ops; 6959 6960 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 6961 host->ops = pi->port_ops; 6962 if (!host->private_data && pi->private_data) 6963 host->private_data = pi->private_data; 6964 } 6965 6966 return host; 6967 } 6968 6969 static void ata_host_stop(struct device *gendev, void *res) 6970 { 6971 struct ata_host *host = dev_get_drvdata(gendev); 6972 int i; 6973 6974 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 6975 6976 for (i = 0; i < host->n_ports; i++) { 6977 struct ata_port *ap = host->ports[i]; 6978 6979 if (ap->ops->port_stop) 6980 ap->ops->port_stop(ap); 6981 } 6982 6983 if (host->ops->host_stop) 6984 host->ops->host_stop(host); 6985 } 6986 6987 /** 6988 * ata_host_start - start and freeze ports of an ATA host 6989 * @host: ATA host to start ports for 6990 * 6991 * Start and then freeze ports of @host. Started status is 6992 * recorded in host->flags, so this function can be called 6993 * multiple times. Ports are guaranteed to get started only 6994 * once. If host->ops isn't initialized yet, it's set to the 6995 * first non-dummy port ops. 6996 * 6997 * LOCKING: 6998 * Inherited from calling layer (may sleep).
6999 * 7000 * RETURNS: 7001 * 0 if all ports are started successfully, -errno otherwise. 7002 */ 7003 int ata_host_start(struct ata_host *host) 7004 { 7005 int have_stop = 0; 7006 void *start_dr = NULL; 7007 int i, rc; 7008 7009 if (host->flags & ATA_HOST_STARTED) 7010 return 0; 7011 7012 for (i = 0; i < host->n_ports; i++) { 7013 struct ata_port *ap = host->ports[i]; 7014 7015 if (!host->ops && !ata_port_is_dummy(ap)) 7016 host->ops = ap->ops; 7017 7018 if (ap->ops->port_stop) 7019 have_stop = 1; 7020 } 7021 7022 if (host->ops->host_stop) 7023 have_stop = 1; 7024 7025 if (have_stop) { 7026 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 7027 if (!start_dr) 7028 return -ENOMEM; 7029 } 7030 7031 for (i = 0; i < host->n_ports; i++) { 7032 struct ata_port *ap = host->ports[i]; 7033 7034 if (ap->ops->port_start) { 7035 rc = ap->ops->port_start(ap); 7036 if (rc) { 7037 if (rc != -ENODEV) 7038 dev_printk(KERN_ERR, host->dev, 7039 "failed to start port %d " 7040 "(errno=%d)\n", i, rc); 7041 goto err_out; 7042 } 7043 } 7044 ata_eh_freeze_port(ap); 7045 } 7046 7047 if (start_dr) 7048 devres_add(host->dev, start_dr); 7049 host->flags |= ATA_HOST_STARTED; 7050 return 0; 7051 7052 err_out: 7053 while (--i >= 0) { 7054 struct ata_port *ap = host->ports[i]; 7055 7056 if (ap->ops->port_stop) 7057 ap->ops->port_stop(ap); 7058 } 7059 devres_free(start_dr); 7060 return rc; 7061 } 7062 7063 /** 7064 * ata_host_init - Initialize a host struct 7065 * @host: host to initialize 7066 * @dev: device host is attached to 7067 * @flags: host flags 7068 * @ops: port_ops 7069 * 7070 * LOCKING: 7071 * PCI/etc. bus probe sem. 7072 * 7073 */ 7074 /* KILLME - the only user left is ipr */ 7075 void ata_host_init(struct ata_host *host, struct device *dev, 7076 unsigned long flags, const struct ata_port_operations *ops) 7077 { 7078 spin_lock_init(&host->lock); 7079 host->dev = dev; 7080 host->flags = flags; 7081 host->ops = ops; 7082 } 7083 7084 /** 7085 * ata_host_register - register initialized ATA host 7086 * @host: ATA host to register 7087 * @sht: template for SCSI host 7088 * 7089 * Register initialized ATA host. @host is allocated using 7090 * ata_host_alloc() and fully initialized by LLD. This function 7091 * starts ports, registers @host with ATA and SCSI layers and 7092 * probes registered devices. 7093 * 7094 * LOCKING: 7095 * Inherited from calling layer (may sleep). 7096 * 7097 * RETURNS: 7098 * 0 on success, -errno otherwise. 7099 */ 7100 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 7101 { 7102 int i, rc; 7103 7104 /* host must have been started */ 7105 if (!(host->flags & ATA_HOST_STARTED)) { 7106 dev_printk(KERN_ERR, host->dev, 7107 "BUG: trying to register unstarted host\n"); 7108 WARN_ON(1); 7109 return -EINVAL; 7110 } 7111 7112 /* Blow away unused ports. This happens when LLD can't 7113 * determine the exact number of ports to allocate at 7114 * allocation time.
7115 */ 7116 for (i = host->n_ports; host->ports[i]; i++) 7117 kfree(host->ports[i]); 7118 7119 /* give ports names and add SCSI hosts */ 7120 for (i = 0; i < host->n_ports; i++) 7121 host->ports[i]->print_id = ata_print_id++; 7122 7123 rc = ata_scsi_add_hosts(host, sht); 7124 if (rc) 7125 return rc; 7126 7127 /* associate with ACPI nodes */ 7128 ata_acpi_associate(host); 7129 7130 /* set cable, sata_spd_limit and report */ 7131 for (i = 0; i < host->n_ports; i++) { 7132 struct ata_port *ap = host->ports[i]; 7133 unsigned long xfer_mask; 7134 7135 /* set SATA cable type if still unset */ 7136 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 7137 ap->cbl = ATA_CBL_SATA; 7138 7139 /* init sata_spd_limit to the current value */ 7140 sata_link_init_spd(&ap->link); 7141 7142 /* print per-port info to dmesg */ 7143 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 7144 ap->udma_mask); 7145 7146 if (!ata_port_is_dummy(ap)) { 7147 ata_port_printk(ap, KERN_INFO, 7148 "%cATA max %s %s\n", 7149 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P', 7150 ata_mode_string(xfer_mask), 7151 ap->link.eh_info.desc); 7152 ata_ehi_clear_desc(&ap->link.eh_info); 7153 } else 7154 ata_port_printk(ap, KERN_INFO, "DUMMY\n"); 7155 } 7156 7157 /* perform each probe synchronously */ 7158 DPRINTK("probe begin\n"); 7159 for (i = 0; i < host->n_ports; i++) { 7160 struct ata_port *ap = host->ports[i]; 7161 7162 /* probe */ 7163 if (ap->ops->error_handler) { 7164 struct ata_eh_info *ehi = &ap->link.eh_info; 7165 unsigned long flags; 7166 7167 ata_port_probe(ap); 7168 7169 /* kick EH for boot probing */ 7170 spin_lock_irqsave(ap->lock, flags); 7171 7172 ehi->probe_mask = 7173 (1 << ata_link_max_devices(&ap->link)) - 1; 7174 ehi->action |= ATA_EH_SOFTRESET; 7175 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 7176 7177 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 7178 ap->pflags |= ATA_PFLAG_LOADING; 7179 ata_port_schedule_eh(ap); 7180 7181 spin_unlock_irqrestore(ap->lock, flags); 7182 7183 /* wait for EH to finish */ 7184 ata_port_wait_eh(ap); 7185 } else { 7186 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 7187 rc = ata_bus_probe(ap); 7188 DPRINTK("ata%u: bus probe end\n", ap->print_id); 7189 7190 if (rc) { 7191 /* FIXME: do something useful here? 7192 * Current libata behavior will 7193 * tear down everything when 7194 * the module is removed 7195 * or the h/w is unplugged. 7196 */ 7197 } 7198 } 7199 } 7200 7201 /* probes are done, now scan each port's disk(s) */ 7202 DPRINTK("host probe begin\n"); 7203 for (i = 0; i < host->n_ports; i++) { 7204 struct ata_port *ap = host->ports[i]; 7205 7206 ata_scsi_scan_host(ap, 1); 7207 ata_lpm_schedule(ap, ap->pm_policy); 7208 } 7209 7210 return 0; 7211 } 7212 7213 /** 7214 * ata_host_activate - start host, request IRQ and register it 7215 * @host: target ATA host 7216 * @irq: IRQ to request 7217 * @irq_handler: irq_handler used when requesting IRQ 7218 * @irq_flags: irq_flags used when requesting IRQ 7219 * @sht: scsi_host_template to use when registering the host 7220 * 7221 * After allocating an ATA host and initializing it, most libata 7222 * LLDs perform three steps to activate the host - start host, 7223 * request IRQ and register it. This helper takes necessary 7224 * arguments and performs the three steps in one go. 7225 * 7226 * An invalid IRQ skips the IRQ registration and expects the host to 7227 * have set polling mode on the port. In this case, @irq_handler 7228 * should be NULL. 7229 * 7230 * LOCKING: 7231 * Inherited from calling layer (may sleep).
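 *
 * A driver that operates strictly by polling can pass zero and a
 * NULL handler, e.g. (sketch):
 *
 *      rc = ata_host_activate(host, 0, NULL, 0, &sht);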
7232 * 7233 * RETURNS: 7234 * 0 on success, -errno otherwise. 7235 */ 7236 int ata_host_activate(struct ata_host *host, int irq, 7237 irq_handler_t irq_handler, unsigned long irq_flags, 7238 struct scsi_host_template *sht) 7239 { 7240 int i, rc; 7241 7242 rc = ata_host_start(host); 7243 if (rc) 7244 return rc; 7245 7246 /* Special case for polling mode */ 7247 if (!irq) { 7248 WARN_ON(irq_handler); 7249 return ata_host_register(host, sht); 7250 } 7251 7252 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 7253 dev_driver_string(host->dev), host); 7254 if (rc) 7255 return rc; 7256 7257 for (i = 0; i < host->n_ports; i++) 7258 ata_port_desc(host->ports[i], "irq %d", irq); 7259 7260 rc = ata_host_register(host, sht); 7261 /* if failed, just free the IRQ and leave ports alone */ 7262 if (rc) 7263 devm_free_irq(host->dev, irq, host); 7264 7265 return rc; 7266 } 7267 7268 /** 7269 * ata_port_detach - Detach ATA port in preparation for device removal 7270 * @ap: ATA port to be detached 7271 * 7272 * Detach all ATA devices and the associated SCSI devices of @ap; 7273 * then, remove the associated SCSI host. @ap is guaranteed to 7274 * be quiescent on return from this function. 7275 * 7276 * LOCKING: 7277 * Kernel thread context (may sleep). 7278 */ 7279 static void ata_port_detach(struct ata_port *ap) 7280 { 7281 unsigned long flags; 7282 struct ata_link *link; 7283 struct ata_device *dev; 7284 7285 if (!ap->ops->error_handler) 7286 goto skip_eh; 7287 7288 /* tell EH we're leaving & flush EH */ 7289 spin_lock_irqsave(ap->lock, flags); 7290 ap->pflags |= ATA_PFLAG_UNLOADING; 7291 spin_unlock_irqrestore(ap->lock, flags); 7292 7293 ata_port_wait_eh(ap); 7294 7295 /* EH is now guaranteed to see UNLOADING - EH context belongs 7296 * to us. Disable all existing devices. 7297 */ 7298 ata_port_for_each_link(link, ap) { 7299 ata_link_for_each_dev(dev, link) 7300 ata_dev_disable(dev); 7301 } 7302 7303 /* Final freeze & EH. All in-flight commands are aborted. EH 7304 * will be skipped and retries will be terminated with bad 7305 * target. 7306 */ 7307 spin_lock_irqsave(ap->lock, flags); 7308 ata_port_freeze(ap); /* won't be thawed */ 7309 spin_unlock_irqrestore(ap->lock, flags); 7310 7311 ata_port_wait_eh(ap); 7312 cancel_rearming_delayed_work(&ap->hotplug_task); 7313 7314 skip_eh: 7315 /* remove the associated SCSI host */ 7316 scsi_remove_host(ap->scsi_host); 7317 } 7318 7319 /** 7320 * ata_host_detach - Detach all ports of an ATA host 7321 * @host: Host to detach 7322 * 7323 * Detach all ports of @host. 7324 * 7325 * LOCKING: 7326 * Kernel thread context (may sleep). 7327 */ 7328 void ata_host_detach(struct ata_host *host) 7329 { 7330 int i; 7331 7332 for (i = 0; i < host->n_ports; i++) 7333 ata_port_detach(host->ports[i]); 7334 7335 /* the host is dead now, dissociate ACPI */ 7336 ata_acpi_dissociate(host); 7337 } 7338 7339 /** 7340 * ata_std_ports - initialize ioaddr with standard port offsets. 7341 * @ioaddr: IO address structure to be initialized 7342 * 7343 * Utility function which initializes data_addr, error_addr, 7344 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr, 7345 * device_addr, status_addr, and command_addr to standard offsets 7346 * relative to cmd_addr. 7347 * 7348 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
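 *
 * The caller fills in cmd_addr (and the addresses this helper does
 * not cover) beforehand, e.g. (sketch; @iomap obtained from
 * pcim_iomap_table()):
 *
 *      ioaddr->cmd_addr = iomap[0];
 *      ata_std_ports(ioaddr);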
7349 */ 7350 7351 void ata_std_ports(struct ata_ioports *ioaddr) 7352 { 7353 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA; 7354 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR; 7355 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE; 7356 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT; 7357 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL; 7358 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM; 7359 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH; 7360 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE; 7361 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS; 7362 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD; 7363 } 7364 7365 7366 #ifdef CONFIG_PCI 7367 7368 /** 7369 * ata_pci_remove_one - PCI layer callback for device removal 7370 * @pdev: PCI device that was removed 7371 * 7372 * PCI layer indicates to libata via this hook that hot-unplug or 7373 * module unload event has occurred. Detach all ports. Resource 7374 * release is handled via devres. 7375 * 7376 * LOCKING: 7377 * Inherited from PCI layer (may sleep). 7378 */ 7379 void ata_pci_remove_one(struct pci_dev *pdev) 7380 { 7381 struct device *dev = &pdev->dev; 7382 struct ata_host *host = dev_get_drvdata(dev); 7383 7384 ata_host_detach(host); 7385 } 7386 7387 /* move to PCI subsystem */ 7388 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 7389 { 7390 unsigned long tmp = 0; 7391 7392 switch (bits->width) { 7393 case 1: { 7394 u8 tmp8 = 0; 7395 pci_read_config_byte(pdev, bits->reg, &tmp8); 7396 tmp = tmp8; 7397 break; 7398 } 7399 case 2: { 7400 u16 tmp16 = 0; 7401 pci_read_config_word(pdev, bits->reg, &tmp16); 7402 tmp = tmp16; 7403 break; 7404 } 7405 case 4: { 7406 u32 tmp32 = 0; 7407 pci_read_config_dword(pdev, bits->reg, &tmp32); 7408 tmp = tmp32; 7409 break; 7410 } 7411 7412 default: 7413 return -EINVAL; 7414 } 7415 7416 tmp &= bits->mask; 7417 7418 return (tmp == bits->val) ? 1 : 0; 7419 } 7420 7421 #ifdef CONFIG_PM 7422 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 7423 { 7424 pci_save_state(pdev); 7425 pci_disable_device(pdev); 7426 7427 if (mesg.event & PM_EVENT_SLEEP) 7428 pci_set_power_state(pdev, PCI_D3hot); 7429 } 7430 7431 int ata_pci_device_do_resume(struct pci_dev *pdev) 7432 { 7433 int rc; 7434 7435 pci_set_power_state(pdev, PCI_D0); 7436 pci_restore_state(pdev); 7437 7438 rc = pcim_enable_device(pdev); 7439 if (rc) { 7440 dev_printk(KERN_ERR, &pdev->dev, 7441 "failed to enable device after resume (%d)\n", rc); 7442 return rc; 7443 } 7444 7445 pci_set_master(pdev); 7446 return 0; 7447 } 7448 7449 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 7450 { 7451 struct ata_host *host = dev_get_drvdata(&pdev->dev); 7452 int rc = 0; 7453 7454 rc = ata_host_suspend(host, mesg); 7455 if (rc) 7456 return rc; 7457 7458 ata_pci_device_do_suspend(pdev, mesg); 7459 7460 return 0; 7461 } 7462 7463 int ata_pci_device_resume(struct pci_dev *pdev) 7464 { 7465 struct ata_host *host = dev_get_drvdata(&pdev->dev); 7466 int rc; 7467 7468 rc = ata_pci_device_do_resume(pdev); 7469 if (rc == 0) 7470 ata_host_resume(host); 7471 return rc; 7472 } 7473 #endif /* CONFIG_PM */ 7474 7475 #endif /* CONFIG_PCI */ 7476 7477 static int __init ata_parse_force_one(char **cur, 7478 struct ata_force_ent *force_ent, 7479 const char **reason) 7480 { 7481 /* FIXME: Currently, there's no way to tag init const data and 7482 * using __initdata causes build failure on some versions of 7483 * gcc. 
Once __initdataconst is implemented, add const to the 7484 * following structure. 7485 */ 7486 static struct ata_force_param force_tbl[] __initdata = { 7487 { "40c", .cbl = ATA_CBL_PATA40 }, 7488 { "80c", .cbl = ATA_CBL_PATA80 }, 7489 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 7490 { "unk", .cbl = ATA_CBL_PATA_UNK }, 7491 { "ign", .cbl = ATA_CBL_PATA_IGN }, 7492 { "sata", .cbl = ATA_CBL_SATA }, 7493 { "1.5Gbps", .spd_limit = 1 }, 7494 { "3.0Gbps", .spd_limit = 2 }, 7495 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 7496 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 7497 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 7498 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 7499 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 7500 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 7501 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 7502 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 7503 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 7504 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 7505 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 7506 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 7507 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 7508 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 7509 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 7510 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 7511 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 7512 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 7513 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 7514 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 7515 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 7516 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 7517 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 7518 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 7519 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 7520 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 7521 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 7522 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 7523 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 7524 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 7525 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 7526 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 7527 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 7528 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 7529 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 7530 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 7531 }; 7532 char *start = *cur, *p = *cur; 7533 char *id, *val, *endp; 7534 const struct ata_force_param *match_fp = NULL; 7535 int nr_matches = 0, i; 7536 7537 /* find where this param ends and update *cur */ 7538 while (*p != '\0' && *p != ',') 7539 p++; 7540 7541 if (*p == '\0') 7542 *cur = p; 7543 else 7544 *cur = p + 1; 7545 7546 *p = '\0'; 7547 7548 /* parse */ 7549 p = strchr(start, ':'); 7550 if (!p) { 7551 val = strstrip(start); 7552 goto parse_val; 7553 } 7554 *p = '\0'; 7555 7556 id = strstrip(start); 7557 val = strstrip(p + 1); 7558 7559 /* parse id */ 7560 p = strchr(id, '.'); 7561 if (p) { 7562 *p++ = '\0'; 7563 force_ent->device = simple_strtoul(p, &endp, 10); 7564 if (p == endp || *endp != '\0') { 7565 *reason = "invalid device"; 7566 return -EINVAL; 7567 } 7568 } 7569 7570 force_ent->port = simple_strtoul(id, &endp, 10); 7571 if (p == endp || *endp != '\0') { 7572 *reason = "invalid port/link"; 7573 return -EINVAL; 7574 } 7575 7576 parse_val: 7577 /* parse 
val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 7578 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 7579 const struct ata_force_param *fp = &force_tbl[i]; 7580 7581 if (strncasecmp(val, fp->name, strlen(val))) 7582 continue; 7583 7584 nr_matches++; 7585 match_fp = fp; 7586 7587 if (strcasecmp(val, fp->name) == 0) { 7588 nr_matches = 1; 7589 break; 7590 } 7591 } 7592 7593 if (!nr_matches) { 7594 *reason = "unknown value"; 7595 return -EINVAL; 7596 } 7597 if (nr_matches > 1) { 7598 *reason = "ambiguous value"; 7599 return -EINVAL; 7600 } 7601 7602 force_ent->param = *match_fp; 7603 7604 return 0; 7605 } 7606 7607 static void __init ata_parse_force_param(void) 7608 { 7609 int idx = 0, size = 1; 7610 int last_port = -1, last_device = -1; 7611 char *p, *cur, *next; 7612 7613 /* calculate maximum number of params and allocate force_tbl */ 7614 for (p = ata_force_param_buf; *p; p++) 7615 if (*p == ',') 7616 size++; 7617 7618 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); 7619 if (!ata_force_tbl) { 7620 printk(KERN_WARNING "ata: failed to extend force table, " 7621 "libata.force ignored\n"); 7622 return; 7623 } 7624 7625 /* parse and populate the table */ 7626 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 7627 const char *reason = ""; 7628 struct ata_force_ent te = { .port = -1, .device = -1 }; 7629 7630 next = cur; 7631 if (ata_parse_force_one(&next, &te, &reason)) { 7632 printk(KERN_WARNING "ata: failed to parse force " 7633 "parameter \"%s\" (%s)\n", 7634 cur, reason); 7635 continue; 7636 } 7637 7638 if (te.port == -1) { 7639 te.port = last_port; 7640 te.device = last_device; 7641 } 7642 7643 ata_force_tbl[idx++] = te; 7644 7645 last_port = te.port; 7646 last_device = te.device; 7647 } 7648 7649 ata_force_tbl_size = idx; 7650 } 7651 7652 static int __init ata_init(void) 7653 { 7654 ata_probe_timeout *= HZ; 7655 7656 ata_parse_force_param(); 7657 7658 ata_wq = create_workqueue("ata"); 7659 if (!ata_wq) 7660 return -ENOMEM; 7661 7662 ata_aux_wq = create_singlethread_workqueue("ata_aux"); 7663 if (!ata_aux_wq) { 7664 destroy_workqueue(ata_wq); 7665 return -ENOMEM; 7666 } 7667 7668 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 7669 return 0; 7670 } 7671 7672 static void __exit ata_exit(void) 7673 { 7674 kfree(ata_force_tbl); 7675 destroy_workqueue(ata_wq); 7676 destroy_workqueue(ata_aux_wq); 7677 } 7678 7679 subsys_initcall(ata_init); 7680 module_exit(ata_exit); 7681 7682 static unsigned long ratelimit_time; 7683 static DEFINE_SPINLOCK(ata_ratelimit_lock); 7684 7685 int ata_ratelimit(void) 7686 { 7687 int rc; 7688 unsigned long flags; 7689 7690 spin_lock_irqsave(&ata_ratelimit_lock, flags); 7691 7692 if (time_after(jiffies, ratelimit_time)) { 7693 rc = 1; 7694 ratelimit_time = jiffies + (HZ/5); 7695 } else 7696 rc = 0; 7697 7698 spin_unlock_irqrestore(&ata_ratelimit_lock, flags); 7699 7700 return rc; 7701 } 7702 7703 /** 7704 * ata_wait_register - wait until register value changes 7705 * @reg: IO-mapped register 7706 * @mask: Mask to apply to read register value 7707 * @val: Wait condition 7708 * @interval_msec: polling interval in milliseconds 7709 * @timeout_msec: timeout in milliseconds 7710 * 7711 * Waiting for some bits of register to change is a common 7712 * operation for ATA controllers. This function reads 32bit LE 7713 * IO-mapped register @reg and tests for the following condition.
7714 * 7715 * (*@reg & mask) != val 7716 * 7717 * If the condition is met, it returns; otherwise, the process is 7718 * repeated after @interval_msec until timeout. 7719 * 7720 * LOCKING: 7721 * Kernel thread context (may sleep) 7722 * 7723 * RETURNS: 7724 * The final register value. 7725 */ 7726 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 7727 unsigned long interval_msec, 7728 unsigned long timeout_msec) 7729 { 7730 unsigned long timeout; 7731 u32 tmp; 7732 7733 tmp = ioread32(reg); 7734 7735 /* Calculate timeout _after_ the first read to make sure 7736 * preceding writes reach the controller before starting to 7737 * eat away the timeout. 7738 */ 7739 timeout = jiffies + (timeout_msec * HZ) / 1000; 7740 7741 while ((tmp & mask) == val && time_before(jiffies, timeout)) { 7742 msleep(interval_msec); 7743 tmp = ioread32(reg); 7744 } 7745 7746 return tmp; 7747 } 7748 7749 /* 7750 * Dummy port_ops 7751 */ 7752 static void ata_dummy_noret(struct ata_port *ap) { } 7753 static int ata_dummy_ret0(struct ata_port *ap) { return 0; } 7754 static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { } 7755 7756 static u8 ata_dummy_check_status(struct ata_port *ap) 7757 { 7758 return ATA_DRDY; 7759 } 7760 7761 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 7762 { 7763 return AC_ERR_SYSTEM; 7764 } 7765 7766 const struct ata_port_operations ata_dummy_port_ops = { 7767 .check_status = ata_dummy_check_status, 7768 .check_altstatus = ata_dummy_check_status, 7769 .dev_select = ata_noop_dev_select, 7770 .qc_prep = ata_noop_qc_prep, 7771 .qc_issue = ata_dummy_qc_issue, 7772 .freeze = ata_dummy_noret, 7773 .thaw = ata_dummy_noret, 7774 .error_handler = ata_dummy_noret, 7775 .post_internal_cmd = ata_dummy_qc_noret, 7776 .irq_clear = ata_dummy_noret, 7777 .port_start = ata_dummy_ret0, 7778 .port_stop = ata_dummy_noret, 7779 }; 7780 7781 const struct ata_port_info ata_dummy_port_info = { 7782 .port_ops = &ata_dummy_port_ops, 7783 }; 7784 7785 /* 7786 * libata is essentially a library of internal helper functions for 7787 * low-level ATA host controller drivers. As such, the API/ABI is 7788 * likely to change as new drivers are added and updated. 7789 * Do not depend on ABI/API stability. 
7790 */ 7791 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 7792 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 7793 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 7794 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 7795 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 7796 EXPORT_SYMBOL_GPL(ata_std_bios_param); 7797 EXPORT_SYMBOL_GPL(ata_std_ports); 7798 EXPORT_SYMBOL_GPL(ata_host_init); 7799 EXPORT_SYMBOL_GPL(ata_host_alloc); 7800 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 7801 EXPORT_SYMBOL_GPL(ata_host_start); 7802 EXPORT_SYMBOL_GPL(ata_host_register); 7803 EXPORT_SYMBOL_GPL(ata_host_activate); 7804 EXPORT_SYMBOL_GPL(ata_host_detach); 7805 EXPORT_SYMBOL_GPL(ata_sg_init); 7806 EXPORT_SYMBOL_GPL(ata_hsm_move); 7807 EXPORT_SYMBOL_GPL(ata_qc_complete); 7808 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 7809 EXPORT_SYMBOL_GPL(ata_qc_issue_prot); 7810 EXPORT_SYMBOL_GPL(ata_tf_load); 7811 EXPORT_SYMBOL_GPL(ata_tf_read); 7812 EXPORT_SYMBOL_GPL(ata_noop_dev_select); 7813 EXPORT_SYMBOL_GPL(ata_std_dev_select); 7814 EXPORT_SYMBOL_GPL(sata_print_link_status); 7815 EXPORT_SYMBOL_GPL(atapi_cmd_type); 7816 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 7817 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 7818 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 7819 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 7820 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 7821 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 7822 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 7823 EXPORT_SYMBOL_GPL(ata_mode_string); 7824 EXPORT_SYMBOL_GPL(ata_id_xfermask); 7825 EXPORT_SYMBOL_GPL(ata_check_status); 7826 EXPORT_SYMBOL_GPL(ata_altstatus); 7827 EXPORT_SYMBOL_GPL(ata_exec_command); 7828 EXPORT_SYMBOL_GPL(ata_port_start); 7829 EXPORT_SYMBOL_GPL(ata_sff_port_start); 7830 EXPORT_SYMBOL_GPL(ata_interrupt); 7831 EXPORT_SYMBOL_GPL(ata_do_set_mode); 7832 EXPORT_SYMBOL_GPL(ata_data_xfer); 7833 EXPORT_SYMBOL_GPL(ata_data_xfer_noirq); 7834 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 7835 EXPORT_SYMBOL_GPL(ata_qc_prep); 7836 EXPORT_SYMBOL_GPL(ata_dumb_qc_prep); 7837 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 7838 EXPORT_SYMBOL_GPL(ata_bmdma_setup); 7839 EXPORT_SYMBOL_GPL(ata_bmdma_start); 7840 EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); 7841 EXPORT_SYMBOL_GPL(ata_bmdma_status); 7842 EXPORT_SYMBOL_GPL(ata_bmdma_stop); 7843 EXPORT_SYMBOL_GPL(ata_bmdma_freeze); 7844 EXPORT_SYMBOL_GPL(ata_bmdma_thaw); 7845 EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh); 7846 EXPORT_SYMBOL_GPL(ata_bmdma_error_handler); 7847 EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd); 7848 EXPORT_SYMBOL_GPL(ata_port_probe); 7849 EXPORT_SYMBOL_GPL(ata_dev_disable); 7850 EXPORT_SYMBOL_GPL(sata_set_spd); 7851 EXPORT_SYMBOL_GPL(sata_link_debounce); 7852 EXPORT_SYMBOL_GPL(sata_link_resume); 7853 EXPORT_SYMBOL_GPL(ata_bus_reset); 7854 EXPORT_SYMBOL_GPL(ata_std_prereset); 7855 EXPORT_SYMBOL_GPL(ata_std_softreset); 7856 EXPORT_SYMBOL_GPL(sata_link_hardreset); 7857 EXPORT_SYMBOL_GPL(sata_std_hardreset); 7858 EXPORT_SYMBOL_GPL(ata_std_postreset); 7859 EXPORT_SYMBOL_GPL(ata_dev_classify); 7860 EXPORT_SYMBOL_GPL(ata_dev_pair); 7861 EXPORT_SYMBOL_GPL(ata_port_disable); 7862 EXPORT_SYMBOL_GPL(ata_ratelimit); 7863 EXPORT_SYMBOL_GPL(ata_wait_register); 7864 EXPORT_SYMBOL_GPL(ata_busy_sleep); 7865 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 7866 EXPORT_SYMBOL_GPL(ata_wait_ready); 7867 EXPORT_SYMBOL_GPL(ata_scsi_ioctl); 7868 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 7869 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 7870 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 7871 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 7872 EXPORT_SYMBOL_GPL(ata_host_intr); 7873 EXPORT_SYMBOL_GPL(sata_scr_valid); 7874 
EXPORT_SYMBOL_GPL(sata_scr_read); 7875 EXPORT_SYMBOL_GPL(sata_scr_write); 7876 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 7877 EXPORT_SYMBOL_GPL(ata_link_online); 7878 EXPORT_SYMBOL_GPL(ata_link_offline); 7879 #ifdef CONFIG_PM 7880 EXPORT_SYMBOL_GPL(ata_host_suspend); 7881 EXPORT_SYMBOL_GPL(ata_host_resume); 7882 #endif /* CONFIG_PM */ 7883 EXPORT_SYMBOL_GPL(ata_id_string); 7884 EXPORT_SYMBOL_GPL(ata_id_c_string); 7885 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 7886 7887 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 7888 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 7889 EXPORT_SYMBOL_GPL(ata_timing_compute); 7890 EXPORT_SYMBOL_GPL(ata_timing_merge); 7891 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 7892 7893 #ifdef CONFIG_PCI 7894 EXPORT_SYMBOL_GPL(pci_test_config_bits); 7895 EXPORT_SYMBOL_GPL(ata_pci_init_sff_host); 7896 EXPORT_SYMBOL_GPL(ata_pci_init_bmdma); 7897 EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host); 7898 EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host); 7899 EXPORT_SYMBOL_GPL(ata_pci_init_one); 7900 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 7901 #ifdef CONFIG_PM 7902 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 7903 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 7904 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 7905 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 7906 #endif /* CONFIG_PM */ 7907 EXPORT_SYMBOL_GPL(ata_pci_default_filter); 7908 EXPORT_SYMBOL_GPL(ata_pci_clear_simplex); 7909 #endif /* CONFIG_PCI */ 7910 7911 EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch); 7912 EXPORT_SYMBOL_GPL(sata_pmp_std_prereset); 7913 EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset); 7914 EXPORT_SYMBOL_GPL(sata_pmp_std_postreset); 7915 EXPORT_SYMBOL_GPL(sata_pmp_do_eh); 7916 7917 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 7918 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 7919 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 7920 EXPORT_SYMBOL_GPL(ata_port_desc); 7921 #ifdef CONFIG_PCI 7922 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 7923 #endif /* CONFIG_PCI */ 7924 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 7925 EXPORT_SYMBOL_GPL(ata_link_abort); 7926 EXPORT_SYMBOL_GPL(ata_port_abort); 7927 EXPORT_SYMBOL_GPL(ata_port_freeze); 7928 EXPORT_SYMBOL_GPL(sata_async_notification); 7929 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 7930 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 7931 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 7932 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 7933 EXPORT_SYMBOL_GPL(ata_do_eh); 7934 EXPORT_SYMBOL_GPL(ata_irq_on); 7935 EXPORT_SYMBOL_GPL(ata_dev_try_classify); 7936 7937 EXPORT_SYMBOL_GPL(ata_cable_40wire); 7938 EXPORT_SYMBOL_GPL(ata_cable_80wire); 7939 EXPORT_SYMBOL_GPL(ata_cable_unknown); 7940 EXPORT_SYMBOL_GPL(ata_cable_ignore); 7941 EXPORT_SYMBOL_GPL(ata_cable_sata); 7942