1 /* 2 * libata-core.c - helper library for ATA 3 * 4 * Maintained by: Tejun Heo <tj@kernel.org> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 9 * Copyright 2003-2004 Jeff Garzik 10 * 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; see the file COPYING. If not, write to 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Hardware documentation available from http://www.t13.org/ and 31 * http://www.sata-io.org/ 32 * 33 * Standards documents from: 34 * http://www.t13.org (ATA standards, PCI DMA IDE spec) 35 * http://www.t10.org (SCSI MMC - for ATAPI MMC) 36 * http://www.sata-io.org (SATA) 37 * http://www.compactflash.org (CF) 38 * http://www.qic.org (QIC157 - Tape and DSC) 39 * http://www.ce-ata.org (CE-ATA: not supported) 40 * 41 */ 42 43 #include <linux/kernel.h> 44 #include <linux/module.h> 45 #include <linux/pci.h> 46 #include <linux/init.h> 47 #include <linux/list.h> 48 #include <linux/mm.h> 49 #include <linux/spinlock.h> 50 #include <linux/blkdev.h> 51 #include <linux/delay.h> 52 #include <linux/timer.h> 53 #include <linux/time.h> 54 #include <linux/interrupt.h> 55 #include <linux/completion.h> 56 #include <linux/suspend.h> 57 #include <linux/workqueue.h> 58 #include <linux/scatterlist.h> 59 #include <linux/io.h> 60 #include <linux/async.h> 61 #include <linux/log2.h> 62 #include <linux/slab.h> 63 #include <linux/glob.h> 64 #include <scsi/scsi.h> 65 #include <scsi/scsi_cmnd.h> 66 #include <scsi/scsi_host.h> 67 #include <linux/libata.h> 68 #include <asm/byteorder.h> 69 #include <linux/cdrom.h> 70 #include <linux/ratelimit.h> 71 #include <linux/pm_runtime.h> 72 #include <linux/platform_device.h> 73 74 #define CREATE_TRACE_POINTS 75 #include <trace/events/libata.h> 76 77 #include "libata.h" 78 #include "libata-transport.h" 79 80 /* debounce timing parameters in msecs { interval, duration, timeout } */ 81 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; 82 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; 83 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; 84 85 const struct ata_port_operations ata_base_port_ops = { 86 .prereset = ata_std_prereset, 87 .postreset = ata_std_postreset, 88 .error_handler = ata_std_error_handler, 89 .sched_eh = ata_std_sched_eh, 90 .end_eh = ata_std_end_eh, 91 }; 92 93 const struct ata_port_operations sata_port_ops = { 94 .inherits = &ata_base_port_ops, 95 96 .qc_defer = ata_std_qc_defer, 97 .hardreset = sata_std_hardreset, 98 }; 99 100 static unsigned int ata_dev_init_params(struct ata_device *dev, 101 u16 heads, u16 sectors); 102 static unsigned int ata_dev_set_xfermode(struct ata_device *dev); 103 static void ata_dev_xfermask(struct ata_device *dev); 104 static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 105 106 
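/*
 * Illustrative sketch, not part of the driver: the debounce triplets above
 * are meant for helpers such as sata_link_resume(), which poll SStatus every
 * "interval" msecs until the link has stayed stable for "duration" msecs,
 * giving up after "timeout" msecs.
 */
#if 0
static int example_resume_hotplugged_link(struct ata_link *link,
					  unsigned long deadline)
{
	/* sample every 25ms, require 500ms of stability, 2000ms timeout */
	return sata_link_resume(link, sata_deb_timing_hotplug, deadline);
}
#endif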
atomic_t ata_print_id = ATOMIC_INIT(0);

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next link.
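 *
 * Rarely called directly; most users go through the ata_for_each_link()
 * wrapper from <linux/libata.h>, e.g. (illustrative only):
 *
 *	struct ata_link *link;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_link_info(link, "visiting link %d\n", link->pmp);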
194 */ 195 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, 196 enum ata_link_iter_mode mode) 197 { 198 BUG_ON(mode != ATA_LITER_EDGE && 199 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST); 200 201 /* NULL link indicates start of iteration */ 202 if (!link) 203 switch (mode) { 204 case ATA_LITER_EDGE: 205 case ATA_LITER_PMP_FIRST: 206 if (sata_pmp_attached(ap)) 207 return ap->pmp_link; 208 /* fall through */ 209 case ATA_LITER_HOST_FIRST: 210 return &ap->link; 211 } 212 213 /* we just iterated over the host link, what's next? */ 214 if (link == &ap->link) 215 switch (mode) { 216 case ATA_LITER_HOST_FIRST: 217 if (sata_pmp_attached(ap)) 218 return ap->pmp_link; 219 /* fall through */ 220 case ATA_LITER_PMP_FIRST: 221 if (unlikely(ap->slave_link)) 222 return ap->slave_link; 223 /* fall through */ 224 case ATA_LITER_EDGE: 225 return NULL; 226 } 227 228 /* slave_link excludes PMP */ 229 if (unlikely(link == ap->slave_link)) 230 return NULL; 231 232 /* we were over a PMP link */ 233 if (++link < ap->pmp_link + ap->nr_pmp_links) 234 return link; 235 236 if (mode == ATA_LITER_PMP_FIRST) 237 return &ap->link; 238 239 return NULL; 240 } 241 242 /** 243 * ata_dev_next - device iteration helper 244 * @dev: the previous device, NULL to start 245 * @link: ATA link containing devices to iterate 246 * @mode: iteration mode, one of ATA_DITER_* 247 * 248 * LOCKING: 249 * Host lock or EH context. 250 * 251 * RETURNS: 252 * Pointer to the next device. 253 */ 254 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link, 255 enum ata_dev_iter_mode mode) 256 { 257 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE && 258 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE); 259 260 /* NULL dev indicates start of iteration */ 261 if (!dev) 262 switch (mode) { 263 case ATA_DITER_ENABLED: 264 case ATA_DITER_ALL: 265 dev = link->device; 266 goto check; 267 case ATA_DITER_ENABLED_REVERSE: 268 case ATA_DITER_ALL_REVERSE: 269 dev = link->device + ata_link_max_devices(link) - 1; 270 goto check; 271 } 272 273 next: 274 /* move to the next one */ 275 switch (mode) { 276 case ATA_DITER_ENABLED: 277 case ATA_DITER_ALL: 278 if (++dev < link->device + ata_link_max_devices(link)) 279 goto check; 280 return NULL; 281 case ATA_DITER_ENABLED_REVERSE: 282 case ATA_DITER_ALL_REVERSE: 283 if (--dev >= link->device) 284 goto check; 285 return NULL; 286 } 287 288 check: 289 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) && 290 !ata_dev_enabled(dev)) 291 goto next; 292 return dev; 293 } 294 295 /** 296 * ata_dev_phys_link - find physical link for a device 297 * @dev: ATA device to look up physical link for 298 * 299 * Look up physical link which @dev is attached to. Note that 300 * this is different from @dev->link only when @dev is on slave 301 * link. For all other cases, it's the same as @dev->link. 302 * 303 * LOCKING: 304 * Don't care. 305 * 306 * RETURNS: 307 * Pointer to the found physical link. 308 */ 309 struct ata_link *ata_dev_phys_link(struct ata_device *dev) 310 { 311 struct ata_port *ap = dev->link->ap; 312 313 if (!ap->slave_link) 314 return dev->link; 315 if (!dev->devno) 316 return &ap->link; 317 return ap->slave_link; 318 } 319 320 /** 321 * ata_force_cbl - force cable type according to libata.force 322 * @ap: ATA port of interest 323 * 324 * Force cable type according to libata.force and whine about it. 
325 * The last entry which has matching port number is used, so it 326 * can be specified as part of device force parameters. For 327 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the 328 * same effect. 329 * 330 * LOCKING: 331 * EH context. 332 */ 333 void ata_force_cbl(struct ata_port *ap) 334 { 335 int i; 336 337 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 338 const struct ata_force_ent *fe = &ata_force_tbl[i]; 339 340 if (fe->port != -1 && fe->port != ap->print_id) 341 continue; 342 343 if (fe->param.cbl == ATA_CBL_NONE) 344 continue; 345 346 ap->cbl = fe->param.cbl; 347 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name); 348 return; 349 } 350 } 351 352 /** 353 * ata_force_link_limits - force link limits according to libata.force 354 * @link: ATA link of interest 355 * 356 * Force link flags and SATA spd limit according to libata.force 357 * and whine about it. When only the port part is specified 358 * (e.g. 1:), the limit applies to all links connected to both 359 * the host link and all fan-out ports connected via PMP. If the 360 * device part is specified as 0 (e.g. 1.00:), it specifies the 361 * first fan-out link not the host link. Device number 15 always 362 * points to the host link whether PMP is attached or not. If the 363 * controller has slave link, device number 16 points to it. 364 * 365 * LOCKING: 366 * EH context. 367 */ 368 static void ata_force_link_limits(struct ata_link *link) 369 { 370 bool did_spd = false; 371 int linkno = link->pmp; 372 int i; 373 374 if (ata_is_host_link(link)) 375 linkno += 15; 376 377 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 378 const struct ata_force_ent *fe = &ata_force_tbl[i]; 379 380 if (fe->port != -1 && fe->port != link->ap->print_id) 381 continue; 382 383 if (fe->device != -1 && fe->device != linkno) 384 continue; 385 386 /* only honor the first spd limit */ 387 if (!did_spd && fe->param.spd_limit) { 388 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; 389 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n", 390 fe->param.name); 391 did_spd = true; 392 } 393 394 /* let lflags stack */ 395 if (fe->param.lflags) { 396 link->flags |= fe->param.lflags; 397 ata_link_notice(link, 398 "FORCE: link flag 0x%x forced -> 0x%x\n", 399 fe->param.lflags, link->flags); 400 } 401 } 402 } 403 404 /** 405 * ata_force_xfermask - force xfermask according to libata.force 406 * @dev: ATA device of interest 407 * 408 * Force xfer_mask according to libata.force and whine about it. 409 * For consistency with link selection, device number 15 selects 410 * the first device connected to the host link. 411 * 412 * LOCKING: 413 * EH context. 
414 */ 415 static void ata_force_xfermask(struct ata_device *dev) 416 { 417 int devno = dev->link->pmp + dev->devno; 418 int alt_devno = devno; 419 int i; 420 421 /* allow n.15/16 for devices attached to host port */ 422 if (ata_is_host_link(dev->link)) 423 alt_devno += 15; 424 425 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 426 const struct ata_force_ent *fe = &ata_force_tbl[i]; 427 unsigned long pio_mask, mwdma_mask, udma_mask; 428 429 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 430 continue; 431 432 if (fe->device != -1 && fe->device != devno && 433 fe->device != alt_devno) 434 continue; 435 436 if (!fe->param.xfer_mask) 437 continue; 438 439 ata_unpack_xfermask(fe->param.xfer_mask, 440 &pio_mask, &mwdma_mask, &udma_mask); 441 if (udma_mask) 442 dev->udma_mask = udma_mask; 443 else if (mwdma_mask) { 444 dev->udma_mask = 0; 445 dev->mwdma_mask = mwdma_mask; 446 } else { 447 dev->udma_mask = 0; 448 dev->mwdma_mask = 0; 449 dev->pio_mask = pio_mask; 450 } 451 452 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n", 453 fe->param.name); 454 return; 455 } 456 } 457 458 /** 459 * ata_force_horkage - force horkage according to libata.force 460 * @dev: ATA device of interest 461 * 462 * Force horkage according to libata.force and whine about it. 463 * For consistency with link selection, device number 15 selects 464 * the first device connected to the host link. 465 * 466 * LOCKING: 467 * EH context. 468 */ 469 static void ata_force_horkage(struct ata_device *dev) 470 { 471 int devno = dev->link->pmp + dev->devno; 472 int alt_devno = devno; 473 int i; 474 475 /* allow n.15/16 for devices attached to host port */ 476 if (ata_is_host_link(dev->link)) 477 alt_devno += 15; 478 479 for (i = 0; i < ata_force_tbl_size; i++) { 480 const struct ata_force_ent *fe = &ata_force_tbl[i]; 481 482 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 483 continue; 484 485 if (fe->device != -1 && fe->device != devno && 486 fe->device != alt_devno) 487 continue; 488 489 if (!(~dev->horkage & fe->param.horkage_on) && 490 !(dev->horkage & fe->param.horkage_off)) 491 continue; 492 493 dev->horkage |= fe->param.horkage_on; 494 dev->horkage &= ~fe->param.horkage_off; 495 496 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n", 497 fe->param.name); 498 } 499 } 500 501 /** 502 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode 503 * @opcode: SCSI opcode 504 * 505 * Determine ATAPI command type from @opcode. 506 * 507 * LOCKING: 508 * None. 509 * 510 * RETURNS: 511 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC} 512 */ 513 int atapi_cmd_type(u8 opcode) 514 { 515 switch (opcode) { 516 case GPCMD_READ_10: 517 case GPCMD_READ_12: 518 return ATAPI_READ; 519 520 case GPCMD_WRITE_10: 521 case GPCMD_WRITE_12: 522 case GPCMD_WRITE_AND_VERIFY_10: 523 return ATAPI_WRITE; 524 525 case GPCMD_READ_CD: 526 case GPCMD_READ_CD_MSF: 527 return ATAPI_READ_CD; 528 529 case ATA_16: 530 case ATA_12: 531 if (atapi_passthru16) 532 return ATAPI_PASS_THRU; 533 /* fall thru */ 534 default: 535 return ATAPI_MISC; 536 } 537 } 538 539 /** 540 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 541 * @tf: Taskfile to convert 542 * @pmp: Port multiplier port 543 * @is_cmd: This FIS is for command 544 * @fis: Buffer into which data will output 545 * 546 * Converts a standard ATA taskfile to a Serial ATA 547 * FIS structure (Register - Host to Device). 548 * 549 * LOCKING: 550 * Inherited from caller. 
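 *
 * Layout of the resulting 20-byte Register - Host to Device FIS,
 * matching the assignments below:
 *
 *	fis[0]      0x27 (FIS type)
 *	fis[1]      PMP port in bits 3:0, bit 7 set for a Command FIS
 *	fis[2-3]    command, features
 *	fis[4-7]    LBA low/mid/high, device
 *	fis[8-11]   LBA low/mid/high (exp), features (exp)
 *	fis[12-13]  sector count, sector count (exp)
 *	fis[14]     reserved (0)
 *	fis[15]     control
 *	fis[16-19]  auxiliary, little endian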
551 */ 552 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) 553 { 554 fis[0] = 0x27; /* Register - Host to Device FIS */ 555 fis[1] = pmp & 0xf; /* Port multiplier number*/ 556 if (is_cmd) 557 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ 558 559 fis[2] = tf->command; 560 fis[3] = tf->feature; 561 562 fis[4] = tf->lbal; 563 fis[5] = tf->lbam; 564 fis[6] = tf->lbah; 565 fis[7] = tf->device; 566 567 fis[8] = tf->hob_lbal; 568 fis[9] = tf->hob_lbam; 569 fis[10] = tf->hob_lbah; 570 fis[11] = tf->hob_feature; 571 572 fis[12] = tf->nsect; 573 fis[13] = tf->hob_nsect; 574 fis[14] = 0; 575 fis[15] = tf->ctl; 576 577 fis[16] = tf->auxiliary & 0xff; 578 fis[17] = (tf->auxiliary >> 8) & 0xff; 579 fis[18] = (tf->auxiliary >> 16) & 0xff; 580 fis[19] = (tf->auxiliary >> 24) & 0xff; 581 } 582 583 /** 584 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile 585 * @fis: Buffer from which data will be input 586 * @tf: Taskfile to output 587 * 588 * Converts a serial ATA FIS structure to a standard ATA taskfile. 589 * 590 * LOCKING: 591 * Inherited from caller. 592 */ 593 594 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) 595 { 596 tf->command = fis[2]; /* status */ 597 tf->feature = fis[3]; /* error */ 598 599 tf->lbal = fis[4]; 600 tf->lbam = fis[5]; 601 tf->lbah = fis[6]; 602 tf->device = fis[7]; 603 604 tf->hob_lbal = fis[8]; 605 tf->hob_lbam = fis[9]; 606 tf->hob_lbah = fis[10]; 607 608 tf->nsect = fis[12]; 609 tf->hob_nsect = fis[13]; 610 } 611 612 static const u8 ata_rw_cmds[] = { 613 /* pio multi */ 614 ATA_CMD_READ_MULTI, 615 ATA_CMD_WRITE_MULTI, 616 ATA_CMD_READ_MULTI_EXT, 617 ATA_CMD_WRITE_MULTI_EXT, 618 0, 619 0, 620 0, 621 ATA_CMD_WRITE_MULTI_FUA_EXT, 622 /* pio */ 623 ATA_CMD_PIO_READ, 624 ATA_CMD_PIO_WRITE, 625 ATA_CMD_PIO_READ_EXT, 626 ATA_CMD_PIO_WRITE_EXT, 627 0, 628 0, 629 0, 630 0, 631 /* dma */ 632 ATA_CMD_READ, 633 ATA_CMD_WRITE, 634 ATA_CMD_READ_EXT, 635 ATA_CMD_WRITE_EXT, 636 0, 637 0, 638 0, 639 ATA_CMD_WRITE_FUA_EXT 640 }; 641 642 /** 643 * ata_rwcmd_protocol - set taskfile r/w commands and protocol 644 * @tf: command to examine and configure 645 * @dev: device tf belongs to 646 * 647 * Examine the device configuration and tf->flags to calculate 648 * the proper read/write commands and protocol to use. 649 * 650 * LOCKING: 651 * caller. 652 */ 653 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) 654 { 655 u8 cmd; 656 657 int index, fua, lba48, write; 658 659 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; 660 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; 661 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; 662 663 if (dev->flags & ATA_DFLAG_PIO) { 664 tf->protocol = ATA_PROT_PIO; 665 index = dev->multi_count ? 0 : 8; 666 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { 667 /* Unable to use DMA due to host limitation */ 668 tf->protocol = ATA_PROT_PIO; 669 index = dev->multi_count ? 0 : 8; 670 } else { 671 tf->protocol = ATA_PROT_DMA; 672 index = 16; 673 } 674 675 cmd = ata_rw_cmds[index + fua + lba48 + write]; 676 if (cmd) { 677 tf->command = cmd; 678 return 0; 679 } 680 return -1; 681 } 682 683 /** 684 * ata_tf_read_block - Read block address from ATA taskfile 685 * @tf: ATA taskfile of interest 686 * @dev: ATA device @tf belongs to 687 * 688 * LOCKING: 689 * None. 690 * 691 * Read block address from @tf. This function can handle all 692 * three address formats - LBA, LBA48 and CHS. tf->protocol and 693 * flags select the address format to use. 
694 * 695 * RETURNS: 696 * Block address read from @tf. 697 */ 698 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) 699 { 700 u64 block = 0; 701 702 if (tf->flags & ATA_TFLAG_LBA) { 703 if (tf->flags & ATA_TFLAG_LBA48) { 704 block |= (u64)tf->hob_lbah << 40; 705 block |= (u64)tf->hob_lbam << 32; 706 block |= (u64)tf->hob_lbal << 24; 707 } else 708 block |= (tf->device & 0xf) << 24; 709 710 block |= tf->lbah << 16; 711 block |= tf->lbam << 8; 712 block |= tf->lbal; 713 } else { 714 u32 cyl, head, sect; 715 716 cyl = tf->lbam | (tf->lbah << 8); 717 head = tf->device & 0xf; 718 sect = tf->lbal; 719 720 if (!sect) { 721 ata_dev_warn(dev, 722 "device reported invalid CHS sector 0\n"); 723 sect = 1; /* oh well */ 724 } 725 726 block = (cyl * dev->heads + head) * dev->sectors + sect - 1; 727 } 728 729 return block; 730 } 731 732 /** 733 * ata_build_rw_tf - Build ATA taskfile for given read/write request 734 * @tf: Target ATA taskfile 735 * @dev: ATA device @tf belongs to 736 * @block: Block address 737 * @n_block: Number of blocks 738 * @tf_flags: RW/FUA etc... 739 * @tag: tag 740 * 741 * LOCKING: 742 * None. 743 * 744 * Build ATA taskfile @tf for read/write request described by 745 * @block, @n_block, @tf_flags and @tag on @dev. 746 * 747 * RETURNS: 748 * 749 * 0 on success, -ERANGE if the request is too large for @dev, 750 * -EINVAL if the request is invalid. 751 */ 752 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 753 u64 block, u32 n_block, unsigned int tf_flags, 754 unsigned int tag) 755 { 756 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 757 tf->flags |= tf_flags; 758 759 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) { 760 /* yay, NCQ */ 761 if (!lba_48_ok(block, n_block)) 762 return -ERANGE; 763 764 tf->protocol = ATA_PROT_NCQ; 765 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 766 767 if (tf->flags & ATA_TFLAG_WRITE) 768 tf->command = ATA_CMD_FPDMA_WRITE; 769 else 770 tf->command = ATA_CMD_FPDMA_READ; 771 772 tf->nsect = tag << 3; 773 tf->hob_feature = (n_block >> 8) & 0xff; 774 tf->feature = n_block & 0xff; 775 776 tf->hob_lbah = (block >> 40) & 0xff; 777 tf->hob_lbam = (block >> 32) & 0xff; 778 tf->hob_lbal = (block >> 24) & 0xff; 779 tf->lbah = (block >> 16) & 0xff; 780 tf->lbam = (block >> 8) & 0xff; 781 tf->lbal = block & 0xff; 782 783 tf->device = ATA_LBA; 784 if (tf->flags & ATA_TFLAG_FUA) 785 tf->device |= 1 << 7; 786 } else if (dev->flags & ATA_DFLAG_LBA) { 787 tf->flags |= ATA_TFLAG_LBA; 788 789 if (lba_28_ok(block, n_block)) { 790 /* use LBA28 */ 791 tf->device |= (block >> 24) & 0xf; 792 } else if (lba_48_ok(block, n_block)) { 793 if (!(dev->flags & ATA_DFLAG_LBA48)) 794 return -ERANGE; 795 796 /* use LBA48 */ 797 tf->flags |= ATA_TFLAG_LBA48; 798 799 tf->hob_nsect = (n_block >> 8) & 0xff; 800 801 tf->hob_lbah = (block >> 40) & 0xff; 802 tf->hob_lbam = (block >> 32) & 0xff; 803 tf->hob_lbal = (block >> 24) & 0xff; 804 } else 805 /* request too large even for LBA48 */ 806 return -ERANGE; 807 808 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 809 return -EINVAL; 810 811 tf->nsect = n_block & 0xff; 812 813 tf->lbah = (block >> 16) & 0xff; 814 tf->lbam = (block >> 8) & 0xff; 815 tf->lbal = block & 0xff; 816 817 tf->device |= ATA_LBA; 818 } else { 819 /* CHS */ 820 u32 sect, head, cyl, track; 821 822 /* The request -may- be too large for CHS addressing. 
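	   As a worked example (illustrative geometry only): with
	   dev->heads == 16 and dev->sectors == 63, block 4000 maps to
	   track 63, cyl 3, head 15, sect 32, and ata_tf_read_block()
	   reverses it as (3 * 16 + 15) * 63 + 32 - 1 == 4000.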
*/ 823 if (!lba_28_ok(block, n_block)) 824 return -ERANGE; 825 826 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 827 return -EINVAL; 828 829 /* Convert LBA to CHS */ 830 track = (u32)block / dev->sectors; 831 cyl = track / dev->heads; 832 head = track % dev->heads; 833 sect = (u32)block % dev->sectors + 1; 834 835 DPRINTK("block %u track %u cyl %u head %u sect %u\n", 836 (u32)block, track, cyl, head, sect); 837 838 /* Check whether the converted CHS can fit. 839 Cylinder: 0-65535 840 Head: 0-15 841 Sector: 1-255*/ 842 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) 843 return -ERANGE; 844 845 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ 846 tf->lbal = sect; 847 tf->lbam = cyl; 848 tf->lbah = cyl >> 8; 849 tf->device |= head; 850 } 851 852 return 0; 853 } 854 855 /** 856 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask 857 * @pio_mask: pio_mask 858 * @mwdma_mask: mwdma_mask 859 * @udma_mask: udma_mask 860 * 861 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single 862 * unsigned int xfer_mask. 863 * 864 * LOCKING: 865 * None. 866 * 867 * RETURNS: 868 * Packed xfer_mask. 869 */ 870 unsigned long ata_pack_xfermask(unsigned long pio_mask, 871 unsigned long mwdma_mask, 872 unsigned long udma_mask) 873 { 874 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | 875 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | 876 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); 877 } 878 879 /** 880 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks 881 * @xfer_mask: xfer_mask to unpack 882 * @pio_mask: resulting pio_mask 883 * @mwdma_mask: resulting mwdma_mask 884 * @udma_mask: resulting udma_mask 885 * 886 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. 887 * Any NULL distination masks will be ignored. 888 */ 889 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask, 890 unsigned long *mwdma_mask, unsigned long *udma_mask) 891 { 892 if (pio_mask) 893 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; 894 if (mwdma_mask) 895 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA; 896 if (udma_mask) 897 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA; 898 } 899 900 static const struct ata_xfer_ent { 901 int shift, bits; 902 u8 base; 903 } ata_xfer_tbl[] = { 904 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 }, 905 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 }, 906 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 }, 907 { -1, }, 908 }; 909 910 /** 911 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask 912 * @xfer_mask: xfer_mask of interest 913 * 914 * Return matching XFER_* value for @xfer_mask. Only the highest 915 * bit of @xfer_mask is considered. 916 * 917 * LOCKING: 918 * None. 919 * 920 * RETURNS: 921 * Matching XFER_* value, 0xff if no match found. 922 */ 923 u8 ata_xfer_mask2mode(unsigned long xfer_mask) 924 { 925 int highbit = fls(xfer_mask) - 1; 926 const struct ata_xfer_ent *ent; 927 928 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 929 if (highbit >= ent->shift && highbit < ent->shift + ent->bits) 930 return ent->base + highbit - ent->shift; 931 return 0xff; 932 } 933 934 /** 935 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_* 936 * @xfer_mode: XFER_* of interest 937 * 938 * Return matching xfer_mask for @xfer_mode. 939 * 940 * LOCKING: 941 * None. 942 * 943 * RETURNS: 944 * Matching xfer_mask, 0 if no match found. 
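 *
 * For example, XFER_UDMA_2 expands to the packed mask with bits
 * ATA_SHIFT_UDMA through ATA_SHIFT_UDMA + 2 set, i.e. UDMA modes 0-2,
 * which can be handed straight back to ata_unpack_xfermask().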
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB device.
1066 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and 1067 * 0x69/0x96 shortly and described them as reserved for 1068 * SerialATA. 1069 * 1070 * We follow the current spec and consider that 0x69/0x96 1071 * identifies a port multiplier and 0x3c/0xc3 a SEMB device. 1072 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports 1073 * SEMB signature. This is worked around in 1074 * ata_dev_read_id(). 1075 */ 1076 if ((tf->lbam == 0) && (tf->lbah == 0)) { 1077 DPRINTK("found ATA device by sig\n"); 1078 return ATA_DEV_ATA; 1079 } 1080 1081 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) { 1082 DPRINTK("found ATAPI device by sig\n"); 1083 return ATA_DEV_ATAPI; 1084 } 1085 1086 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) { 1087 DPRINTK("found PMP device by sig\n"); 1088 return ATA_DEV_PMP; 1089 } 1090 1091 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { 1092 DPRINTK("found SEMB device by sig (could be ATA device)\n"); 1093 return ATA_DEV_SEMB; 1094 } 1095 1096 if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) { 1097 DPRINTK("found ZAC device by sig\n"); 1098 return ATA_DEV_ZAC; 1099 } 1100 1101 DPRINTK("unknown device\n"); 1102 return ATA_DEV_UNKNOWN; 1103 } 1104 1105 /** 1106 * ata_id_string - Convert IDENTIFY DEVICE page into string 1107 * @id: IDENTIFY DEVICE results we will examine 1108 * @s: string into which data is output 1109 * @ofs: offset into identify device page 1110 * @len: length of string to return. must be an even number. 1111 * 1112 * The strings in the IDENTIFY DEVICE page are broken up into 1113 * 16-bit chunks. Run through the string, and output each 1114 * 8-bit chunk linearly, regardless of platform. 1115 * 1116 * LOCKING: 1117 * caller. 1118 */ 1119 1120 void ata_id_string(const u16 *id, unsigned char *s, 1121 unsigned int ofs, unsigned int len) 1122 { 1123 unsigned int c; 1124 1125 BUG_ON(len & 1); 1126 1127 while (len > 0) { 1128 c = id[ofs] >> 8; 1129 *s = c; 1130 s++; 1131 1132 c = id[ofs] & 0xff; 1133 *s = c; 1134 s++; 1135 1136 ofs++; 1137 len -= 2; 1138 } 1139 } 1140 1141 /** 1142 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string 1143 * @id: IDENTIFY DEVICE results we will examine 1144 * @s: string into which data is output 1145 * @ofs: offset into identify device page 1146 * @len: length of string to return. must be an odd number. 1147 * 1148 * This function is identical to ata_id_string except that it 1149 * trims trailing spaces and terminates the resulting string with 1150 * null. @len must be actual maximum length (even number) + 1. 1151 * 1152 * LOCKING: 1153 * caller. 
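 *
 * Typical use, as in ata_dev_configure() below:
 *
 *	char modelbuf[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, sizeof(modelbuf));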
1154 */ 1155 void ata_id_c_string(const u16 *id, unsigned char *s, 1156 unsigned int ofs, unsigned int len) 1157 { 1158 unsigned char *p; 1159 1160 ata_id_string(id, s, ofs, len - 1); 1161 1162 p = s + strnlen(s, len - 1); 1163 while (p > s && p[-1] == ' ') 1164 p--; 1165 *p = '\0'; 1166 } 1167 1168 static u64 ata_id_n_sectors(const u16 *id) 1169 { 1170 if (ata_id_has_lba(id)) { 1171 if (ata_id_has_lba48(id)) 1172 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); 1173 else 1174 return ata_id_u32(id, ATA_ID_LBA_CAPACITY); 1175 } else { 1176 if (ata_id_current_chs_valid(id)) 1177 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] * 1178 id[ATA_ID_CUR_SECTORS]; 1179 else 1180 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * 1181 id[ATA_ID_SECTORS]; 1182 } 1183 } 1184 1185 u64 ata_tf_to_lba48(const struct ata_taskfile *tf) 1186 { 1187 u64 sectors = 0; 1188 1189 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; 1190 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; 1191 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; 1192 sectors |= (tf->lbah & 0xff) << 16; 1193 sectors |= (tf->lbam & 0xff) << 8; 1194 sectors |= (tf->lbal & 0xff); 1195 1196 return sectors; 1197 } 1198 1199 u64 ata_tf_to_lba(const struct ata_taskfile *tf) 1200 { 1201 u64 sectors = 0; 1202 1203 sectors |= (tf->device & 0x0f) << 24; 1204 sectors |= (tf->lbah & 0xff) << 16; 1205 sectors |= (tf->lbam & 0xff) << 8; 1206 sectors |= (tf->lbal & 0xff); 1207 1208 return sectors; 1209 } 1210 1211 /** 1212 * ata_read_native_max_address - Read native max address 1213 * @dev: target device 1214 * @max_sectors: out parameter for the result native max address 1215 * 1216 * Perform an LBA48 or LBA28 native size query upon the device in 1217 * question. 1218 * 1219 * RETURNS: 1220 * 0 on success, -EACCES if command is aborted by the drive. 1221 * -EIO on other errors. 1222 */ 1223 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) 1224 { 1225 unsigned int err_mask; 1226 struct ata_taskfile tf; 1227 int lba48 = ata_id_has_lba48(dev->id); 1228 1229 ata_tf_init(dev, &tf); 1230 1231 /* always clear all address registers */ 1232 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 1233 1234 if (lba48) { 1235 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; 1236 tf.flags |= ATA_TFLAG_LBA48; 1237 } else 1238 tf.command = ATA_CMD_READ_NATIVE_MAX; 1239 1240 tf.protocol |= ATA_PROT_NODATA; 1241 tf.device |= ATA_LBA; 1242 1243 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1244 if (err_mask) { 1245 ata_dev_warn(dev, 1246 "failed to read native max address (err_mask=0x%x)\n", 1247 err_mask); 1248 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 1249 return -EACCES; 1250 return -EIO; 1251 } 1252 1253 if (lba48) 1254 *max_sectors = ata_tf_to_lba48(&tf) + 1; 1255 else 1256 *max_sectors = ata_tf_to_lba(&tf) + 1; 1257 if (dev->horkage & ATA_HORKAGE_HPA_SIZE) 1258 (*max_sectors)--; 1259 return 0; 1260 } 1261 1262 /** 1263 * ata_set_max_sectors - Set max sectors 1264 * @dev: target device 1265 * @new_sectors: new max sectors value to set for the device 1266 * 1267 * Set max sectors of @dev to @new_sectors. 1268 * 1269 * RETURNS: 1270 * 0 on success, -EACCES if command is aborted or denied (due to 1271 * previous non-volatile SET_MAX) by the drive. -EIO on other 1272 * errors. 
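 *
 * Paired with ata_read_native_max_address() by ata_hpa_resize() below
 * to unlock a BIOS-created host protected area, roughly:
 *
 *	rc = ata_read_native_max_address(dev, &native_sectors);
 *	if (!rc)
 *		rc = ata_set_max_sectors(dev, native_sectors);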
1273 */ 1274 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) 1275 { 1276 unsigned int err_mask; 1277 struct ata_taskfile tf; 1278 int lba48 = ata_id_has_lba48(dev->id); 1279 1280 new_sectors--; 1281 1282 ata_tf_init(dev, &tf); 1283 1284 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 1285 1286 if (lba48) { 1287 tf.command = ATA_CMD_SET_MAX_EXT; 1288 tf.flags |= ATA_TFLAG_LBA48; 1289 1290 tf.hob_lbal = (new_sectors >> 24) & 0xff; 1291 tf.hob_lbam = (new_sectors >> 32) & 0xff; 1292 tf.hob_lbah = (new_sectors >> 40) & 0xff; 1293 } else { 1294 tf.command = ATA_CMD_SET_MAX; 1295 1296 tf.device |= (new_sectors >> 24) & 0xf; 1297 } 1298 1299 tf.protocol |= ATA_PROT_NODATA; 1300 tf.device |= ATA_LBA; 1301 1302 tf.lbal = (new_sectors >> 0) & 0xff; 1303 tf.lbam = (new_sectors >> 8) & 0xff; 1304 tf.lbah = (new_sectors >> 16) & 0xff; 1305 1306 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1307 if (err_mask) { 1308 ata_dev_warn(dev, 1309 "failed to set max address (err_mask=0x%x)\n", 1310 err_mask); 1311 if (err_mask == AC_ERR_DEV && 1312 (tf.feature & (ATA_ABORTED | ATA_IDNF))) 1313 return -EACCES; 1314 return -EIO; 1315 } 1316 1317 return 0; 1318 } 1319 1320 /** 1321 * ata_hpa_resize - Resize a device with an HPA set 1322 * @dev: Device to resize 1323 * 1324 * Read the size of an LBA28 or LBA48 disk with HPA features and resize 1325 * it if required to the full size of the media. The caller must check 1326 * the drive has the HPA feature set enabled. 1327 * 1328 * RETURNS: 1329 * 0 on success, -errno on failure. 1330 */ 1331 static int ata_hpa_resize(struct ata_device *dev) 1332 { 1333 struct ata_eh_context *ehc = &dev->link->eh_context; 1334 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 1335 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA; 1336 u64 sectors = ata_id_n_sectors(dev->id); 1337 u64 native_sectors; 1338 int rc; 1339 1340 /* do we need to do it? */ 1341 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) || 1342 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || 1343 (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) 1344 return 0; 1345 1346 /* read native max address */ 1347 rc = ata_read_native_max_address(dev, &native_sectors); 1348 if (rc) { 1349 /* If device aborted the command or HPA isn't going to 1350 * be unlocked, skip HPA resizing. 1351 */ 1352 if (rc == -EACCES || !unlock_hpa) { 1353 ata_dev_warn(dev, 1354 "HPA support seems broken, skipping HPA handling\n"); 1355 dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 1356 1357 /* we can continue if device aborted the command */ 1358 if (rc == -EACCES) 1359 rc = 0; 1360 } 1361 1362 return rc; 1363 } 1364 dev->n_native_sectors = native_sectors; 1365 1366 /* nothing to do? 
*/ 1367 if (native_sectors <= sectors || !unlock_hpa) { 1368 if (!print_info || native_sectors == sectors) 1369 return 0; 1370 1371 if (native_sectors > sectors) 1372 ata_dev_info(dev, 1373 "HPA detected: current %llu, native %llu\n", 1374 (unsigned long long)sectors, 1375 (unsigned long long)native_sectors); 1376 else if (native_sectors < sectors) 1377 ata_dev_warn(dev, 1378 "native sectors (%llu) is smaller than sectors (%llu)\n", 1379 (unsigned long long)native_sectors, 1380 (unsigned long long)sectors); 1381 return 0; 1382 } 1383 1384 /* let's unlock HPA */ 1385 rc = ata_set_max_sectors(dev, native_sectors); 1386 if (rc == -EACCES) { 1387 /* if device aborted the command, skip HPA resizing */ 1388 ata_dev_warn(dev, 1389 "device aborted resize (%llu -> %llu), skipping HPA handling\n", 1390 (unsigned long long)sectors, 1391 (unsigned long long)native_sectors); 1392 dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 1393 return 0; 1394 } else if (rc) 1395 return rc; 1396 1397 /* re-read IDENTIFY data */ 1398 rc = ata_dev_reread_id(dev, 0); 1399 if (rc) { 1400 ata_dev_err(dev, 1401 "failed to re-read IDENTIFY data after HPA resizing\n"); 1402 return rc; 1403 } 1404 1405 if (print_info) { 1406 u64 new_sectors = ata_id_n_sectors(dev->id); 1407 ata_dev_info(dev, 1408 "HPA unlocked: %llu -> %llu, native %llu\n", 1409 (unsigned long long)sectors, 1410 (unsigned long long)new_sectors, 1411 (unsigned long long)native_sectors); 1412 } 1413 1414 return 0; 1415 } 1416 1417 /** 1418 * ata_dump_id - IDENTIFY DEVICE info debugging output 1419 * @id: IDENTIFY DEVICE page to dump 1420 * 1421 * Dump selected 16-bit words from the given IDENTIFY DEVICE 1422 * page. 1423 * 1424 * LOCKING: 1425 * caller. 1426 */ 1427 1428 static inline void ata_dump_id(const u16 *id) 1429 { 1430 DPRINTK("49==0x%04x " 1431 "53==0x%04x " 1432 "63==0x%04x " 1433 "64==0x%04x " 1434 "75==0x%04x \n", 1435 id[49], 1436 id[53], 1437 id[63], 1438 id[64], 1439 id[75]); 1440 DPRINTK("80==0x%04x " 1441 "81==0x%04x " 1442 "82==0x%04x " 1443 "83==0x%04x " 1444 "84==0x%04x \n", 1445 id[80], 1446 id[81], 1447 id[82], 1448 id[83], 1449 id[84]); 1450 DPRINTK("88==0x%04x " 1451 "93==0x%04x\n", 1452 id[88], 1453 id[93]); 1454 } 1455 1456 /** 1457 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data 1458 * @id: IDENTIFY data to compute xfer mask from 1459 * 1460 * Compute the xfermask for this device. This is not as trivial 1461 * as it seems if we must consider early devices correctly. 1462 * 1463 * FIXME: pre IDE drive timing (do we care ?). 1464 * 1465 * LOCKING: 1466 * None. 1467 * 1468 * RETURNS: 1469 * Computed xfermask 1470 */ 1471 unsigned long ata_id_xfermask(const u16 *id) 1472 { 1473 unsigned long pio_mask, mwdma_mask, udma_mask; 1474 1475 /* Usual case. Word 53 indicates word 64 is valid */ 1476 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) { 1477 pio_mask = id[ATA_ID_PIO_MODES] & 0x03; 1478 pio_mask <<= 3; 1479 pio_mask |= 0x7; 1480 } else { 1481 /* If word 64 isn't valid then Word 51 high byte holds 1482 * the PIO timing number for the maximum. Turn it into 1483 * a mask. 1484 */ 1485 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; 1486 if (mode < 5) /* Valid PIO range */ 1487 pio_mask = (2 << mode) - 1; 1488 else 1489 pio_mask = 1; 1490 1491 /* But wait.. there's more. Design your standards by 1492 * committee and you too can get a free iordy field to 1493 * process. However its the speeds not the modes that 1494 * are supported... 
Note drivers using the timing API 1495 * will get this right anyway 1496 */ 1497 } 1498 1499 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07; 1500 1501 if (ata_id_is_cfa(id)) { 1502 /* 1503 * Process compact flash extended modes 1504 */ 1505 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7; 1506 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7; 1507 1508 if (pio) 1509 pio_mask |= (1 << 5); 1510 if (pio > 1) 1511 pio_mask |= (1 << 6); 1512 if (dma) 1513 mwdma_mask |= (1 << 3); 1514 if (dma > 1) 1515 mwdma_mask |= (1 << 4); 1516 } 1517 1518 udma_mask = 0; 1519 if (id[ATA_ID_FIELD_VALID] & (1 << 2)) 1520 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff; 1521 1522 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 1523 } 1524 1525 static void ata_qc_complete_internal(struct ata_queued_cmd *qc) 1526 { 1527 struct completion *waiting = qc->private_data; 1528 1529 complete(waiting); 1530 } 1531 1532 /** 1533 * ata_exec_internal_sg - execute libata internal command 1534 * @dev: Device to which the command is sent 1535 * @tf: Taskfile registers for the command and the result 1536 * @cdb: CDB for packet command 1537 * @dma_dir: Data transfer direction of the command 1538 * @sgl: sg list for the data buffer of the command 1539 * @n_elem: Number of sg entries 1540 * @timeout: Timeout in msecs (0 for default) 1541 * 1542 * Executes libata internal command with timeout. @tf contains 1543 * command on entry and result on return. Timeout and error 1544 * conditions are reported via return value. No recovery action 1545 * is taken after a command times out. It's caller's duty to 1546 * clean up after timeout. 1547 * 1548 * LOCKING: 1549 * None. Should be called with kernel context, might sleep. 1550 * 1551 * RETURNS: 1552 * Zero on success, AC_ERR_* mask on failure 1553 */ 1554 unsigned ata_exec_internal_sg(struct ata_device *dev, 1555 struct ata_taskfile *tf, const u8 *cdb, 1556 int dma_dir, struct scatterlist *sgl, 1557 unsigned int n_elem, unsigned long timeout) 1558 { 1559 struct ata_link *link = dev->link; 1560 struct ata_port *ap = link->ap; 1561 u8 command = tf->command; 1562 int auto_timeout = 0; 1563 struct ata_queued_cmd *qc; 1564 unsigned int tag, preempted_tag; 1565 u32 preempted_sactive, preempted_qc_active; 1566 int preempted_nr_active_links; 1567 DECLARE_COMPLETION_ONSTACK(wait); 1568 unsigned long flags; 1569 unsigned int err_mask; 1570 int rc; 1571 1572 spin_lock_irqsave(ap->lock, flags); 1573 1574 /* no internal command while frozen */ 1575 if (ap->pflags & ATA_PFLAG_FROZEN) { 1576 spin_unlock_irqrestore(ap->lock, flags); 1577 return AC_ERR_SYSTEM; 1578 } 1579 1580 /* initialize internal qc */ 1581 1582 /* XXX: Tag 0 is used for drivers with legacy EH as some 1583 * drivers choke if any other tag is given. This breaks 1584 * ata_tag_internal() test for those drivers. Don't use new 1585 * EH stuff without converting to it. 
1586 */ 1587 if (ap->ops->error_handler) 1588 tag = ATA_TAG_INTERNAL; 1589 else 1590 tag = 0; 1591 1592 qc = __ata_qc_from_tag(ap, tag); 1593 1594 qc->tag = tag; 1595 qc->scsicmd = NULL; 1596 qc->ap = ap; 1597 qc->dev = dev; 1598 ata_qc_reinit(qc); 1599 1600 preempted_tag = link->active_tag; 1601 preempted_sactive = link->sactive; 1602 preempted_qc_active = ap->qc_active; 1603 preempted_nr_active_links = ap->nr_active_links; 1604 link->active_tag = ATA_TAG_POISON; 1605 link->sactive = 0; 1606 ap->qc_active = 0; 1607 ap->nr_active_links = 0; 1608 1609 /* prepare & issue qc */ 1610 qc->tf = *tf; 1611 if (cdb) 1612 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); 1613 1614 /* some SATA bridges need us to indicate data xfer direction */ 1615 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && 1616 dma_dir == DMA_FROM_DEVICE) 1617 qc->tf.feature |= ATAPI_DMADIR; 1618 1619 qc->flags |= ATA_QCFLAG_RESULT_TF; 1620 qc->dma_dir = dma_dir; 1621 if (dma_dir != DMA_NONE) { 1622 unsigned int i, buflen = 0; 1623 struct scatterlist *sg; 1624 1625 for_each_sg(sgl, sg, n_elem, i) 1626 buflen += sg->length; 1627 1628 ata_sg_init(qc, sgl, n_elem); 1629 qc->nbytes = buflen; 1630 } 1631 1632 qc->private_data = &wait; 1633 qc->complete_fn = ata_qc_complete_internal; 1634 1635 ata_qc_issue(qc); 1636 1637 spin_unlock_irqrestore(ap->lock, flags); 1638 1639 if (!timeout) { 1640 if (ata_probe_timeout) 1641 timeout = ata_probe_timeout * 1000; 1642 else { 1643 timeout = ata_internal_cmd_timeout(dev, command); 1644 auto_timeout = 1; 1645 } 1646 } 1647 1648 if (ap->ops->error_handler) 1649 ata_eh_release(ap); 1650 1651 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); 1652 1653 if (ap->ops->error_handler) 1654 ata_eh_acquire(ap); 1655 1656 ata_sff_flush_pio_task(ap); 1657 1658 if (!rc) { 1659 spin_lock_irqsave(ap->lock, flags); 1660 1661 /* We're racing with irq here. If we lose, the 1662 * following test prevents us from completing the qc 1663 * twice. If we win, the port is frozen and will be 1664 * cleaned up by ->post_internal_cmd(). 
1665 */ 1666 if (qc->flags & ATA_QCFLAG_ACTIVE) { 1667 qc->err_mask |= AC_ERR_TIMEOUT; 1668 1669 if (ap->ops->error_handler) 1670 ata_port_freeze(ap); 1671 else 1672 ata_qc_complete(qc); 1673 1674 if (ata_msg_warn(ap)) 1675 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n", 1676 command); 1677 } 1678 1679 spin_unlock_irqrestore(ap->lock, flags); 1680 } 1681 1682 /* do post_internal_cmd */ 1683 if (ap->ops->post_internal_cmd) 1684 ap->ops->post_internal_cmd(qc); 1685 1686 /* perform minimal error analysis */ 1687 if (qc->flags & ATA_QCFLAG_FAILED) { 1688 if (qc->result_tf.command & (ATA_ERR | ATA_DF)) 1689 qc->err_mask |= AC_ERR_DEV; 1690 1691 if (!qc->err_mask) 1692 qc->err_mask |= AC_ERR_OTHER; 1693 1694 if (qc->err_mask & ~AC_ERR_OTHER) 1695 qc->err_mask &= ~AC_ERR_OTHER; 1696 } 1697 1698 /* finish up */ 1699 spin_lock_irqsave(ap->lock, flags); 1700 1701 *tf = qc->result_tf; 1702 err_mask = qc->err_mask; 1703 1704 ata_qc_free(qc); 1705 link->active_tag = preempted_tag; 1706 link->sactive = preempted_sactive; 1707 ap->qc_active = preempted_qc_active; 1708 ap->nr_active_links = preempted_nr_active_links; 1709 1710 spin_unlock_irqrestore(ap->lock, flags); 1711 1712 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout) 1713 ata_internal_cmd_timed_out(dev, command); 1714 1715 return err_mask; 1716 } 1717 1718 /** 1719 * ata_exec_internal - execute libata internal command 1720 * @dev: Device to which the command is sent 1721 * @tf: Taskfile registers for the command and the result 1722 * @cdb: CDB for packet command 1723 * @dma_dir: Data transfer direction of the command 1724 * @buf: Data buffer of the command 1725 * @buflen: Length of data buffer 1726 * @timeout: Timeout in msecs (0 for default) 1727 * 1728 * Wrapper around ata_exec_internal_sg() which takes simple 1729 * buffer instead of sg list. 1730 * 1731 * LOCKING: 1732 * None. Should be called with kernel context, might sleep. 1733 * 1734 * RETURNS: 1735 * Zero on success, AC_ERR_* mask on failure 1736 */ 1737 unsigned ata_exec_internal(struct ata_device *dev, 1738 struct ata_taskfile *tf, const u8 *cdb, 1739 int dma_dir, void *buf, unsigned int buflen, 1740 unsigned long timeout) 1741 { 1742 struct scatterlist *psg = NULL, sg; 1743 unsigned int n_elem = 0; 1744 1745 if (dma_dir != DMA_NONE) { 1746 WARN_ON(!buf); 1747 sg_init_one(&sg, buf, buflen); 1748 psg = &sg; 1749 n_elem++; 1750 } 1751 1752 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem, 1753 timeout); 1754 } 1755 1756 /** 1757 * ata_pio_need_iordy - check if iordy needed 1758 * @adev: ATA device 1759 * 1760 * Check if the current speed of the device requires IORDY. Used 1761 * by various controllers for chip configuration. 1762 */ 1763 unsigned int ata_pio_need_iordy(const struct ata_device *adev) 1764 { 1765 /* Don't set IORDY if we're preparing for reset. IORDY may 1766 * lead to controller lock up on certain controllers if the 1767 * port is not occupied. See bko#11703 for details. 1768 */ 1769 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING) 1770 return 0; 1771 /* Controller doesn't support IORDY. Probably a pointless 1772 * check as the caller should know this. 1773 */ 1774 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) 1775 return 0; 1776 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. 
*/ 1777 if (ata_id_is_cfa(adev->id) 1778 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6)) 1779 return 0; 1780 /* PIO3 and higher it is mandatory */ 1781 if (adev->pio_mode > XFER_PIO_2) 1782 return 1; 1783 /* We turn it on when possible */ 1784 if (ata_id_has_iordy(adev->id)) 1785 return 1; 1786 return 0; 1787 } 1788 1789 /** 1790 * ata_pio_mask_no_iordy - Return the non IORDY mask 1791 * @adev: ATA device 1792 * 1793 * Compute the highest mode possible if we are not using iordy. Return 1794 * -1 if no iordy mode is available. 1795 */ 1796 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) 1797 { 1798 /* If we have no drive specific rule, then PIO 2 is non IORDY */ 1799 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ 1800 u16 pio = adev->id[ATA_ID_EIDE_PIO]; 1801 /* Is the speed faster than the drive allows non IORDY ? */ 1802 if (pio) { 1803 /* This is cycle times not frequency - watch the logic! */ 1804 if (pio > 240) /* PIO2 is 240nS per cycle */ 1805 return 3 << ATA_SHIFT_PIO; 1806 return 7 << ATA_SHIFT_PIO; 1807 } 1808 } 1809 return 3 << ATA_SHIFT_PIO; 1810 } 1811 1812 /** 1813 * ata_do_dev_read_id - default ID read method 1814 * @dev: device 1815 * @tf: proposed taskfile 1816 * @id: data buffer 1817 * 1818 * Issue the identify taskfile and hand back the buffer containing 1819 * identify data. For some RAID controllers and for pre ATA devices 1820 * this function is wrapped or replaced by the driver 1821 */ 1822 unsigned int ata_do_dev_read_id(struct ata_device *dev, 1823 struct ata_taskfile *tf, u16 *id) 1824 { 1825 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE, 1826 id, sizeof(id[0]) * ATA_ID_WORDS, 0); 1827 } 1828 1829 /** 1830 * ata_dev_read_id - Read ID data from the specified device 1831 * @dev: target device 1832 * @p_class: pointer to class of the target device (may be changed) 1833 * @flags: ATA_READID_* flags 1834 * @id: buffer to read IDENTIFY data into 1835 * 1836 * Read ID data from the specified device. ATA_CMD_ID_ATA is 1837 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI 1838 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS 1839 * for pre-ATA4 drives. 1840 * 1841 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right 1842 * now we abort if we hit that case. 1843 * 1844 * LOCKING: 1845 * Kernel thread context (may sleep) 1846 * 1847 * RETURNS: 1848 * 0 on success, -errno otherwise. 1849 */ 1850 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 1851 unsigned int flags, u16 *id) 1852 { 1853 struct ata_port *ap = dev->link->ap; 1854 unsigned int class = *p_class; 1855 struct ata_taskfile tf; 1856 unsigned int err_mask = 0; 1857 const char *reason; 1858 bool is_semb = class == ATA_DEV_SEMB; 1859 int may_fallback = 1, tried_spinup = 0; 1860 int rc; 1861 1862 if (ata_msg_ctl(ap)) 1863 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 1864 1865 retry: 1866 ata_tf_init(dev, &tf); 1867 1868 switch (class) { 1869 case ATA_DEV_SEMB: 1870 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */ 1871 case ATA_DEV_ATA: 1872 case ATA_DEV_ZAC: 1873 tf.command = ATA_CMD_ID_ATA; 1874 break; 1875 case ATA_DEV_ATAPI: 1876 tf.command = ATA_CMD_ID_ATAPI; 1877 break; 1878 default: 1879 rc = -ENODEV; 1880 reason = "unsupported class"; 1881 goto err_out; 1882 } 1883 1884 tf.protocol = ATA_PROT_PIO; 1885 1886 /* Some devices choke if TF registers contain garbage. Make 1887 * sure those are properly initialized. 
1888 */ 1889 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1890 1891 /* Device presence detection is unreliable on some 1892 * controllers. Always poll IDENTIFY if available. 1893 */ 1894 tf.flags |= ATA_TFLAG_POLLING; 1895 1896 if (ap->ops->read_id) 1897 err_mask = ap->ops->read_id(dev, &tf, id); 1898 else 1899 err_mask = ata_do_dev_read_id(dev, &tf, id); 1900 1901 if (err_mask) { 1902 if (err_mask & AC_ERR_NODEV_HINT) { 1903 ata_dev_dbg(dev, "NODEV after polling detection\n"); 1904 return -ENOENT; 1905 } 1906 1907 if (is_semb) { 1908 ata_dev_info(dev, 1909 "IDENTIFY failed on device w/ SEMB sig, disabled\n"); 1910 /* SEMB is not supported yet */ 1911 *p_class = ATA_DEV_SEMB_UNSUP; 1912 return 0; 1913 } 1914 1915 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { 1916 /* Device or controller might have reported 1917 * the wrong device class. Give a shot at the 1918 * other IDENTIFY if the current one is 1919 * aborted by the device. 1920 */ 1921 if (may_fallback) { 1922 may_fallback = 0; 1923 1924 if (class == ATA_DEV_ATA) 1925 class = ATA_DEV_ATAPI; 1926 else 1927 class = ATA_DEV_ATA; 1928 goto retry; 1929 } 1930 1931 /* Control reaches here iff the device aborted 1932 * both flavors of IDENTIFYs which happens 1933 * sometimes with phantom devices. 1934 */ 1935 ata_dev_dbg(dev, 1936 "both IDENTIFYs aborted, assuming NODEV\n"); 1937 return -ENOENT; 1938 } 1939 1940 rc = -EIO; 1941 reason = "I/O error"; 1942 goto err_out; 1943 } 1944 1945 if (dev->horkage & ATA_HORKAGE_DUMP_ID) { 1946 ata_dev_dbg(dev, "dumping IDENTIFY data, " 1947 "class=%d may_fallback=%d tried_spinup=%d\n", 1948 class, may_fallback, tried_spinup); 1949 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 1950 16, 2, id, ATA_ID_WORDS * sizeof(*id), true); 1951 } 1952 1953 /* Falling back doesn't make sense if ID data was read 1954 * successfully at least once. 1955 */ 1956 may_fallback = 0; 1957 1958 swap_buf_le16(id, ATA_ID_WORDS); 1959 1960 /* sanity check */ 1961 rc = -EINVAL; 1962 reason = "device reports invalid type"; 1963 1964 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) { 1965 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) 1966 goto err_out; 1967 if (ap->host->flags & ATA_HOST_IGNORE_ATA && 1968 ata_id_is_ata(id)) { 1969 ata_dev_dbg(dev, 1970 "host indicates ignore ATA devices, ignored\n"); 1971 return -ENOENT; 1972 } 1973 } else { 1974 if (ata_id_is_ata(id)) 1975 goto err_out; 1976 } 1977 1978 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) { 1979 tried_spinup = 1; 1980 /* 1981 * Drive powered-up in standby mode, and requires a specific 1982 * SET_FEATURES spin-up subcommand before it will accept 1983 * anything other than the original IDENTIFY command. 1984 */ 1985 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); 1986 if (err_mask && id[2] != 0x738c) { 1987 rc = -EIO; 1988 reason = "SPINUP failed"; 1989 goto err_out; 1990 } 1991 /* 1992 * If the drive initially returned incomplete IDENTIFY info, 1993 * we now must reissue the IDENTIFY command. 1994 */ 1995 if (id[2] == 0x37c8) 1996 goto retry; 1997 } 1998 1999 if ((flags & ATA_READID_POSTRESET) && 2000 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) { 2001 /* 2002 * The exact sequence expected by certain pre-ATA4 drives is: 2003 * SRST RESET 2004 * IDENTIFY (optional in early ATA) 2005 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) 2006 * anything else.. 2007 * Some drives were very specific about that exact sequence. 2008 * 2009 * Note that ATA4 says lba is mandatory so the second check 2010 * should never trigger. 
2011 */ 2012 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 2013 err_mask = ata_dev_init_params(dev, id[3], id[6]); 2014 if (err_mask) { 2015 rc = -EIO; 2016 reason = "INIT_DEV_PARAMS failed"; 2017 goto err_out; 2018 } 2019 2020 /* current CHS translation info (id[53-58]) might be 2021 * changed. reread the identify device info. 2022 */ 2023 flags &= ~ATA_READID_POSTRESET; 2024 goto retry; 2025 } 2026 } 2027 2028 *p_class = class; 2029 2030 return 0; 2031 2032 err_out: 2033 if (ata_msg_warn(ap)) 2034 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", 2035 reason, err_mask); 2036 return rc; 2037 } 2038 2039 static int ata_do_link_spd_horkage(struct ata_device *dev) 2040 { 2041 struct ata_link *plink = ata_dev_phys_link(dev); 2042 u32 target, target_limit; 2043 2044 if (!sata_scr_valid(plink)) 2045 return 0; 2046 2047 if (dev->horkage & ATA_HORKAGE_1_5_GBPS) 2048 target = 1; 2049 else 2050 return 0; 2051 2052 target_limit = (1 << target) - 1; 2053 2054 /* if already on stricter limit, no need to push further */ 2055 if (plink->sata_spd_limit <= target_limit) 2056 return 0; 2057 2058 plink->sata_spd_limit = target_limit; 2059 2060 /* Request another EH round by returning -EAGAIN if link is 2061 * going faster than the target speed. Forward progress is 2062 * guaranteed by setting sata_spd_limit to target_limit above. 2063 */ 2064 if (plink->sata_spd > target) { 2065 ata_dev_info(dev, "applying link speed limit horkage to %s\n", 2066 sata_spd_string(target)); 2067 return -EAGAIN; 2068 } 2069 return 0; 2070 } 2071 2072 static inline u8 ata_dev_knobble(struct ata_device *dev) 2073 { 2074 struct ata_port *ap = dev->link->ap; 2075 2076 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2077 return 0; 2078 2079 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2080 } 2081 2082 static int ata_dev_config_ncq(struct ata_device *dev, 2083 char *desc, size_t desc_sz) 2084 { 2085 struct ata_port *ap = dev->link->ap; 2086 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2087 unsigned int err_mask; 2088 char *aa_desc = ""; 2089 2090 if (!ata_id_has_ncq(dev->id)) { 2091 desc[0] = '\0'; 2092 return 0; 2093 } 2094 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2095 snprintf(desc, desc_sz, "NCQ (not used)"); 2096 return 0; 2097 } 2098 if (ap->flags & ATA_FLAG_NCQ) { 2099 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2100 dev->flags |= ATA_DFLAG_NCQ; 2101 } 2102 2103 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && 2104 (ap->flags & ATA_FLAG_FPDMA_AA) && 2105 ata_id_has_fpdma_aa(dev->id)) { 2106 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, 2107 SATA_FPDMA_AA); 2108 if (err_mask) { 2109 ata_dev_err(dev, 2110 "failed to enable AA (error_mask=0x%x)\n", 2111 err_mask); 2112 if (err_mask != AC_ERR_DEV) { 2113 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; 2114 return -EIO; 2115 } 2116 } else 2117 aa_desc = ", AA"; 2118 } 2119 2120 if (hdepth >= ddepth) 2121 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); 2122 else 2123 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, 2124 ddepth, aa_desc); 2125 2126 if ((ap->flags & ATA_FLAG_FPDMA_AUX) && 2127 ata_id_has_ncq_send_and_recv(dev->id)) { 2128 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, 2129 0, ap->sector_buf, 1); 2130 if (err_mask) { 2131 ata_dev_dbg(dev, 2132 "failed to get NCQ Send/Recv Log Emask 0x%x\n", 2133 err_mask); 2134 } else { 2135 u8 *cmds = dev->ncq_send_recv_cmds; 2136 2137 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; 2138 memcpy(cmds, ap->sector_buf, 
ATA_LOG_NCQ_SEND_RECV_SIZE); 2139 2140 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { 2141 ata_dev_dbg(dev, "disabling queued TRIM support\n"); 2142 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= 2143 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; 2144 } 2145 } 2146 } 2147 2148 return 0; 2149 } 2150 2151 /** 2152 * ata_dev_configure - Configure the specified ATA/ATAPI device 2153 * @dev: Target device to configure 2154 * 2155 * Configure @dev according to @dev->id. Generic and low-level 2156 * driver specific fixups are also applied. 2157 * 2158 * LOCKING: 2159 * Kernel thread context (may sleep) 2160 * 2161 * RETURNS: 2162 * 0 on success, -errno otherwise 2163 */ 2164 int ata_dev_configure(struct ata_device *dev) 2165 { 2166 struct ata_port *ap = dev->link->ap; 2167 struct ata_eh_context *ehc = &dev->link->eh_context; 2168 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2169 const u16 *id = dev->id; 2170 unsigned long xfer_mask; 2171 unsigned int err_mask; 2172 char revbuf[7]; /* XYZ-99\0 */ 2173 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2174 char modelbuf[ATA_ID_PROD_LEN+1]; 2175 int rc; 2176 2177 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2178 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); 2179 return 0; 2180 } 2181 2182 if (ata_msg_probe(ap)) 2183 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 2184 2185 /* set horkage */ 2186 dev->horkage |= ata_dev_blacklisted(dev); 2187 ata_force_horkage(dev); 2188 2189 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2190 ata_dev_info(dev, "unsupported device, disabling\n"); 2191 ata_dev_disable(dev); 2192 return 0; 2193 } 2194 2195 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2196 dev->class == ATA_DEV_ATAPI) { 2197 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", 2198 atapi_enabled ? "not supported with this driver" 2199 : "disabled"); 2200 ata_dev_disable(dev); 2201 return 0; 2202 } 2203 2204 rc = ata_do_link_spd_horkage(dev); 2205 if (rc) 2206 return rc; 2207 2208 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ 2209 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && 2210 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) 2211 dev->horkage |= ATA_HORKAGE_NOLPM; 2212 2213 if (dev->horkage & ATA_HORKAGE_NOLPM) { 2214 ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); 2215 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; 2216 } 2217 2218 /* let ACPI work its magic */ 2219 rc = ata_acpi_on_devcfg(dev); 2220 if (rc) 2221 return rc; 2222 2223 /* massage HPA, do it early as it might change IDENTIFY data */ 2224 rc = ata_hpa_resize(dev); 2225 if (rc) 2226 return rc; 2227 2228 /* print device capabilities */ 2229 if (ata_msg_probe(ap)) 2230 ata_dev_dbg(dev, 2231 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2232 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2233 __func__, 2234 id[49], id[82], id[83], id[84], 2235 id[85], id[86], id[87], id[88]); 2236 2237 /* initialize to-be-configured parameters */ 2238 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2239 dev->max_sectors = 0; 2240 dev->cdb_len = 0; 2241 dev->n_sectors = 0; 2242 dev->cylinders = 0; 2243 dev->heads = 0; 2244 dev->sectors = 0; 2245 dev->multi_count = 0; 2246 2247 /* 2248 * common ATA, ATAPI feature tests 2249 */ 2250 2251 /* find max transfer mode; for printk only */ 2252 xfer_mask = ata_id_xfermask(id); 2253 2254 if (ata_msg_probe(ap)) 2255 ata_dump_id(id); 2256 2257 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2258 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2259 sizeof(fwrevbuf)); 2260 2261 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2262 
sizeof(modelbuf)); 2263 2264 /* ATA-specific feature tests */ 2265 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { 2266 if (ata_id_is_cfa(id)) { 2267 /* CPRM may make this media unusable */ 2268 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2269 ata_dev_warn(dev, 2270 "supports DRM functions and may not be fully accessible\n"); 2271 snprintf(revbuf, 7, "CFA"); 2272 } else { 2273 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2274 /* Warn the user if the device has TPM extensions */ 2275 if (ata_id_has_tpm(id)) 2276 ata_dev_warn(dev, 2277 "supports DRM functions and may not be fully accessible\n"); 2278 } 2279 2280 dev->n_sectors = ata_id_n_sectors(id); 2281 2282 /* get current R/W Multiple count setting */ 2283 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2284 unsigned int max = dev->id[47] & 0xff; 2285 unsigned int cnt = dev->id[59] & 0xff; 2286 /* only recognize/allow powers of two here */ 2287 if (is_power_of_2(max) && is_power_of_2(cnt)) 2288 if (cnt <= max) 2289 dev->multi_count = cnt; 2290 } 2291 2292 if (ata_id_has_lba(id)) { 2293 const char *lba_desc; 2294 char ncq_desc[24]; 2295 2296 lba_desc = "LBA"; 2297 dev->flags |= ATA_DFLAG_LBA; 2298 if (ata_id_has_lba48(id)) { 2299 dev->flags |= ATA_DFLAG_LBA48; 2300 lba_desc = "LBA48"; 2301 2302 if (dev->n_sectors >= (1UL << 28) && 2303 ata_id_has_flush_ext(id)) 2304 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2305 } 2306 2307 /* config NCQ */ 2308 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2309 if (rc) 2310 return rc; 2311 2312 /* print device info to dmesg */ 2313 if (ata_msg_drv(ap) && print_info) { 2314 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2315 revbuf, modelbuf, fwrevbuf, 2316 ata_mode_string(xfer_mask)); 2317 ata_dev_info(dev, 2318 "%llu sectors, multi %u: %s %s\n", 2319 (unsigned long long)dev->n_sectors, 2320 dev->multi_count, lba_desc, ncq_desc); 2321 } 2322 } else { 2323 /* CHS */ 2324 2325 /* Default translation */ 2326 dev->cylinders = id[1]; 2327 dev->heads = id[3]; 2328 dev->sectors = id[6]; 2329 2330 if (ata_id_current_chs_valid(id)) { 2331 /* Current CHS translation is valid. */ 2332 dev->cylinders = id[54]; 2333 dev->heads = id[55]; 2334 dev->sectors = id[56]; 2335 } 2336 2337 /* print device info to dmesg */ 2338 if (ata_msg_drv(ap) && print_info) { 2339 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2340 revbuf, modelbuf, fwrevbuf, 2341 ata_mode_string(xfer_mask)); 2342 ata_dev_info(dev, 2343 "%llu sectors, multi %u, CHS %u/%u/%u\n", 2344 (unsigned long long)dev->n_sectors, 2345 dev->multi_count, dev->cylinders, 2346 dev->heads, dev->sectors); 2347 } 2348 } 2349 2350 /* Check and mark DevSlp capability. Get DevSlp timing variables 2351 * from SATA Settings page of Identify Device Data Log. 
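 * (Descriptive note, not from the original source: the block below reads
 * one sector of that log page into ap->sector_buf and copies
 * ATA_LOG_DEVSLP_SIZE bytes starting at ATA_LOG_DEVSLP_OFFSET into
 * dev->devslp_timing[], presumably for later use by the DevSlp/LPM paths.)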
2352 */ 2353 if (ata_id_has_devslp(dev->id)) { 2354 u8 *sata_setting = ap->sector_buf; 2355 int i, j; 2356 2357 dev->flags |= ATA_DFLAG_DEVSLP; 2358 err_mask = ata_read_log_page(dev, 2359 ATA_LOG_SATA_ID_DEV_DATA, 2360 ATA_LOG_SATA_SETTINGS, 2361 sata_setting, 2362 1); 2363 if (err_mask) 2364 ata_dev_dbg(dev, 2365 "failed to get Identify Device Data, Emask 0x%x\n", 2366 err_mask); 2367 else 2368 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { 2369 j = ATA_LOG_DEVSLP_OFFSET + i; 2370 dev->devslp_timing[i] = sata_setting[j]; 2371 } 2372 } 2373 2374 dev->cdb_len = 16; 2375 } 2376 2377 /* ATAPI-specific feature tests */ 2378 else if (dev->class == ATA_DEV_ATAPI) { 2379 const char *cdb_intr_string = ""; 2380 const char *atapi_an_string = ""; 2381 const char *dma_dir_string = ""; 2382 u32 sntf; 2383 2384 rc = atapi_cdb_len(id); 2385 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2386 if (ata_msg_warn(ap)) 2387 ata_dev_warn(dev, "unsupported CDB len\n"); 2388 rc = -EINVAL; 2389 goto err_out_nosup; 2390 } 2391 dev->cdb_len = (unsigned int) rc; 2392 2393 /* Enable ATAPI AN if both the host and device have 2394 * the support. If PMP is attached, SNTF is required 2395 * to enable ATAPI AN to discern between PHY status 2396 * changed notifications and ATAPI ANs. 2397 */ 2398 if (atapi_an && 2399 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2400 (!sata_pmp_attached(ap) || 2401 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2402 /* issue SET feature command to turn this on */ 2403 err_mask = ata_dev_set_feature(dev, 2404 SETFEATURES_SATA_ENABLE, SATA_AN); 2405 if (err_mask) 2406 ata_dev_err(dev, 2407 "failed to enable ATAPI AN (err_mask=0x%x)\n", 2408 err_mask); 2409 else { 2410 dev->flags |= ATA_DFLAG_AN; 2411 atapi_an_string = ", ATAPI AN"; 2412 } 2413 } 2414 2415 if (ata_id_cdb_intr(dev->id)) { 2416 dev->flags |= ATA_DFLAG_CDB_INTR; 2417 cdb_intr_string = ", CDB intr"; 2418 } 2419 2420 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) { 2421 dev->flags |= ATA_DFLAG_DMADIR; 2422 dma_dir_string = ", DMADIR"; 2423 } 2424 2425 if (ata_id_has_da(dev->id)) { 2426 dev->flags |= ATA_DFLAG_DA; 2427 zpodd_init(dev); 2428 } 2429 2430 /* print device info to dmesg */ 2431 if (ata_msg_drv(ap) && print_info) 2432 ata_dev_info(dev, 2433 "ATAPI: %s, %s, max %s%s%s%s\n", 2434 modelbuf, fwrevbuf, 2435 ata_mode_string(xfer_mask), 2436 cdb_intr_string, atapi_an_string, 2437 dma_dir_string); 2438 } 2439 2440 /* determine max_sectors */ 2441 dev->max_sectors = ATA_MAX_SECTORS; 2442 if (dev->flags & ATA_DFLAG_LBA48) 2443 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2444 2445 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2446 200 sectors */ 2447 if (ata_dev_knobble(dev)) { 2448 if (ata_msg_drv(ap) && print_info) 2449 ata_dev_info(dev, "applying bridge limits\n"); 2450 dev->udma_mask &= ATA_UDMA5; 2451 dev->max_sectors = ATA_MAX_SECTORS; 2452 } 2453 2454 if ((dev->class == ATA_DEV_ATAPI) && 2455 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2456 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2457 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2458 } 2459 2460 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2461 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2462 dev->max_sectors); 2463 2464 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024) 2465 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024, 2466 dev->max_sectors); 2467 2468 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2469 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2470 2471 if (ap->ops->dev_config) 2472 
ap->ops->dev_config(dev); 2473 2474 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2475 /* Let the user know. We don't want to disallow opens for 2476 rescue purposes, or in case the vendor is just a blithering 2477 idiot. Do this after the dev_config call as some controllers 2478 with buggy firmware may want to avoid reporting false device 2479 bugs */ 2480 2481 if (print_info) { 2482 ata_dev_warn(dev, 2483 "Drive reports diagnostics failure. This may indicate a drive\n"); 2484 ata_dev_warn(dev, 2485 "fault or invalid emulation. Contact drive vendor for information.\n"); 2486 } 2487 } 2488 2489 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2490 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); 2491 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); 2492 } 2493 2494 return 0; 2495 2496 err_out_nosup: 2497 if (ata_msg_probe(ap)) 2498 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); 2499 return rc; 2500 } 2501 2502 /** 2503 * ata_cable_40wire - return 40 wire cable type 2504 * @ap: port 2505 * 2506 * Helper method for drivers which want to hardwire 40 wire cable 2507 * detection. 2508 */ 2509 2510 int ata_cable_40wire(struct ata_port *ap) 2511 { 2512 return ATA_CBL_PATA40; 2513 } 2514 2515 /** 2516 * ata_cable_80wire - return 80 wire cable type 2517 * @ap: port 2518 * 2519 * Helper method for drivers which want to hardwire 80 wire cable 2520 * detection. 2521 */ 2522 2523 int ata_cable_80wire(struct ata_port *ap) 2524 { 2525 return ATA_CBL_PATA80; 2526 } 2527 2528 /** 2529 * ata_cable_unknown - return unknown PATA cable. 2530 * @ap: port 2531 * 2532 * Helper method for drivers which have no PATA cable detection. 2533 */ 2534 2535 int ata_cable_unknown(struct ata_port *ap) 2536 { 2537 return ATA_CBL_PATA_UNK; 2538 } 2539 2540 /** 2541 * ata_cable_ignore - return ignored PATA cable. 2542 * @ap: port 2543 * 2544 * Helper method for drivers which don't use cable type to limit 2545 * transfer mode. 2546 */ 2547 int ata_cable_ignore(struct ata_port *ap) 2548 { 2549 return ATA_CBL_PATA_IGN; 2550 } 2551 2552 /** 2553 * ata_cable_sata - return SATA cable type 2554 * @ap: port 2555 * 2556 * Helper method for drivers which have SATA cables 2557 */ 2558 2559 int ata_cable_sata(struct ata_port *ap) 2560 { 2561 return ATA_CBL_SATA; 2562 } 2563 2564 /** 2565 * ata_bus_probe - Reset and probe ATA bus 2566 * @ap: Bus to probe 2567 * 2568 * Master ATA bus probing function. Initiates a hardware-dependent 2569 * bus reset, then attempts to identify any devices found on 2570 * the bus. 2571 * 2572 * LOCKING: 2573 * PCI/etc. bus probe sem. 2574 * 2575 * RETURNS: 2576 * Zero on success, negative errno otherwise. 2577 */ 2578 2579 int ata_bus_probe(struct ata_port *ap) 2580 { 2581 unsigned int classes[ATA_MAX_DEVICES]; 2582 int tries[ATA_MAX_DEVICES]; 2583 int rc; 2584 struct ata_device *dev; 2585 2586 ata_for_each_dev(dev, &ap->link, ALL) 2587 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2588 2589 retry: 2590 ata_for_each_dev(dev, &ap->link, ALL) { 2591 /* If we issue an SRST then an ATA drive (not ATAPI) 2592 * may change configuration and be in PIO0 timing. If 2593 * we do a hard reset (or are coming from power on) 2594 * this is true for ATA or ATAPI. Until we've set a 2595 * suitable controller mode we should not touch the 2596 * bus as we may be talking too fast. 
2597 */ 2598 dev->pio_mode = XFER_PIO_0; 2599 dev->dma_mode = 0xff; 2600 2601 /* If the controller has a pio mode setup function 2602 * then use it to set the chipset to rights. Don't 2603 * touch the DMA setup as that will be dealt with when 2604 * configuring devices. 2605 */ 2606 if (ap->ops->set_piomode) 2607 ap->ops->set_piomode(ap, dev); 2608 } 2609 2610 /* reset and determine device classes */ 2611 ap->ops->phy_reset(ap); 2612 2613 ata_for_each_dev(dev, &ap->link, ALL) { 2614 if (dev->class != ATA_DEV_UNKNOWN) 2615 classes[dev->devno] = dev->class; 2616 else 2617 classes[dev->devno] = ATA_DEV_NONE; 2618 2619 dev->class = ATA_DEV_UNKNOWN; 2620 } 2621 2622 /* read IDENTIFY page and configure devices. We have to do the identify 2623 specific sequence bass-ackwards so that PDIAG- is released by 2624 the slave device */ 2625 2626 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2627 if (tries[dev->devno]) 2628 dev->class = classes[dev->devno]; 2629 2630 if (!ata_dev_enabled(dev)) 2631 continue; 2632 2633 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2634 dev->id); 2635 if (rc) 2636 goto fail; 2637 } 2638 2639 /* Now ask for the cable type as PDIAG- should have been released */ 2640 if (ap->ops->cable_detect) 2641 ap->cbl = ap->ops->cable_detect(ap); 2642 2643 /* We may have SATA bridge glue hiding here irrespective of 2644 * the reported cable types and sensed types. When SATA 2645 * drives indicate we have a bridge, we don't know which end 2646 * of the link the bridge is which is a problem. 2647 */ 2648 ata_for_each_dev(dev, &ap->link, ENABLED) 2649 if (ata_id_is_sata(dev->id)) 2650 ap->cbl = ATA_CBL_SATA; 2651 2652 /* After the identify sequence we can now set up the devices. We do 2653 this in the normal order so that the user doesn't get confused */ 2654 2655 ata_for_each_dev(dev, &ap->link, ENABLED) { 2656 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2657 rc = ata_dev_configure(dev); 2658 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2659 if (rc) 2660 goto fail; 2661 } 2662 2663 /* configure transfer mode */ 2664 rc = ata_set_mode(&ap->link, &dev); 2665 if (rc) 2666 goto fail; 2667 2668 ata_for_each_dev(dev, &ap->link, ENABLED) 2669 return 0; 2670 2671 return -ENODEV; 2672 2673 fail: 2674 tries[dev->devno]--; 2675 2676 switch (rc) { 2677 case -EINVAL: 2678 /* eeek, something went very wrong, give up */ 2679 tries[dev->devno] = 0; 2680 break; 2681 2682 case -ENODEV: 2683 /* give it just one more chance */ 2684 tries[dev->devno] = min(tries[dev->devno], 1); 2685 case -EIO: 2686 if (tries[dev->devno] == 1) { 2687 /* This is the last chance, better to slow 2688 * down than lose it. 2689 */ 2690 sata_down_spd_limit(&ap->link, 0); 2691 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2692 } 2693 } 2694 2695 if (!tries[dev->devno]) 2696 ata_dev_disable(dev); 2697 2698 goto retry; 2699 } 2700 2701 /** 2702 * sata_print_link_status - Print SATA link status 2703 * @link: SATA link to printk link status about 2704 * 2705 * This function prints link speed and status of a SATA link. 2706 * 2707 * LOCKING: 2708 * None. 
2709 */ 2710 static void sata_print_link_status(struct ata_link *link) 2711 { 2712 u32 sstatus, scontrol, tmp; 2713 2714 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2715 return; 2716 sata_scr_read(link, SCR_CONTROL, &scontrol); 2717 2718 if (ata_phys_link_online(link)) { 2719 tmp = (sstatus >> 4) & 0xf; 2720 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", 2721 sata_spd_string(tmp), sstatus, scontrol); 2722 } else { 2723 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", 2724 sstatus, scontrol); 2725 } 2726 } 2727 2728 /** 2729 * ata_dev_pair - return other device on cable 2730 * @adev: device 2731 * 2732 * Obtain the other device on the same cable, or if none is 2733 * present NULL is returned 2734 */ 2735 2736 struct ata_device *ata_dev_pair(struct ata_device *adev) 2737 { 2738 struct ata_link *link = adev->link; 2739 struct ata_device *pair = &link->device[1 - adev->devno]; 2740 if (!ata_dev_enabled(pair)) 2741 return NULL; 2742 return pair; 2743 } 2744 2745 /** 2746 * sata_down_spd_limit - adjust SATA spd limit downward 2747 * @link: Link to adjust SATA spd limit for 2748 * @spd_limit: Additional limit 2749 * 2750 * Adjust SATA spd limit of @link downward. Note that this 2751 * function only adjusts the limit. The change must be applied 2752 * using sata_set_spd(). 2753 * 2754 * If @spd_limit is non-zero, the speed is limited to equal to or 2755 * lower than @spd_limit if such speed is supported. If 2756 * @spd_limit is slower than any supported speed, only the lowest 2757 * supported speed is allowed. 2758 * 2759 * LOCKING: 2760 * Inherited from caller. 2761 * 2762 * RETURNS: 2763 * 0 on success, negative errno on failure 2764 */ 2765 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 2766 { 2767 u32 sstatus, spd, mask; 2768 int rc, bit; 2769 2770 if (!sata_scr_valid(link)) 2771 return -EOPNOTSUPP; 2772 2773 /* If SCR can be read, use it to determine the current SPD. 2774 * If not, use cached value in link->sata_spd. 2775 */ 2776 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2777 if (rc == 0 && ata_sstatus_online(sstatus)) 2778 spd = (sstatus >> 4) & 0xf; 2779 else 2780 spd = link->sata_spd; 2781 2782 mask = link->sata_spd_limit; 2783 if (mask <= 1) 2784 return -EINVAL; 2785 2786 /* unconditionally mask off the highest bit */ 2787 bit = fls(mask) - 1; 2788 mask &= ~(1 << bit); 2789 2790 /* Mask off all speeds higher than or equal to the current 2791 * one. Force 1.5Gbps if current SPD is not available. 2792 */ 2793 if (spd > 1) 2794 mask &= (1 << (spd - 1)) - 1; 2795 else 2796 mask &= 1; 2797 2798 /* were we already at the bottom? */ 2799 if (!mask) 2800 return -EINVAL; 2801 2802 if (spd_limit) { 2803 if (mask & ((1 << spd_limit) - 1)) 2804 mask &= (1 << spd_limit) - 1; 2805 else { 2806 bit = ffs(mask) - 1; 2807 mask = 1 << bit; 2808 } 2809 } 2810 2811 link->sata_spd_limit = mask; 2812 2813 ata_link_warn(link, "limiting SATA link speed to %s\n", 2814 sata_spd_string(fls(mask))); 2815 2816 return 0; 2817 } 2818 2819 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2820 { 2821 struct ata_link *host_link = &link->ap->link; 2822 u32 limit, target, spd; 2823 2824 limit = link->sata_spd_limit; 2825 2826 /* Don't configure downstream link faster than upstream link. 2827 * It doesn't speed up anything and some PMPs choke on such 2828 * configuration. 
2829 */ 2830 if (!ata_is_host_link(link) && host_link->sata_spd) 2831 limit &= (1 << host_link->sata_spd) - 1; 2832 2833 if (limit == UINT_MAX) 2834 target = 0; 2835 else 2836 target = fls(limit); 2837 2838 spd = (*scontrol >> 4) & 0xf; 2839 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 2840 2841 return spd != target; 2842 } 2843 2844 /** 2845 * sata_set_spd_needed - is SATA spd configuration needed 2846 * @link: Link in question 2847 * 2848 * Test whether the spd limit in SControl matches 2849 * @link->sata_spd_limit. This function is used to determine 2850 * whether hardreset is necessary to apply SATA spd 2851 * configuration. 2852 * 2853 * LOCKING: 2854 * Inherited from caller. 2855 * 2856 * RETURNS: 2857 * 1 if SATA spd configuration is needed, 0 otherwise. 2858 */ 2859 static int sata_set_spd_needed(struct ata_link *link) 2860 { 2861 u32 scontrol; 2862 2863 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2864 return 1; 2865 2866 return __sata_set_spd_needed(link, &scontrol); 2867 } 2868 2869 /** 2870 * sata_set_spd - set SATA spd according to spd limit 2871 * @link: Link to set SATA spd for 2872 * 2873 * Set SATA spd of @link according to sata_spd_limit. 2874 * 2875 * LOCKING: 2876 * Inherited from caller. 2877 * 2878 * RETURNS: 2879 * 0 if spd doesn't need to be changed, 1 if spd has been 2880 * changed. Negative errno if SCR registers are inaccessible. 2881 */ 2882 int sata_set_spd(struct ata_link *link) 2883 { 2884 u32 scontrol; 2885 int rc; 2886 2887 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 2888 return rc; 2889 2890 if (!__sata_set_spd_needed(link, &scontrol)) 2891 return 0; 2892 2893 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 2894 return rc; 2895 2896 return 1; 2897 } 2898 2899 /* 2900 * This mode timing computation functionality is ported over from 2901 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 2902 */ 2903 /* 2904 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 2905 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 2906 * for UDMA6, which is currently supported only by Maxtor drives. 2907 * 2908 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
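 *
 * Reading aid (an annotation, assuming struct ata_timing declares its
 * fields in the order ata_timing_quantize() assigns them): each entry
 * lists, after the mode, the setup, act8b, rec8b, cyc8b, active, recover,
 * dmack_hold, cycle and udma times in nanoseconds.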
2909 */ 2910 2911 static const struct ata_timing ata_timing[] = { 2912 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 2913 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 2914 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 2915 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 2916 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 2917 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 2918 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 2919 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 2920 2921 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 2922 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 2923 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 2924 2925 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 2926 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 2927 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 2928 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 2929 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 2930 2931 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 2932 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 2933 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 2934 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 2935 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 2936 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 2937 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 2938 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 2939 2940 { 0xFF } 2941 }; 2942 2943 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 2944 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 2945 2946 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 2947 { 2948 q->setup = EZ(t->setup * 1000, T); 2949 q->act8b = EZ(t->act8b * 1000, T); 2950 q->rec8b = EZ(t->rec8b * 1000, T); 2951 q->cyc8b = EZ(t->cyc8b * 1000, T); 2952 q->active = EZ(t->active * 1000, T); 2953 q->recover = EZ(t->recover * 1000, T); 2954 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 2955 q->cycle = EZ(t->cycle * 1000, T); 2956 q->udma = EZ(t->udma * 1000, UT); 2957 } 2958 2959 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 2960 struct ata_timing *m, unsigned int what) 2961 { 2962 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 2963 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 2964 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 2965 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 2966 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 2967 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 2968 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 2969 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 2970 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 2971 } 2972 2973 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 2974 { 2975 const struct ata_timing *t = ata_timing; 2976 2977 while (xfer_mode > t->mode) 2978 t++; 2979 2980 if (xfer_mode == t->mode) 2981 return t; 2982 2983 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", 2984 __func__, xfer_mode); 2985 2986 return NULL; 2987 } 2988 2989 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 2990 struct ata_timing *t, int T, int UT) 2991 { 2992 const u16 *id = adev->id; 2993 const struct ata_timing *s; 2994 struct ata_timing p; 2995 2996 /* 2997 * Find the mode. 
2998 */ 2999 3000 if (!(s = ata_timing_find_mode(speed))) 3001 return -EINVAL; 3002 3003 memcpy(t, s, sizeof(*s)); 3004 3005 /* 3006 * If the drive is an EIDE drive, it can tell us it needs extended 3007 * PIO/MW_DMA cycle timing. 3008 */ 3009 3010 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 3011 memset(&p, 0, sizeof(p)); 3012 3013 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { 3014 if (speed <= XFER_PIO_2) 3015 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; 3016 else if ((speed <= XFER_PIO_4) || 3017 (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) 3018 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; 3019 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) 3020 p.cycle = id[ATA_ID_EIDE_DMA_MIN]; 3021 3022 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3023 } 3024 3025 /* 3026 * Convert the timing to bus clock counts. 3027 */ 3028 3029 ata_timing_quantize(t, t, T, UT); 3030 3031 /* 3032 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3033 * S.M.A.R.T. and some other commands. We have to ensure that the 3034 * DMA cycle timing is slower than or equal to the fastest PIO timing. 3035 */ 3036 3037 if (speed > XFER_PIO_6) { 3038 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3039 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3040 } 3041 3042 /* 3043 * Lengthen active & recovery time so that cycle time is correct. 3044 */ 3045 3046 if (t->act8b + t->rec8b < t->cyc8b) { 3047 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3048 t->rec8b = t->cyc8b - t->act8b; 3049 } 3050 3051 if (t->active + t->recover < t->cycle) { 3052 t->active += (t->cycle - (t->active + t->recover)) / 2; 3053 t->recover = t->cycle - t->active; 3054 } 3055 3056 /* In a few cases quantisation may produce enough errors to 3057 leave t->cycle too low for the sum of active and recovery; 3058 if so we must correct this */ 3059 if (t->active + t->recover > t->cycle) 3060 t->cycle = t->active + t->recover; 3061 3062 return 0; 3063 } 3064 3065 /** 3066 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3067 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3068 * @cycle: cycle duration in ns 3069 * 3070 * Return matching xfer mode for @cycle. The returned mode is of 3071 * the transfer type specified by @xfer_shift. If @cycle is too 3072 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3073 * than the fastest known mode, the fastest mode is returned. 3074 * 3075 * LOCKING: 3076 * None. 3077 * 3078 * RETURNS: 3079 * Matching xfer_mode, 0xff if no match found.
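 *
 * Worked example (a sketch based on the ata_timing[] table above):
 * for ATA_SHIFT_UDMA and @cycle = 30 the scan stops at XFER_UDMA_5
 * (20 ns, faster than requested) and returns the last mode that still
 * fits, XFER_UDMA_4 (30 ns).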
3080 */ 3081 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3082 { 3083 u8 base_mode = 0xff, last_mode = 0xff; 3084 const struct ata_xfer_ent *ent; 3085 const struct ata_timing *t; 3086 3087 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3088 if (ent->shift == xfer_shift) 3089 base_mode = ent->base; 3090 3091 for (t = ata_timing_find_mode(base_mode); 3092 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3093 unsigned short this_cycle; 3094 3095 switch (xfer_shift) { 3096 case ATA_SHIFT_PIO: 3097 case ATA_SHIFT_MWDMA: 3098 this_cycle = t->cycle; 3099 break; 3100 case ATA_SHIFT_UDMA: 3101 this_cycle = t->udma; 3102 break; 3103 default: 3104 return 0xff; 3105 } 3106 3107 if (cycle > this_cycle) 3108 break; 3109 3110 last_mode = t->mode; 3111 } 3112 3113 return last_mode; 3114 } 3115 3116 /** 3117 * ata_down_xfermask_limit - adjust dev xfer masks downward 3118 * @dev: Device to adjust xfer masks 3119 * @sel: ATA_DNXFER_* selector 3120 * 3121 * Adjust xfer masks of @dev downward. Note that this function 3122 * does not apply the change. Invoking ata_set_mode() afterwards 3123 * will apply the limit. 3124 * 3125 * LOCKING: 3126 * Inherited from caller. 3127 * 3128 * RETURNS: 3129 * 0 on success, negative errno on failure 3130 */ 3131 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3132 { 3133 char buf[32]; 3134 unsigned long orig_mask, xfer_mask; 3135 unsigned long pio_mask, mwdma_mask, udma_mask; 3136 int quiet, highbit; 3137 3138 quiet = !!(sel & ATA_DNXFER_QUIET); 3139 sel &= ~ATA_DNXFER_QUIET; 3140 3141 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3142 dev->mwdma_mask, 3143 dev->udma_mask); 3144 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3145 3146 switch (sel) { 3147 case ATA_DNXFER_PIO: 3148 highbit = fls(pio_mask) - 1; 3149 pio_mask &= ~(1 << highbit); 3150 break; 3151 3152 case ATA_DNXFER_DMA: 3153 if (udma_mask) { 3154 highbit = fls(udma_mask) - 1; 3155 udma_mask &= ~(1 << highbit); 3156 if (!udma_mask) 3157 return -ENOENT; 3158 } else if (mwdma_mask) { 3159 highbit = fls(mwdma_mask) - 1; 3160 mwdma_mask &= ~(1 << highbit); 3161 if (!mwdma_mask) 3162 return -ENOENT; 3163 } 3164 break; 3165 3166 case ATA_DNXFER_40C: 3167 udma_mask &= ATA_UDMA_MASK_40C; 3168 break; 3169 3170 case ATA_DNXFER_FORCE_PIO0: 3171 pio_mask &= 1; 3172 case ATA_DNXFER_FORCE_PIO: 3173 mwdma_mask = 0; 3174 udma_mask = 0; 3175 break; 3176 3177 default: 3178 BUG(); 3179 } 3180 3181 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3182 3183 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3184 return -ENOENT; 3185 3186 if (!quiet) { 3187 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3188 snprintf(buf, sizeof(buf), "%s:%s", 3189 ata_mode_string(xfer_mask), 3190 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3191 else 3192 snprintf(buf, sizeof(buf), "%s", 3193 ata_mode_string(xfer_mask)); 3194 3195 ata_dev_warn(dev, "limiting speed to %s\n", buf); 3196 } 3197 3198 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3199 &dev->udma_mask); 3200 3201 return 0; 3202 } 3203 3204 static int ata_dev_set_mode(struct ata_device *dev) 3205 { 3206 struct ata_port *ap = dev->link->ap; 3207 struct ata_eh_context *ehc = &dev->link->eh_context; 3208 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3209 const char *dev_err_whine = ""; 3210 int ign_dev_err = 0; 3211 unsigned int err_mask = 0; 3212 int rc; 3213 3214 dev->flags &= ~ATA_DFLAG_PIO; 3215 if (dev->xfer_shift == ATA_SHIFT_PIO) 3216 dev->flags |= 
ATA_DFLAG_PIO; 3217 3218 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3219 dev_err_whine = " (SET_XFERMODE skipped)"; 3220 else { 3221 if (nosetxfer) 3222 ata_dev_warn(dev, 3223 "NOSETXFER but PATA detected - can't " 3224 "skip SETXFER, might malfunction\n"); 3225 err_mask = ata_dev_set_xfermode(dev); 3226 } 3227 3228 if (err_mask & ~AC_ERR_DEV) 3229 goto fail; 3230 3231 /* revalidate */ 3232 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3233 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3234 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3235 if (rc) 3236 return rc; 3237 3238 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3239 /* Old CFA may refuse this command, which is just fine */ 3240 if (ata_id_is_cfa(dev->id)) 3241 ign_dev_err = 1; 3242 /* Catch several broken garbage emulations plus some pre 3243 ATA devices */ 3244 if (ata_id_major_version(dev->id) == 0 && 3245 dev->pio_mode <= XFER_PIO_2) 3246 ign_dev_err = 1; 3247 /* Some very old devices and some bad newer ones fail 3248 any kind of SET_XFERMODE request but support PIO0-2 3249 timings and no IORDY */ 3250 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3251 ign_dev_err = 1; 3252 } 3253 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3254 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3255 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3256 dev->dma_mode == XFER_MW_DMA_0 && 3257 (dev->id[63] >> 8) & 1) 3258 ign_dev_err = 1; 3259 3260 /* if the device is actually configured correctly, ignore dev err */ 3261 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3262 ign_dev_err = 1; 3263 3264 if (err_mask & AC_ERR_DEV) { 3265 if (!ign_dev_err) 3266 goto fail; 3267 else 3268 dev_err_whine = " (device error ignored)"; 3269 } 3270 3271 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3272 dev->xfer_shift, (int)dev->xfer_mode); 3273 3274 ata_dev_info(dev, "configured for %s%s\n", 3275 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3276 dev_err_whine); 3277 3278 return 0; 3279 3280 fail: 3281 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask); 3282 return -EIO; 3283 } 3284 3285 /** 3286 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3287 * @link: link on which timings will be programmed 3288 * @r_failed_dev: out parameter for failed device 3289 * 3290 * Standard implementation of the function used to tune and set 3291 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3292 * ata_dev_set_mode() fails, pointer to the failing device is 3293 * returned in @r_failed_dev. 3294 * 3295 * LOCKING: 3296 * PCI/etc. bus probe sem. 
3297 * 3298 * RETURNS: 3299 * 0 on success, negative errno otherwise 3300 */ 3301 3302 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3303 { 3304 struct ata_port *ap = link->ap; 3305 struct ata_device *dev; 3306 int rc = 0, used_dma = 0, found = 0; 3307 3308 /* step 1: calculate xfer_mask */ 3309 ata_for_each_dev(dev, link, ENABLED) { 3310 unsigned long pio_mask, dma_mask; 3311 unsigned int mode_mask; 3312 3313 mode_mask = ATA_DMA_MASK_ATA; 3314 if (dev->class == ATA_DEV_ATAPI) 3315 mode_mask = ATA_DMA_MASK_ATAPI; 3316 else if (ata_id_is_cfa(dev->id)) 3317 mode_mask = ATA_DMA_MASK_CFA; 3318 3319 ata_dev_xfermask(dev); 3320 ata_force_xfermask(dev); 3321 3322 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3323 3324 if (libata_dma_mask & mode_mask) 3325 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, 3326 dev->udma_mask); 3327 else 3328 dma_mask = 0; 3329 3330 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3331 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3332 3333 found = 1; 3334 if (ata_dma_enabled(dev)) 3335 used_dma = 1; 3336 } 3337 if (!found) 3338 goto out; 3339 3340 /* step 2: always set host PIO timings */ 3341 ata_for_each_dev(dev, link, ENABLED) { 3342 if (dev->pio_mode == 0xff) { 3343 ata_dev_warn(dev, "no PIO support\n"); 3344 rc = -EINVAL; 3345 goto out; 3346 } 3347 3348 dev->xfer_mode = dev->pio_mode; 3349 dev->xfer_shift = ATA_SHIFT_PIO; 3350 if (ap->ops->set_piomode) 3351 ap->ops->set_piomode(ap, dev); 3352 } 3353 3354 /* step 3: set host DMA timings */ 3355 ata_for_each_dev(dev, link, ENABLED) { 3356 if (!ata_dma_enabled(dev)) 3357 continue; 3358 3359 dev->xfer_mode = dev->dma_mode; 3360 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3361 if (ap->ops->set_dmamode) 3362 ap->ops->set_dmamode(ap, dev); 3363 } 3364 3365 /* step 4: update devices' xfer mode */ 3366 ata_for_each_dev(dev, link, ENABLED) { 3367 rc = ata_dev_set_mode(dev); 3368 if (rc) 3369 goto out; 3370 } 3371 3372 /* Record simplex status. If we selected DMA then the other 3373 * host channels are not permitted to do so. 3374 */ 3375 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3376 ap->host->simplex_claimed = ap; 3377 3378 out: 3379 if (rc) 3380 *r_failed_dev = dev; 3381 return rc; 3382 } 3383 3384 /** 3385 * ata_wait_ready - wait for link to become ready 3386 * @link: link to be waited on 3387 * @deadline: deadline jiffies for the operation 3388 * @check_ready: callback to check link readiness 3389 * 3390 * Wait for @link to become ready. @check_ready should return 3391 * positive number if @link is ready, 0 if it isn't, -ENODEV if 3392 * link doesn't seem to be occupied, other errno for other error 3393 * conditions. 3394 * 3395 * Transient -ENODEV conditions are allowed for 3396 * ATA_TMOUT_FF_WAIT. 3397 * 3398 * LOCKING: 3399 * EH context. 3400 * 3401 * RETURNS: 3402 * 0 if @linke is ready before @deadline; otherwise, -errno. 3403 */ 3404 int ata_wait_ready(struct ata_link *link, unsigned long deadline, 3405 int (*check_ready)(struct ata_link *link)) 3406 { 3407 unsigned long start = jiffies; 3408 unsigned long nodev_deadline; 3409 int warned = 0; 3410 3411 /* choose which 0xff timeout to use, read comment in libata.h */ 3412 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) 3413 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); 3414 else 3415 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3416 3417 /* Slave readiness can't be tested separately from master. 
On 3418 * M/S emulation configuration, this function should be called 3419 * only on the master and it will handle both master and slave. 3420 */ 3421 WARN_ON(link == link->ap->slave_link); 3422 3423 if (time_after(nodev_deadline, deadline)) 3424 nodev_deadline = deadline; 3425 3426 while (1) { 3427 unsigned long now = jiffies; 3428 int ready, tmp; 3429 3430 ready = tmp = check_ready(link); 3431 if (ready > 0) 3432 return 0; 3433 3434 /* 3435 * -ENODEV could be transient. Ignore -ENODEV if link 3436 * is online. Also, some SATA devices take a long 3437 * time to clear 0xff after reset. Wait for 3438 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't 3439 * offline. 3440 * 3441 * Note that some PATA controllers (pata_ali) explode 3442 * if status register is read more than once when 3443 * there's no device attached. 3444 */ 3445 if (ready == -ENODEV) { 3446 if (ata_link_online(link)) 3447 ready = 0; 3448 else if ((link->ap->flags & ATA_FLAG_SATA) && 3449 !ata_link_offline(link) && 3450 time_before(now, nodev_deadline)) 3451 ready = 0; 3452 } 3453 3454 if (ready) 3455 return ready; 3456 if (time_after(now, deadline)) 3457 return -EBUSY; 3458 3459 if (!warned && time_after(now, start + 5 * HZ) && 3460 (deadline - now > 3 * HZ)) { 3461 ata_link_warn(link, 3462 "link is slow to respond, please be patient " 3463 "(ready=%d)\n", tmp); 3464 warned = 1; 3465 } 3466 3467 ata_msleep(link->ap, 50); 3468 } 3469 } 3470 3471 /** 3472 * ata_wait_after_reset - wait for link to become ready after reset 3473 * @link: link to be waited on 3474 * @deadline: deadline jiffies for the operation 3475 * @check_ready: callback to check link readiness 3476 * 3477 * Wait for @link to become ready after reset. 3478 * 3479 * LOCKING: 3480 * EH context. 3481 * 3482 * RETURNS: 3483 * 0 if @link is ready before @deadline; otherwise, -errno. 3484 */ 3485 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 3486 int (*check_ready)(struct ata_link *link)) 3487 { 3488 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); 3489 3490 return ata_wait_ready(link, deadline, check_ready); 3491 } 3492 3493 /** 3494 * sata_link_debounce - debounce SATA phy status 3495 * @link: ATA link to debounce SATA phy status for 3496 * @params: timing parameters { interval, duration, timeout } in msec 3497 * @deadline: deadline jiffies for the operation 3498 * 3499 * Make sure SStatus of @link reaches stable state, determined by 3500 * holding the same value where DET is not 1 for @duration, polled 3501 * every @interval, before @timeout. The timeout constrains the 3502 * beginning of the stable state. Because DET gets stuck at 1 on 3503 * some controllers after hot unplugging, this function waits 3504 * until timeout and then returns 0 if DET is stable at 1. 3505 * 3506 * @timeout is further limited by @deadline. The sooner of the 3507 * two is used. 3508 * 3509 * LOCKING: 3510 * Kernel thread context (may sleep) 3511 * 3512 * RETURNS: 3513 * 0 on success, -errno on failure.
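 *
 * Illustration with hypothetical parameters (not values defined in
 * this file): params = { 10, 200, 1000 } would poll SStatus every
 * 10 ms and require DET to hold the same non-1 value for 200 ms,
 * all within a 1000 ms timeout (further capped by @deadline).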
3514 */ 3515 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3516 unsigned long deadline) 3517 { 3518 unsigned long interval = params[0]; 3519 unsigned long duration = params[1]; 3520 unsigned long last_jiffies, t; 3521 u32 last, cur; 3522 int rc; 3523 3524 t = ata_deadline(jiffies, params[2]); 3525 if (time_before(t, deadline)) 3526 deadline = t; 3527 3528 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3529 return rc; 3530 cur &= 0xf; 3531 3532 last = cur; 3533 last_jiffies = jiffies; 3534 3535 while (1) { 3536 ata_msleep(link->ap, interval); 3537 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3538 return rc; 3539 cur &= 0xf; 3540 3541 /* DET stable? */ 3542 if (cur == last) { 3543 if (cur == 1 && time_before(jiffies, deadline)) 3544 continue; 3545 if (time_after(jiffies, 3546 ata_deadline(last_jiffies, duration))) 3547 return 0; 3548 continue; 3549 } 3550 3551 /* unstable, start over */ 3552 last = cur; 3553 last_jiffies = jiffies; 3554 3555 /* Check deadline. If debouncing failed, return 3556 * -EPIPE to tell upper layer to lower link speed. 3557 */ 3558 if (time_after(jiffies, deadline)) 3559 return -EPIPE; 3560 } 3561 } 3562 3563 /** 3564 * sata_link_resume - resume SATA link 3565 * @link: ATA link to resume SATA 3566 * @params: timing parameters { interval, duratinon, timeout } in msec 3567 * @deadline: deadline jiffies for the operation 3568 * 3569 * Resume SATA phy @link and debounce it. 3570 * 3571 * LOCKING: 3572 * Kernel thread context (may sleep) 3573 * 3574 * RETURNS: 3575 * 0 on success, -errno on failure. 3576 */ 3577 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3578 unsigned long deadline) 3579 { 3580 int tries = ATA_LINK_RESUME_TRIES; 3581 u32 scontrol, serror; 3582 int rc; 3583 3584 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3585 return rc; 3586 3587 /* 3588 * Writes to SControl sometimes get ignored under certain 3589 * controllers (ata_piix SIDPR). Make sure DET actually is 3590 * cleared. 3591 */ 3592 do { 3593 scontrol = (scontrol & 0x0f0) | 0x300; 3594 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3595 return rc; 3596 /* 3597 * Some PHYs react badly if SStatus is pounded 3598 * immediately after resuming. Delay 200ms before 3599 * debouncing. 3600 */ 3601 if (!(link->flags & ATA_LFLAG_NO_DB_DELAY)) 3602 ata_msleep(link->ap, 200); 3603 3604 /* is SControl restored correctly? */ 3605 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3606 return rc; 3607 } while ((scontrol & 0xf0f) != 0x300 && --tries); 3608 3609 if ((scontrol & 0xf0f) != 0x300) { 3610 ata_link_warn(link, "failed to resume link (SControl %X)\n", 3611 scontrol); 3612 return 0; 3613 } 3614 3615 if (tries < ATA_LINK_RESUME_TRIES) 3616 ata_link_warn(link, "link resume succeeded after %d retries\n", 3617 ATA_LINK_RESUME_TRIES - tries); 3618 3619 if ((rc = sata_link_debounce(link, params, deadline))) 3620 return rc; 3621 3622 /* clear SError, some PHYs require this even for SRST to work */ 3623 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) 3624 rc = sata_scr_write(link, SCR_ERROR, serror); 3625 3626 return rc != -EINVAL ? rc : 0; 3627 } 3628 3629 /** 3630 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields 3631 * @link: ATA link to manipulate SControl for 3632 * @policy: LPM policy to configure 3633 * @spm_wakeup: initiate LPM transition to active state 3634 * 3635 * Manipulate the IPM field of the SControl register of @link 3636 * according to @policy. 
If @policy is ATA_LPM_MAX_POWER and 3637 * @spm_wakeup is %true, the SPM field is manipulated to wake up 3638 * the link. This function also clears PHYRDY_CHG before 3639 * returning. 3640 * 3641 * LOCKING: 3642 * EH context. 3643 * 3644 * RETURNS: 3645 * 0 on success, -errno otherwise. 3646 */ 3647 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, 3648 bool spm_wakeup) 3649 { 3650 struct ata_eh_context *ehc = &link->eh_context; 3651 bool woken_up = false; 3652 u32 scontrol; 3653 int rc; 3654 3655 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 3656 if (rc) 3657 return rc; 3658 3659 switch (policy) { 3660 case ATA_LPM_MAX_POWER: 3661 /* disable all LPM transitions */ 3662 scontrol |= (0x7 << 8); 3663 /* initiate transition to active state */ 3664 if (spm_wakeup) { 3665 scontrol |= (0x4 << 12); 3666 woken_up = true; 3667 } 3668 break; 3669 case ATA_LPM_MED_POWER: 3670 /* allow LPM to PARTIAL */ 3671 scontrol &= ~(0x1 << 8); 3672 scontrol |= (0x6 << 8); 3673 break; 3674 case ATA_LPM_MIN_POWER: 3675 if (ata_link_nr_enabled(link) > 0) 3676 /* no restrictions on LPM transitions */ 3677 scontrol &= ~(0x7 << 8); 3678 else { 3679 /* empty port, power off */ 3680 scontrol &= ~0xf; 3681 scontrol |= (0x1 << 2); 3682 } 3683 break; 3684 default: 3685 WARN_ON(1); 3686 } 3687 3688 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 3689 if (rc) 3690 return rc; 3691 3692 /* give the link time to transit out of LPM state */ 3693 if (woken_up) 3694 msleep(10); 3695 3696 /* clear PHYRDY_CHG from SError */ 3697 ehc->i.serror &= ~SERR_PHYRDY_CHG; 3698 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); 3699 } 3700 3701 /** 3702 * ata_std_prereset - prepare for reset 3703 * @link: ATA link to be reset 3704 * @deadline: deadline jiffies for the operation 3705 * 3706 * @link is about to be reset. Initialize it. Failure from 3707 * prereset makes libata abort whole reset sequence and give up 3708 * that port, so prereset should be best-effort. It does its 3709 * best to prepare for reset sequence but if things go wrong, it 3710 * should just whine, not fail. 3711 * 3712 * LOCKING: 3713 * Kernel thread context (may sleep) 3714 * 3715 * RETURNS: 3716 * 0 on success, -errno otherwise. 3717 */ 3718 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 3719 { 3720 struct ata_port *ap = link->ap; 3721 struct ata_eh_context *ehc = &link->eh_context; 3722 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3723 int rc; 3724 3725 /* if we're about to do hardreset, nothing more to do */ 3726 if (ehc->i.action & ATA_EH_HARDRESET) 3727 return 0; 3728 3729 /* if SATA, resume link */ 3730 if (ap->flags & ATA_FLAG_SATA) { 3731 rc = sata_link_resume(link, timing, deadline); 3732 /* whine about phy resume failure but proceed */ 3733 if (rc && rc != -EOPNOTSUPP) 3734 ata_link_warn(link, 3735 "failed to resume link for reset (errno=%d)\n", 3736 rc); 3737 } 3738 3739 /* no point in trying softreset on offline link */ 3740 if (ata_phys_link_offline(link)) 3741 ehc->i.action &= ~ATA_EH_SOFTRESET; 3742 3743 return 0; 3744 } 3745 3746 /** 3747 * sata_link_hardreset - reset link via SATA phy reset 3748 * @link: link to reset 3749 * @timing: timing parameters { interval, duratinon, timeout } in msec 3750 * @deadline: deadline jiffies for the operation 3751 * @online: optional out parameter indicating link onlineness 3752 * @check_ready: optional callback to check link readiness 3753 * 3754 * SATA phy-reset @link using DET bits of SControl register. 
3755 * After hardreset, link readiness is waited upon using 3756 * ata_wait_ready() if @check_ready is specified. LLDs are 3757 * allowed to not specify @check_ready and wait itself after this 3758 * function returns. Device classification is LLD's 3759 * responsibility. 3760 * 3761 * *@online is set to one iff reset succeeded and @link is online 3762 * after reset. 3763 * 3764 * LOCKING: 3765 * Kernel thread context (may sleep) 3766 * 3767 * RETURNS: 3768 * 0 on success, -errno otherwise. 3769 */ 3770 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 3771 unsigned long deadline, 3772 bool *online, int (*check_ready)(struct ata_link *)) 3773 { 3774 u32 scontrol; 3775 int rc; 3776 3777 DPRINTK("ENTER\n"); 3778 3779 if (online) 3780 *online = false; 3781 3782 if (sata_set_spd_needed(link)) { 3783 /* SATA spec says nothing about how to reconfigure 3784 * spd. To be on the safe side, turn off phy during 3785 * reconfiguration. This works for at least ICH7 AHCI 3786 * and Sil3124. 3787 */ 3788 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3789 goto out; 3790 3791 scontrol = (scontrol & 0x0f0) | 0x304; 3792 3793 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3794 goto out; 3795 3796 sata_set_spd(link); 3797 } 3798 3799 /* issue phy wake/reset */ 3800 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3801 goto out; 3802 3803 scontrol = (scontrol & 0x0f0) | 0x301; 3804 3805 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 3806 goto out; 3807 3808 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3809 * 10.4.2 says at least 1 ms. 3810 */ 3811 ata_msleep(link->ap, 1); 3812 3813 /* bring link back */ 3814 rc = sata_link_resume(link, timing, deadline); 3815 if (rc) 3816 goto out; 3817 /* if link is offline nothing more to do */ 3818 if (ata_phys_link_offline(link)) 3819 goto out; 3820 3821 /* Link is online. From this point, -ENODEV too is an error. */ 3822 if (online) 3823 *online = true; 3824 3825 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { 3826 /* If PMP is supported, we have to do follow-up SRST. 3827 * Some PMPs don't send D2H Reg FIS after hardreset if 3828 * the first port is empty. Wait only for 3829 * ATA_TMOUT_PMP_SRST_WAIT. 3830 */ 3831 if (check_ready) { 3832 unsigned long pmp_deadline; 3833 3834 pmp_deadline = ata_deadline(jiffies, 3835 ATA_TMOUT_PMP_SRST_WAIT); 3836 if (time_after(pmp_deadline, deadline)) 3837 pmp_deadline = deadline; 3838 ata_wait_ready(link, pmp_deadline, check_ready); 3839 } 3840 rc = -EAGAIN; 3841 goto out; 3842 } 3843 3844 rc = 0; 3845 if (check_ready) 3846 rc = ata_wait_ready(link, deadline, check_ready); 3847 out: 3848 if (rc && rc != -EAGAIN) { 3849 /* online is set iff link is online && reset succeeded */ 3850 if (online) 3851 *online = false; 3852 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); 3853 } 3854 DPRINTK("EXIT, rc=%d\n", rc); 3855 return rc; 3856 } 3857 3858 /** 3859 * sata_std_hardreset - COMRESET w/o waiting or classification 3860 * @link: link to reset 3861 * @class: resulting class of attached device 3862 * @deadline: deadline jiffies for the operation 3863 * 3864 * Standard SATA COMRESET w/o waiting or classification. 3865 * 3866 * LOCKING: 3867 * Kernel thread context (may sleep) 3868 * 3869 * RETURNS: 3870 * 0 if link offline, -EAGAIN if link online, -errno on errors. 
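 *
 * Usage sketch (illustrative, not a requirement of this file): LLDs
 * typically wire this up through their struct ata_port_operations,
 * e.g.
 *
 *	.hardreset	= sata_std_hardreset,
 *
 * and let libata EH drive the rest of the reset sequence.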
3871 */ 3872 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 3873 unsigned long deadline) 3874 { 3875 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 3876 bool online; 3877 int rc; 3878 3879 /* do hardreset */ 3880 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 3881 return online ? -EAGAIN : rc; 3882 } 3883 3884 /** 3885 * ata_std_postreset - standard postreset callback 3886 * @link: the target ata_link 3887 * @classes: classes of attached devices 3888 * 3889 * This function is invoked after a successful reset. Note that 3890 * the device might have been reset more than once using 3891 * different reset methods before postreset is invoked. 3892 * 3893 * LOCKING: 3894 * Kernel thread context (may sleep) 3895 */ 3896 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 3897 { 3898 u32 serror; 3899 3900 DPRINTK("ENTER\n"); 3901 3902 /* reset complete, clear SError */ 3903 if (!sata_scr_read(link, SCR_ERROR, &serror)) 3904 sata_scr_write(link, SCR_ERROR, serror); 3905 3906 /* print link status */ 3907 sata_print_link_status(link); 3908 3909 DPRINTK("EXIT\n"); 3910 } 3911 3912 /** 3913 * ata_dev_same_device - Determine whether new ID matches configured device 3914 * @dev: device to compare against 3915 * @new_class: class of the new device 3916 * @new_id: IDENTIFY page of the new device 3917 * 3918 * Compare @new_class and @new_id against @dev and determine 3919 * whether @dev is the device indicated by @new_class and 3920 * @new_id. 3921 * 3922 * LOCKING: 3923 * None. 3924 * 3925 * RETURNS: 3926 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 3927 */ 3928 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 3929 const u16 *new_id) 3930 { 3931 const u16 *old_id = dev->id; 3932 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 3933 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 3934 3935 if (dev->class != new_class) { 3936 ata_dev_info(dev, "class mismatch %d != %d\n", 3937 dev->class, new_class); 3938 return 0; 3939 } 3940 3941 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 3942 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 3943 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 3944 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 3945 3946 if (strcmp(model[0], model[1])) { 3947 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", 3948 model[0], model[1]); 3949 return 0; 3950 } 3951 3952 if (strcmp(serial[0], serial[1])) { 3953 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", 3954 serial[0], serial[1]); 3955 return 0; 3956 } 3957 3958 return 1; 3959 } 3960 3961 /** 3962 * ata_dev_reread_id - Re-read IDENTIFY data 3963 * @dev: target ATA device 3964 * @readid_flags: read ID flags 3965 * 3966 * Re-read IDENTIFY page and make sure @dev is still attached to 3967 * the port. 3968 * 3969 * LOCKING: 3970 * Kernel thread context (may sleep) 3971 * 3972 * RETURNS: 3973 * 0 on success, negative errno otherwise 3974 */ 3975 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 3976 { 3977 unsigned int class = dev->class; 3978 u16 *id = (void *)dev->link->ap->sector_buf; 3979 int rc; 3980 3981 /* read ID data */ 3982 rc = ata_dev_read_id(dev, &class, readid_flags, id); 3983 if (rc) 3984 return rc; 3985 3986 /* is the device still there? 
*/ 3987 if (!ata_dev_same_device(dev, class, id)) 3988 return -ENODEV; 3989 3990 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 3991 return 0; 3992 } 3993 3994 /** 3995 * ata_dev_revalidate - Revalidate ATA device 3996 * @dev: device to revalidate 3997 * @new_class: new class code 3998 * @readid_flags: read ID flags 3999 * 4000 * Re-read IDENTIFY page, make sure @dev is still attached to the 4001 * port and reconfigure it according to the new IDENTIFY page. 4002 * 4003 * LOCKING: 4004 * Kernel thread context (may sleep) 4005 * 4006 * RETURNS: 4007 * 0 on success, negative errno otherwise 4008 */ 4009 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 4010 unsigned int readid_flags) 4011 { 4012 u64 n_sectors = dev->n_sectors; 4013 u64 n_native_sectors = dev->n_native_sectors; 4014 int rc; 4015 4016 if (!ata_dev_enabled(dev)) 4017 return -ENODEV; 4018 4019 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 4020 if (ata_class_enabled(new_class) && 4021 new_class != ATA_DEV_ATA && 4022 new_class != ATA_DEV_ATAPI && 4023 new_class != ATA_DEV_ZAC && 4024 new_class != ATA_DEV_SEMB) { 4025 ata_dev_info(dev, "class mismatch %u != %u\n", 4026 dev->class, new_class); 4027 rc = -ENODEV; 4028 goto fail; 4029 } 4030 4031 /* re-read ID */ 4032 rc = ata_dev_reread_id(dev, readid_flags); 4033 if (rc) 4034 goto fail; 4035 4036 /* configure device according to the new ID */ 4037 rc = ata_dev_configure(dev); 4038 if (rc) 4039 goto fail; 4040 4041 /* verify n_sectors hasn't changed */ 4042 if (dev->class != ATA_DEV_ATA || !n_sectors || 4043 dev->n_sectors == n_sectors) 4044 return 0; 4045 4046 /* n_sectors has changed */ 4047 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", 4048 (unsigned long long)n_sectors, 4049 (unsigned long long)dev->n_sectors); 4050 4051 /* 4052 * Something could have caused HPA to be unlocked 4053 * involuntarily. If n_native_sectors hasn't changed and the 4054 * new size matches it, keep the device. 4055 */ 4056 if (dev->n_native_sectors == n_native_sectors && 4057 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 4058 ata_dev_warn(dev, 4059 "new n_sectors matches native, probably " 4060 "late HPA unlock, n_sectors updated\n"); 4061 /* use the larger n_sectors */ 4062 return 0; 4063 } 4064 4065 /* 4066 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try 4067 * unlocking HPA in those cases. 
4068 * 4069 * https://bugzilla.kernel.org/show_bug.cgi?id=15396 4070 */ 4071 if (dev->n_native_sectors == n_native_sectors && 4072 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && 4073 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { 4074 ata_dev_warn(dev, 4075 "old n_sectors matches native, probably " 4076 "late HPA lock, will try to unlock HPA\n"); 4077 /* try unlocking HPA */ 4078 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 4079 rc = -EIO; 4080 } else 4081 rc = -ENODEV; 4082 4083 /* restore original n_[native_]sectors and fail */ 4084 dev->n_native_sectors = n_native_sectors; 4085 dev->n_sectors = n_sectors; 4086 fail: 4087 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); 4088 return rc; 4089 } 4090 4091 struct ata_blacklist_entry { 4092 const char *model_num; 4093 const char *model_rev; 4094 unsigned long horkage; 4095 }; 4096 4097 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4098 /* Devices with DMA related problems under Linux */ 4099 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4100 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4101 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4102 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4103 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4104 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4105 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4106 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4107 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4108 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, 4109 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4110 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4111 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4112 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4113 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4114 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, 4115 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4116 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4117 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4118 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4119 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4120 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4121 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4122 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4123 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4124 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4125 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4126 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4127 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4128 /* Odd clown on sil3726/4726 PMPs */ 4129 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4130 4131 /* Weird ATAPI devices */ 4132 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4133 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4134 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4135 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4136 4137 /* 4138 * Causes silent data corruption with higher max sects. 
4139 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com 4140 */ 4141 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, 4142 4143 /* Devices we expect to fail diagnostics */ 4144 4145 /* Devices where NCQ should be avoided */ 4146 /* NCQ is slow */ 4147 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4148 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4149 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4150 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4151 /* NCQ is broken */ 4152 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4153 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4154 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4155 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4156 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4157 4158 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4159 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4160 ATA_HORKAGE_FIRMWARE_WARN }, 4161 4162 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4163 ATA_HORKAGE_FIRMWARE_WARN }, 4164 4165 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4166 ATA_HORKAGE_FIRMWARE_WARN }, 4167 4168 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4169 ATA_HORKAGE_FIRMWARE_WARN }, 4170 4171 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ 4172 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4173 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4174 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4175 4176 /* Blacklist entries taken from Silicon Image 3124/3132 4177 Windows driver .inf file - also several Linux problem reports */ 4178 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4179 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4180 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4181 4182 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4183 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4184 4185 /* devices which puke on READ_NATIVE_MAX */ 4186 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4187 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4188 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4189 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4190 4191 /* this one allows HPA unlocking but fails IOs on the area */ 4192 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, 4193 4194 /* Devices which report 1 sector over size HPA */ 4195 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4196 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4197 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4198 4199 /* Devices which get the IVB wrong */ 4200 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4201 /* Maybe we should just blacklist TSSTcorp... */ 4202 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, }, 4203 4204 /* Devices that do not need bridging limits applied */ 4205 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4206 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4207 4208 /* Devices which aren't very happy with higher link speeds */ 4209 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4210 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, }, 4211 4212 /* 4213 * Devices which choke on SETXFER. Applies only if both the 4214 * device and controller are SATA. 
4215 */ 4216 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, 4217 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, 4218 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, 4219 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4220 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4221 4222 /* devices that don't properly handle queued TRIM commands */ 4223 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4224 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4225 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4226 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4227 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4228 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4229 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4230 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4231 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4232 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4233 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4234 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4235 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4236 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4237 4238 /* devices that don't properly handle TRIM commands */ 4239 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, 4240 4241 /* 4242 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4243 * (Return Zero After Trim) flags in the ATA Command Set are 4244 * unreliable in the sense that they only define what happens if 4245 * the device successfully executed the DSM TRIM command. TRIM 4246 * is only advisory, however, and the device is free to silently 4247 * ignore all or parts of the request. 4248 * 4249 * Whitelist drives that are known to reliably return zeroes 4250 * after TRIM. 4251 */ 4252 4253 /* 4254 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude 4255 * that model before whitelisting all other intel SSDs. 4256 */ 4257 { "INTEL*SSDSC2MH*", NULL, 0, }, 4258 4259 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4260 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4261 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4262 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4263 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4264 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4265 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4266 4267 /* 4268 * Some WD SATA-I drives spin up and down erratically when the link 4269 * is put into the slumber mode. We don't have full list of the 4270 * affected devices. Disable LPM if the device matches one of the 4271 * known prefixes and is SATA-1. As a side effect LPM partial is 4272 * lost too. 
4273 * 4274 * https://bugzilla.kernel.org/show_bug.cgi?id=57211 4275 */ 4276 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4277 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4278 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4279 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4280 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4281 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4282 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4283 4284 /* End Marker */ 4285 { } 4286 }; 4287 4288 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4289 { 4290 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4291 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4292 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4293 4294 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4295 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4296 4297 while (ad->model_num) { 4298 if (glob_match(ad->model_num, model_num)) { 4299 if (ad->model_rev == NULL) 4300 return ad->horkage; 4301 if (glob_match(ad->model_rev, model_rev)) 4302 return ad->horkage; 4303 } 4304 ad++; 4305 } 4306 return 0; 4307 } 4308 4309 static int ata_dma_blacklisted(const struct ata_device *dev) 4310 { 4311 /* We don't support polling DMA. 4312 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4313 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4314 */ 4315 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4316 (dev->flags & ATA_DFLAG_CDB_INTR)) 4317 return 1; 4318 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4319 } 4320 4321 /** 4322 * ata_is_40wire - check drive side detection 4323 * @dev: device 4324 * 4325 * Perform drive side detection decoding, allowing for device vendors 4326 * who can't follow the documentation. 4327 */ 4328 4329 static int ata_is_40wire(struct ata_device *dev) 4330 { 4331 if (dev->horkage & ATA_HORKAGE_IVB) 4332 return ata_drive_40wire_relaxed(dev->id); 4333 return ata_drive_40wire(dev->id); 4334 } 4335 4336 /** 4337 * cable_is_40wire - 40/80/SATA decider 4338 * @ap: port to consider 4339 * 4340 * This function encapsulates the policy for speed management 4341 * in one place. At the moment we don't cache the result but 4342 * there is a good case for setting ap->cbl to the result when 4343 * we are called with unknown cables (and figuring out if it 4344 * impacts hotplug at all). 4345 * 4346 * Return 1 if the cable appears to be 40 wire. 4347 */ 4348 4349 static int cable_is_40wire(struct ata_port *ap) 4350 { 4351 struct ata_link *link; 4352 struct ata_device *dev; 4353 4354 /* If the controller thinks we are 40 wire, we are. */ 4355 if (ap->cbl == ATA_CBL_PATA40) 4356 return 1; 4357 4358 /* If the controller thinks we are 80 wire, we are. */ 4359 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4360 return 0; 4361 4362 /* If the system is known to be 40 wire short cable (eg 4363 * laptop), then we allow 80 wire modes even if the drive 4364 * isn't sure. 4365 */ 4366 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4367 return 0; 4368 4369 /* If the controller doesn't know, we scan. 4370 * 4371 * Note: We look for all 40 wire detects at this point. 
Any 4372 * 80 wire detect is taken to be 80 wire cable because 4373 * - in many setups only the one drive (slave if present) will 4374 * give a valid detect 4375 * - if you have a non detect capable drive you don't want it 4376 * to colour the choice 4377 */ 4378 ata_for_each_link(link, ap, EDGE) { 4379 ata_for_each_dev(dev, link, ENABLED) { 4380 if (!ata_is_40wire(dev)) 4381 return 0; 4382 } 4383 } 4384 return 1; 4385 } 4386 4387 /** 4388 * ata_dev_xfermask - Compute supported xfermask of the given device 4389 * @dev: Device to compute xfermask for 4390 * 4391 * Compute supported xfermask of @dev and store it in 4392 * dev->*_mask. This function is responsible for applying all 4393 * known limits including host controller limits, device 4394 * blacklist, etc... 4395 * 4396 * LOCKING: 4397 * None. 4398 */ 4399 static void ata_dev_xfermask(struct ata_device *dev) 4400 { 4401 struct ata_link *link = dev->link; 4402 struct ata_port *ap = link->ap; 4403 struct ata_host *host = ap->host; 4404 unsigned long xfer_mask; 4405 4406 /* controller modes available */ 4407 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4408 ap->mwdma_mask, ap->udma_mask); 4409 4410 /* drive modes available */ 4411 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4412 dev->mwdma_mask, dev->udma_mask); 4413 xfer_mask &= ata_id_xfermask(dev->id); 4414 4415 /* 4416 * CFA Advanced TrueIDE timings are not allowed on a shared 4417 * cable 4418 */ 4419 if (ata_dev_pair(dev)) { 4420 /* No PIO5 or PIO6 */ 4421 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4422 /* No MWDMA3 or MWDMA 4 */ 4423 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4424 } 4425 4426 if (ata_dma_blacklisted(dev)) { 4427 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4428 ata_dev_warn(dev, 4429 "device is on DMA blacklist, disabling DMA\n"); 4430 } 4431 4432 if ((host->flags & ATA_HOST_SIMPLEX) && 4433 host->simplex_claimed && host->simplex_claimed != ap) { 4434 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4435 ata_dev_warn(dev, 4436 "simplex DMA is claimed by other device, disabling DMA\n"); 4437 } 4438 4439 if (ap->flags & ATA_FLAG_NO_IORDY) 4440 xfer_mask &= ata_pio_mask_no_iordy(dev); 4441 4442 if (ap->ops->mode_filter) 4443 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4444 4445 /* Apply cable rule here. Don't apply it early because when 4446 * we handle hot plug the cable type can itself change. 4447 * Check this last so that we know if the transfer rate was 4448 * solely limited by the cable. 4449 * Unknown or 80 wire cables reported host side are checked 4450 * drive side as well. Cases where we know a 40wire cable 4451 * is used safely for 80 are not checked here. 4452 */ 4453 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4454 /* UDMA/44 or higher would be available */ 4455 if (cable_is_40wire(ap)) { 4456 ata_dev_warn(dev, 4457 "limited to UDMA/33 due to 40-wire cable\n"); 4458 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4459 } 4460 4461 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4462 &dev->mwdma_mask, &dev->udma_mask); 4463 } 4464 4465 /** 4466 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4467 * @dev: Device to which command will be sent 4468 * 4469 * Issue SET FEATURES - XFER MODE command to device @dev 4470 * on port @ap. 4471 * 4472 * LOCKING: 4473 * PCI/etc. bus probe sem. 4474 * 4475 * RETURNS: 4476 * 0 on success, AC_ERR_* mask otherwise. 
4477 */ 4478 4479 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4480 { 4481 struct ata_taskfile tf; 4482 unsigned int err_mask; 4483 4484 /* set up set-features taskfile */ 4485 DPRINTK("set features - xfer mode\n"); 4486 4487 /* Some controllers and ATAPI devices show flaky interrupt 4488 * behavior after setting xfer mode. Use polling instead. 4489 */ 4490 ata_tf_init(dev, &tf); 4491 tf.command = ATA_CMD_SET_FEATURES; 4492 tf.feature = SETFEATURES_XFER; 4493 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4494 tf.protocol = ATA_PROT_NODATA; 4495 /* If we are using IORDY we must send the mode setting command */ 4496 if (ata_pio_need_iordy(dev)) 4497 tf.nsect = dev->xfer_mode; 4498 /* If the device has IORDY and the controller does not - turn it off */ 4499 else if (ata_id_has_iordy(dev->id)) 4500 tf.nsect = 0x01; 4501 else /* In the ancient relic department - skip all of this */ 4502 return 0; 4503 4504 /* On some disks, this command causes spin-up, so we need longer timeout */ 4505 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000); 4506 4507 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4508 return err_mask; 4509 } 4510 4511 /** 4512 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4513 * @dev: Device to which command will be sent 4514 * @enable: Whether to enable or disable the feature 4515 * @feature: The sector count represents the feature to set 4516 * 4517 * Issue SET FEATURES - SATA FEATURES command to device @dev 4518 * on port @ap with sector count 4519 * 4520 * LOCKING: 4521 * PCI/etc. bus probe sem. 4522 * 4523 * RETURNS: 4524 * 0 on success, AC_ERR_* mask otherwise. 4525 */ 4526 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) 4527 { 4528 struct ata_taskfile tf; 4529 unsigned int err_mask; 4530 4531 /* set up set-features taskfile */ 4532 DPRINTK("set features - SATA features\n"); 4533 4534 ata_tf_init(dev, &tf); 4535 tf.command = ATA_CMD_SET_FEATURES; 4536 tf.feature = enable; 4537 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4538 tf.protocol = ATA_PROT_NODATA; 4539 tf.nsect = feature; 4540 4541 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4542 4543 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4544 return err_mask; 4545 } 4546 EXPORT_SYMBOL_GPL(ata_dev_set_feature); 4547 4548 /** 4549 * ata_dev_init_params - Issue INIT DEV PARAMS command 4550 * @dev: Device to which command will be sent 4551 * @heads: Number of heads (taskfile parameter) 4552 * @sectors: Number of sectors (taskfile parameter) 4553 * 4554 * LOCKING: 4555 * Kernel thread context (may sleep) 4556 * 4557 * RETURNS: 4558 * 0 on success, AC_ERR_* mask otherwise. 4559 */ 4560 static unsigned int ata_dev_init_params(struct ata_device *dev, 4561 u16 heads, u16 sectors) 4562 { 4563 struct ata_taskfile tf; 4564 unsigned int err_mask; 4565 4566 /* Number of sectors per track 1-255. Number of heads 1-16 */ 4567 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4568 return AC_ERR_INVALID; 4569 4570 /* set up init dev params taskfile */ 4571 DPRINTK("init dev params \n"); 4572 4573 ata_tf_init(dev, &tf); 4574 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4575 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4576 tf.protocol = ATA_PROT_NODATA; 4577 tf.nsect = sectors; 4578 tf.device |= (heads - 1) & 0x0f; /* max head = num. 
of heads - 1 */ 4579 4580 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4581 /* A clean abort indicates an original or just out of spec drive 4582 and we should continue as we issue the setup based on the 4583 drive reported working geometry */ 4584 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4585 err_mask = 0; 4586 4587 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4588 return err_mask; 4589 } 4590 4591 /** 4592 * ata_sg_clean - Unmap DMA memory associated with command 4593 * @qc: Command containing DMA memory to be released 4594 * 4595 * Unmap all mapped DMA memory associated with this command. 4596 * 4597 * LOCKING: 4598 * spin_lock_irqsave(host lock) 4599 */ 4600 void ata_sg_clean(struct ata_queued_cmd *qc) 4601 { 4602 struct ata_port *ap = qc->ap; 4603 struct scatterlist *sg = qc->sg; 4604 int dir = qc->dma_dir; 4605 4606 WARN_ON_ONCE(sg == NULL); 4607 4608 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4609 4610 if (qc->n_elem) 4611 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4612 4613 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4614 qc->sg = NULL; 4615 } 4616 4617 /** 4618 * atapi_check_dma - Check whether ATAPI DMA can be supported 4619 * @qc: Metadata associated with taskfile to check 4620 * 4621 * Allow low-level driver to filter ATA PACKET commands, returning 4622 * a status indicating whether or not it is OK to use DMA for the 4623 * supplied PACKET command. 4624 * 4625 * LOCKING: 4626 * spin_lock_irqsave(host lock) 4627 * 4628 * RETURNS: 0 when ATAPI DMA can be used 4629 * nonzero otherwise 4630 */ 4631 int atapi_check_dma(struct ata_queued_cmd *qc) 4632 { 4633 struct ata_port *ap = qc->ap; 4634 4635 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4636 * few ATAPI devices choke on such DMA requests. 4637 */ 4638 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4639 unlikely(qc->nbytes & 15)) 4640 return 1; 4641 4642 if (ap->ops->check_atapi_dma) 4643 return ap->ops->check_atapi_dma(qc); 4644 4645 return 0; 4646 } 4647 4648 /** 4649 * ata_std_qc_defer - Check whether a qc needs to be deferred 4650 * @qc: ATA command in question 4651 * 4652 * Non-NCQ commands cannot run with any other command, NCQ or 4653 * not. As upper layer only knows the queue depth, we are 4654 * responsible for maintaining exclusion. This function checks 4655 * whether a new command @qc can be issued. 4656 * 4657 * LOCKING: 4658 * spin_lock_irqsave(host lock) 4659 * 4660 * RETURNS: 4661 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4662 */ 4663 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4664 { 4665 struct ata_link *link = qc->dev->link; 4666 4667 if (qc->tf.protocol == ATA_PROT_NCQ) { 4668 if (!ata_tag_valid(link->active_tag)) 4669 return 0; 4670 } else { 4671 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4672 return 0; 4673 } 4674 4675 return ATA_DEFER_LINK; 4676 } 4677 4678 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4679 4680 /** 4681 * ata_sg_init - Associate command with scatter-gather table. 4682 * @qc: Command to be associated 4683 * @sg: Scatter-gather table. 4684 * @n_elem: Number of elements in s/g table. 4685 * 4686 * Initialize the data-related elements of queued_cmd @qc 4687 * to point to a scatter-gather table @sg, containing @n_elem 4688 * elements. 
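 *
 * As an illustration (a sketch, not a quote from the SCSI layer), a
 * caller translating a SCSI command would typically hand the already
 * built scatterlist straight to the qc:
 *
 *	ata_sg_init(qc, scsi_sglist(scmd), scsi_sg_count(scmd));
 *	qc->dma_dir = scmd->sc_data_direction;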
4689 * 4690 * LOCKING: 4691 * spin_lock_irqsave(host lock) 4692 */ 4693 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4694 unsigned int n_elem) 4695 { 4696 qc->sg = sg; 4697 qc->n_elem = n_elem; 4698 qc->cursg = qc->sg; 4699 } 4700 4701 /** 4702 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4703 * @qc: Command with scatter-gather table to be mapped. 4704 * 4705 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4706 * 4707 * LOCKING: 4708 * spin_lock_irqsave(host lock) 4709 * 4710 * RETURNS: 4711 * Zero on success, negative on error. 4712 * 4713 */ 4714 static int ata_sg_setup(struct ata_queued_cmd *qc) 4715 { 4716 struct ata_port *ap = qc->ap; 4717 unsigned int n_elem; 4718 4719 VPRINTK("ENTER, ata%u\n", ap->print_id); 4720 4721 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4722 if (n_elem < 1) 4723 return -1; 4724 4725 DPRINTK("%d sg elements mapped\n", n_elem); 4726 qc->orig_n_elem = qc->n_elem; 4727 qc->n_elem = n_elem; 4728 qc->flags |= ATA_QCFLAG_DMAMAP; 4729 4730 return 0; 4731 } 4732 4733 /** 4734 * swap_buf_le16 - swap halves of 16-bit words in place 4735 * @buf: Buffer to swap 4736 * @buf_words: Number of 16-bit words in buffer. 4737 * 4738 * Swap halves of 16-bit words if needed to convert from 4739 * little-endian byte order to native cpu byte order, or 4740 * vice-versa. 4741 * 4742 * LOCKING: 4743 * Inherited from caller. 4744 */ 4745 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4746 { 4747 #ifdef __BIG_ENDIAN 4748 unsigned int i; 4749 4750 for (i = 0; i < buf_words; i++) 4751 buf[i] = le16_to_cpu(buf[i]); 4752 #endif /* __BIG_ENDIAN */ 4753 } 4754 4755 /** 4756 * ata_qc_new_init - Request an available ATA command, and initialize it 4757 * @dev: Device from whom we request an available command structure 4758 * @tag: tag 4759 * 4760 * LOCKING: 4761 * None. 4762 */ 4763 4764 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) 4765 { 4766 struct ata_port *ap = dev->link->ap; 4767 struct ata_queued_cmd *qc; 4768 4769 /* no command while frozen */ 4770 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4771 return NULL; 4772 4773 /* libsas case */ 4774 if (ap->flags & ATA_FLAG_SAS_HOST) { 4775 tag = ata_sas_allocate_tag(ap); 4776 if (tag < 0) 4777 return NULL; 4778 } 4779 4780 qc = __ata_qc_from_tag(ap, tag); 4781 qc->tag = tag; 4782 qc->scsicmd = NULL; 4783 qc->ap = ap; 4784 qc->dev = dev; 4785 4786 ata_qc_reinit(qc); 4787 4788 return qc; 4789 } 4790 4791 /** 4792 * ata_qc_free - free unused ata_queued_cmd 4793 * @qc: Command to complete 4794 * 4795 * Designed to free unused ata_queued_cmd object 4796 * in case something prevents using it. 
4797 * 4798 * LOCKING: 4799 * spin_lock_irqsave(host lock) 4800 */ 4801 void ata_qc_free(struct ata_queued_cmd *qc) 4802 { 4803 struct ata_port *ap; 4804 unsigned int tag; 4805 4806 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4807 ap = qc->ap; 4808 4809 qc->flags = 0; 4810 tag = qc->tag; 4811 if (likely(ata_tag_valid(tag))) { 4812 qc->tag = ATA_TAG_POISON; 4813 if (ap->flags & ATA_FLAG_SAS_HOST) 4814 ata_sas_free_tag(tag, ap); 4815 } 4816 } 4817 4818 void __ata_qc_complete(struct ata_queued_cmd *qc) 4819 { 4820 struct ata_port *ap; 4821 struct ata_link *link; 4822 4823 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4824 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4825 ap = qc->ap; 4826 link = qc->dev->link; 4827 4828 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4829 ata_sg_clean(qc); 4830 4831 /* command should be marked inactive atomically with qc completion */ 4832 if (qc->tf.protocol == ATA_PROT_NCQ) { 4833 link->sactive &= ~(1 << qc->tag); 4834 if (!link->sactive) 4835 ap->nr_active_links--; 4836 } else { 4837 link->active_tag = ATA_TAG_POISON; 4838 ap->nr_active_links--; 4839 } 4840 4841 /* clear exclusive status */ 4842 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 4843 ap->excl_link == link)) 4844 ap->excl_link = NULL; 4845 4846 /* atapi: mark qc as inactive to prevent the interrupt handler 4847 * from completing the command twice later, before the error handler 4848 * is called. (when rc != 0 and atapi request sense is needed) 4849 */ 4850 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4851 ap->qc_active &= ~(1 << qc->tag); 4852 4853 /* call completion callback */ 4854 qc->complete_fn(qc); 4855 } 4856 4857 static void fill_result_tf(struct ata_queued_cmd *qc) 4858 { 4859 struct ata_port *ap = qc->ap; 4860 4861 qc->result_tf.flags = qc->tf.flags; 4862 ap->ops->qc_fill_rtf(qc); 4863 } 4864 4865 static void ata_verify_xfer(struct ata_queued_cmd *qc) 4866 { 4867 struct ata_device *dev = qc->dev; 4868 4869 if (ata_is_nodata(qc->tf.protocol)) 4870 return; 4871 4872 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 4873 return; 4874 4875 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 4876 } 4877 4878 /** 4879 * ata_qc_complete - Complete an active ATA command 4880 * @qc: Command to complete 4881 * 4882 * Indicate to the mid and upper layers that an ATA command has 4883 * completed, with either an ok or not-ok status. 4884 * 4885 * Refrain from calling this function multiple times when 4886 * successfully completing multiple NCQ commands. 4887 * ata_qc_complete_multiple() should be used instead, which will 4888 * properly update IRQ expect state. 4889 * 4890 * LOCKING: 4891 * spin_lock_irqsave(host lock) 4892 */ 4893 void ata_qc_complete(struct ata_queued_cmd *qc) 4894 { 4895 struct ata_port *ap = qc->ap; 4896 4897 /* XXX: New EH and old EH use different mechanisms to 4898 * synchronize EH with regular execution path. 4899 * 4900 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 4901 * Normal execution path is responsible for not accessing a 4902 * failed qc. libata core enforces the rule by returning NULL 4903 * from ata_qc_from_tag() for failed qcs. 4904 * 4905 * Old EH depends on ata_qc_complete() nullifying completion 4906 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 4907 * not synchronize with interrupt handler. Only PIO task is 4908 * taken care of. 
4909 */ 4910 if (ap->ops->error_handler) { 4911 struct ata_device *dev = qc->dev; 4912 struct ata_eh_info *ehi = &dev->link->eh_info; 4913 4914 if (unlikely(qc->err_mask)) 4915 qc->flags |= ATA_QCFLAG_FAILED; 4916 4917 /* 4918 * Finish internal commands without any further processing 4919 * and always with the result TF filled. 4920 */ 4921 if (unlikely(ata_tag_internal(qc->tag))) { 4922 fill_result_tf(qc); 4923 trace_ata_qc_complete_internal(qc); 4924 __ata_qc_complete(qc); 4925 return; 4926 } 4927 4928 /* 4929 * Non-internal qc has failed. Fill the result TF and 4930 * summon EH. 4931 */ 4932 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4933 fill_result_tf(qc); 4934 trace_ata_qc_complete_failed(qc); 4935 ata_qc_schedule_eh(qc); 4936 return; 4937 } 4938 4939 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 4940 4941 /* read result TF if requested */ 4942 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4943 fill_result_tf(qc); 4944 4945 trace_ata_qc_complete_done(qc); 4946 /* Some commands need post-processing after successful 4947 * completion. 4948 */ 4949 switch (qc->tf.command) { 4950 case ATA_CMD_SET_FEATURES: 4951 if (qc->tf.feature != SETFEATURES_WC_ON && 4952 qc->tf.feature != SETFEATURES_WC_OFF) 4953 break; 4954 /* fall through */ 4955 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 4956 case ATA_CMD_SET_MULTI: /* multi_count changed */ 4957 /* revalidate device */ 4958 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 4959 ata_port_schedule_eh(ap); 4960 break; 4961 4962 case ATA_CMD_SLEEP: 4963 dev->flags |= ATA_DFLAG_SLEEPING; 4964 break; 4965 } 4966 4967 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 4968 ata_verify_xfer(qc); 4969 4970 __ata_qc_complete(qc); 4971 } else { 4972 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 4973 return; 4974 4975 /* read result TF if failed or requested */ 4976 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 4977 fill_result_tf(qc); 4978 4979 __ata_qc_complete(qc); 4980 } 4981 } 4982 4983 /** 4984 * ata_qc_complete_multiple - Complete multiple qcs successfully 4985 * @ap: port in question 4986 * @qc_active: new qc_active mask 4987 * 4988 * Complete in-flight commands. This functions is meant to be 4989 * called from low-level driver's interrupt routine to complete 4990 * requests normally. ap->qc_active and @qc_active is compared 4991 * and commands are completed accordingly. 4992 * 4993 * Always use this function when completing multiple NCQ commands 4994 * from IRQ handlers instead of calling ata_qc_complete() 4995 * multiple times to keep IRQ expect status properly in sync. 4996 * 4997 * LOCKING: 4998 * spin_lock_irqsave(host lock) 4999 * 5000 * RETURNS: 5001 * Number of completed commands on success, -errno otherwise. 5002 */ 5003 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 5004 { 5005 int nr_done = 0; 5006 u32 done_mask; 5007 5008 done_mask = ap->qc_active ^ qc_active; 5009 5010 if (unlikely(done_mask & qc_active)) { 5011 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", 5012 ap->qc_active, qc_active); 5013 return -EINVAL; 5014 } 5015 5016 while (done_mask) { 5017 struct ata_queued_cmd *qc; 5018 unsigned int tag = __ffs(done_mask); 5019 5020 qc = ata_qc_from_tag(ap, tag); 5021 if (qc) { 5022 ata_qc_complete(qc); 5023 nr_done++; 5024 } 5025 done_mask &= ~(1 << tag); 5026 } 5027 5028 return nr_done; 5029 } 5030 5031 /** 5032 * ata_qc_issue - issue taskfile to device 5033 * @qc: command to issue to device 5034 * 5035 * Prepare an ATA command to submission to device. 
5036 * This includes mapping the data into a DMA-able 5037 * area, filling in the S/G table, and finally 5038 * writing the taskfile to hardware, starting the command. 5039 * 5040 * LOCKING: 5041 * spin_lock_irqsave(host lock) 5042 */ 5043 void ata_qc_issue(struct ata_queued_cmd *qc) 5044 { 5045 struct ata_port *ap = qc->ap; 5046 struct ata_link *link = qc->dev->link; 5047 u8 prot = qc->tf.protocol; 5048 5049 /* Make sure only one non-NCQ command is outstanding. The 5050 * check is skipped for old EH because it reuses active qc to 5051 * request ATAPI sense. 5052 */ 5053 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5054 5055 if (ata_is_ncq(prot)) { 5056 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 5057 5058 if (!link->sactive) 5059 ap->nr_active_links++; 5060 link->sactive |= 1 << qc->tag; 5061 } else { 5062 WARN_ON_ONCE(link->sactive); 5063 5064 ap->nr_active_links++; 5065 link->active_tag = qc->tag; 5066 } 5067 5068 qc->flags |= ATA_QCFLAG_ACTIVE; 5069 ap->qc_active |= 1 << qc->tag; 5070 5071 /* 5072 * We guarantee to LLDs that they will have at least one 5073 * non-zero sg if the command is a data command. 5074 */ 5075 if (WARN_ON_ONCE(ata_is_data(prot) && 5076 (!qc->sg || !qc->n_elem || !qc->nbytes))) 5077 goto sys_err; 5078 5079 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5080 (ap->flags & ATA_FLAG_PIO_DMA))) 5081 if (ata_sg_setup(qc)) 5082 goto sys_err; 5083 5084 /* if device is sleeping, schedule reset and abort the link */ 5085 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5086 link->eh_info.action |= ATA_EH_RESET; 5087 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5088 ata_link_abort(link); 5089 return; 5090 } 5091 5092 ap->ops->qc_prep(qc); 5093 trace_ata_qc_issue(qc); 5094 qc->err_mask |= ap->ops->qc_issue(qc); 5095 if (unlikely(qc->err_mask)) 5096 goto err; 5097 return; 5098 5099 sys_err: 5100 qc->err_mask |= AC_ERR_SYSTEM; 5101 err: 5102 ata_qc_complete(qc); 5103 } 5104 5105 /** 5106 * sata_scr_valid - test whether SCRs are accessible 5107 * @link: ATA link to test SCR accessibility for 5108 * 5109 * Test whether SCRs are accessible for @link. 5110 * 5111 * LOCKING: 5112 * None. 5113 * 5114 * RETURNS: 5115 * 1 if SCRs are accessible, 0 otherwise. 5116 */ 5117 int sata_scr_valid(struct ata_link *link) 5118 { 5119 struct ata_port *ap = link->ap; 5120 5121 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5122 } 5123 5124 /** 5125 * sata_scr_read - read SCR register of the specified port 5126 * @link: ATA link to read SCR for 5127 * @reg: SCR to read 5128 * @val: Place to store read value 5129 * 5130 * Read SCR register @reg of @link into *@val. This function is 5131 * guaranteed to succeed if @link is ap->link, the cable type of 5132 * the port is SATA and the port implements ->scr_read. 5133 * 5134 * LOCKING: 5135 * None if @link is ap->link. Kernel thread context otherwise. 5136 * 5137 * RETURNS: 5138 * 0 on success, negative errno on failure. 5139 */ 5140 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5141 { 5142 if (ata_is_host_link(link)) { 5143 if (sata_scr_valid(link)) 5144 return link->ap->ops->scr_read(link, reg, val); 5145 return -EOPNOTSUPP; 5146 } 5147 5148 return sata_pmp_scr_read(link, reg, val); 5149 } 5150 5151 /** 5152 * sata_scr_write - write SCR register of the specified port 5153 * @link: ATA link to write SCR for 5154 * @reg: SCR to write 5155 * @val: value to write 5156 * 5157 * Write @val to SCR register @reg of @link. 
This function is 5158 * guaranteed to succeed if @link is ap->link, the cable type of 5159 * the port is SATA and the port implements ->scr_read. 5160 * 5161 * LOCKING: 5162 * None if @link is ap->link. Kernel thread context otherwise. 5163 * 5164 * RETURNS: 5165 * 0 on success, negative errno on failure. 5166 */ 5167 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5168 { 5169 if (ata_is_host_link(link)) { 5170 if (sata_scr_valid(link)) 5171 return link->ap->ops->scr_write(link, reg, val); 5172 return -EOPNOTSUPP; 5173 } 5174 5175 return sata_pmp_scr_write(link, reg, val); 5176 } 5177 5178 /** 5179 * sata_scr_write_flush - write SCR register of the specified port and flush 5180 * @link: ATA link to write SCR for 5181 * @reg: SCR to write 5182 * @val: value to write 5183 * 5184 * This function is identical to sata_scr_write() except that this 5185 * function performs flush after writing to the register. 5186 * 5187 * LOCKING: 5188 * None if @link is ap->link. Kernel thread context otherwise. 5189 * 5190 * RETURNS: 5191 * 0 on success, negative errno on failure. 5192 */ 5193 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5194 { 5195 if (ata_is_host_link(link)) { 5196 int rc; 5197 5198 if (sata_scr_valid(link)) { 5199 rc = link->ap->ops->scr_write(link, reg, val); 5200 if (rc == 0) 5201 rc = link->ap->ops->scr_read(link, reg, &val); 5202 return rc; 5203 } 5204 return -EOPNOTSUPP; 5205 } 5206 5207 return sata_pmp_scr_write(link, reg, val); 5208 } 5209 5210 /** 5211 * ata_phys_link_online - test whether the given link is online 5212 * @link: ATA link to test 5213 * 5214 * Test whether @link is online. Note that this function returns 5215 * 0 if online status of @link cannot be obtained, so 5216 * ata_link_online(link) != !ata_link_offline(link). 5217 * 5218 * LOCKING: 5219 * None. 5220 * 5221 * RETURNS: 5222 * True if the port online status is available and online. 5223 */ 5224 bool ata_phys_link_online(struct ata_link *link) 5225 { 5226 u32 sstatus; 5227 5228 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5229 ata_sstatus_online(sstatus)) 5230 return true; 5231 return false; 5232 } 5233 5234 /** 5235 * ata_phys_link_offline - test whether the given link is offline 5236 * @link: ATA link to test 5237 * 5238 * Test whether @link is offline. Note that this function 5239 * returns 0 if offline status of @link cannot be obtained, so 5240 * ata_link_online(link) != !ata_link_offline(link). 5241 * 5242 * LOCKING: 5243 * None. 5244 * 5245 * RETURNS: 5246 * True if the port offline status is available and offline. 5247 */ 5248 bool ata_phys_link_offline(struct ata_link *link) 5249 { 5250 u32 sstatus; 5251 5252 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5253 !ata_sstatus_online(sstatus)) 5254 return true; 5255 return false; 5256 } 5257 5258 /** 5259 * ata_link_online - test whether the given link is online 5260 * @link: ATA link to test 5261 * 5262 * Test whether @link is online. This is identical to 5263 * ata_phys_link_online() when there's no slave link. When 5264 * there's a slave link, this function should only be called on 5265 * the master link and will return true if any of M/S links is 5266 * online. 5267 * 5268 * LOCKING: 5269 * None. 5270 * 5271 * RETURNS: 5272 * True if the port online status is available and online. 
5273 */ 5274 bool ata_link_online(struct ata_link *link) 5275 { 5276 struct ata_link *slave = link->ap->slave_link; 5277 5278 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5279 5280 return ata_phys_link_online(link) || 5281 (slave && ata_phys_link_online(slave)); 5282 } 5283 5284 /** 5285 * ata_link_offline - test whether the given link is offline 5286 * @link: ATA link to test 5287 * 5288 * Test whether @link is offline. This is identical to 5289 * ata_phys_link_offline() when there's no slave link. When 5290 * there's a slave link, this function should only be called on 5291 * the master link and will return true if both M/S links are 5292 * offline. 5293 * 5294 * LOCKING: 5295 * None. 5296 * 5297 * RETURNS: 5298 * True if the port offline status is available and offline. 5299 */ 5300 bool ata_link_offline(struct ata_link *link) 5301 { 5302 struct ata_link *slave = link->ap->slave_link; 5303 5304 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5305 5306 return ata_phys_link_offline(link) && 5307 (!slave || ata_phys_link_offline(slave)); 5308 } 5309 5310 #ifdef CONFIG_PM 5311 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, 5312 unsigned int action, unsigned int ehi_flags, 5313 bool async) 5314 { 5315 struct ata_link *link; 5316 unsigned long flags; 5317 5318 /* Previous resume operation might still be in 5319 * progress. Wait for PM_PENDING to clear. 5320 */ 5321 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5322 ata_port_wait_eh(ap); 5323 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5324 } 5325 5326 /* request PM ops to EH */ 5327 spin_lock_irqsave(ap->lock, flags); 5328 5329 ap->pm_mesg = mesg; 5330 ap->pflags |= ATA_PFLAG_PM_PENDING; 5331 ata_for_each_link(link, ap, HOST_FIRST) { 5332 link->eh_info.action |= action; 5333 link->eh_info.flags |= ehi_flags; 5334 } 5335 5336 ata_port_schedule_eh(ap); 5337 5338 spin_unlock_irqrestore(ap->lock, flags); 5339 5340 if (!async) { 5341 ata_port_wait_eh(ap); 5342 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5343 } 5344 } 5345 5346 /* 5347 * On some hardware, device fails to respond after spun down for suspend. As 5348 * the device won't be used before being resumed, we don't need to touch the 5349 * device. Ask EH to skip the usual stuff and proceed directly to suspend. 
5350 * 5351 * http://thread.gmane.org/gmane.linux.ide/46764 5352 */ 5353 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET 5354 | ATA_EHI_NO_AUTOPSY 5355 | ATA_EHI_NO_RECOVERY; 5356 5357 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) 5358 { 5359 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); 5360 } 5361 5362 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) 5363 { 5364 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); 5365 } 5366 5367 static int ata_port_pm_suspend(struct device *dev) 5368 { 5369 struct ata_port *ap = to_ata_port(dev); 5370 5371 if (pm_runtime_suspended(dev)) 5372 return 0; 5373 5374 ata_port_suspend(ap, PMSG_SUSPEND); 5375 return 0; 5376 } 5377 5378 static int ata_port_pm_freeze(struct device *dev) 5379 { 5380 struct ata_port *ap = to_ata_port(dev); 5381 5382 if (pm_runtime_suspended(dev)) 5383 return 0; 5384 5385 ata_port_suspend(ap, PMSG_FREEZE); 5386 return 0; 5387 } 5388 5389 static int ata_port_pm_poweroff(struct device *dev) 5390 { 5391 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE); 5392 return 0; 5393 } 5394 5395 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY 5396 | ATA_EHI_QUIET; 5397 5398 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg) 5399 { 5400 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false); 5401 } 5402 5403 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg) 5404 { 5405 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true); 5406 } 5407 5408 static int ata_port_pm_resume(struct device *dev) 5409 { 5410 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME); 5411 pm_runtime_disable(dev); 5412 pm_runtime_set_active(dev); 5413 pm_runtime_enable(dev); 5414 return 0; 5415 } 5416 5417 /* 5418 * For ODDs, the upper layer will poll for media change every few seconds, 5419 * which will make it enter and leave suspend state every few seconds. And 5420 * as each suspend will cause a hard/soft reset, the gain of runtime suspend 5421 * is very little and the ODD may malfunction after constantly being reset. 5422 * So the idle callback here will not proceed to suspend if a non-ZPODD capable 5423 * ODD is attached to the port. 
5424 */ 5425 static int ata_port_runtime_idle(struct device *dev) 5426 { 5427 struct ata_port *ap = to_ata_port(dev); 5428 struct ata_link *link; 5429 struct ata_device *adev; 5430 5431 ata_for_each_link(link, ap, HOST_FIRST) { 5432 ata_for_each_dev(adev, link, ENABLED) 5433 if (adev->class == ATA_DEV_ATAPI && 5434 !zpodd_dev_enabled(adev)) 5435 return -EBUSY; 5436 } 5437 5438 return 0; 5439 } 5440 5441 static int ata_port_runtime_suspend(struct device *dev) 5442 { 5443 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND); 5444 return 0; 5445 } 5446 5447 static int ata_port_runtime_resume(struct device *dev) 5448 { 5449 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME); 5450 return 0; 5451 } 5452 5453 static const struct dev_pm_ops ata_port_pm_ops = { 5454 .suspend = ata_port_pm_suspend, 5455 .resume = ata_port_pm_resume, 5456 .freeze = ata_port_pm_freeze, 5457 .thaw = ata_port_pm_resume, 5458 .poweroff = ata_port_pm_poweroff, 5459 .restore = ata_port_pm_resume, 5460 5461 .runtime_suspend = ata_port_runtime_suspend, 5462 .runtime_resume = ata_port_runtime_resume, 5463 .runtime_idle = ata_port_runtime_idle, 5464 }; 5465 5466 /* sas ports don't participate in pm runtime management of ata_ports, 5467 * and need to resume ata devices at the domain level, not the per-port 5468 * level. sas suspend/resume is async to allow parallel port recovery 5469 * since sas has multiple ata_port instances per Scsi_Host. 5470 */ 5471 void ata_sas_port_suspend(struct ata_port *ap) 5472 { 5473 ata_port_suspend_async(ap, PMSG_SUSPEND); 5474 } 5475 EXPORT_SYMBOL_GPL(ata_sas_port_suspend); 5476 5477 void ata_sas_port_resume(struct ata_port *ap) 5478 { 5479 ata_port_resume_async(ap, PMSG_RESUME); 5480 } 5481 EXPORT_SYMBOL_GPL(ata_sas_port_resume); 5482 5483 /** 5484 * ata_host_suspend - suspend host 5485 * @host: host to suspend 5486 * @mesg: PM message 5487 * 5488 * Suspend @host. Actual operation is performed by port suspend. 5489 */ 5490 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5491 { 5492 host->dev->power.power_state = mesg; 5493 return 0; 5494 } 5495 5496 /** 5497 * ata_host_resume - resume host 5498 * @host: host to resume 5499 * 5500 * Resume @host. Actual operation is performed by port resume. 5501 */ 5502 void ata_host_resume(struct ata_host *host) 5503 { 5504 host->dev->power.power_state = PMSG_ON; 5505 } 5506 #endif 5507 5508 struct device_type ata_port_type = { 5509 .name = "ata_port", 5510 #ifdef CONFIG_PM 5511 .pm = &ata_port_pm_ops, 5512 #endif 5513 }; 5514 5515 /** 5516 * ata_dev_init - Initialize an ata_device structure 5517 * @dev: Device structure to initialize 5518 * 5519 * Initialize @dev in preparation for probing. 5520 * 5521 * LOCKING: 5522 * Inherited from caller. 5523 */ 5524 void ata_dev_init(struct ata_device *dev) 5525 { 5526 struct ata_link *link = ata_dev_phys_link(dev); 5527 struct ata_port *ap = link->ap; 5528 unsigned long flags; 5529 5530 /* SATA spd limit is bound to the attached device, reset together */ 5531 link->sata_spd_limit = link->hw_sata_spd_limit; 5532 link->sata_spd = 0; 5533 5534 /* High bits of dev->flags are used to record warm plug 5535 * requests which occur asynchronously. Synchronize using 5536 * host lock. 
5537 */ 5538 spin_lock_irqsave(ap->lock, flags); 5539 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5540 dev->horkage = 0; 5541 spin_unlock_irqrestore(ap->lock, flags); 5542 5543 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5544 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5545 dev->pio_mask = UINT_MAX; 5546 dev->mwdma_mask = UINT_MAX; 5547 dev->udma_mask = UINT_MAX; 5548 } 5549 5550 /** 5551 * ata_link_init - Initialize an ata_link structure 5552 * @ap: ATA port link is attached to 5553 * @link: Link structure to initialize 5554 * @pmp: Port multiplier port number 5555 * 5556 * Initialize @link. 5557 * 5558 * LOCKING: 5559 * Kernel thread context (may sleep) 5560 */ 5561 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5562 { 5563 int i; 5564 5565 /* clear everything except for devices */ 5566 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, 5567 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); 5568 5569 link->ap = ap; 5570 link->pmp = pmp; 5571 link->active_tag = ATA_TAG_POISON; 5572 link->hw_sata_spd_limit = UINT_MAX; 5573 5574 /* can't use iterator, ap isn't initialized yet */ 5575 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5576 struct ata_device *dev = &link->device[i]; 5577 5578 dev->link = link; 5579 dev->devno = dev - link->device; 5580 #ifdef CONFIG_ATA_ACPI 5581 dev->gtf_filter = ata_acpi_gtf_filter; 5582 #endif 5583 ata_dev_init(dev); 5584 } 5585 } 5586 5587 /** 5588 * sata_link_init_spd - Initialize link->sata_spd_limit 5589 * @link: Link to configure sata_spd_limit for 5590 * 5591 * Initialize @link->[hw_]sata_spd_limit to the currently 5592 * configured value. 5593 * 5594 * LOCKING: 5595 * Kernel thread context (may sleep). 5596 * 5597 * RETURNS: 5598 * 0 on success, -errno on failure. 5599 */ 5600 int sata_link_init_spd(struct ata_link *link) 5601 { 5602 u8 spd; 5603 int rc; 5604 5605 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5606 if (rc) 5607 return rc; 5608 5609 spd = (link->saved_scontrol >> 4) & 0xf; 5610 if (spd) 5611 link->hw_sata_spd_limit &= (1 << spd) - 1; 5612 5613 ata_force_link_limits(link); 5614 5615 link->sata_spd_limit = link->hw_sata_spd_limit; 5616 5617 return 0; 5618 } 5619 5620 /** 5621 * ata_port_alloc - allocate and initialize basic ATA port resources 5622 * @host: ATA host this allocated port belongs to 5623 * 5624 * Allocate and initialize basic ATA port resources. 5625 * 5626 * RETURNS: 5627 * Allocate ATA port on success, NULL on failure. 5628 * 5629 * LOCKING: 5630 * Inherited from calling layer (may sleep). 
5631 */ 5632 struct ata_port *ata_port_alloc(struct ata_host *host) 5633 { 5634 struct ata_port *ap; 5635 5636 DPRINTK("ENTER\n"); 5637 5638 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5639 if (!ap) 5640 return NULL; 5641 5642 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; 5643 ap->lock = &host->lock; 5644 ap->print_id = -1; 5645 ap->local_port_no = -1; 5646 ap->host = host; 5647 ap->dev = host->dev; 5648 5649 #if defined(ATA_VERBOSE_DEBUG) 5650 /* turn on all debugging levels */ 5651 ap->msg_enable = 0x00FF; 5652 #elif defined(ATA_DEBUG) 5653 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5654 #else 5655 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5656 #endif 5657 5658 mutex_init(&ap->scsi_scan_mutex); 5659 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5660 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5661 INIT_LIST_HEAD(&ap->eh_done_q); 5662 init_waitqueue_head(&ap->eh_wait_q); 5663 init_completion(&ap->park_req_pending); 5664 init_timer_deferrable(&ap->fastdrain_timer); 5665 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5666 ap->fastdrain_timer.data = (unsigned long)ap; 5667 5668 ap->cbl = ATA_CBL_NONE; 5669 5670 ata_link_init(ap, &ap->link, 0); 5671 5672 #ifdef ATA_IRQ_TRAP 5673 ap->stats.unhandled_irq = 1; 5674 ap->stats.idle_irq = 1; 5675 #endif 5676 ata_sff_port_init(ap); 5677 5678 return ap; 5679 } 5680 5681 static void ata_host_release(struct device *gendev, void *res) 5682 { 5683 struct ata_host *host = dev_get_drvdata(gendev); 5684 int i; 5685 5686 for (i = 0; i < host->n_ports; i++) { 5687 struct ata_port *ap = host->ports[i]; 5688 5689 if (!ap) 5690 continue; 5691 5692 if (ap->scsi_host) 5693 scsi_host_put(ap->scsi_host); 5694 5695 kfree(ap->pmp_link); 5696 kfree(ap->slave_link); 5697 kfree(ap); 5698 host->ports[i] = NULL; 5699 } 5700 5701 dev_set_drvdata(gendev, NULL); 5702 } 5703 5704 /** 5705 * ata_host_alloc - allocate and init basic ATA host resources 5706 * @dev: generic device this host is associated with 5707 * @max_ports: maximum number of ATA ports associated with this host 5708 * 5709 * Allocate and initialize basic ATA host resources. LLD calls 5710 * this function to allocate a host, initializes it fully and 5711 * attaches it using ata_host_register(). 5712 * 5713 * @max_ports ports are allocated and host->n_ports is 5714 * initialized to @max_ports. The caller is allowed to decrease 5715 * host->n_ports before calling ata_host_register(). The unused 5716 * ports will be automatically freed on registration. 5717 * 5718 * RETURNS: 5719 * Allocate ATA host on success, NULL on failure. 5720 * 5721 * LOCKING: 5722 * Inherited from calling layer (may sleep). 
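 *
 * A hedged sketch of the usual LLD probe sequence built on top of
 * this helper (foo_sht and the port setup details are placeholders):
 *
 *	host = ata_host_alloc(&pdev->dev, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	/* fill in host->ports[i]->ops, I/O resources, xfer masks, ... */
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	rc = ata_host_register(host, &foo_sht);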
5723 */ 5724 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 5725 { 5726 struct ata_host *host; 5727 size_t sz; 5728 int i; 5729 5730 DPRINTK("ENTER\n"); 5731 5732 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 5733 return NULL; 5734 5735 /* alloc a container for our list of ATA ports (buses) */ 5736 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 5737 /* alloc a container for our list of ATA ports (buses) */ 5738 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 5739 if (!host) 5740 goto err_out; 5741 5742 devres_add(dev, host); 5743 dev_set_drvdata(dev, host); 5744 5745 spin_lock_init(&host->lock); 5746 mutex_init(&host->eh_mutex); 5747 host->dev = dev; 5748 host->n_ports = max_ports; 5749 5750 /* allocate ports bound to this host */ 5751 for (i = 0; i < max_ports; i++) { 5752 struct ata_port *ap; 5753 5754 ap = ata_port_alloc(host); 5755 if (!ap) 5756 goto err_out; 5757 5758 ap->port_no = i; 5759 host->ports[i] = ap; 5760 } 5761 5762 devres_remove_group(dev, NULL); 5763 return host; 5764 5765 err_out: 5766 devres_release_group(dev, NULL); 5767 return NULL; 5768 } 5769 5770 /** 5771 * ata_host_alloc_pinfo - alloc host and init with port_info array 5772 * @dev: generic device this host is associated with 5773 * @ppi: array of ATA port_info to initialize host with 5774 * @n_ports: number of ATA ports attached to this host 5775 * 5776 * Allocate ATA host and initialize with info from @ppi. If NULL 5777 * terminated, @ppi may contain fewer entries than @n_ports. The 5778 * last entry will be used for the remaining ports. 5779 * 5780 * RETURNS: 5781 * Allocate ATA host on success, NULL on failure. 5782 * 5783 * LOCKING: 5784 * Inherited from calling layer (may sleep). 5785 */ 5786 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 5787 const struct ata_port_info * const * ppi, 5788 int n_ports) 5789 { 5790 const struct ata_port_info *pi; 5791 struct ata_host *host; 5792 int i, j; 5793 5794 host = ata_host_alloc(dev, n_ports); 5795 if (!host) 5796 return NULL; 5797 5798 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 5799 struct ata_port *ap = host->ports[i]; 5800 5801 if (ppi[j]) 5802 pi = ppi[j++]; 5803 5804 ap->pio_mask = pi->pio_mask; 5805 ap->mwdma_mask = pi->mwdma_mask; 5806 ap->udma_mask = pi->udma_mask; 5807 ap->flags |= pi->flags; 5808 ap->link.flags |= pi->link_flags; 5809 ap->ops = pi->port_ops; 5810 5811 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 5812 host->ops = pi->port_ops; 5813 } 5814 5815 return host; 5816 } 5817 5818 /** 5819 * ata_slave_link_init - initialize slave link 5820 * @ap: port to initialize slave link for 5821 * 5822 * Create and initialize slave link for @ap. This enables slave 5823 * link handling on the port. 5824 * 5825 * In libata, a port contains links and a link contains devices. 5826 * There is single host link but if a PMP is attached to it, 5827 * there can be multiple fan-out links. On SATA, there's usually 5828 * a single device connected to a link but PATA and SATA 5829 * controllers emulating TF based interface can have two - master 5830 * and slave. 5831 * 5832 * However, there are a few controllers which don't fit into this 5833 * abstraction too well - SATA controllers which emulate TF 5834 * interface with both master and slave devices but also have 5835 * separate SCR register sets for each device. These controllers 5836 * need separate links for physical link handling 5837 * (e.g. 
onlineness, link speed) but should be treated like a 5838 * traditional M/S controller for everything else (e.g. command 5839 * issue, softreset). 5840 * 5841 * slave_link is libata's way of handling this class of 5842 * controllers without impacting core layer too much. For 5843 * anything other than physical link handling, the default host 5844 * link is used for both master and slave. For physical link 5845 * handling, separate @ap->slave_link is used. All dirty details 5846 * are implemented inside libata core layer. From LLD's POV, the 5847 * only difference is that prereset, hardreset and postreset are 5848 * called once more for the slave link, so the reset sequence 5849 * looks like the following. 5850 * 5851 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> 5852 * softreset(M) -> postreset(M) -> postreset(S) 5853 * 5854 * Note that softreset is called only for the master. Softreset 5855 * resets both M/S by definition, so SRST on master should handle 5856 * both (the standard method will work just fine). 5857 * 5858 * LOCKING: 5859 * Should be called before host is registered. 5860 * 5861 * RETURNS: 5862 * 0 on success, -errno on failure. 5863 */ 5864 int ata_slave_link_init(struct ata_port *ap) 5865 { 5866 struct ata_link *link; 5867 5868 WARN_ON(ap->slave_link); 5869 WARN_ON(ap->flags & ATA_FLAG_PMP); 5870 5871 link = kzalloc(sizeof(*link), GFP_KERNEL); 5872 if (!link) 5873 return -ENOMEM; 5874 5875 ata_link_init(ap, link, 1); 5876 ap->slave_link = link; 5877 return 0; 5878 } 5879 5880 static void ata_host_stop(struct device *gendev, void *res) 5881 { 5882 struct ata_host *host = dev_get_drvdata(gendev); 5883 int i; 5884 5885 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 5886 5887 for (i = 0; i < host->n_ports; i++) { 5888 struct ata_port *ap = host->ports[i]; 5889 5890 if (ap->ops->port_stop) 5891 ap->ops->port_stop(ap); 5892 } 5893 5894 if (host->ops->host_stop) 5895 host->ops->host_stop(host); 5896 } 5897 5898 /** 5899 * ata_finalize_port_ops - finalize ata_port_operations 5900 * @ops: ata_port_operations to finalize 5901 * 5902 * An ata_port_operations can inherit from another ops and that 5903 * ops can again inherit from another. This can go on as many 5904 * times as necessary as long as there is no loop in the 5905 * inheritance chain. 5906 * 5907 * Ops tables are finalized when the host is started. NULL or 5908 * unspecified entries are inherited from the closest ancestor 5909 * which has the method and the entry is populated with it. 5910 * After finalization, the ops table directly points to all the 5911 * methods and ->inherits is no longer necessary and cleared. 5912 * 5913 * Using ATA_OP_NULL, inheriting ops can force a method to NULL. 5914 * 5915 * LOCKING: 5916 * None.
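 *
 * An illustrative sketch of an inheriting ops table (my_ops and
 * my_qc_issue are made-up names used only for exposition):
 *
 *	static struct ata_port_operations my_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.qc_issue	= my_qc_issue,
 *		.hardreset	= ATA_OP_NULL,
 *	};
 *
 * After finalization, every slot left unset in my_ops points at the
 * closest ancestor's method, .hardreset is forced to NULL and
 * .inherits is cleared.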
5917 */ 5918 static void ata_finalize_port_ops(struct ata_port_operations *ops) 5919 { 5920 static DEFINE_SPINLOCK(lock); 5921 const struct ata_port_operations *cur; 5922 void **begin = (void **)ops; 5923 void **end = (void **)&ops->inherits; 5924 void **pp; 5925 5926 if (!ops || !ops->inherits) 5927 return; 5928 5929 spin_lock(&lock); 5930 5931 for (cur = ops->inherits; cur; cur = cur->inherits) { 5932 void **inherit = (void **)cur; 5933 5934 for (pp = begin; pp < end; pp++, inherit++) 5935 if (!*pp) 5936 *pp = *inherit; 5937 } 5938 5939 for (pp = begin; pp < end; pp++) 5940 if (IS_ERR(*pp)) 5941 *pp = NULL; 5942 5943 ops->inherits = NULL; 5944 5945 spin_unlock(&lock); 5946 } 5947 5948 /** 5949 * ata_host_start - start and freeze ports of an ATA host 5950 * @host: ATA host to start ports for 5951 * 5952 * Start and then freeze ports of @host. Started status is 5953 * recorded in host->flags, so this function can be called 5954 * multiple times. Ports are guaranteed to get started only 5955 * once. If host->ops isn't initialized yet, it's set to the 5956 * first non-dummy port ops. 5957 * 5958 * LOCKING: 5959 * Inherited from calling layer (may sleep). 5960 * 5961 * RETURNS: 5962 * 0 if all ports are started successfully, -errno otherwise. 5963 */ 5964 int ata_host_start(struct ata_host *host) 5965 { 5966 int have_stop = 0; 5967 void *start_dr = NULL; 5968 int i, rc; 5969 5970 if (host->flags & ATA_HOST_STARTED) 5971 return 0; 5972 5973 ata_finalize_port_ops(host->ops); 5974 5975 for (i = 0; i < host->n_ports; i++) { 5976 struct ata_port *ap = host->ports[i]; 5977 5978 ata_finalize_port_ops(ap->ops); 5979 5980 if (!host->ops && !ata_port_is_dummy(ap)) 5981 host->ops = ap->ops; 5982 5983 if (ap->ops->port_stop) 5984 have_stop = 1; 5985 } 5986 5987 if (host->ops->host_stop) 5988 have_stop = 1; 5989 5990 if (have_stop) { 5991 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 5992 if (!start_dr) 5993 return -ENOMEM; 5994 } 5995 5996 for (i = 0; i < host->n_ports; i++) { 5997 struct ata_port *ap = host->ports[i]; 5998 5999 if (ap->ops->port_start) { 6000 rc = ap->ops->port_start(ap); 6001 if (rc) { 6002 if (rc != -ENODEV) 6003 dev_err(host->dev, 6004 "failed to start port %d (errno=%d)\n", 6005 i, rc); 6006 goto err_out; 6007 } 6008 } 6009 ata_eh_freeze_port(ap); 6010 } 6011 6012 if (start_dr) 6013 devres_add(host->dev, start_dr); 6014 host->flags |= ATA_HOST_STARTED; 6015 return 0; 6016 6017 err_out: 6018 while (--i >= 0) { 6019 struct ata_port *ap = host->ports[i]; 6020 6021 if (ap->ops->port_stop) 6022 ap->ops->port_stop(ap); 6023 } 6024 devres_free(start_dr); 6025 return rc; 6026 } 6027 6028 /** 6029 * ata_host_init - Initialize a host struct for sas (ipr, libsas) 6030 * @host: host to initialize 6031 * @dev: device host is attached to 6032 * @ops: port_ops 6033 * 6034 */ 6035 void ata_host_init(struct ata_host *host, struct device *dev, 6036 struct ata_port_operations *ops) 6037 { 6038 spin_lock_init(&host->lock); 6039 mutex_init(&host->eh_mutex); 6040 host->n_tags = ATA_MAX_QUEUE - 1; 6041 host->dev = dev; 6042 host->ops = ops; 6043 } 6044 6045 void __ata_port_probe(struct ata_port *ap) 6046 { 6047 struct ata_eh_info *ehi = &ap->link.eh_info; 6048 unsigned long flags; 6049 6050 /* kick EH for boot probing */ 6051 spin_lock_irqsave(ap->lock, flags); 6052 6053 ehi->probe_mask |= ATA_ALL_DEVICES; 6054 ehi->action |= ATA_EH_RESET; 6055 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6056 6057 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6058 ap->pflags |= ATA_PFLAG_LOADING; 6059
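/* hand off to EH; the actual reset and probe run in EH context */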
ata_port_schedule_eh(ap); 6060 6061 spin_unlock_irqrestore(ap->lock, flags); 6062 } 6063 6064 int ata_port_probe(struct ata_port *ap) 6065 { 6066 int rc = 0; 6067 6068 if (ap->ops->error_handler) { 6069 __ata_port_probe(ap); 6070 ata_port_wait_eh(ap); 6071 } else { 6072 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6073 rc = ata_bus_probe(ap); 6074 DPRINTK("ata%u: bus probe end\n", ap->print_id); 6075 } 6076 return rc; 6077 } 6078 6079 6080 static void async_port_probe(void *data, async_cookie_t cookie) 6081 { 6082 struct ata_port *ap = data; 6083 6084 /* 6085 * If we're not allowed to scan this host in parallel, 6086 * we need to wait until all previous scans have completed 6087 * before going further. 6088 * Jeff Garzik says this is only within a controller, so we 6089 * don't need to wait for port 0, only for later ports. 6090 */ 6091 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 6092 async_synchronize_cookie(cookie); 6093 6094 (void)ata_port_probe(ap); 6095 6096 /* in order to keep device order, we need to synchronize at this point */ 6097 async_synchronize_cookie(cookie); 6098 6099 ata_scsi_scan_host(ap, 1); 6100 } 6101 6102 /** 6103 * ata_host_register - register initialized ATA host 6104 * @host: ATA host to register 6105 * @sht: template for SCSI host 6106 * 6107 * Register initialized ATA host. @host is allocated using 6108 * ata_host_alloc() and fully initialized by LLD. This function 6109 * starts ports, registers @host with ATA and SCSI layers and 6110 * probe registered devices. 6111 * 6112 * LOCKING: 6113 * Inherited from calling layer (may sleep). 6114 * 6115 * RETURNS: 6116 * 0 on success, -errno otherwise. 6117 */ 6118 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6119 { 6120 int i, rc; 6121 6122 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1); 6123 6124 /* host must have been started */ 6125 if (!(host->flags & ATA_HOST_STARTED)) { 6126 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6127 WARN_ON(1); 6128 return -EINVAL; 6129 } 6130 6131 /* Blow away unused ports. This happens when LLD can't 6132 * determine the exact number of ports to allocate at 6133 * allocation time. 6134 */ 6135 for (i = host->n_ports; host->ports[i]; i++) 6136 kfree(host->ports[i]); 6137 6138 /* give ports names and add SCSI hosts */ 6139 for (i = 0; i < host->n_ports; i++) { 6140 host->ports[i]->print_id = atomic_inc_return(&ata_print_id); 6141 host->ports[i]->local_port_no = i + 1; 6142 } 6143 6144 /* Create associated sysfs transport objects */ 6145 for (i = 0; i < host->n_ports; i++) { 6146 rc = ata_tport_add(host->dev,host->ports[i]); 6147 if (rc) { 6148 goto err_tadd; 6149 } 6150 } 6151 6152 rc = ata_scsi_add_hosts(host, sht); 6153 if (rc) 6154 goto err_tadd; 6155 6156 /* set cable, sata_spd_limit and report */ 6157 for (i = 0; i < host->n_ports; i++) { 6158 struct ata_port *ap = host->ports[i]; 6159 unsigned long xfer_mask; 6160 6161 /* set SATA cable type if still unset */ 6162 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6163 ap->cbl = ATA_CBL_SATA; 6164 6165 /* init sata_spd_limit to the current value */ 6166 sata_link_init_spd(&ap->link); 6167 if (ap->slave_link) 6168 sata_link_init_spd(ap->slave_link); 6169 6170 /* print per-port info to dmesg */ 6171 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6172 ap->udma_mask); 6173 6174 if (!ata_port_is_dummy(ap)) { 6175 ata_port_info(ap, "%cATA max %s %s\n", 6176 (ap->flags & ATA_FLAG_SATA) ? 
'S' : 'P', 6177 ata_mode_string(xfer_mask), 6178 ap->link.eh_info.desc); 6179 ata_ehi_clear_desc(&ap->link.eh_info); 6180 } else 6181 ata_port_info(ap, "DUMMY\n"); 6182 } 6183 6184 /* perform each probe asynchronously */ 6185 for (i = 0; i < host->n_ports; i++) { 6186 struct ata_port *ap = host->ports[i]; 6187 async_schedule(async_port_probe, ap); 6188 } 6189 6190 return 0; 6191 6192 err_tadd: 6193 while (--i >= 0) { 6194 ata_tport_delete(host->ports[i]); 6195 } 6196 return rc; 6197 6198 } 6199 6200 /** 6201 * ata_host_activate - start host, request IRQ and register it 6202 * @host: target ATA host 6203 * @irq: IRQ to request 6204 * @irq_handler: irq_handler used when requesting IRQ 6205 * @irq_flags: irq_flags used when requesting IRQ 6206 * @sht: scsi_host_template to use when registering the host 6207 * 6208 * After allocating an ATA host and initializing it, most libata 6209 * LLDs perform three steps to activate the host - start host, 6210 * request IRQ and register it. This helper takes necessary 6211 * arguments and performs the three steps in one go. 6212 * 6213 * An invalid IRQ skips the IRQ registration and expects the host to 6214 * have set polling mode on the port. In this case, @irq_handler 6215 * should be NULL. 6216 * 6217 * LOCKING: 6218 * Inherited from calling layer (may sleep). 6219 * 6220 * RETURNS: 6221 * 0 on success, -errno otherwise. 6222 */ 6223 int ata_host_activate(struct ata_host *host, int irq, 6224 irq_handler_t irq_handler, unsigned long irq_flags, 6225 struct scsi_host_template *sht) 6226 { 6227 int i, rc; 6228 char *irq_desc; 6229 6230 rc = ata_host_start(host); 6231 if (rc) 6232 return rc; 6233 6234 /* Special case for polling mode */ 6235 if (!irq) { 6236 WARN_ON(irq_handler); 6237 return ata_host_register(host, sht); 6238 } 6239 6240 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]", 6241 dev_driver_string(host->dev), 6242 dev_name(host->dev)); 6243 if (!irq_desc) 6244 return -ENOMEM; 6245 6246 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6247 irq_desc, host); 6248 if (rc) 6249 return rc; 6250 6251 for (i = 0; i < host->n_ports; i++) 6252 ata_port_desc(host->ports[i], "irq %d", irq); 6253 6254 rc = ata_host_register(host, sht); 6255 /* if failed, just free the IRQ and leave ports alone */ 6256 if (rc) 6257 devm_free_irq(host->dev, irq, host); 6258 6259 return rc; 6260 } 6261 6262 /** 6263 * ata_port_detach - Detach ATA port in preparation of device removal 6264 * @ap: ATA port to be detached 6265 * 6266 * Detach all ATA devices and the associated SCSI devices of @ap; 6267 * then, remove the associated SCSI host. @ap is guaranteed to 6268 * be quiescent on return from this function. 6269 * 6270 * LOCKING: 6271 * Kernel thread context (may sleep).
6272 */ 6273 static void ata_port_detach(struct ata_port *ap) 6274 { 6275 unsigned long flags; 6276 struct ata_link *link; 6277 struct ata_device *dev; 6278 6279 if (!ap->ops->error_handler) 6280 goto skip_eh; 6281 6282 /* tell EH we're leaving & flush EH */ 6283 spin_lock_irqsave(ap->lock, flags); 6284 ap->pflags |= ATA_PFLAG_UNLOADING; 6285 ata_port_schedule_eh(ap); 6286 spin_unlock_irqrestore(ap->lock, flags); 6287 6288 /* wait till EH commits suicide */ 6289 ata_port_wait_eh(ap); 6290 6291 /* it better be dead now */ 6292 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6293 6294 cancel_delayed_work_sync(&ap->hotplug_task); 6295 6296 skip_eh: 6297 /* clean up zpodd on port removal */ 6298 ata_for_each_link(link, ap, HOST_FIRST) { 6299 ata_for_each_dev(dev, link, ALL) { 6300 if (zpodd_dev_enabled(dev)) 6301 zpodd_exit(dev); 6302 } 6303 } 6304 if (ap->pmp_link) { 6305 int i; 6306 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6307 ata_tlink_delete(&ap->pmp_link[i]); 6308 } 6309 /* remove the associated SCSI host */ 6310 scsi_remove_host(ap->scsi_host); 6311 ata_tport_delete(ap); 6312 } 6313 6314 /** 6315 * ata_host_detach - Detach all ports of an ATA host 6316 * @host: Host to detach 6317 * 6318 * Detach all ports of @host. 6319 * 6320 * LOCKING: 6321 * Kernel thread context (may sleep). 6322 */ 6323 void ata_host_detach(struct ata_host *host) 6324 { 6325 int i; 6326 6327 for (i = 0; i < host->n_ports; i++) 6328 ata_port_detach(host->ports[i]); 6329 6330 /* the host is dead now, dissociate ACPI */ 6331 ata_acpi_dissociate(host); 6332 } 6333 6334 #ifdef CONFIG_PCI 6335 6336 /** 6337 * ata_pci_remove_one - PCI layer callback for device removal 6338 * @pdev: PCI device that was removed 6339 * 6340 * PCI layer indicates to libata via this hook that hot-unplug or 6341 * module unload event has occurred. Detach all ports. Resource 6342 * release is handled via devres. 6343 * 6344 * LOCKING: 6345 * Inherited from PCI layer (may sleep). 6346 */ 6347 void ata_pci_remove_one(struct pci_dev *pdev) 6348 { 6349 struct ata_host *host = pci_get_drvdata(pdev); 6350 6351 ata_host_detach(host); 6352 } 6353 6354 /* move to PCI subsystem */ 6355 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6356 { 6357 unsigned long tmp = 0; 6358 6359 switch (bits->width) { 6360 case 1: { 6361 u8 tmp8 = 0; 6362 pci_read_config_byte(pdev, bits->reg, &tmp8); 6363 tmp = tmp8; 6364 break; 6365 } 6366 case 2: { 6367 u16 tmp16 = 0; 6368 pci_read_config_word(pdev, bits->reg, &tmp16); 6369 tmp = tmp16; 6370 break; 6371 } 6372 case 4: { 6373 u32 tmp32 = 0; 6374 pci_read_config_dword(pdev, bits->reg, &tmp32); 6375 tmp = tmp32; 6376 break; 6377 } 6378 6379 default: 6380 return -EINVAL; 6381 } 6382 6383 tmp &= bits->mask; 6384 6385 return (tmp == bits->val) ? 
1 : 0; 6386 } 6387 6388 #ifdef CONFIG_PM 6389 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6390 { 6391 pci_save_state(pdev); 6392 pci_disable_device(pdev); 6393 6394 if (mesg.event & PM_EVENT_SLEEP) 6395 pci_set_power_state(pdev, PCI_D3hot); 6396 } 6397 6398 int ata_pci_device_do_resume(struct pci_dev *pdev) 6399 { 6400 int rc; 6401 6402 pci_set_power_state(pdev, PCI_D0); 6403 pci_restore_state(pdev); 6404 6405 rc = pcim_enable_device(pdev); 6406 if (rc) { 6407 dev_err(&pdev->dev, 6408 "failed to enable device after resume (%d)\n", rc); 6409 return rc; 6410 } 6411 6412 pci_set_master(pdev); 6413 return 0; 6414 } 6415 6416 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6417 { 6418 struct ata_host *host = pci_get_drvdata(pdev); 6419 int rc = 0; 6420 6421 rc = ata_host_suspend(host, mesg); 6422 if (rc) 6423 return rc; 6424 6425 ata_pci_device_do_suspend(pdev, mesg); 6426 6427 return 0; 6428 } 6429 6430 int ata_pci_device_resume(struct pci_dev *pdev) 6431 { 6432 struct ata_host *host = pci_get_drvdata(pdev); 6433 int rc; 6434 6435 rc = ata_pci_device_do_resume(pdev); 6436 if (rc == 0) 6437 ata_host_resume(host); 6438 return rc; 6439 } 6440 #endif /* CONFIG_PM */ 6441 6442 #endif /* CONFIG_PCI */ 6443 6444 /** 6445 * ata_platform_remove_one - Platform layer callback for device removal 6446 * @pdev: Platform device that was removed 6447 * 6448 * Platform layer indicates to libata via this hook that hot-unplug or 6449 * module unload event has occurred. Detach all ports. Resource 6450 * release is handled via devres. 6451 * 6452 * LOCKING: 6453 * Inherited from platform layer (may sleep). 6454 */ 6455 int ata_platform_remove_one(struct platform_device *pdev) 6456 { 6457 struct ata_host *host = platform_get_drvdata(pdev); 6458 6459 ata_host_detach(host); 6460 6461 return 0; 6462 } 6463 6464 static int __init ata_parse_force_one(char **cur, 6465 struct ata_force_ent *force_ent, 6466 const char **reason) 6467 { 6468 static const struct ata_force_param force_tbl[] __initconst = { 6469 { "40c", .cbl = ATA_CBL_PATA40 }, 6470 { "80c", .cbl = ATA_CBL_PATA80 }, 6471 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 6472 { "unk", .cbl = ATA_CBL_PATA_UNK }, 6473 { "ign", .cbl = ATA_CBL_PATA_IGN }, 6474 { "sata", .cbl = ATA_CBL_SATA }, 6475 { "1.5Gbps", .spd_limit = 1 }, 6476 { "3.0Gbps", .spd_limit = 2 }, 6477 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6478 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6479 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM }, 6480 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM }, 6481 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, 6482 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6483 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6484 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6485 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 6486 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 6487 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 6488 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 6489 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 6490 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 6491 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 6492 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 6493 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 6494 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6495 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6496 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6497 { "udma1", .xfer_mask = 1 << 
(ATA_SHIFT_UDMA + 1) }, 6498 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6499 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6500 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6501 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6502 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6503 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6504 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6505 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6506 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6507 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6508 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6509 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6510 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6511 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6512 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6513 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6514 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6515 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 6516 { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 6517 { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 6518 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6519 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, 6520 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, 6521 { "disable", .horkage_on = ATA_HORKAGE_DISABLE }, 6522 }; 6523 char *start = *cur, *p = *cur; 6524 char *id, *val, *endp; 6525 const struct ata_force_param *match_fp = NULL; 6526 int nr_matches = 0, i; 6527 6528 /* find where this param ends and update *cur */ 6529 while (*p != '\0' && *p != ',') 6530 p++; 6531 6532 if (*p == '\0') 6533 *cur = p; 6534 else 6535 *cur = p + 1; 6536 6537 *p = '\0'; 6538 6539 /* parse */ 6540 p = strchr(start, ':'); 6541 if (!p) { 6542 val = strstrip(start); 6543 goto parse_val; 6544 } 6545 *p = '\0'; 6546 6547 id = strstrip(start); 6548 val = strstrip(p + 1); 6549 6550 /* parse id */ 6551 p = strchr(id, '.'); 6552 if (p) { 6553 *p++ = '\0'; 6554 force_ent->device = simple_strtoul(p, &endp, 10); 6555 if (p == endp || *endp != '\0') { 6556 *reason = "invalid device"; 6557 return -EINVAL; 6558 } 6559 } 6560 6561 force_ent->port = simple_strtoul(id, &endp, 10); 6562 if (p == endp || *endp != '\0') { 6563 *reason = "invalid port/link"; 6564 return -EINVAL; 6565 } 6566 6567 parse_val: 6568 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 6569 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 6570 const struct ata_force_param *fp = &force_tbl[i]; 6571 6572 if (strncasecmp(val, fp->name, strlen(val))) 6573 continue; 6574 6575 nr_matches++; 6576 match_fp = fp; 6577 6578 if (strcasecmp(val, fp->name) == 0) { 6579 nr_matches = 1; 6580 break; 6581 } 6582 } 6583 6584 if (!nr_matches) { 6585 *reason = "unknown value"; 6586 return -EINVAL; 6587 } 6588 if (nr_matches > 1) { 6589 *reason = "ambiguous value"; 6590 return -EINVAL; 6591 } 6592 6593 force_ent->param = *match_fp; 6594 6595 return 0; 6596 } 6597 6598 static void __init ata_parse_force_param(void) 6599 { 6600 int idx = 0, size = 1; 6601 int last_port = -1, last_device = -1; 6602 char *p, *cur, *next; 6603 6604 /* calculate maximum number of params and allocate force_tbl */ 6605 for (p = ata_force_param_buf; *p; p++) 6606 if (*p == ',') 6607 size++; 6608 6609 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); 6610 if (!ata_force_tbl) { 6611 printk(KERN_WARNING "ata: failed to extend force table, " 6612 "libata.force ignored\n"); 6613
return; 6614 } 6615 6616 /* parse and populate the table */ 6617 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 6618 const char *reason = ""; 6619 struct ata_force_ent te = { .port = -1, .device = -1 }; 6620 6621 next = cur; 6622 if (ata_parse_force_one(&next, &te, &reason)) { 6623 printk(KERN_WARNING "ata: failed to parse force " 6624 "parameter \"%s\" (%s)\n", 6625 cur, reason); 6626 continue; 6627 } 6628 6629 if (te.port == -1) { 6630 te.port = last_port; 6631 te.device = last_device; 6632 } 6633 6634 ata_force_tbl[idx++] = te; 6635 6636 last_port = te.port; 6637 last_device = te.device; 6638 } 6639 6640 ata_force_tbl_size = idx; 6641 } 6642 6643 static int __init ata_init(void) 6644 { 6645 int rc; 6646 6647 ata_parse_force_param(); 6648 6649 rc = ata_sff_init(); 6650 if (rc) { 6651 kfree(ata_force_tbl); 6652 return rc; 6653 } 6654 6655 libata_transport_init(); 6656 ata_scsi_transport_template = ata_attach_transport(); 6657 if (!ata_scsi_transport_template) { 6658 ata_sff_exit(); 6659 rc = -ENOMEM; 6660 goto err_out; 6661 } 6662 6663 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6664 return 0; 6665 6666 err_out: 6667 return rc; 6668 } 6669 6670 static void __exit ata_exit(void) 6671 { 6672 ata_release_transport(ata_scsi_transport_template); 6673 libata_transport_exit(); 6674 ata_sff_exit(); 6675 kfree(ata_force_tbl); 6676 } 6677 6678 subsys_initcall(ata_init); 6679 module_exit(ata_exit); 6680 6681 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); 6682 6683 int ata_ratelimit(void) 6684 { 6685 return __ratelimit(&ratelimit); 6686 } 6687 6688 /** 6689 * ata_msleep - ATA EH owner aware msleep 6690 * @ap: ATA port to attribute the sleep to 6691 * @msecs: duration to sleep in milliseconds 6692 * 6693 * Sleeps @msecs. If the current task is owner of @ap's EH, the 6694 * ownership is released before going to sleep and reacquired 6695 * after the sleep is complete. IOW, other ports sharing the 6696 * @ap->host will be allowed to own the EH while this task is 6697 * sleeping. 6698 * 6699 * LOCKING: 6700 * Might sleep. 6701 */ 6702 void ata_msleep(struct ata_port *ap, unsigned int msecs) 6703 { 6704 bool owns_eh = ap && ap->host->eh_owner == current; 6705 6706 if (owns_eh) 6707 ata_eh_release(ap); 6708 6709 if (msecs < 20) { 6710 unsigned long usecs = msecs * USEC_PER_MSEC; 6711 usleep_range(usecs, usecs + 50); 6712 } else { 6713 msleep(msecs); 6714 } 6715 6716 if (owns_eh) 6717 ata_eh_acquire(ap); 6718 } 6719 6720 /** 6721 * ata_wait_register - wait until register value changes 6722 * @ap: ATA port to wait register for, can be NULL 6723 * @reg: IO-mapped register 6724 * @mask: Mask to apply to read register value 6725 * @val: Wait condition 6726 * @interval: polling interval in milliseconds 6727 * @timeout: timeout in milliseconds 6728 * 6729 * Waiting for some bits of register to change is a common 6730 * operation for ATA controllers. This function reads 32bit LE 6731 * IO-mapped register @reg and tests for the following condition. 6732 * 6733 * (*@reg & mask) != val 6734 * 6735 * If the condition is met, it returns; otherwise, the process is 6736 * repeated after @interval_msec until timeout. 6737 * 6738 * LOCKING: 6739 * Kernel thread context (may sleep) 6740 * 6741 * RETURNS: 6742 * The final register value. 
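 *
 * A typical call, polling until a busy bit clears (PORT_STATUS and
 * PORT_BUSY are illustrative names, not defined by libata):
 *
 *	status = ata_wait_register(ap, mmio + PORT_STATUS,
 *				   PORT_BUSY, PORT_BUSY, 1, 500);
 *	if (status & PORT_BUSY)
 *		(the wait timed out with the bit still set)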
6743 */ 6744 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, 6745 unsigned long interval, unsigned long timeout) 6746 { 6747 unsigned long deadline; 6748 u32 tmp; 6749 6750 tmp = ioread32(reg); 6751 6752 /* Calculate timeout _after_ the first read to make sure 6753 * preceding writes reach the controller before starting to 6754 * eat away the timeout. 6755 */ 6756 deadline = ata_deadline(jiffies, timeout); 6757 6758 while ((tmp & mask) == val && time_before(jiffies, deadline)) { 6759 ata_msleep(ap, interval); 6760 tmp = ioread32(reg); 6761 } 6762 6763 return tmp; 6764 } 6765 6766 /** 6767 * sata_lpm_ignore_phy_events - test if PHY event should be ignored 6768 * @link: Link receiving the event 6769 * 6770 * Test whether the received PHY event has to be ignored or not. 6771 * 6772 * LOCKING: 6773 * None. 6774 * 6775 * RETURNS: 6776 * True if the event has to be ignored. 6777 */ 6778 bool sata_lpm_ignore_phy_events(struct ata_link *link) 6779 { 6780 unsigned long lpm_timeout = link->last_lpm_change + 6781 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY); 6782 6783 /* if LPM is enabled, PHYRDY doesn't mean anything */ 6784 if (link->lpm_policy > ATA_LPM_MAX_POWER) 6785 return true; 6786 6787 /* ignore the first PHY event after the LPM policy changed 6788 * as it might be spurious 6789 */ 6790 if ((link->flags & ATA_LFLAG_CHANGED) && 6791 time_before(jiffies, lpm_timeout)) 6792 return true; 6793 6794 return false; 6795 } 6796 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events); 6797 6798 /* 6799 * Dummy port_ops 6800 */ 6801 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 6802 { 6803 return AC_ERR_SYSTEM; 6804 } 6805 6806 static void ata_dummy_error_handler(struct ata_port *ap) 6807 { 6808 /* truly dummy */ 6809 } 6810 6811 struct ata_port_operations ata_dummy_port_ops = { 6812 .qc_prep = ata_noop_qc_prep, 6813 .qc_issue = ata_dummy_qc_issue, 6814 .error_handler = ata_dummy_error_handler, 6815 .sched_eh = ata_std_sched_eh, 6816 .end_eh = ata_std_end_eh, 6817 }; 6818 6819 const struct ata_port_info ata_dummy_port_info = { 6820 .port_ops = &ata_dummy_port_ops, 6821 }; 6822 6823 /* 6824 * Utility print functions 6825 */ 6826 void ata_port_printk(const struct ata_port *ap, const char *level, 6827 const char *fmt, ...) 6828 { 6829 struct va_format vaf; 6830 va_list args; 6831 6832 va_start(args, fmt); 6833 6834 vaf.fmt = fmt; 6835 vaf.va = &args; 6836 6837 printk("%sata%u: %pV", level, ap->print_id, &vaf); 6838 6839 va_end(args); 6840 } 6841 EXPORT_SYMBOL(ata_port_printk); 6842 6843 void ata_link_printk(const struct ata_link *link, const char *level, 6844 const char *fmt, ...) 6845 { 6846 struct va_format vaf; 6847 va_list args; 6848 6849 va_start(args, fmt); 6850 6851 vaf.fmt = fmt; 6852 vaf.va = &args; 6853 6854 if (sata_pmp_attached(link->ap) || link->ap->slave_link) 6855 printk("%sata%u.%02u: %pV", 6856 level, link->ap->print_id, link->pmp, &vaf); 6857 else 6858 printk("%sata%u: %pV", 6859 level, link->ap->print_id, &vaf); 6860 6861 va_end(args); 6862 } 6863 EXPORT_SYMBOL(ata_link_printk); 6864 6865 void ata_dev_printk(const struct ata_device *dev, const char *level, 6866 const char *fmt, ...)
6867 { 6868 struct va_format vaf; 6869 va_list args; 6870 6871 va_start(args, fmt); 6872 6873 vaf.fmt = fmt; 6874 vaf.va = &args; 6875 6876 printk("%sata%u.%02u: %pV", 6877 level, dev->link->ap->print_id, dev->link->pmp + dev->devno, 6878 &vaf); 6879 6880 va_end(args); 6881 } 6882 EXPORT_SYMBOL(ata_dev_printk); 6883 6884 void ata_print_version(const struct device *dev, const char *version) 6885 { 6886 dev_printk(KERN_DEBUG, dev, "version %s\n", version); 6887 } 6888 EXPORT_SYMBOL(ata_print_version); 6889 6890 /* 6891 * libata is essentially a library of internal helper functions for 6892 * low-level ATA host controller drivers. As such, the API/ABI is 6893 * likely to change as new drivers are added and updated. 6894 * Do not depend on ABI/API stability. 6895 */ 6896 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 6897 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 6898 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 6899 EXPORT_SYMBOL_GPL(ata_base_port_ops); 6900 EXPORT_SYMBOL_GPL(sata_port_ops); 6901 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 6902 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 6903 EXPORT_SYMBOL_GPL(ata_link_next); 6904 EXPORT_SYMBOL_GPL(ata_dev_next); 6905 EXPORT_SYMBOL_GPL(ata_std_bios_param); 6906 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); 6907 EXPORT_SYMBOL_GPL(ata_host_init); 6908 EXPORT_SYMBOL_GPL(ata_host_alloc); 6909 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 6910 EXPORT_SYMBOL_GPL(ata_slave_link_init); 6911 EXPORT_SYMBOL_GPL(ata_host_start); 6912 EXPORT_SYMBOL_GPL(ata_host_register); 6913 EXPORT_SYMBOL_GPL(ata_host_activate); 6914 EXPORT_SYMBOL_GPL(ata_host_detach); 6915 EXPORT_SYMBOL_GPL(ata_sg_init); 6916 EXPORT_SYMBOL_GPL(ata_qc_complete); 6917 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 6918 EXPORT_SYMBOL_GPL(atapi_cmd_type); 6919 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 6920 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 6921 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 6922 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 6923 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 6924 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 6925 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 6926 EXPORT_SYMBOL_GPL(ata_mode_string); 6927 EXPORT_SYMBOL_GPL(ata_id_xfermask); 6928 EXPORT_SYMBOL_GPL(ata_do_set_mode); 6929 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 6930 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 6931 EXPORT_SYMBOL_GPL(ata_dev_disable); 6932 EXPORT_SYMBOL_GPL(sata_set_spd); 6933 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 6934 EXPORT_SYMBOL_GPL(sata_link_debounce); 6935 EXPORT_SYMBOL_GPL(sata_link_resume); 6936 EXPORT_SYMBOL_GPL(sata_link_scr_lpm); 6937 EXPORT_SYMBOL_GPL(ata_std_prereset); 6938 EXPORT_SYMBOL_GPL(sata_link_hardreset); 6939 EXPORT_SYMBOL_GPL(sata_std_hardreset); 6940 EXPORT_SYMBOL_GPL(ata_std_postreset); 6941 EXPORT_SYMBOL_GPL(ata_dev_classify); 6942 EXPORT_SYMBOL_GPL(ata_dev_pair); 6943 EXPORT_SYMBOL_GPL(ata_ratelimit); 6944 EXPORT_SYMBOL_GPL(ata_msleep); 6945 EXPORT_SYMBOL_GPL(ata_wait_register); 6946 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6947 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 6948 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 6949 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 6950 EXPORT_SYMBOL_GPL(__ata_change_queue_depth); 6951 EXPORT_SYMBOL_GPL(sata_scr_valid); 6952 EXPORT_SYMBOL_GPL(sata_scr_read); 6953 EXPORT_SYMBOL_GPL(sata_scr_write); 6954 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 6955 EXPORT_SYMBOL_GPL(ata_link_online); 6956 EXPORT_SYMBOL_GPL(ata_link_offline); 6957 #ifdef CONFIG_PM 6958 EXPORT_SYMBOL_GPL(ata_host_suspend); 6959 EXPORT_SYMBOL_GPL(ata_host_resume); 6960 #endif /* CONFIG_PM */ 6961 
EXPORT_SYMBOL_GPL(ata_id_string); 6962 EXPORT_SYMBOL_GPL(ata_id_c_string); 6963 EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 6964 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6965 6966 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6967 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 6968 EXPORT_SYMBOL_GPL(ata_timing_compute); 6969 EXPORT_SYMBOL_GPL(ata_timing_merge); 6970 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 6971 6972 #ifdef CONFIG_PCI 6973 EXPORT_SYMBOL_GPL(pci_test_config_bits); 6974 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 6975 #ifdef CONFIG_PM 6976 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 6977 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 6978 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 6979 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 6980 #endif /* CONFIG_PM */ 6981 #endif /* CONFIG_PCI */ 6982 6983 EXPORT_SYMBOL_GPL(ata_platform_remove_one); 6984 6985 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 6986 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 6987 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 6988 EXPORT_SYMBOL_GPL(ata_port_desc); 6989 #ifdef CONFIG_PCI 6990 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 6991 #endif /* CONFIG_PCI */ 6992 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 6993 EXPORT_SYMBOL_GPL(ata_link_abort); 6994 EXPORT_SYMBOL_GPL(ata_port_abort); 6995 EXPORT_SYMBOL_GPL(ata_port_freeze); 6996 EXPORT_SYMBOL_GPL(sata_async_notification); 6997 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 6998 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 6999 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 7000 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 7001 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); 7002 EXPORT_SYMBOL_GPL(ata_do_eh); 7003 EXPORT_SYMBOL_GPL(ata_std_error_handler); 7004 7005 EXPORT_SYMBOL_GPL(ata_cable_40wire); 7006 EXPORT_SYMBOL_GPL(ata_cable_80wire); 7007 EXPORT_SYMBOL_GPL(ata_cable_unknown); 7008 EXPORT_SYMBOL_GPL(ata_cable_ignore); 7009 EXPORT_SYMBOL_GPL(ata_cable_sata); 7010