/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Tejun Heo <tj@kernel.org>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/time.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <linux/glob.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/leds.h>
#include <linux/pm_runtime.h>
#include <linux/platform_device.h>

#define CREATE_TRACE_POINTS
#include <trace/events/libata.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
	.sched_eh		= ata_std_sched_eh,
	.end_eh			= ata_std_end_eh,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

atomic_t ata_print_id = ATOMIC_INIT(0);

struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

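/*
 * Illustrative note on the check above: it looks at the DET field
 * (bits 3:0) of the SATA SStatus register, where a value of 0x3 means
 * "device presence detected and Phy communication established".  For
 * example, an SStatus reading of 0x123 is treated as online, while 0x0
 * (no device) or 0x4 (Phy offline) is not.
 */
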
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}

/**
 *	ata_dev_phys_link - find physical link for a device
 *	@dev: ATA device to look up physical link for
 *
 *	Look up physical link which @dev is attached to.  Note that
 *	this is different from @dev->link only when @dev is on slave
 *	link.  For all other cases, it's the same as @dev->link.
 *
 *	LOCKING:
 *	Don't care.
 *
 *	RETURNS:
 *	Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}

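/*
 * Illustrative sketch: callers normally walk links and devices through the
 * ata_for_each_link()/ata_for_each_dev() wrappers declared in
 * <linux/libata.h> rather than calling the iterators above directly.  A
 * hypothetical helper that counts enabled devices on a port could look
 * like this:
 *
 *	static int count_enabled_devices(struct ata_port *ap)
 *	{
 *		struct ata_link *link;
 *		struct ata_device *dev;
 *		int count = 0;
 *
 *		ata_for_each_link(link, ap, EDGE)
 *			ata_for_each_dev(dev, link, ENABLED)
 *				count++;
 *		return count;
 *	}
 */
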
/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP.  If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_notice(link,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}

/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
			       fe->param.name);
		return;
	}
}

/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
			       fe->param.name);
	}
}

/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}

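/*
 * For example, atapi_cmd_type(GPCMD_READ_10) returns ATAPI_READ and
 * atapi_cmd_type(GPCMD_WRITE_AND_VERIFY_10) returns ATAPI_WRITE, while
 * ATA_16 and ATA_12 are classified as ATAPI_PASS_THRU only when the
 * atapi_passthru16 module parameter is left enabled; otherwise they fall
 * through to ATAPI_MISC like any unrecognized opcode.
 */
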
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

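/*
 * The ata_rw_cmds[] table above is indexed as base + fua + lba48 + write,
 * where base is 0 (PIO multi), 8 (PIO) or 16 (DMA).  For example, a DMA
 * write with ATA_TFLAG_LBA48 and ATA_TFLAG_FUA set selects entry
 * 16 + 4 + 2 + 1 = 23, i.e. ATA_CMD_WRITE_FUA_EXT, while the same request
 * without FUA selects entry 19, ATA_CMD_WRITE_EXT.  Zero entries mark
 * combinations with no corresponding command, which makes
 * ata_rwcmd_protocol() return -1.
 */
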
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(const struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_warn(dev,
				     "device reported invalid CHS sector 0\n");
			return U64_MAX;
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}

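/*
 * A short worked example of the CHS branch above: with a translation of
 * 16 heads and 63 sectors per track, a taskfile holding cyl 2, head 3,
 * sect 4 decodes to (2 * 16 + 3) * 63 + 4 - 1 = 2208.  Sector numbers are
 * 1-based on the wire, which is why 1 is subtracted to get the 0-based
 * block address.
 */
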
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = ATA_LBA;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}

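/*
 * Illustrative example of the packed mask helpers: packing pio_mask 0x1f
 * (PIO0-4), mwdma_mask 0x07 (MWDMA0-2) and udma_mask 0x3f (UDMA0-5) with
 * ata_pack_xfermask() yields a single xfer_mask whose highest set bit is
 * UDMA5, so ata_xfer_mask2mode() on that mask returns XFER_UDMA_5 and
 * ata_mode_string() below prints "UDMA/100".
 */
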
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 *	%ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, the Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 later dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 and described them as reserved for SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
		DPRINTK("found ZAC device by sig\n");
		return ATA_DEV_ZAC;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

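/*
 * Illustrative example: the model name occupies IDENTIFY words 27-46, two
 * characters per 16-bit word with the high byte first.  A hypothetical
 * caller could extract it into a plain buffer with:
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_string(id, model, ATA_ID_PROD, ATA_ID_PROD_LEN);
 *	model[ATA_ID_PROD_LEN] = '\0';
 *
 * ata_id_c_string() below does the same and additionally trims trailing
 * spaces and NUL-terminates the result.
 */
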
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to read native max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}

/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol = ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_warn(dev,
			     "failed to set max address (err_mask=0x%x)\n",
			     err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media.  The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_warn(dev,
				     "HPA support seems broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_info(dev,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_warn(dev,
				"native sectors (%llu) is smaller than sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_warn(dev,
			     "device aborted resize (%llu -> %llu), skipping HPA handling\n",
			     (unsigned long long)sectors,
			     (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_err(dev,
			    "failed to re-read IDENTIFY data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_info(dev,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x\n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x\n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);

	/* some SATA bridges need us to indicate data xfer direction */
	if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) &&
	    dma_dir == DMA_FROM_DEVICE)
		qc->tf.feature |= ATAPI_DMADIR;

	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	if (ap->ops->error_handler)
		ata_eh_release(ap);

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	if (ap->ops->error_handler)
		ata_eh_acquire(ap);

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
					     command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

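/*
 * Illustrative sketch of a typical caller: issuing a PIO data-in IDENTIFY
 * through ata_exec_internal() (ata_do_dev_read_id() below is essentially
 * this).  The taskfile carries the command, the buffer receives the data
 * and the returned AC_ERR_* mask reports failure:
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *				     id, ATA_ID_WORDS * sizeof(u16), 0);
 */
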
/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset.  IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied.  See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY.  Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy - Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 *	ata_do_dev_read_id - default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data. For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	bool is_semb = class == ATA_DEV_SEMB;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_dbg(dev, "%s: ENTER\n", __func__);

retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_SEMB:
		class = ATA_DEV_ATA;	/* some hard drives report SEMB sig */
	case ATA_DEV_ATA:
	case ATA_DEV_ZAC:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	if (ap->ops->read_id)
		err_mask = ap->ops->read_id(dev, &tf, id);
	else
		err_mask = ata_do_dev_read_id(dev, &tf, id);

	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_dbg(dev, "NODEV after polling detection\n");
			return -ENOENT;
		}

		if (is_semb) {
			ata_dev_info(dev,
				     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
			/* SEMB is not supported yet */
			*p_class = ATA_DEV_SEMB_UNSUP;
			return 0;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_dbg(dev,
				    "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
		ata_dev_dbg(dev, "dumping IDENTIFY data, "
			    "class=%d may_fallback=%d tried_spinup=%d\n",
			    class, may_fallback, tried_spinup);
		print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
			       16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
		if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
		    ata_id_is_ata(id)) {
			ata_dev_dbg(dev,
				    "host indicates ignore ATA devices, ignored\n");
			return -ENOENT;
		}
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) &&
	    (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
2013 */ 2014 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 2015 err_mask = ata_dev_init_params(dev, id[3], id[6]); 2016 if (err_mask) { 2017 rc = -EIO; 2018 reason = "INIT_DEV_PARAMS failed"; 2019 goto err_out; 2020 } 2021 2022 /* current CHS translation info (id[53-58]) might be 2023 * changed. reread the identify device info. 2024 */ 2025 flags &= ~ATA_READID_POSTRESET; 2026 goto retry; 2027 } 2028 } 2029 2030 *p_class = class; 2031 2032 return 0; 2033 2034 err_out: 2035 if (ata_msg_warn(ap)) 2036 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", 2037 reason, err_mask); 2038 return rc; 2039 } 2040 2041 static int ata_do_link_spd_horkage(struct ata_device *dev) 2042 { 2043 struct ata_link *plink = ata_dev_phys_link(dev); 2044 u32 target, target_limit; 2045 2046 if (!sata_scr_valid(plink)) 2047 return 0; 2048 2049 if (dev->horkage & ATA_HORKAGE_1_5_GBPS) 2050 target = 1; 2051 else 2052 return 0; 2053 2054 target_limit = (1 << target) - 1; 2055 2056 /* if already on stricter limit, no need to push further */ 2057 if (plink->sata_spd_limit <= target_limit) 2058 return 0; 2059 2060 plink->sata_spd_limit = target_limit; 2061 2062 /* Request another EH round by returning -EAGAIN if link is 2063 * going faster than the target speed. Forward progress is 2064 * guaranteed by setting sata_spd_limit to target_limit above. 2065 */ 2066 if (plink->sata_spd > target) { 2067 ata_dev_info(dev, "applying link speed limit horkage to %s\n", 2068 sata_spd_string(target)); 2069 return -EAGAIN; 2070 } 2071 return 0; 2072 } 2073 2074 static inline u8 ata_dev_knobble(struct ata_device *dev) 2075 { 2076 struct ata_port *ap = dev->link->ap; 2077 2078 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2079 return 0; 2080 2081 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2082 } 2083 2084 static void ata_dev_config_ncq_send_recv(struct ata_device *dev) 2085 { 2086 struct ata_port *ap = dev->link->ap; 2087 unsigned int err_mask; 2088 int log_index = ATA_LOG_NCQ_SEND_RECV * 2; 2089 u16 log_pages; 2090 2091 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY, 2092 0, ap->sector_buf, 1); 2093 if (err_mask) { 2094 ata_dev_dbg(dev, 2095 "failed to get Log Directory Emask 0x%x\n", 2096 err_mask); 2097 return; 2098 } 2099 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]); 2100 if (!log_pages) { 2101 ata_dev_warn(dev, 2102 "NCQ Send/Recv Log not supported\n"); 2103 return; 2104 } 2105 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_SEND_RECV, 2106 0, ap->sector_buf, 1); 2107 if (err_mask) { 2108 ata_dev_dbg(dev, 2109 "failed to get NCQ Send/Recv Log Emask 0x%x\n", 2110 err_mask); 2111 } else { 2112 u8 *cmds = dev->ncq_send_recv_cmds; 2113 2114 dev->flags |= ATA_DFLAG_NCQ_SEND_RECV; 2115 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE); 2116 2117 if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) { 2118 ata_dev_dbg(dev, "disabling queued TRIM support\n"); 2119 cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &= 2120 ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM; 2121 } 2122 } 2123 } 2124 2125 static void ata_dev_config_ncq_non_data(struct ata_device *dev) 2126 { 2127 struct ata_port *ap = dev->link->ap; 2128 unsigned int err_mask; 2129 int log_index = ATA_LOG_NCQ_NON_DATA * 2; 2130 u16 log_pages; 2131 2132 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY, 2133 0, ap->sector_buf, 1); 2134 if (err_mask) { 2135 ata_dev_dbg(dev, 2136 "failed to get Log Directory Emask 0x%x\n", 2137 err_mask); 2138 return; 2139 } 2140 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]); 2141 if 
(!log_pages) { 2142 ata_dev_warn(dev, 2143 "NCQ Non-Data Log not supported\n"); 2144 return; 2145 } 2146 err_mask = ata_read_log_page(dev, ATA_LOG_NCQ_NON_DATA, 2147 0, ap->sector_buf, 1); 2148 if (err_mask) { 2149 ata_dev_dbg(dev, 2150 "failed to get NCQ Non-Data Log Emask 0x%x\n", 2151 err_mask); 2152 } else { 2153 u8 *cmds = dev->ncq_non_data_cmds; 2154 2155 memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_NON_DATA_SIZE); 2156 } 2157 } 2158 2159 static int ata_dev_config_ncq(struct ata_device *dev, 2160 char *desc, size_t desc_sz) 2161 { 2162 struct ata_port *ap = dev->link->ap; 2163 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2164 unsigned int err_mask; 2165 char *aa_desc = ""; 2166 2167 if (!ata_id_has_ncq(dev->id)) { 2168 desc[0] = '\0'; 2169 return 0; 2170 } 2171 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2172 snprintf(desc, desc_sz, "NCQ (not used)"); 2173 return 0; 2174 } 2175 if (ap->flags & ATA_FLAG_NCQ) { 2176 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2177 dev->flags |= ATA_DFLAG_NCQ; 2178 } 2179 2180 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && 2181 (ap->flags & ATA_FLAG_FPDMA_AA) && 2182 ata_id_has_fpdma_aa(dev->id)) { 2183 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, 2184 SATA_FPDMA_AA); 2185 if (err_mask) { 2186 ata_dev_err(dev, 2187 "failed to enable AA (error_mask=0x%x)\n", 2188 err_mask); 2189 if (err_mask != AC_ERR_DEV) { 2190 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; 2191 return -EIO; 2192 } 2193 } else 2194 aa_desc = ", AA"; 2195 } 2196 2197 if (hdepth >= ddepth) 2198 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); 2199 else 2200 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, 2201 ddepth, aa_desc); 2202 2203 if ((ap->flags & ATA_FLAG_FPDMA_AUX)) { 2204 if (ata_id_has_ncq_send_and_recv(dev->id)) 2205 ata_dev_config_ncq_send_recv(dev); 2206 if (ata_id_has_ncq_non_data(dev->id)) 2207 ata_dev_config_ncq_non_data(dev); 2208 } 2209 2210 return 0; 2211 } 2212 2213 static void ata_dev_config_sense_reporting(struct ata_device *dev) 2214 { 2215 unsigned int err_mask; 2216 2217 if (!ata_id_has_sense_reporting(dev->id)) 2218 return; 2219 2220 if (ata_id_sense_reporting_enabled(dev->id)) 2221 return; 2222 2223 err_mask = ata_dev_set_feature(dev, SETFEATURE_SENSE_DATA, 0x1); 2224 if (err_mask) { 2225 ata_dev_dbg(dev, 2226 "failed to enable Sense Data Reporting, Emask 0x%x\n", 2227 err_mask); 2228 } 2229 } 2230 2231 static void ata_dev_config_zac(struct ata_device *dev) 2232 { 2233 struct ata_port *ap = dev->link->ap; 2234 unsigned int err_mask; 2235 u8 *identify_buf = ap->sector_buf; 2236 int log_index = ATA_LOG_SATA_ID_DEV_DATA * 2, i, found = 0; 2237 u16 log_pages; 2238 2239 dev->zac_zones_optimal_open = U32_MAX; 2240 dev->zac_zones_optimal_nonseq = U32_MAX; 2241 dev->zac_zones_max_open = U32_MAX; 2242 2243 /* 2244 * Always set the 'ZAC' flag for Host-managed devices. 2245 */ 2246 if (dev->class == ATA_DEV_ZAC) 2247 dev->flags |= ATA_DFLAG_ZAC; 2248 else if (ata_id_zoned_cap(dev->id) == 0x01) 2249 /* 2250 * Check for host-aware devices. 2251 */ 2252 dev->flags |= ATA_DFLAG_ZAC; 2253 2254 if (!(dev->flags & ATA_DFLAG_ZAC)) 2255 return; 2256 2257 /* 2258 * Read Log Directory to figure out if IDENTIFY DEVICE log 2259 * is supported.
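 * (The General Purpose Log directory, log address 0x00, holds a 16-bit
 * page count per log address, two bytes per entry; a zero count at the
 * ATA_LOG_SATA_ID_DEV_DATA entry means the log is not implemented.)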
2260 */ 2261 err_mask = ata_read_log_page(dev, ATA_LOG_DIRECTORY, 2262 0, ap->sector_buf, 1); 2263 if (err_mask) { 2264 ata_dev_info(dev, 2265 "failed to get Log Directory Emask 0x%x\n", 2266 err_mask); 2267 return; 2268 } 2269 log_pages = get_unaligned_le16(&ap->sector_buf[log_index]); 2270 if (log_pages == 0) { 2271 ata_dev_warn(dev, 2272 "ATA Identify Device Log not supported\n"); 2273 return; 2274 } 2275 /* 2276 * Read IDENTIFY DEVICE data log, page 0, to figure out 2277 * if page 9 is supported. 2278 */ 2279 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 0, 2280 identify_buf, 1); 2281 if (err_mask) { 2282 ata_dev_info(dev, 2283 "failed to get Device Identify Log Emask 0x%x\n", 2284 err_mask); 2285 return; 2286 } 2287 log_pages = identify_buf[8]; 2288 for (i = 0; i < log_pages; i++) { 2289 if (identify_buf[9 + i] == ATA_LOG_ZONED_INFORMATION) { 2290 found++; 2291 break; 2292 } 2293 } 2294 if (!found) { 2295 ata_dev_warn(dev, 2296 "ATA Zoned Information Log not supported\n"); 2297 return; 2298 } 2299 2300 /* 2301 * Read IDENTIFY DEVICE data log, page 9 (Zoned-device information) 2302 */ 2303 err_mask = ata_read_log_page(dev, ATA_LOG_SATA_ID_DEV_DATA, 2304 ATA_LOG_ZONED_INFORMATION, 2305 identify_buf, 1); 2306 if (!err_mask) { 2307 u64 zoned_cap, opt_open, opt_nonseq, max_open; 2308 2309 zoned_cap = get_unaligned_le64(&identify_buf[8]); 2310 if ((zoned_cap >> 63)) 2311 dev->zac_zoned_cap = (zoned_cap & 1); 2312 opt_open = get_unaligned_le64(&identify_buf[24]); 2313 if ((opt_open >> 63)) 2314 dev->zac_zones_optimal_open = (u32)opt_open; 2315 opt_nonseq = get_unaligned_le64(&identify_buf[32]); 2316 if ((opt_nonseq >> 63)) 2317 dev->zac_zones_optimal_nonseq = (u32)opt_nonseq; 2318 max_open = get_unaligned_le64(&identify_buf[40]); 2319 if ((max_open >> 63)) 2320 dev->zac_zones_max_open = (u32)max_open; 2321 } 2322 } 2323 2324 /** 2325 * ata_dev_configure - Configure the specified ATA/ATAPI device 2326 * @dev: Target device to configure 2327 * 2328 * Configure @dev according to @dev->id. Generic and low-level 2329 * driver specific fixups are also applied. 2330 * 2331 * LOCKING: 2332 * Kernel thread context (may sleep) 2333 * 2334 * RETURNS: 2335 * 0 on success, -errno otherwise 2336 */ 2337 int ata_dev_configure(struct ata_device *dev) 2338 { 2339 struct ata_port *ap = dev->link->ap; 2340 struct ata_eh_context *ehc = &dev->link->eh_context; 2341 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2342 const u16 *id = dev->id; 2343 unsigned long xfer_mask; 2344 unsigned int err_mask; 2345 char revbuf[7]; /* XYZ-99\0 */ 2346 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2347 char modelbuf[ATA_ID_PROD_LEN+1]; 2348 int rc; 2349 2350 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2351 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); 2352 return 0; 2353 } 2354 2355 if (ata_msg_probe(ap)) 2356 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 2357 2358 /* set horkage */ 2359 dev->horkage |= ata_dev_blacklisted(dev); 2360 ata_force_horkage(dev); 2361 2362 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2363 ata_dev_info(dev, "unsupported device, disabling\n"); 2364 ata_dev_disable(dev); 2365 return 0; 2366 } 2367 2368 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2369 dev->class == ATA_DEV_ATAPI) { 2370 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", 2371 atapi_enabled ? 
"not supported with this driver" 2372 : "disabled"); 2373 ata_dev_disable(dev); 2374 return 0; 2375 } 2376 2377 rc = ata_do_link_spd_horkage(dev); 2378 if (rc) 2379 return rc; 2380 2381 /* some WD SATA-1 drives have issues with LPM, turn on NOLPM for them */ 2382 if ((dev->horkage & ATA_HORKAGE_WD_BROKEN_LPM) && 2383 (id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2) 2384 dev->horkage |= ATA_HORKAGE_NOLPM; 2385 2386 if (dev->horkage & ATA_HORKAGE_NOLPM) { 2387 ata_dev_warn(dev, "LPM support broken, forcing max_power\n"); 2388 dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER; 2389 } 2390 2391 /* let ACPI work its magic */ 2392 rc = ata_acpi_on_devcfg(dev); 2393 if (rc) 2394 return rc; 2395 2396 /* massage HPA, do it early as it might change IDENTIFY data */ 2397 rc = ata_hpa_resize(dev); 2398 if (rc) 2399 return rc; 2400 2401 /* print device capabilities */ 2402 if (ata_msg_probe(ap)) 2403 ata_dev_dbg(dev, 2404 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2405 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2406 __func__, 2407 id[49], id[82], id[83], id[84], 2408 id[85], id[86], id[87], id[88]); 2409 2410 /* initialize to-be-configured parameters */ 2411 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2412 dev->max_sectors = 0; 2413 dev->cdb_len = 0; 2414 dev->n_sectors = 0; 2415 dev->cylinders = 0; 2416 dev->heads = 0; 2417 dev->sectors = 0; 2418 dev->multi_count = 0; 2419 2420 /* 2421 * common ATA, ATAPI feature tests 2422 */ 2423 2424 /* find max transfer mode; for printk only */ 2425 xfer_mask = ata_id_xfermask(id); 2426 2427 if (ata_msg_probe(ap)) 2428 ata_dump_id(id); 2429 2430 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2431 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2432 sizeof(fwrevbuf)); 2433 2434 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2435 sizeof(modelbuf)); 2436 2437 /* ATA-specific feature tests */ 2438 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) { 2439 if (ata_id_is_cfa(id)) { 2440 /* CPRM may make this media unusable */ 2441 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2442 ata_dev_warn(dev, 2443 "supports DRM functions and may not be fully accessible\n"); 2444 snprintf(revbuf, 7, "CFA"); 2445 } else { 2446 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2447 /* Warn the user if the device has TPM extensions */ 2448 if (ata_id_has_tpm(id)) 2449 ata_dev_warn(dev, 2450 "supports DRM functions and may not be fully accessible\n"); 2451 } 2452 2453 dev->n_sectors = ata_id_n_sectors(id); 2454 2455 /* get current R/W Multiple count setting */ 2456 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2457 unsigned int max = dev->id[47] & 0xff; 2458 unsigned int cnt = dev->id[59] & 0xff; 2459 /* only recognize/allow powers of two here */ 2460 if (is_power_of_2(max) && is_power_of_2(cnt)) 2461 if (cnt <= max) 2462 dev->multi_count = cnt; 2463 } 2464 2465 if (ata_id_has_lba(id)) { 2466 const char *lba_desc; 2467 char ncq_desc[24]; 2468 2469 lba_desc = "LBA"; 2470 dev->flags |= ATA_DFLAG_LBA; 2471 if (ata_id_has_lba48(id)) { 2472 dev->flags |= ATA_DFLAG_LBA48; 2473 lba_desc = "LBA48"; 2474 2475 if (dev->n_sectors >= (1UL << 28) && 2476 ata_id_has_flush_ext(id)) 2477 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2478 } 2479 2480 /* config NCQ */ 2481 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2482 if (rc) 2483 return rc; 2484 2485 /* print device info to dmesg */ 2486 if (ata_msg_drv(ap) && print_info) { 2487 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2488 revbuf, modelbuf, fwrevbuf, 2489 ata_mode_string(xfer_mask)); 2490 ata_dev_info(dev, 2491 "%llu sectors, 
multi %u: %s %s\n", 2492 (unsigned long long)dev->n_sectors, 2493 dev->multi_count, lba_desc, ncq_desc); 2494 } 2495 } else { 2496 /* CHS */ 2497 2498 /* Default translation */ 2499 dev->cylinders = id[1]; 2500 dev->heads = id[3]; 2501 dev->sectors = id[6]; 2502 2503 if (ata_id_current_chs_valid(id)) { 2504 /* Current CHS translation is valid. */ 2505 dev->cylinders = id[54]; 2506 dev->heads = id[55]; 2507 dev->sectors = id[56]; 2508 } 2509 2510 /* print device info to dmesg */ 2511 if (ata_msg_drv(ap) && print_info) { 2512 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2513 revbuf, modelbuf, fwrevbuf, 2514 ata_mode_string(xfer_mask)); 2515 ata_dev_info(dev, 2516 "%llu sectors, multi %u, CHS %u/%u/%u\n", 2517 (unsigned long long)dev->n_sectors, 2518 dev->multi_count, dev->cylinders, 2519 dev->heads, dev->sectors); 2520 } 2521 } 2522 2523 /* Check and mark DevSlp capability. Get DevSlp timing variables 2524 * from SATA Settings page of Identify Device Data Log. 2525 */ 2526 if (ata_id_has_devslp(dev->id)) { 2527 u8 *sata_setting = ap->sector_buf; 2528 int i, j; 2529 2530 dev->flags |= ATA_DFLAG_DEVSLP; 2531 err_mask = ata_read_log_page(dev, 2532 ATA_LOG_SATA_ID_DEV_DATA, 2533 ATA_LOG_SATA_SETTINGS, 2534 sata_setting, 2535 1); 2536 if (err_mask) 2537 ata_dev_dbg(dev, 2538 "failed to get Identify Device Data, Emask 0x%x\n", 2539 err_mask); 2540 else 2541 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { 2542 j = ATA_LOG_DEVSLP_OFFSET + i; 2543 dev->devslp_timing[i] = sata_setting[j]; 2544 } 2545 } 2546 ata_dev_config_sense_reporting(dev); 2547 ata_dev_config_zac(dev); 2548 dev->cdb_len = 16; 2549 } 2550 2551 /* ATAPI-specific feature tests */ 2552 else if (dev->class == ATA_DEV_ATAPI) { 2553 const char *cdb_intr_string = ""; 2554 const char *atapi_an_string = ""; 2555 const char *dma_dir_string = ""; 2556 u32 sntf; 2557 2558 rc = atapi_cdb_len(id); 2559 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2560 if (ata_msg_warn(ap)) 2561 ata_dev_warn(dev, "unsupported CDB len\n"); 2562 rc = -EINVAL; 2563 goto err_out_nosup; 2564 } 2565 dev->cdb_len = (unsigned int) rc; 2566 2567 /* Enable ATAPI AN if both the host and device have 2568 * the support. If PMP is attached, SNTF is required 2569 * to enable ATAPI AN to discern between PHY status 2570 * changed notifications and ATAPI ANs. 
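 * (SCR_NOTIFICATION below is the SNotification register; each PMP port
 * raising an asynchronous notification sets its own bit there, which is
 * what lets the host attribute the event to a device.)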
2571 */ 2572 if (atapi_an && 2573 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2574 (!sata_pmp_attached(ap) || 2575 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2576 /* issue SET feature command to turn this on */ 2577 err_mask = ata_dev_set_feature(dev, 2578 SETFEATURES_SATA_ENABLE, SATA_AN); 2579 if (err_mask) 2580 ata_dev_err(dev, 2581 "failed to enable ATAPI AN (err_mask=0x%x)\n", 2582 err_mask); 2583 else { 2584 dev->flags |= ATA_DFLAG_AN; 2585 atapi_an_string = ", ATAPI AN"; 2586 } 2587 } 2588 2589 if (ata_id_cdb_intr(dev->id)) { 2590 dev->flags |= ATA_DFLAG_CDB_INTR; 2591 cdb_intr_string = ", CDB intr"; 2592 } 2593 2594 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) { 2595 dev->flags |= ATA_DFLAG_DMADIR; 2596 dma_dir_string = ", DMADIR"; 2597 } 2598 2599 if (ata_id_has_da(dev->id)) { 2600 dev->flags |= ATA_DFLAG_DA; 2601 zpodd_init(dev); 2602 } 2603 2604 /* print device info to dmesg */ 2605 if (ata_msg_drv(ap) && print_info) 2606 ata_dev_info(dev, 2607 "ATAPI: %s, %s, max %s%s%s%s\n", 2608 modelbuf, fwrevbuf, 2609 ata_mode_string(xfer_mask), 2610 cdb_intr_string, atapi_an_string, 2611 dma_dir_string); 2612 } 2613 2614 /* determine max_sectors */ 2615 dev->max_sectors = ATA_MAX_SECTORS; 2616 if (dev->flags & ATA_DFLAG_LBA48) 2617 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2618 2619 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2620 200 sectors */ 2621 if (ata_dev_knobble(dev)) { 2622 if (ata_msg_drv(ap) && print_info) 2623 ata_dev_info(dev, "applying bridge limits\n"); 2624 dev->udma_mask &= ATA_UDMA5; 2625 dev->max_sectors = ATA_MAX_SECTORS; 2626 } 2627 2628 if ((dev->class == ATA_DEV_ATAPI) && 2629 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2630 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2631 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2632 } 2633 2634 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2635 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2636 dev->max_sectors); 2637 2638 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024) 2639 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024, 2640 dev->max_sectors); 2641 2642 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2643 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2644 2645 if (ap->ops->dev_config) 2646 ap->ops->dev_config(dev); 2647 2648 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2649 /* Let the user know. We don't want to disallow opens for 2650 rescue purposes, or in case the vendor is just a blithering 2651 idiot. Do this after the dev_config call as some controllers 2652 with buggy firmware may want to avoid reporting false device 2653 bugs */ 2654 2655 if (print_info) { 2656 ata_dev_warn(dev, 2657 "Drive reports diagnostics failure. This may indicate a drive\n"); 2658 ata_dev_warn(dev, 2659 "fault or invalid emulation. Contact drive vendor for information.\n"); 2660 } 2661 } 2662 2663 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2664 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); 2665 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); 2666 } 2667 2668 return 0; 2669 2670 err_out_nosup: 2671 if (ata_msg_probe(ap)) 2672 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); 2673 return rc; 2674 } 2675 2676 /** 2677 * ata_cable_40wire - return 40 wire cable type 2678 * @ap: port 2679 * 2680 * Helper method for drivers which want to hardwire 40 wire cable 2681 * detection. 
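 *
 * As a usage sketch (the driver name is hypothetical), a PATA driver
 * that knows its cable is always 40-wire simply plugs this helper into
 * its port operations:
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &ata_bmdma_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};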
2682 */ 2683 2684 int ata_cable_40wire(struct ata_port *ap) 2685 { 2686 return ATA_CBL_PATA40; 2687 } 2688 2689 /** 2690 * ata_cable_80wire - return 80 wire cable type 2691 * @ap: port 2692 * 2693 * Helper method for drivers which want to hardwire 80 wire cable 2694 * detection. 2695 */ 2696 2697 int ata_cable_80wire(struct ata_port *ap) 2698 { 2699 return ATA_CBL_PATA80; 2700 } 2701 2702 /** 2703 * ata_cable_unknown - return unknown PATA cable. 2704 * @ap: port 2705 * 2706 * Helper method for drivers which have no PATA cable detection. 2707 */ 2708 2709 int ata_cable_unknown(struct ata_port *ap) 2710 { 2711 return ATA_CBL_PATA_UNK; 2712 } 2713 2714 /** 2715 * ata_cable_ignore - return ignored PATA cable. 2716 * @ap: port 2717 * 2718 * Helper method for drivers which don't use cable type to limit 2719 * transfer mode. 2720 */ 2721 int ata_cable_ignore(struct ata_port *ap) 2722 { 2723 return ATA_CBL_PATA_IGN; 2724 } 2725 2726 /** 2727 * ata_cable_sata - return SATA cable type 2728 * @ap: port 2729 * 2730 * Helper method for drivers which have SATA cables 2731 */ 2732 2733 int ata_cable_sata(struct ata_port *ap) 2734 { 2735 return ATA_CBL_SATA; 2736 } 2737 2738 /** 2739 * ata_bus_probe - Reset and probe ATA bus 2740 * @ap: Bus to probe 2741 * 2742 * Master ATA bus probing function. Initiates a hardware-dependent 2743 * bus reset, then attempts to identify any devices found on 2744 * the bus. 2745 * 2746 * LOCKING: 2747 * PCI/etc. bus probe sem. 2748 * 2749 * RETURNS: 2750 * Zero on success, negative errno otherwise. 2751 */ 2752 2753 int ata_bus_probe(struct ata_port *ap) 2754 { 2755 unsigned int classes[ATA_MAX_DEVICES]; 2756 int tries[ATA_MAX_DEVICES]; 2757 int rc; 2758 struct ata_device *dev; 2759 2760 ata_for_each_dev(dev, &ap->link, ALL) 2761 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2762 2763 retry: 2764 ata_for_each_dev(dev, &ap->link, ALL) { 2765 /* If we issue an SRST then an ATA drive (not ATAPI) 2766 * may change configuration and be in PIO0 timing. If 2767 * we do a hard reset (or are coming from power on) 2768 * this is true for ATA or ATAPI. Until we've set a 2769 * suitable controller mode we should not touch the 2770 * bus as we may be talking too fast. 2771 */ 2772 dev->pio_mode = XFER_PIO_0; 2773 dev->dma_mode = 0xff; 2774 2775 /* If the controller has a pio mode setup function 2776 * then use it to set the chipset to rights. Don't 2777 * touch the DMA setup as that will be dealt with when 2778 * configuring devices. 2779 */ 2780 if (ap->ops->set_piomode) 2781 ap->ops->set_piomode(ap, dev); 2782 } 2783 2784 /* reset and determine device classes */ 2785 ap->ops->phy_reset(ap); 2786 2787 ata_for_each_dev(dev, &ap->link, ALL) { 2788 if (dev->class != ATA_DEV_UNKNOWN) 2789 classes[dev->devno] = dev->class; 2790 else 2791 classes[dev->devno] = ATA_DEV_NONE; 2792 2793 dev->class = ATA_DEV_UNKNOWN; 2794 } 2795 2796 /* read IDENTIFY page and configure devices. 
We have to do the identify 2797 specific sequence bass-ackwards so that PDIAG- is released by 2798 the slave device */ 2799 2800 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2801 if (tries[dev->devno]) 2802 dev->class = classes[dev->devno]; 2803 2804 if (!ata_dev_enabled(dev)) 2805 continue; 2806 2807 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2808 dev->id); 2809 if (rc) 2810 goto fail; 2811 } 2812 2813 /* Now ask for the cable type as PDIAG- should have been released */ 2814 if (ap->ops->cable_detect) 2815 ap->cbl = ap->ops->cable_detect(ap); 2816 2817 /* We may have SATA bridge glue hiding here irrespective of 2818 * the reported cable types and sensed types. When SATA 2819 * drives indicate we have a bridge, we don't know which end 2820 * of the link the bridge is which is a problem. 2821 */ 2822 ata_for_each_dev(dev, &ap->link, ENABLED) 2823 if (ata_id_is_sata(dev->id)) 2824 ap->cbl = ATA_CBL_SATA; 2825 2826 /* After the identify sequence we can now set up the devices. We do 2827 this in the normal order so that the user doesn't get confused */ 2828 2829 ata_for_each_dev(dev, &ap->link, ENABLED) { 2830 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2831 rc = ata_dev_configure(dev); 2832 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2833 if (rc) 2834 goto fail; 2835 } 2836 2837 /* configure transfer mode */ 2838 rc = ata_set_mode(&ap->link, &dev); 2839 if (rc) 2840 goto fail; 2841 2842 ata_for_each_dev(dev, &ap->link, ENABLED) 2843 return 0; 2844 2845 return -ENODEV; 2846 2847 fail: 2848 tries[dev->devno]--; 2849 2850 switch (rc) { 2851 case -EINVAL: 2852 /* eeek, something went very wrong, give up */ 2853 tries[dev->devno] = 0; 2854 break; 2855 2856 case -ENODEV: 2857 /* give it just one more chance */ 2858 tries[dev->devno] = min(tries[dev->devno], 1); 2859 case -EIO: 2860 if (tries[dev->devno] == 1) { 2861 /* This is the last chance, better to slow 2862 * down than lose it. 2863 */ 2864 sata_down_spd_limit(&ap->link, 0); 2865 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2866 } 2867 } 2868 2869 if (!tries[dev->devno]) 2870 ata_dev_disable(dev); 2871 2872 goto retry; 2873 } 2874 2875 /** 2876 * sata_print_link_status - Print SATA link status 2877 * @link: SATA link to printk link status about 2878 * 2879 * This function prints link speed and status of a SATA link. 2880 * 2881 * LOCKING: 2882 * None. 
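 * (The printed speed is decoded from SStatus bits 7:4, the SPD field:
 * 1 = 1.5 Gbps, 2 = 3.0 Gbps, 3 = 6.0 Gbps.)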
2883 */ 2884 static void sata_print_link_status(struct ata_link *link) 2885 { 2886 u32 sstatus, scontrol, tmp; 2887 2888 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2889 return; 2890 sata_scr_read(link, SCR_CONTROL, &scontrol); 2891 2892 if (ata_phys_link_online(link)) { 2893 tmp = (sstatus >> 4) & 0xf; 2894 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", 2895 sata_spd_string(tmp), sstatus, scontrol); 2896 } else { 2897 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", 2898 sstatus, scontrol); 2899 } 2900 } 2901 2902 /** 2903 * ata_dev_pair - return other device on cable 2904 * @adev: device 2905 * 2906 * Obtain the other device on the same cable, or if none is 2907 * present NULL is returned 2908 */ 2909 2910 struct ata_device *ata_dev_pair(struct ata_device *adev) 2911 { 2912 struct ata_link *link = adev->link; 2913 struct ata_device *pair = &link->device[1 - adev->devno]; 2914 if (!ata_dev_enabled(pair)) 2915 return NULL; 2916 return pair; 2917 } 2918 2919 /** 2920 * sata_down_spd_limit - adjust SATA spd limit downward 2921 * @link: Link to adjust SATA spd limit for 2922 * @spd_limit: Additional limit 2923 * 2924 * Adjust SATA spd limit of @link downward. Note that this 2925 * function only adjusts the limit. The change must be applied 2926 * using sata_set_spd(). 2927 * 2928 * If @spd_limit is non-zero, the speed is limited to equal to or 2929 * lower than @spd_limit if such speed is supported. If 2930 * @spd_limit is slower than any supported speed, only the lowest 2931 * supported speed is allowed. 2932 * 2933 * LOCKING: 2934 * Inherited from caller. 2935 * 2936 * RETURNS: 2937 * 0 on success, negative errno on failure 2938 */ 2939 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 2940 { 2941 u32 sstatus, spd, mask; 2942 int rc, bit; 2943 2944 if (!sata_scr_valid(link)) 2945 return -EOPNOTSUPP; 2946 2947 /* If SCR can be read, use it to determine the current SPD. 2948 * If not, use cached value in link->sata_spd. 2949 */ 2950 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2951 if (rc == 0 && ata_sstatus_online(sstatus)) 2952 spd = (sstatus >> 4) & 0xf; 2953 else 2954 spd = link->sata_spd; 2955 2956 mask = link->sata_spd_limit; 2957 if (mask <= 1) 2958 return -EINVAL; 2959 2960 /* unconditionally mask off the highest bit */ 2961 bit = fls(mask) - 1; 2962 mask &= ~(1 << bit); 2963 2964 /* Mask off all speeds higher than or equal to the current 2965 * one. Force 1.5Gbps if current SPD is not available. 2966 */ 2967 if (spd > 1) 2968 mask &= (1 << (spd - 1)) - 1; 2969 else 2970 mask &= 1; 2971 2972 /* were we already at the bottom? */ 2973 if (!mask) 2974 return -EINVAL; 2975 2976 if (spd_limit) { 2977 if (mask & ((1 << spd_limit) - 1)) 2978 mask &= (1 << spd_limit) - 1; 2979 else { 2980 bit = ffs(mask) - 1; 2981 mask = 1 << bit; 2982 } 2983 } 2984 2985 link->sata_spd_limit = mask; 2986 2987 ata_link_warn(link, "limiting SATA link speed to %s\n", 2988 sata_spd_string(fls(mask))); 2989 2990 return 0; 2991 } 2992 2993 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2994 { 2995 struct ata_link *host_link = &link->ap->link; 2996 u32 limit, target, spd; 2997 2998 limit = link->sata_spd_limit; 2999 3000 /* Don't configure downstream link faster than upstream link. 3001 * It doesn't speed up anything and some PMPs choke on such 3002 * configuration. 
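 * (host_link->sata_spd is the upstream link's negotiated generation;
 * masking 'limit' with (1 << sata_spd) - 1 keeps only generations no
 * faster than that.)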
3003 */ 3004 if (!ata_is_host_link(link) && host_link->sata_spd) 3005 limit &= (1 << host_link->sata_spd) - 1; 3006 3007 if (limit == UINT_MAX) 3008 target = 0; 3009 else 3010 target = fls(limit); 3011 3012 spd = (*scontrol >> 4) & 0xf; 3013 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 3014 3015 return spd != target; 3016 } 3017 3018 /** 3019 * sata_set_spd_needed - is SATA spd configuration needed 3020 * @link: Link in question 3021 * 3022 * Test whether the spd limit in SControl matches 3023 * @link->sata_spd_limit. This function is used to determine 3024 * whether hardreset is necessary to apply SATA spd 3025 * configuration. 3026 * 3027 * LOCKING: 3028 * Inherited from caller. 3029 * 3030 * RETURNS: 3031 * 1 if SATA spd configuration is needed, 0 otherwise. 3032 */ 3033 static int sata_set_spd_needed(struct ata_link *link) 3034 { 3035 u32 scontrol; 3036 3037 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 3038 return 1; 3039 3040 return __sata_set_spd_needed(link, &scontrol); 3041 } 3042 3043 /** 3044 * sata_set_spd - set SATA spd according to spd limit 3045 * @link: Link to set SATA spd for 3046 * 3047 * Set SATA spd of @link according to sata_spd_limit. 3048 * 3049 * LOCKING: 3050 * Inherited from caller. 3051 * 3052 * RETURNS: 3053 * 0 if spd doesn't need to be changed, 1 if spd has been 3054 * changed. Negative errno if SCR registers are inaccessible. 3055 */ 3056 int sata_set_spd(struct ata_link *link) 3057 { 3058 u32 scontrol; 3059 int rc; 3060 3061 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3062 return rc; 3063 3064 if (!__sata_set_spd_needed(link, &scontrol)) 3065 return 0; 3066 3067 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3068 return rc; 3069 3070 return 1; 3071 } 3072 3073 /* 3074 * This mode timing computation functionality is ported over from 3075 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 3076 */ 3077 /* 3078 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 3079 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 3080 * for UDMA6, which is currently supported only by Maxtor drives. 3081 * 3082 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
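 * Each row below follows struct ata_timing field order: mode, setup,
 * act8b, rec8b, cyc8b, active, recover, dmack_hold, cycle, udma
 * (all times in ns).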
3083 */ 3084 3085 static const struct ata_timing ata_timing[] = { 3086 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 3087 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 3088 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 3089 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 3090 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 3091 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 3092 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 3093 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 3094 3095 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 3096 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 3097 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 3098 3099 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 3100 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 3101 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 3102 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 3103 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 3104 3105 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 3106 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 3107 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 3108 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 3109 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 3110 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 3111 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 3112 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 3113 3114 { 0xFF } 3115 }; 3116 3117 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 3118 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 3119 3120 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 3121 { 3122 q->setup = EZ(t->setup * 1000, T); 3123 q->act8b = EZ(t->act8b * 1000, T); 3124 q->rec8b = EZ(t->rec8b * 1000, T); 3125 q->cyc8b = EZ(t->cyc8b * 1000, T); 3126 q->active = EZ(t->active * 1000, T); 3127 q->recover = EZ(t->recover * 1000, T); 3128 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 3129 q->cycle = EZ(t->cycle * 1000, T); 3130 q->udma = EZ(t->udma * 1000, UT); 3131 } 3132 3133 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 3134 struct ata_timing *m, unsigned int what) 3135 { 3136 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 3137 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 3138 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 3139 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 3140 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 3141 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 3142 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 3143 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 3144 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 3145 } 3146 3147 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 3148 { 3149 const struct ata_timing *t = ata_timing; 3150 3151 while (xfer_mode > t->mode) 3152 t++; 3153 3154 if (xfer_mode == t->mode) 3155 return t; 3156 3157 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", 3158 __func__, xfer_mode); 3159 3160 return NULL; 3161 } 3162 3163 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 3164 struct ata_timing *t, int T, int UT) 3165 { 3166 const u16 *id = adev->id; 3167 const struct ata_timing *s; 3168 struct ata_timing p; 3169 3170 /* 3171 * Find the mode. 
3172 */ 3173 3174 if (!(s = ata_timing_find_mode(speed))) 3175 return -EINVAL; 3176 3177 memcpy(t, s, sizeof(*s)); 3178 3179 /* 3180 * If the drive is an EIDE drive, it can tell us it needs extended 3181 * PIO/MW_DMA cycle timing. 3182 */ 3183 3184 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 3185 memset(&p, 0, sizeof(p)); 3186 3187 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { 3188 if (speed <= XFER_PIO_2) 3189 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; 3190 else if ((speed <= XFER_PIO_4) || 3191 (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) 3192 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; 3193 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) 3194 p.cycle = id[ATA_ID_EIDE_DMA_MIN]; 3195 3196 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3197 } 3198 3199 /* 3200 * Convert the timing to bus clock counts. 3201 */ 3202 3203 ata_timing_quantize(t, t, T, UT); 3204 3205 /* 3206 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3207 * S.M.A.R.T. and some other commands. We have to ensure that the 3208 * DMA cycle timing is slower than or equal to the fastest PIO timing. 3209 */ 3210 3211 if (speed > XFER_PIO_6) { 3212 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3213 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3214 } 3215 3216 /* 3217 * Lengthen active & recovery time so that cycle time is correct. 3218 */ 3219 3220 if (t->act8b + t->rec8b < t->cyc8b) { 3221 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3222 t->rec8b = t->cyc8b - t->act8b; 3223 } 3224 3225 if (t->active + t->recover < t->cycle) { 3226 t->active += (t->cycle - (t->active + t->recover)) / 2; 3227 t->recover = t->cycle - t->active; 3228 } 3229 3230 /* In a few cases quantisation may produce enough errors to 3231 leave t->cycle too low for the sum of active and recovery; 3232 if so we must correct this */ 3233 if (t->active + t->recover > t->cycle) 3234 t->cycle = t->active + t->recover; 3235 3236 return 0; 3237 } 3238 3239 /** 3240 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3241 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3242 * @cycle: cycle duration in ns 3243 * 3244 * Return matching xfer mode for @cycle. The returned mode is of 3245 * the transfer type specified by @xfer_shift. If @cycle is too 3246 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3247 * than the fastest known mode, the fastest mode is returned. 3248 * 3249 * LOCKING: 3250 * None. 3251 * 3252 * RETURNS: 3253 * Matching xfer_mode, 0xff if no match found.
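 * For example, ata_timing_cycle2mode(ATA_SHIFT_UDMA, 25) yields
 * XFER_UDMA_4: UDMA5 needs a 20 ns cycle, faster than the requested
 * 25 ns, so UDMA4 (30 ns) is the fastest mode the given cycle time can
 * still satisfy.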
3254 */ 3255 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3256 { 3257 u8 base_mode = 0xff, last_mode = 0xff; 3258 const struct ata_xfer_ent *ent; 3259 const struct ata_timing *t; 3260 3261 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3262 if (ent->shift == xfer_shift) 3263 base_mode = ent->base; 3264 3265 for (t = ata_timing_find_mode(base_mode); 3266 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3267 unsigned short this_cycle; 3268 3269 switch (xfer_shift) { 3270 case ATA_SHIFT_PIO: 3271 case ATA_SHIFT_MWDMA: 3272 this_cycle = t->cycle; 3273 break; 3274 case ATA_SHIFT_UDMA: 3275 this_cycle = t->udma; 3276 break; 3277 default: 3278 return 0xff; 3279 } 3280 3281 if (cycle > this_cycle) 3282 break; 3283 3284 last_mode = t->mode; 3285 } 3286 3287 return last_mode; 3288 } 3289 3290 /** 3291 * ata_down_xfermask_limit - adjust dev xfer masks downward 3292 * @dev: Device to adjust xfer masks 3293 * @sel: ATA_DNXFER_* selector 3294 * 3295 * Adjust xfer masks of @dev downward. Note that this function 3296 * does not apply the change. Invoking ata_set_mode() afterwards 3297 * will apply the limit. 3298 * 3299 * LOCKING: 3300 * Inherited from caller. 3301 * 3302 * RETURNS: 3303 * 0 on success, negative errno on failure 3304 */ 3305 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3306 { 3307 char buf[32]; 3308 unsigned long orig_mask, xfer_mask; 3309 unsigned long pio_mask, mwdma_mask, udma_mask; 3310 int quiet, highbit; 3311 3312 quiet = !!(sel & ATA_DNXFER_QUIET); 3313 sel &= ~ATA_DNXFER_QUIET; 3314 3315 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3316 dev->mwdma_mask, 3317 dev->udma_mask); 3318 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3319 3320 switch (sel) { 3321 case ATA_DNXFER_PIO: 3322 highbit = fls(pio_mask) - 1; 3323 pio_mask &= ~(1 << highbit); 3324 break; 3325 3326 case ATA_DNXFER_DMA: 3327 if (udma_mask) { 3328 highbit = fls(udma_mask) - 1; 3329 udma_mask &= ~(1 << highbit); 3330 if (!udma_mask) 3331 return -ENOENT; 3332 } else if (mwdma_mask) { 3333 highbit = fls(mwdma_mask) - 1; 3334 mwdma_mask &= ~(1 << highbit); 3335 if (!mwdma_mask) 3336 return -ENOENT; 3337 } 3338 break; 3339 3340 case ATA_DNXFER_40C: 3341 udma_mask &= ATA_UDMA_MASK_40C; 3342 break; 3343 3344 case ATA_DNXFER_FORCE_PIO0: 3345 pio_mask &= 1; 3346 case ATA_DNXFER_FORCE_PIO: 3347 mwdma_mask = 0; 3348 udma_mask = 0; 3349 break; 3350 3351 default: 3352 BUG(); 3353 } 3354 3355 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3356 3357 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3358 return -ENOENT; 3359 3360 if (!quiet) { 3361 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3362 snprintf(buf, sizeof(buf), "%s:%s", 3363 ata_mode_string(xfer_mask), 3364 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3365 else 3366 snprintf(buf, sizeof(buf), "%s", 3367 ata_mode_string(xfer_mask)); 3368 3369 ata_dev_warn(dev, "limiting speed to %s\n", buf); 3370 } 3371 3372 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3373 &dev->udma_mask); 3374 3375 return 0; 3376 } 3377 3378 static int ata_dev_set_mode(struct ata_device *dev) 3379 { 3380 struct ata_port *ap = dev->link->ap; 3381 struct ata_eh_context *ehc = &dev->link->eh_context; 3382 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3383 const char *dev_err_whine = ""; 3384 int ign_dev_err = 0; 3385 unsigned int err_mask = 0; 3386 int rc; 3387 3388 dev->flags &= ~ATA_DFLAG_PIO; 3389 if (dev->xfer_shift == ATA_SHIFT_PIO) 3390 dev->flags |= 
ATA_DFLAG_PIO; 3391 3392 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3393 dev_err_whine = " (SET_XFERMODE skipped)"; 3394 else { 3395 if (nosetxfer) 3396 ata_dev_warn(dev, 3397 "NOSETXFER but PATA detected - can't " 3398 "skip SETXFER, might malfunction\n"); 3399 err_mask = ata_dev_set_xfermode(dev); 3400 } 3401 3402 if (err_mask & ~AC_ERR_DEV) 3403 goto fail; 3404 3405 /* revalidate */ 3406 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3407 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3408 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3409 if (rc) 3410 return rc; 3411 3412 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3413 /* Old CFA may refuse this command, which is just fine */ 3414 if (ata_id_is_cfa(dev->id)) 3415 ign_dev_err = 1; 3416 /* Catch several broken garbage emulations plus some pre 3417 ATA devices */ 3418 if (ata_id_major_version(dev->id) == 0 && 3419 dev->pio_mode <= XFER_PIO_2) 3420 ign_dev_err = 1; 3421 /* Some very old devices and some bad newer ones fail 3422 any kind of SET_XFERMODE request but support PIO0-2 3423 timings and no IORDY */ 3424 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3425 ign_dev_err = 1; 3426 } 3427 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3428 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3429 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3430 dev->dma_mode == XFER_MW_DMA_0 && 3431 (dev->id[63] >> 8) & 1) 3432 ign_dev_err = 1; 3433 3434 /* if the device is actually configured correctly, ignore dev err */ 3435 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3436 ign_dev_err = 1; 3437 3438 if (err_mask & AC_ERR_DEV) { 3439 if (!ign_dev_err) 3440 goto fail; 3441 else 3442 dev_err_whine = " (device error ignored)"; 3443 } 3444 3445 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3446 dev->xfer_shift, (int)dev->xfer_mode); 3447 3448 ata_dev_info(dev, "configured for %s%s\n", 3449 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3450 dev_err_whine); 3451 3452 return 0; 3453 3454 fail: 3455 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask); 3456 return -EIO; 3457 } 3458 3459 /** 3460 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3461 * @link: link on which timings will be programmed 3462 * @r_failed_dev: out parameter for failed device 3463 * 3464 * Standard implementation of the function used to tune and set 3465 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3466 * ata_dev_set_mode() fails, pointer to the failing device is 3467 * returned in @r_failed_dev. 3468 * 3469 * LOCKING: 3470 * PCI/etc. bus probe sem. 
3471 * 3472 * RETURNS: 3473 * 0 on success, negative errno otherwise 3474 */ 3475 3476 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3477 { 3478 struct ata_port *ap = link->ap; 3479 struct ata_device *dev; 3480 int rc = 0, used_dma = 0, found = 0; 3481 3482 /* step 1: calculate xfer_mask */ 3483 ata_for_each_dev(dev, link, ENABLED) { 3484 unsigned long pio_mask, dma_mask; 3485 unsigned int mode_mask; 3486 3487 mode_mask = ATA_DMA_MASK_ATA; 3488 if (dev->class == ATA_DEV_ATAPI) 3489 mode_mask = ATA_DMA_MASK_ATAPI; 3490 else if (ata_id_is_cfa(dev->id)) 3491 mode_mask = ATA_DMA_MASK_CFA; 3492 3493 ata_dev_xfermask(dev); 3494 ata_force_xfermask(dev); 3495 3496 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3497 3498 if (libata_dma_mask & mode_mask) 3499 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, 3500 dev->udma_mask); 3501 else 3502 dma_mask = 0; 3503 3504 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3505 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3506 3507 found = 1; 3508 if (ata_dma_enabled(dev)) 3509 used_dma = 1; 3510 } 3511 if (!found) 3512 goto out; 3513 3514 /* step 2: always set host PIO timings */ 3515 ata_for_each_dev(dev, link, ENABLED) { 3516 if (dev->pio_mode == 0xff) { 3517 ata_dev_warn(dev, "no PIO support\n"); 3518 rc = -EINVAL; 3519 goto out; 3520 } 3521 3522 dev->xfer_mode = dev->pio_mode; 3523 dev->xfer_shift = ATA_SHIFT_PIO; 3524 if (ap->ops->set_piomode) 3525 ap->ops->set_piomode(ap, dev); 3526 } 3527 3528 /* step 3: set host DMA timings */ 3529 ata_for_each_dev(dev, link, ENABLED) { 3530 if (!ata_dma_enabled(dev)) 3531 continue; 3532 3533 dev->xfer_mode = dev->dma_mode; 3534 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3535 if (ap->ops->set_dmamode) 3536 ap->ops->set_dmamode(ap, dev); 3537 } 3538 3539 /* step 4: update devices' xfer mode */ 3540 ata_for_each_dev(dev, link, ENABLED) { 3541 rc = ata_dev_set_mode(dev); 3542 if (rc) 3543 goto out; 3544 } 3545 3546 /* Record simplex status. If we selected DMA then the other 3547 * host channels are not permitted to do so. 3548 */ 3549 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3550 ap->host->simplex_claimed = ap; 3551 3552 out: 3553 if (rc) 3554 *r_failed_dev = dev; 3555 return rc; 3556 } 3557 3558 /** 3559 * ata_wait_ready - wait for link to become ready 3560 * @link: link to be waited on 3561 * @deadline: deadline jiffies for the operation 3562 * @check_ready: callback to check link readiness 3563 * 3564 * Wait for @link to become ready. @check_ready should return 3565 * positive number if @link is ready, 0 if it isn't, -ENODEV if 3566 * link doesn't seem to be occupied, other errno for other error 3567 * conditions. 3568 * 3569 * Transient -ENODEV conditions are allowed for 3570 * ATA_TMOUT_FF_WAIT. 3571 * 3572 * LOCKING: 3573 * EH context. 3574 * 3575 * RETURNS: 3576 * 0 if @link is ready before @deadline; otherwise, -errno. 3577 */ 3578 int ata_wait_ready(struct ata_link *link, unsigned long deadline, 3579 int (*check_ready)(struct ata_link *link)) 3580 { 3581 unsigned long start = jiffies; 3582 unsigned long nodev_deadline; 3583 int warned = 0; 3584 3585 /* choose which 0xff timeout to use, read comment in libata.h */ 3586 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) 3587 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); 3588 else 3589 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3590 3591 /* Slave readiness can't be tested separately from master. 
On 3592 * M/S emulation configuration, this function should be called 3593 * only on the master and it will handle both master and slave. 3594 */ 3595 WARN_ON(link == link->ap->slave_link); 3596 3597 if (time_after(nodev_deadline, deadline)) 3598 nodev_deadline = deadline; 3599 3600 while (1) { 3601 unsigned long now = jiffies; 3602 int ready, tmp; 3603 3604 ready = tmp = check_ready(link); 3605 if (ready > 0) 3606 return 0; 3607 3608 /* 3609 * -ENODEV could be transient. Ignore -ENODEV if link 3610 * is online. Also, some SATA devices take a long 3611 * time to clear 0xff after reset. Wait for 3612 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't 3613 * offline. 3614 * 3615 * Note that some PATA controllers (pata_ali) explode 3616 * if status register is read more than once when 3617 * there's no device attached. 3618 */ 3619 if (ready == -ENODEV) { 3620 if (ata_link_online(link)) 3621 ready = 0; 3622 else if ((link->ap->flags & ATA_FLAG_SATA) && 3623 !ata_link_offline(link) && 3624 time_before(now, nodev_deadline)) 3625 ready = 0; 3626 } 3627 3628 if (ready) 3629 return ready; 3630 if (time_after(now, deadline)) 3631 return -EBUSY; 3632 3633 if (!warned && time_after(now, start + 5 * HZ) && 3634 (deadline - now > 3 * HZ)) { 3635 ata_link_warn(link, 3636 "link is slow to respond, please be patient " 3637 "(ready=%d)\n", tmp); 3638 warned = 1; 3639 } 3640 3641 ata_msleep(link->ap, 50); 3642 } 3643 } 3644 3645 /** 3646 * ata_wait_after_reset - wait for link to become ready after reset 3647 * @link: link to be waited on 3648 * @deadline: deadline jiffies for the operation 3649 * @check_ready: callback to check link readiness 3650 * 3651 * Wait for @link to become ready after reset. 3652 * 3653 * LOCKING: 3654 * EH context. 3655 * 3656 * RETURNS: 3657 * 0 if @link is ready before @deadline; otherwise, -errno. 3658 */ 3659 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 3660 int (*check_ready)(struct ata_link *link)) 3661 { 3662 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); 3663 3664 return ata_wait_ready(link, deadline, check_ready); 3665 } 3666 3667 /** 3668 * sata_link_debounce - debounce SATA phy status 3669 * @link: ATA link to debounce SATA phy status for 3670 * @params: timing parameters { interval, duration, timeout } in msec 3671 * @deadline: deadline jiffies for the operation 3672 * 3673 * Make sure SStatus of @link reaches stable state, determined by 3674 * holding the same value where DET is not 1 for @duration polled 3675 * every @interval, before @timeout. Timeout constrains the 3676 * beginning of the stable state. Because DET gets stuck at 1 on 3677 * some controllers after hot unplugging, this function waits 3678 * until timeout and then returns 0 if DET is stable at 1. 3679 * 3680 * @timeout is further limited by @deadline. The sooner of the 3681 * two is used. 3682 * 3683 * LOCKING: 3684 * Kernel thread context (may sleep) 3685 * 3686 * RETURNS: 3687 * 0 on success, -errno on failure.
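 * As an illustration (the numbers are made up, not one of the
 * predefined debounce tables): params of { 10, 200, 2000 } would poll
 * SStatus every 10 ms and require DET to hold the same value for
 * 200 ms, giving up after 2 s or at @deadline, whichever comes first.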
3688 */ 3689 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3690 unsigned long deadline) 3691 { 3692 unsigned long interval = params[0]; 3693 unsigned long duration = params[1]; 3694 unsigned long last_jiffies, t; 3695 u32 last, cur; 3696 int rc; 3697 3698 t = ata_deadline(jiffies, params[2]); 3699 if (time_before(t, deadline)) 3700 deadline = t; 3701 3702 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3703 return rc; 3704 cur &= 0xf; 3705 3706 last = cur; 3707 last_jiffies = jiffies; 3708 3709 while (1) { 3710 ata_msleep(link->ap, interval); 3711 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3712 return rc; 3713 cur &= 0xf; 3714 3715 /* DET stable? */ 3716 if (cur == last) { 3717 if (cur == 1 && time_before(jiffies, deadline)) 3718 continue; 3719 if (time_after(jiffies, 3720 ata_deadline(last_jiffies, duration))) 3721 return 0; 3722 continue; 3723 } 3724 3725 /* unstable, start over */ 3726 last = cur; 3727 last_jiffies = jiffies; 3728 3729 /* Check deadline. If debouncing failed, return 3730 * -EPIPE to tell upper layer to lower link speed. 3731 */ 3732 if (time_after(jiffies, deadline)) 3733 return -EPIPE; 3734 } 3735 } 3736 3737 /** 3738 * sata_link_resume - resume SATA link 3739 * @link: ATA link to resume SATA 3740 * @params: timing parameters { interval, duration, timeout } in msec 3741 * @deadline: deadline jiffies for the operation 3742 * 3743 * Resume SATA phy @link and debounce it. 3744 * 3745 * LOCKING: 3746 * Kernel thread context (may sleep) 3747 * 3748 * RETURNS: 3749 * 0 on success, -errno on failure. 3750 */ 3751 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3752 unsigned long deadline) 3753 { 3754 int tries = ATA_LINK_RESUME_TRIES; 3755 u32 scontrol, serror; 3756 int rc; 3757 3758 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3759 return rc; 3760 3761 /* 3762 * Writes to SControl sometimes get ignored under certain 3763 * controllers (ata_piix SIDPR). Make sure DET actually is 3764 * cleared. 3765 */ 3766 do { 3767 scontrol = (scontrol & 0x0f0) | 0x300; 3768 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3769 return rc; 3770 /* 3771 * Some PHYs react badly if SStatus is pounded 3772 * immediately after resuming. Delay 200ms before 3773 * debouncing. 3774 */ 3775 if (!(link->flags & ATA_LFLAG_NO_DB_DELAY)) 3776 ata_msleep(link->ap, 200); 3777 3778 /* is SControl restored correctly? */ 3779 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3780 return rc; 3781 } while ((scontrol & 0xf0f) != 0x300 && --tries); 3782 3783 if ((scontrol & 0xf0f) != 0x300) { 3784 ata_link_warn(link, "failed to resume link (SControl %X)\n", 3785 scontrol); 3786 return 0; 3787 } 3788 3789 if (tries < ATA_LINK_RESUME_TRIES) 3790 ata_link_warn(link, "link resume succeeded after %d retries\n", 3791 ATA_LINK_RESUME_TRIES - tries); 3792 3793 if ((rc = sata_link_debounce(link, params, deadline))) 3794 return rc; 3795 3796 /* clear SError, some PHYs require this even for SRST to work */ 3797 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) 3798 rc = sata_scr_write(link, SCR_ERROR, serror); 3799 3800 return rc != -EINVAL ? rc : 0; 3801 } 3802 3803 /** 3804 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields 3805 * @link: ATA link to manipulate SControl for 3806 * @policy: LPM policy to configure 3807 * @spm_wakeup: initiate LPM transition to active state 3808 * 3809 * Manipulate the IPM field of the SControl register of @link 3810 * according to @policy. 
If @policy is ATA_LPM_MAX_POWER and 3811 * @spm_wakeup is %true, the SPM field is manipulated to wake up 3812 * the link. This function also clears PHYRDY_CHG before 3813 * returning. 3814 * 3815 * LOCKING: 3816 * EH context. 3817 * 3818 * RETURNS: 3819 * 0 on success, -errno otherwise. 3820 */ 3821 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, 3822 bool spm_wakeup) 3823 { 3824 struct ata_eh_context *ehc = &link->eh_context; 3825 bool woken_up = false; 3826 u32 scontrol; 3827 int rc; 3828 3829 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 3830 if (rc) 3831 return rc; 3832 3833 switch (policy) { 3834 case ATA_LPM_MAX_POWER: 3835 /* disable all LPM transitions */ 3836 scontrol |= (0x7 << 8); 3837 /* initiate transition to active state */ 3838 if (spm_wakeup) { 3839 scontrol |= (0x4 << 12); 3840 woken_up = true; 3841 } 3842 break; 3843 case ATA_LPM_MED_POWER: 3844 /* allow LPM to PARTIAL */ 3845 scontrol &= ~(0x1 << 8); 3846 scontrol |= (0x6 << 8); 3847 break; 3848 case ATA_LPM_MIN_POWER: 3849 if (ata_link_nr_enabled(link) > 0) 3850 /* no restrictions on LPM transitions */ 3851 scontrol &= ~(0x7 << 8); 3852 else { 3853 /* empty port, power off */ 3854 scontrol &= ~0xf; 3855 scontrol |= (0x1 << 2); 3856 } 3857 break; 3858 default: 3859 WARN_ON(1); 3860 } 3861 3862 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 3863 if (rc) 3864 return rc; 3865 3866 /* give the link time to transit out of LPM state */ 3867 if (woken_up) 3868 msleep(10); 3869 3870 /* clear PHYRDY_CHG from SError */ 3871 ehc->i.serror &= ~SERR_PHYRDY_CHG; 3872 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); 3873 } 3874 3875 /** 3876 * ata_std_prereset - prepare for reset 3877 * @link: ATA link to be reset 3878 * @deadline: deadline jiffies for the operation 3879 * 3880 * @link is about to be reset. Initialize it. Failure from 3881 * prereset makes libata abort whole reset sequence and give up 3882 * that port, so prereset should be best-effort. It does its 3883 * best to prepare for reset sequence but if things go wrong, it 3884 * should just whine, not fail. 3885 * 3886 * LOCKING: 3887 * Kernel thread context (may sleep) 3888 * 3889 * RETURNS: 3890 * 0 on success, -errno otherwise. 3891 */ 3892 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 3893 { 3894 struct ata_port *ap = link->ap; 3895 struct ata_eh_context *ehc = &link->eh_context; 3896 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3897 int rc; 3898 3899 /* if we're about to do hardreset, nothing more to do */ 3900 if (ehc->i.action & ATA_EH_HARDRESET) 3901 return 0; 3902 3903 /* if SATA, resume link */ 3904 if (ap->flags & ATA_FLAG_SATA) { 3905 rc = sata_link_resume(link, timing, deadline); 3906 /* whine about phy resume failure but proceed */ 3907 if (rc && rc != -EOPNOTSUPP) 3908 ata_link_warn(link, 3909 "failed to resume link for reset (errno=%d)\n", 3910 rc); 3911 } 3912 3913 /* no point in trying softreset on offline link */ 3914 if (ata_phys_link_offline(link)) 3915 ehc->i.action &= ~ATA_EH_SOFTRESET; 3916 3917 return 0; 3918 } 3919 3920 /** 3921 * sata_link_hardreset - reset link via SATA phy reset 3922 * @link: link to reset 3923 * @timing: timing parameters { interval, duration, timeout } in msec 3924 * @deadline: deadline jiffies for the operation 3925 * @online: optional out parameter indicating link onlineness 3926 * @check_ready: optional callback to check link readiness 3927 * 3928 * SATA phy-reset @link using DET bits of SControl register. 
3929 * After hardreset, link readiness is waited upon using 3930 * ata_wait_ready() if @check_ready is specified. LLDs are 3931 * allowed to not specify @check_ready and wait itself after this 3932 * function returns. Device classification is LLD's 3933 * responsibility. 3934 * 3935 * *@online is set to one iff reset succeeded and @link is online 3936 * after reset. 3937 * 3938 * LOCKING: 3939 * Kernel thread context (may sleep) 3940 * 3941 * RETURNS: 3942 * 0 on success, -errno otherwise. 3943 */ 3944 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 3945 unsigned long deadline, 3946 bool *online, int (*check_ready)(struct ata_link *)) 3947 { 3948 u32 scontrol; 3949 int rc; 3950 3951 DPRINTK("ENTER\n"); 3952 3953 if (online) 3954 *online = false; 3955 3956 if (sata_set_spd_needed(link)) { 3957 /* SATA spec says nothing about how to reconfigure 3958 * spd. To be on the safe side, turn off phy during 3959 * reconfiguration. This works for at least ICH7 AHCI 3960 * and Sil3124. 3961 */ 3962 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3963 goto out; 3964 3965 scontrol = (scontrol & 0x0f0) | 0x304; 3966 3967 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3968 goto out; 3969 3970 sata_set_spd(link); 3971 } 3972 3973 /* issue phy wake/reset */ 3974 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3975 goto out; 3976 3977 scontrol = (scontrol & 0x0f0) | 0x301; 3978 3979 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 3980 goto out; 3981 3982 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3983 * 10.4.2 says at least 1 ms. 3984 */ 3985 ata_msleep(link->ap, 1); 3986 3987 /* bring link back */ 3988 rc = sata_link_resume(link, timing, deadline); 3989 if (rc) 3990 goto out; 3991 /* if link is offline nothing more to do */ 3992 if (ata_phys_link_offline(link)) 3993 goto out; 3994 3995 /* Link is online. From this point, -ENODEV too is an error. */ 3996 if (online) 3997 *online = true; 3998 3999 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { 4000 /* If PMP is supported, we have to do follow-up SRST. 4001 * Some PMPs don't send D2H Reg FIS after hardreset if 4002 * the first port is empty. Wait only for 4003 * ATA_TMOUT_PMP_SRST_WAIT. 4004 */ 4005 if (check_ready) { 4006 unsigned long pmp_deadline; 4007 4008 pmp_deadline = ata_deadline(jiffies, 4009 ATA_TMOUT_PMP_SRST_WAIT); 4010 if (time_after(pmp_deadline, deadline)) 4011 pmp_deadline = deadline; 4012 ata_wait_ready(link, pmp_deadline, check_ready); 4013 } 4014 rc = -EAGAIN; 4015 goto out; 4016 } 4017 4018 rc = 0; 4019 if (check_ready) 4020 rc = ata_wait_ready(link, deadline, check_ready); 4021 out: 4022 if (rc && rc != -EAGAIN) { 4023 /* online is set iff link is online && reset succeeded */ 4024 if (online) 4025 *online = false; 4026 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); 4027 } 4028 DPRINTK("EXIT, rc=%d\n", rc); 4029 return rc; 4030 } 4031 4032 /** 4033 * sata_std_hardreset - COMRESET w/o waiting or classification 4034 * @link: link to reset 4035 * @class: resulting class of attached device 4036 * @deadline: deadline jiffies for the operation 4037 * 4038 * Standard SATA COMRESET w/o waiting or classification. 4039 * 4040 * LOCKING: 4041 * Kernel thread context (may sleep) 4042 * 4043 * RETURNS: 4044 * 0 if link offline, -EAGAIN if link online, -errno on errors. 
4045 */ 4046 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 4047 unsigned long deadline) 4048 { 4049 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 4050 bool online; 4051 int rc; 4052 4053 /* do hardreset */ 4054 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 4055 return online ? -EAGAIN : rc; 4056 } 4057 4058 /** 4059 * ata_std_postreset - standard postreset callback 4060 * @link: the target ata_link 4061 * @classes: classes of attached devices 4062 * 4063 * This function is invoked after a successful reset. Note that 4064 * the device might have been reset more than once using 4065 * different reset methods before postreset is invoked. 4066 * 4067 * LOCKING: 4068 * Kernel thread context (may sleep) 4069 */ 4070 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 4071 { 4072 u32 serror; 4073 4074 DPRINTK("ENTER\n"); 4075 4076 /* reset complete, clear SError */ 4077 if (!sata_scr_read(link, SCR_ERROR, &serror)) 4078 sata_scr_write(link, SCR_ERROR, serror); 4079 4080 /* print link status */ 4081 sata_print_link_status(link); 4082 4083 DPRINTK("EXIT\n"); 4084 } 4085 4086 /** 4087 * ata_dev_same_device - Determine whether new ID matches configured device 4088 * @dev: device to compare against 4089 * @new_class: class of the new device 4090 * @new_id: IDENTIFY page of the new device 4091 * 4092 * Compare @new_class and @new_id against @dev and determine 4093 * whether @dev is the device indicated by @new_class and 4094 * @new_id. 4095 * 4096 * LOCKING: 4097 * None. 4098 * 4099 * RETURNS: 4100 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 4101 */ 4102 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 4103 const u16 *new_id) 4104 { 4105 const u16 *old_id = dev->id; 4106 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 4107 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 4108 4109 if (dev->class != new_class) { 4110 ata_dev_info(dev, "class mismatch %d != %d\n", 4111 dev->class, new_class); 4112 return 0; 4113 } 4114 4115 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 4116 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 4117 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 4118 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 4119 4120 if (strcmp(model[0], model[1])) { 4121 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", 4122 model[0], model[1]); 4123 return 0; 4124 } 4125 4126 if (strcmp(serial[0], serial[1])) { 4127 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", 4128 serial[0], serial[1]); 4129 return 0; 4130 } 4131 4132 return 1; 4133 } 4134 4135 /** 4136 * ata_dev_reread_id - Re-read IDENTIFY data 4137 * @dev: target ATA device 4138 * @readid_flags: read ID flags 4139 * 4140 * Re-read IDENTIFY page and make sure @dev is still attached to 4141 * the port. 4142 * 4143 * LOCKING: 4144 * Kernel thread context (may sleep) 4145 * 4146 * RETURNS: 4147 * 0 on success, negative errno otherwise 4148 */ 4149 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 4150 { 4151 unsigned int class = dev->class; 4152 u16 *id = (void *)dev->link->ap->sector_buf; 4153 int rc; 4154 4155 /* read ID data */ 4156 rc = ata_dev_read_id(dev, &class, readid_flags, id); 4157 if (rc) 4158 return rc; 4159 4160 /* is the device still there? 
*/ 4161 if (!ata_dev_same_device(dev, class, id)) 4162 return -ENODEV; 4163 4164 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 4165 return 0; 4166 } 4167 4168 /** 4169 * ata_dev_revalidate - Revalidate ATA device 4170 * @dev: device to revalidate 4171 * @new_class: new class code 4172 * @readid_flags: read ID flags 4173 * 4174 * Re-read IDENTIFY page, make sure @dev is still attached to the 4175 * port and reconfigure it according to the new IDENTIFY page. 4176 * 4177 * LOCKING: 4178 * Kernel thread context (may sleep) 4179 * 4180 * RETURNS: 4181 * 0 on success, negative errno otherwise 4182 */ 4183 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 4184 unsigned int readid_flags) 4185 { 4186 u64 n_sectors = dev->n_sectors; 4187 u64 n_native_sectors = dev->n_native_sectors; 4188 int rc; 4189 4190 if (!ata_dev_enabled(dev)) 4191 return -ENODEV; 4192 4193 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 4194 if (ata_class_enabled(new_class) && 4195 new_class != ATA_DEV_ATA && 4196 new_class != ATA_DEV_ATAPI && 4197 new_class != ATA_DEV_ZAC && 4198 new_class != ATA_DEV_SEMB) { 4199 ata_dev_info(dev, "class mismatch %u != %u\n", 4200 dev->class, new_class); 4201 rc = -ENODEV; 4202 goto fail; 4203 } 4204 4205 /* re-read ID */ 4206 rc = ata_dev_reread_id(dev, readid_flags); 4207 if (rc) 4208 goto fail; 4209 4210 /* configure device according to the new ID */ 4211 rc = ata_dev_configure(dev); 4212 if (rc) 4213 goto fail; 4214 4215 /* verify n_sectors hasn't changed */ 4216 if (dev->class != ATA_DEV_ATA || !n_sectors || 4217 dev->n_sectors == n_sectors) 4218 return 0; 4219 4220 /* n_sectors has changed */ 4221 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", 4222 (unsigned long long)n_sectors, 4223 (unsigned long long)dev->n_sectors); 4224 4225 /* 4226 * Something could have caused HPA to be unlocked 4227 * involuntarily. If n_native_sectors hasn't changed and the 4228 * new size matches it, keep the device. 4229 */ 4230 if (dev->n_native_sectors == n_native_sectors && 4231 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 4232 ata_dev_warn(dev, 4233 "new n_sectors matches native, probably " 4234 "late HPA unlock, n_sectors updated\n"); 4235 /* use the larger n_sectors */ 4236 return 0; 4237 } 4238 4239 /* 4240 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try 4241 * unlocking HPA in those cases. 
4242 * 4243 * https://bugzilla.kernel.org/show_bug.cgi?id=15396 4244 */ 4245 if (dev->n_native_sectors == n_native_sectors && 4246 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && 4247 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { 4248 ata_dev_warn(dev, 4249 "old n_sectors matches native, probably " 4250 "late HPA lock, will try to unlock HPA\n"); 4251 /* try unlocking HPA */ 4252 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 4253 rc = -EIO; 4254 } else 4255 rc = -ENODEV; 4256 4257 /* restore original n_[native_]sectors and fail */ 4258 dev->n_native_sectors = n_native_sectors; 4259 dev->n_sectors = n_sectors; 4260 fail: 4261 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); 4262 return rc; 4263 } 4264 4265 struct ata_blacklist_entry { 4266 const char *model_num; 4267 const char *model_rev; 4268 unsigned long horkage; 4269 }; 4270 4271 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4272 /* Devices with DMA related problems under Linux */ 4273 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4274 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4275 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4276 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4277 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4278 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4279 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4280 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4281 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4282 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, 4283 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4284 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4285 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4286 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4287 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4288 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, 4289 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4290 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4291 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4292 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4293 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4294 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4295 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4296 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4297 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4298 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4299 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4300 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4301 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4302 { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA }, 4303 /* Odd clown on sil3726/4726 PMPs */ 4304 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4305 4306 /* Weird ATAPI devices */ 4307 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4308 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4309 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4310 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4311 4312 /* 4313 * Causes silent data corruption with higher max sects. 4314 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com 4315 */ 4316 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 }, 4317 4318 /* 4319 * Device times out with higher max sects. 
4320 * https://bugzilla.kernel.org/show_bug.cgi?id=121671 4321 */ 4322 { "LITEON CX1-JB256-HP", NULL, ATA_HORKAGE_MAX_SEC_1024 }, 4323 4324 /* Devices we expect to fail diagnostics */ 4325 4326 /* Devices where NCQ should be avoided */ 4327 /* NCQ is slow */ 4328 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4329 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4330 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4331 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4332 /* NCQ is broken */ 4333 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4334 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4335 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4336 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4337 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4338 4339 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4340 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4341 ATA_HORKAGE_FIRMWARE_WARN }, 4342 4343 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4344 ATA_HORKAGE_FIRMWARE_WARN }, 4345 4346 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4347 ATA_HORKAGE_FIRMWARE_WARN }, 4348 4349 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4350 ATA_HORKAGE_FIRMWARE_WARN }, 4351 4352 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */ 4353 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4354 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4355 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4356 4357 /* Blacklist entries taken from Silicon Image 3124/3132 4358 Windows driver .inf file - also several Linux problem reports */ 4359 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4360 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4361 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4362 4363 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4364 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4365 4366 /* devices which puke on READ_NATIVE_MAX */ 4367 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4368 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4369 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4370 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4371 4372 /* this one allows HPA unlocking but fails IOs on the area */ 4373 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, 4374 4375 /* Devices which report 1 sector over size HPA */ 4376 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4377 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4378 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4379 4380 /* Devices which get the IVB wrong */ 4381 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4382 /* Maybe we should just blacklist TSSTcorp... */ 4383 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, }, 4384 4385 /* Devices that do not need bridging limits applied */ 4386 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4387 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4388 4389 /* Devices which aren't very happy with higher link speeds */ 4390 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4391 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, }, 4392 4393 /* 4394 * Devices which choke on SETXFER. Applies only if both the 4395 * device and controller are SATA. 
4396 */ 4397 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, 4398 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, 4399 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, 4400 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4401 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4402 4403 /* devices that don't properly handle queued TRIM commands */ 4404 { "Micron_M500_*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4405 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4406 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4407 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4408 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4409 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4410 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4411 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4412 { "Crucial_CT*MX100*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4413 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4414 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4415 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4416 { "FCCT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4417 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4418 4419 /* devices that don't properly handle TRIM commands */ 4420 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, }, 4421 4422 /* 4423 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4424 * (Return Zero After Trim) flags in the ATA Command Set are 4425 * unreliable in the sense that they only define what happens if 4426 * the device successfully executed the DSM TRIM command. TRIM 4427 * is only advisory, however, and the device is free to silently 4428 * ignore all or parts of the request. 4429 * 4430 * Whitelist drives that are known to reliably return zeroes 4431 * after TRIM. 4432 */ 4433 4434 /* 4435 * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude 4436 * that model before whitelisting all other intel SSDs. 4437 */ 4438 { "INTEL*SSDSC2MH*", NULL, 0, }, 4439 4440 { "Micron*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4441 { "Crucial*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4442 { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4443 { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4444 { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4445 { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4446 { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4447 4448 /* 4449 * Some WD SATA-I drives spin up and down erratically when the link 4450 * is put into the slumber mode. We don't have full list of the 4451 * affected devices. Disable LPM if the device matches one of the 4452 * known prefixes and is SATA-1. As a side effect LPM partial is 4453 * lost too. 
4454 * 4455 * https://bugzilla.kernel.org/show_bug.cgi?id=57211 4456 */ 4457 { "WDC WD800JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4458 { "WDC WD1200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4459 { "WDC WD1600JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4460 { "WDC WD2000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4461 { "WDC WD2500JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4462 { "WDC WD3000JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4463 { "WDC WD3200JD-*", NULL, ATA_HORKAGE_WD_BROKEN_LPM }, 4464 4465 /* End Marker */ 4466 { } 4467 }; 4468 4469 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4470 { 4471 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4472 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4473 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4474 4475 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4476 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4477 4478 while (ad->model_num) { 4479 if (glob_match(ad->model_num, model_num)) { 4480 if (ad->model_rev == NULL) 4481 return ad->horkage; 4482 if (glob_match(ad->model_rev, model_rev)) 4483 return ad->horkage; 4484 } 4485 ad++; 4486 } 4487 return 0; 4488 } 4489 4490 static int ata_dma_blacklisted(const struct ata_device *dev) 4491 { 4492 /* We don't support polling DMA. 4493 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4494 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4495 */ 4496 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4497 (dev->flags & ATA_DFLAG_CDB_INTR)) 4498 return 1; 4499 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4500 } 4501 4502 /** 4503 * ata_is_40wire - check drive side detection 4504 * @dev: device 4505 * 4506 * Perform drive side detection decoding, allowing for device vendors 4507 * who can't follow the documentation. 4508 */ 4509 4510 static int ata_is_40wire(struct ata_device *dev) 4511 { 4512 if (dev->horkage & ATA_HORKAGE_IVB) 4513 return ata_drive_40wire_relaxed(dev->id); 4514 return ata_drive_40wire(dev->id); 4515 } 4516 4517 /** 4518 * cable_is_40wire - 40/80/SATA decider 4519 * @ap: port to consider 4520 * 4521 * This function encapsulates the policy for speed management 4522 * in one place. At the moment we don't cache the result but 4523 * there is a good case for setting ap->cbl to the result when 4524 * we are called with unknown cables (and figuring out if it 4525 * impacts hotplug at all). 4526 * 4527 * Return 1 if the cable appears to be 40 wire. 4528 */ 4529 4530 static int cable_is_40wire(struct ata_port *ap) 4531 { 4532 struct ata_link *link; 4533 struct ata_device *dev; 4534 4535 /* If the controller thinks we are 40 wire, we are. */ 4536 if (ap->cbl == ATA_CBL_PATA40) 4537 return 1; 4538 4539 /* If the controller thinks we are 80 wire, we are. */ 4540 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4541 return 0; 4542 4543 /* If the system is known to be 40 wire short cable (eg 4544 * laptop), then we allow 80 wire modes even if the drive 4545 * isn't sure. 4546 */ 4547 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4548 return 0; 4549 4550 /* If the controller doesn't know, we scan. 4551 * 4552 * Note: We look for all 40 wire detects at this point. 
Any 4553 * 80 wire detect is taken to be 80 wire cable because 4554 * - in many setups only the one drive (slave if present) will 4555 * give a valid detect 4556 * - if you have a non detect capable drive you don't want it 4557 * to colour the choice 4558 */ 4559 ata_for_each_link(link, ap, EDGE) { 4560 ata_for_each_dev(dev, link, ENABLED) { 4561 if (!ata_is_40wire(dev)) 4562 return 0; 4563 } 4564 } 4565 return 1; 4566 } 4567 4568 /** 4569 * ata_dev_xfermask - Compute supported xfermask of the given device 4570 * @dev: Device to compute xfermask for 4571 * 4572 * Compute supported xfermask of @dev and store it in 4573 * dev->*_mask. This function is responsible for applying all 4574 * known limits including host controller limits, device 4575 * blacklist, etc... 4576 * 4577 * LOCKING: 4578 * None. 4579 */ 4580 static void ata_dev_xfermask(struct ata_device *dev) 4581 { 4582 struct ata_link *link = dev->link; 4583 struct ata_port *ap = link->ap; 4584 struct ata_host *host = ap->host; 4585 unsigned long xfer_mask; 4586 4587 /* controller modes available */ 4588 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4589 ap->mwdma_mask, ap->udma_mask); 4590 4591 /* drive modes available */ 4592 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4593 dev->mwdma_mask, dev->udma_mask); 4594 xfer_mask &= ata_id_xfermask(dev->id); 4595 4596 /* 4597 * CFA Advanced TrueIDE timings are not allowed on a shared 4598 * cable 4599 */ 4600 if (ata_dev_pair(dev)) { 4601 /* No PIO5 or PIO6 */ 4602 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4603 /* No MWDMA3 or MWDMA 4 */ 4604 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4605 } 4606 4607 if (ata_dma_blacklisted(dev)) { 4608 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4609 ata_dev_warn(dev, 4610 "device is on DMA blacklist, disabling DMA\n"); 4611 } 4612 4613 if ((host->flags & ATA_HOST_SIMPLEX) && 4614 host->simplex_claimed && host->simplex_claimed != ap) { 4615 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4616 ata_dev_warn(dev, 4617 "simplex DMA is claimed by other device, disabling DMA\n"); 4618 } 4619 4620 if (ap->flags & ATA_FLAG_NO_IORDY) 4621 xfer_mask &= ata_pio_mask_no_iordy(dev); 4622 4623 if (ap->ops->mode_filter) 4624 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4625 4626 /* Apply cable rule here. Don't apply it early because when 4627 * we handle hot plug the cable type can itself change. 4628 * Check this last so that we know if the transfer rate was 4629 * solely limited by the cable. 4630 * Unknown or 80 wire cables reported host side are checked 4631 * drive side as well. Cases where we know a 40wire cable 4632 * is used safely for 80 are not checked here. 4633 */ 4634 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4635 /* UDMA/44 or higher would be available */ 4636 if (cable_is_40wire(ap)) { 4637 ata_dev_warn(dev, 4638 "limited to UDMA/33 due to 40-wire cable\n"); 4639 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4640 } 4641 4642 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4643 &dev->mwdma_mask, &dev->udma_mask); 4644 } 4645 4646 /** 4647 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4648 * @dev: Device to which command will be sent 4649 * 4650 * Issue SET FEATURES - XFER MODE command to device @dev 4651 * on port @ap. 4652 * 4653 * LOCKING: 4654 * PCI/etc. bus probe sem. 4655 * 4656 * RETURNS: 4657 * 0 on success, AC_ERR_* mask otherwise. 
4658 */ 4659 4660 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4661 { 4662 struct ata_taskfile tf; 4663 unsigned int err_mask; 4664 4665 /* set up set-features taskfile */ 4666 DPRINTK("set features - xfer mode\n"); 4667 4668 /* Some controllers and ATAPI devices show flaky interrupt 4669 * behavior after setting xfer mode. Use polling instead. 4670 */ 4671 ata_tf_init(dev, &tf); 4672 tf.command = ATA_CMD_SET_FEATURES; 4673 tf.feature = SETFEATURES_XFER; 4674 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4675 tf.protocol = ATA_PROT_NODATA; 4676 /* If we are using IORDY we must send the mode setting command */ 4677 if (ata_pio_need_iordy(dev)) 4678 tf.nsect = dev->xfer_mode; 4679 /* If the device has IORDY and the controller does not - turn it off */ 4680 else if (ata_id_has_iordy(dev->id)) 4681 tf.nsect = 0x01; 4682 else /* In the ancient relic department - skip all of this */ 4683 return 0; 4684 4685 /* On some disks, this command causes spin-up, so we need longer timeout */ 4686 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000); 4687 4688 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4689 return err_mask; 4690 } 4691 4692 /** 4693 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4694 * @dev: Device to which command will be sent 4695 * @enable: Whether to enable or disable the feature 4696 * @feature: The sector count represents the feature to set 4697 * 4698 * Issue SET FEATURES - SATA FEATURES command to device @dev 4699 * on port @ap with sector count 4700 * 4701 * LOCKING: 4702 * PCI/etc. bus probe sem. 4703 * 4704 * RETURNS: 4705 * 0 on success, AC_ERR_* mask otherwise. 4706 */ 4707 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) 4708 { 4709 struct ata_taskfile tf; 4710 unsigned int err_mask; 4711 unsigned long timeout = 0; 4712 4713 /* set up set-features taskfile */ 4714 DPRINTK("set features - SATA features\n"); 4715 4716 ata_tf_init(dev, &tf); 4717 tf.command = ATA_CMD_SET_FEATURES; 4718 tf.feature = enable; 4719 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4720 tf.protocol = ATA_PROT_NODATA; 4721 tf.nsect = feature; 4722 4723 if (enable == SETFEATURES_SPINUP) 4724 timeout = ata_probe_timeout ? 4725 ata_probe_timeout * 1000 : SETFEATURES_SPINUP_TIMEOUT; 4726 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, timeout); 4727 4728 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4729 return err_mask; 4730 } 4731 EXPORT_SYMBOL_GPL(ata_dev_set_feature); 4732 4733 /** 4734 * ata_dev_init_params - Issue INIT DEV PARAMS command 4735 * @dev: Device to which command will be sent 4736 * @heads: Number of heads (taskfile parameter) 4737 * @sectors: Number of sectors (taskfile parameter) 4738 * 4739 * LOCKING: 4740 * Kernel thread context (may sleep) 4741 * 4742 * RETURNS: 4743 * 0 on success, AC_ERR_* mask otherwise. 4744 */ 4745 static unsigned int ata_dev_init_params(struct ata_device *dev, 4746 u16 heads, u16 sectors) 4747 { 4748 struct ata_taskfile tf; 4749 unsigned int err_mask; 4750 4751 /* Number of sectors per track 1-255. Number of heads 1-16 */ 4752 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4753 return AC_ERR_INVALID; 4754 4755 /* set up init dev params taskfile */ 4756 DPRINTK("init dev params \n"); 4757 4758 ata_tf_init(dev, &tf); 4759 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4760 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4761 tf.protocol = ATA_PROT_NODATA; 4762 tf.nsect = sectors; 4763 tf.device |= (heads - 1) & 0x0f; /* max head = num. 
of heads - 1 */ 4764 4765 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4766 /* A clean abort indicates an original or just out of spec drive 4767 and we should continue as we issue the setup based on the 4768 drive reported working geometry */ 4769 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4770 err_mask = 0; 4771 4772 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4773 return err_mask; 4774 } 4775 4776 /** 4777 * ata_sg_clean - Unmap DMA memory associated with command 4778 * @qc: Command containing DMA memory to be released 4779 * 4780 * Unmap all mapped DMA memory associated with this command. 4781 * 4782 * LOCKING: 4783 * spin_lock_irqsave(host lock) 4784 */ 4785 void ata_sg_clean(struct ata_queued_cmd *qc) 4786 { 4787 struct ata_port *ap = qc->ap; 4788 struct scatterlist *sg = qc->sg; 4789 int dir = qc->dma_dir; 4790 4791 WARN_ON_ONCE(sg == NULL); 4792 4793 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4794 4795 if (qc->n_elem) 4796 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4797 4798 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4799 qc->sg = NULL; 4800 } 4801 4802 /** 4803 * atapi_check_dma - Check whether ATAPI DMA can be supported 4804 * @qc: Metadata associated with taskfile to check 4805 * 4806 * Allow low-level driver to filter ATA PACKET commands, returning 4807 * a status indicating whether or not it is OK to use DMA for the 4808 * supplied PACKET command. 4809 * 4810 * LOCKING: 4811 * spin_lock_irqsave(host lock) 4812 * 4813 * RETURNS: 0 when ATAPI DMA can be used 4814 * nonzero otherwise 4815 */ 4816 int atapi_check_dma(struct ata_queued_cmd *qc) 4817 { 4818 struct ata_port *ap = qc->ap; 4819 4820 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4821 * few ATAPI devices choke on such DMA requests. 4822 */ 4823 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4824 unlikely(qc->nbytes & 15)) 4825 return 1; 4826 4827 if (ap->ops->check_atapi_dma) 4828 return ap->ops->check_atapi_dma(qc); 4829 4830 return 0; 4831 } 4832 4833 /** 4834 * ata_std_qc_defer - Check whether a qc needs to be deferred 4835 * @qc: ATA command in question 4836 * 4837 * Non-NCQ commands cannot run with any other command, NCQ or 4838 * not. As upper layer only knows the queue depth, we are 4839 * responsible for maintaining exclusion. This function checks 4840 * whether a new command @qc can be issued. 4841 * 4842 * LOCKING: 4843 * spin_lock_irqsave(host lock) 4844 * 4845 * RETURNS: 4846 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4847 */ 4848 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4849 { 4850 struct ata_link *link = qc->dev->link; 4851 4852 if (ata_is_ncq(qc->tf.protocol)) { 4853 if (!ata_tag_valid(link->active_tag)) 4854 return 0; 4855 } else { 4856 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4857 return 0; 4858 } 4859 4860 return ATA_DEFER_LINK; 4861 } 4862 4863 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4864 4865 /** 4866 * ata_sg_init - Associate command with scatter-gather table. 4867 * @qc: Command to be associated 4868 * @sg: Scatter-gather table. 4869 * @n_elem: Number of elements in s/g table. 4870 * 4871 * Initialize the data-related elements of queued_cmd @qc 4872 * to point to a scatter-gather table @sg, containing @n_elem 4873 * elements. 
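 *
 * Purely as an illustrative sketch (not part of this API's contract),
 * a caller translating a SCSI command would typically hand over the
 * midlayer's table, roughly:
 *
 *	ata_sg_init(qc, scsi_sglist(scmd), scsi_sg_count(scmd));
 *	qc->dma_dir = scmd->sc_data_direction;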
4874 * 4875 * LOCKING: 4876 * spin_lock_irqsave(host lock) 4877 */ 4878 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4879 unsigned int n_elem) 4880 { 4881 qc->sg = sg; 4882 qc->n_elem = n_elem; 4883 qc->cursg = qc->sg; 4884 } 4885 4886 /** 4887 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4888 * @qc: Command with scatter-gather table to be mapped. 4889 * 4890 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4891 * 4892 * LOCKING: 4893 * spin_lock_irqsave(host lock) 4894 * 4895 * RETURNS: 4896 * Zero on success, negative on error. 4897 * 4898 */ 4899 static int ata_sg_setup(struct ata_queued_cmd *qc) 4900 { 4901 struct ata_port *ap = qc->ap; 4902 unsigned int n_elem; 4903 4904 VPRINTK("ENTER, ata%u\n", ap->print_id); 4905 4906 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4907 if (n_elem < 1) 4908 return -1; 4909 4910 DPRINTK("%d sg elements mapped\n", n_elem); 4911 qc->orig_n_elem = qc->n_elem; 4912 qc->n_elem = n_elem; 4913 qc->flags |= ATA_QCFLAG_DMAMAP; 4914 4915 return 0; 4916 } 4917 4918 /** 4919 * swap_buf_le16 - swap halves of 16-bit words in place 4920 * @buf: Buffer to swap 4921 * @buf_words: Number of 16-bit words in buffer. 4922 * 4923 * Swap halves of 16-bit words if needed to convert from 4924 * little-endian byte order to native cpu byte order, or 4925 * vice-versa. 4926 * 4927 * LOCKING: 4928 * Inherited from caller. 4929 */ 4930 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4931 { 4932 #ifdef __BIG_ENDIAN 4933 unsigned int i; 4934 4935 for (i = 0; i < buf_words; i++) 4936 buf[i] = le16_to_cpu(buf[i]); 4937 #endif /* __BIG_ENDIAN */ 4938 } 4939 4940 /** 4941 * ata_qc_new_init - Request an available ATA command, and initialize it 4942 * @dev: Device from whom we request an available command structure 4943 * @tag: tag 4944 * 4945 * LOCKING: 4946 * None. 4947 */ 4948 4949 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag) 4950 { 4951 struct ata_port *ap = dev->link->ap; 4952 struct ata_queued_cmd *qc; 4953 4954 /* no command while frozen */ 4955 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4956 return NULL; 4957 4958 /* libsas case */ 4959 if (ap->flags & ATA_FLAG_SAS_HOST) { 4960 tag = ata_sas_allocate_tag(ap); 4961 if (tag < 0) 4962 return NULL; 4963 } 4964 4965 qc = __ata_qc_from_tag(ap, tag); 4966 qc->tag = tag; 4967 qc->scsicmd = NULL; 4968 qc->ap = ap; 4969 qc->dev = dev; 4970 4971 ata_qc_reinit(qc); 4972 4973 return qc; 4974 } 4975 4976 /** 4977 * ata_qc_free - free unused ata_queued_cmd 4978 * @qc: Command to complete 4979 * 4980 * Designed to free unused ata_queued_cmd object 4981 * in case something prevents using it. 
4982 * 4983 * LOCKING: 4984 * spin_lock_irqsave(host lock) 4985 */ 4986 void ata_qc_free(struct ata_queued_cmd *qc) 4987 { 4988 struct ata_port *ap; 4989 unsigned int tag; 4990 4991 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4992 ap = qc->ap; 4993 4994 qc->flags = 0; 4995 tag = qc->tag; 4996 if (likely(ata_tag_valid(tag))) { 4997 qc->tag = ATA_TAG_POISON; 4998 if (ap->flags & ATA_FLAG_SAS_HOST) 4999 ata_sas_free_tag(tag, ap); 5000 } 5001 } 5002 5003 void __ata_qc_complete(struct ata_queued_cmd *qc) 5004 { 5005 struct ata_port *ap; 5006 struct ata_link *link; 5007 5008 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 5009 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 5010 ap = qc->ap; 5011 link = qc->dev->link; 5012 5013 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 5014 ata_sg_clean(qc); 5015 5016 /* command should be marked inactive atomically with qc completion */ 5017 if (ata_is_ncq(qc->tf.protocol)) { 5018 link->sactive &= ~(1 << qc->tag); 5019 if (!link->sactive) 5020 ap->nr_active_links--; 5021 } else { 5022 link->active_tag = ATA_TAG_POISON; 5023 ap->nr_active_links--; 5024 } 5025 5026 /* clear exclusive status */ 5027 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 5028 ap->excl_link == link)) 5029 ap->excl_link = NULL; 5030 5031 /* atapi: mark qc as inactive to prevent the interrupt handler 5032 * from completing the command twice later, before the error handler 5033 * is called. (when rc != 0 and atapi request sense is needed) 5034 */ 5035 qc->flags &= ~ATA_QCFLAG_ACTIVE; 5036 ap->qc_active &= ~(1 << qc->tag); 5037 5038 /* call completion callback */ 5039 qc->complete_fn(qc); 5040 } 5041 5042 static void fill_result_tf(struct ata_queued_cmd *qc) 5043 { 5044 struct ata_port *ap = qc->ap; 5045 5046 qc->result_tf.flags = qc->tf.flags; 5047 ap->ops->qc_fill_rtf(qc); 5048 } 5049 5050 static void ata_verify_xfer(struct ata_queued_cmd *qc) 5051 { 5052 struct ata_device *dev = qc->dev; 5053 5054 if (!ata_is_data(qc->tf.protocol)) 5055 return; 5056 5057 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 5058 return; 5059 5060 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 5061 } 5062 5063 /** 5064 * ata_qc_complete - Complete an active ATA command 5065 * @qc: Command to complete 5066 * 5067 * Indicate to the mid and upper layers that an ATA command has 5068 * completed, with either an ok or not-ok status. 5069 * 5070 * Refrain from calling this function multiple times when 5071 * successfully completing multiple NCQ commands. 5072 * ata_qc_complete_multiple() should be used instead, which will 5073 * properly update IRQ expect state. 5074 * 5075 * LOCKING: 5076 * spin_lock_irqsave(host lock) 5077 */ 5078 void ata_qc_complete(struct ata_queued_cmd *qc) 5079 { 5080 struct ata_port *ap = qc->ap; 5081 5082 /* Trigger the LED (if available) */ 5083 ledtrig_disk_activity(); 5084 5085 /* XXX: New EH and old EH use different mechanisms to 5086 * synchronize EH with regular execution path. 5087 * 5088 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 5089 * Normal execution path is responsible for not accessing a 5090 * failed qc. libata core enforces the rule by returning NULL 5091 * from ata_qc_from_tag() for failed qcs. 5092 * 5093 * Old EH depends on ata_qc_complete() nullifying completion 5094 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 5095 * not synchronize with interrupt handler. Only PIO task is 5096 * taken care of. 
5097 */ 5098 if (ap->ops->error_handler) { 5099 struct ata_device *dev = qc->dev; 5100 struct ata_eh_info *ehi = &dev->link->eh_info; 5101 5102 if (unlikely(qc->err_mask)) 5103 qc->flags |= ATA_QCFLAG_FAILED; 5104 5105 /* 5106 * Finish internal commands without any further processing 5107 * and always with the result TF filled. 5108 */ 5109 if (unlikely(ata_tag_internal(qc->tag))) { 5110 fill_result_tf(qc); 5111 trace_ata_qc_complete_internal(qc); 5112 __ata_qc_complete(qc); 5113 return; 5114 } 5115 5116 /* 5117 * Non-internal qc has failed. Fill the result TF and 5118 * summon EH. 5119 */ 5120 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 5121 fill_result_tf(qc); 5122 trace_ata_qc_complete_failed(qc); 5123 ata_qc_schedule_eh(qc); 5124 return; 5125 } 5126 5127 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 5128 5129 /* read result TF if requested */ 5130 if (qc->flags & ATA_QCFLAG_RESULT_TF) 5131 fill_result_tf(qc); 5132 5133 trace_ata_qc_complete_done(qc); 5134 /* Some commands need post-processing after successful 5135 * completion. 5136 */ 5137 switch (qc->tf.command) { 5138 case ATA_CMD_SET_FEATURES: 5139 if (qc->tf.feature != SETFEATURES_WC_ON && 5140 qc->tf.feature != SETFEATURES_WC_OFF && 5141 qc->tf.feature != SETFEATURES_RA_ON && 5142 qc->tf.feature != SETFEATURES_RA_OFF) 5143 break; 5144 /* fall through */ 5145 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 5146 case ATA_CMD_SET_MULTI: /* multi_count changed */ 5147 /* revalidate device */ 5148 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 5149 ata_port_schedule_eh(ap); 5150 break; 5151 5152 case ATA_CMD_SLEEP: 5153 dev->flags |= ATA_DFLAG_SLEEPING; 5154 break; 5155 } 5156 5157 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 5158 ata_verify_xfer(qc); 5159 5160 __ata_qc_complete(qc); 5161 } else { 5162 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 5163 return; 5164 5165 /* read result TF if failed or requested */ 5166 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 5167 fill_result_tf(qc); 5168 5169 __ata_qc_complete(qc); 5170 } 5171 } 5172 5173 /** 5174 * ata_qc_complete_multiple - Complete multiple qcs successfully 5175 * @ap: port in question 5176 * @qc_active: new qc_active mask 5177 * 5178 * Complete in-flight commands. This function is meant to be 5179 * called from low-level driver's interrupt routine to complete 5180 * requests normally. ap->qc_active and @qc_active are compared 5181 * and commands are completed accordingly. 5182 * 5183 * Always use this function when completing multiple NCQ commands 5184 * from IRQ handlers instead of calling ata_qc_complete() 5185 * multiple times to keep IRQ expect status properly in sync. 5186 * 5187 * LOCKING: 5188 * spin_lock_irqsave(host lock) 5189 * 5190 * RETURNS: 5191 * Number of completed commands on success, -errno otherwise.
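 *
 * Illustrative call pattern from an LLD interrupt handler; this is a
 * sketch only and my_read_active_tags() is a hypothetical read of the
 * controller's still-outstanding tag bitmap:
 *
 *	spin_lock_irqsave(ap->lock, flags);
 *	qc_active = my_read_active_tags(ap);
 *	ata_qc_complete_multiple(ap, qc_active);
 *	spin_unlock_irqrestore(ap->lock, flags);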
5192 */ 5193 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 5194 { 5195 int nr_done = 0; 5196 u32 done_mask; 5197 5198 done_mask = ap->qc_active ^ qc_active; 5199 5200 if (unlikely(done_mask & qc_active)) { 5201 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", 5202 ap->qc_active, qc_active); 5203 return -EINVAL; 5204 } 5205 5206 while (done_mask) { 5207 struct ata_queued_cmd *qc; 5208 unsigned int tag = __ffs(done_mask); 5209 5210 qc = ata_qc_from_tag(ap, tag); 5211 if (qc) { 5212 ata_qc_complete(qc); 5213 nr_done++; 5214 } 5215 done_mask &= ~(1 << tag); 5216 } 5217 5218 return nr_done; 5219 } 5220 5221 /** 5222 * ata_qc_issue - issue taskfile to device 5223 * @qc: command to issue to device 5224 * 5225 * Prepare an ATA command to submission to device. 5226 * This includes mapping the data into a DMA-able 5227 * area, filling in the S/G table, and finally 5228 * writing the taskfile to hardware, starting the command. 5229 * 5230 * LOCKING: 5231 * spin_lock_irqsave(host lock) 5232 */ 5233 void ata_qc_issue(struct ata_queued_cmd *qc) 5234 { 5235 struct ata_port *ap = qc->ap; 5236 struct ata_link *link = qc->dev->link; 5237 u8 prot = qc->tf.protocol; 5238 5239 /* Make sure only one non-NCQ command is outstanding. The 5240 * check is skipped for old EH because it reuses active qc to 5241 * request ATAPI sense. 5242 */ 5243 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5244 5245 if (ata_is_ncq(prot)) { 5246 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 5247 5248 if (!link->sactive) 5249 ap->nr_active_links++; 5250 link->sactive |= 1 << qc->tag; 5251 } else { 5252 WARN_ON_ONCE(link->sactive); 5253 5254 ap->nr_active_links++; 5255 link->active_tag = qc->tag; 5256 } 5257 5258 qc->flags |= ATA_QCFLAG_ACTIVE; 5259 ap->qc_active |= 1 << qc->tag; 5260 5261 /* 5262 * We guarantee to LLDs that they will have at least one 5263 * non-zero sg if the command is a data command. 5264 */ 5265 if (WARN_ON_ONCE(ata_is_data(prot) && 5266 (!qc->sg || !qc->n_elem || !qc->nbytes))) 5267 goto sys_err; 5268 5269 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5270 (ap->flags & ATA_FLAG_PIO_DMA))) 5271 if (ata_sg_setup(qc)) 5272 goto sys_err; 5273 5274 /* if device is sleeping, schedule reset and abort the link */ 5275 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5276 link->eh_info.action |= ATA_EH_RESET; 5277 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5278 ata_link_abort(link); 5279 return; 5280 } 5281 5282 ap->ops->qc_prep(qc); 5283 trace_ata_qc_issue(qc); 5284 qc->err_mask |= ap->ops->qc_issue(qc); 5285 if (unlikely(qc->err_mask)) 5286 goto err; 5287 return; 5288 5289 sys_err: 5290 qc->err_mask |= AC_ERR_SYSTEM; 5291 err: 5292 ata_qc_complete(qc); 5293 } 5294 5295 /** 5296 * sata_scr_valid - test whether SCRs are accessible 5297 * @link: ATA link to test SCR accessibility for 5298 * 5299 * Test whether SCRs are accessible for @link. 5300 * 5301 * LOCKING: 5302 * None. 5303 * 5304 * RETURNS: 5305 * 1 if SCRs are accessible, 0 otherwise. 5306 */ 5307 int sata_scr_valid(struct ata_link *link) 5308 { 5309 struct ata_port *ap = link->ap; 5310 5311 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5312 } 5313 5314 /** 5315 * sata_scr_read - read SCR register of the specified port 5316 * @link: ATA link to read SCR for 5317 * @reg: SCR to read 5318 * @val: Place to store read value 5319 * 5320 * Read SCR register @reg of @link into *@val. 
This function is 5321 * guaranteed to succeed if @link is ap->link, the cable type of 5322 * the port is SATA and the port implements ->scr_read. 5323 * 5324 * LOCKING: 5325 * None if @link is ap->link. Kernel thread context otherwise. 5326 * 5327 * RETURNS: 5328 * 0 on success, negative errno on failure. 5329 */ 5330 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5331 { 5332 if (ata_is_host_link(link)) { 5333 if (sata_scr_valid(link)) 5334 return link->ap->ops->scr_read(link, reg, val); 5335 return -EOPNOTSUPP; 5336 } 5337 5338 return sata_pmp_scr_read(link, reg, val); 5339 } 5340 5341 /** 5342 * sata_scr_write - write SCR register of the specified port 5343 * @link: ATA link to write SCR for 5344 * @reg: SCR to write 5345 * @val: value to write 5346 * 5347 * Write @val to SCR register @reg of @link. This function is 5348 * guaranteed to succeed if @link is ap->link, the cable type of 5349 * the port is SATA and the port implements ->scr_read. 5350 * 5351 * LOCKING: 5352 * None if @link is ap->link. Kernel thread context otherwise. 5353 * 5354 * RETURNS: 5355 * 0 on success, negative errno on failure. 5356 */ 5357 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5358 { 5359 if (ata_is_host_link(link)) { 5360 if (sata_scr_valid(link)) 5361 return link->ap->ops->scr_write(link, reg, val); 5362 return -EOPNOTSUPP; 5363 } 5364 5365 return sata_pmp_scr_write(link, reg, val); 5366 } 5367 5368 /** 5369 * sata_scr_write_flush - write SCR register of the specified port and flush 5370 * @link: ATA link to write SCR for 5371 * @reg: SCR to write 5372 * @val: value to write 5373 * 5374 * This function is identical to sata_scr_write() except that this 5375 * function performs flush after writing to the register. 5376 * 5377 * LOCKING: 5378 * None if @link is ap->link. Kernel thread context otherwise. 5379 * 5380 * RETURNS: 5381 * 0 on success, negative errno on failure. 5382 */ 5383 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5384 { 5385 if (ata_is_host_link(link)) { 5386 int rc; 5387 5388 if (sata_scr_valid(link)) { 5389 rc = link->ap->ops->scr_write(link, reg, val); 5390 if (rc == 0) 5391 rc = link->ap->ops->scr_read(link, reg, &val); 5392 return rc; 5393 } 5394 return -EOPNOTSUPP; 5395 } 5396 5397 return sata_pmp_scr_write(link, reg, val); 5398 } 5399 5400 /** 5401 * ata_phys_link_online - test whether the given link is online 5402 * @link: ATA link to test 5403 * 5404 * Test whether @link is online. Note that this function returns 5405 * 0 if online status of @link cannot be obtained, so 5406 * ata_link_online(link) != !ata_link_offline(link). 5407 * 5408 * LOCKING: 5409 * None. 5410 * 5411 * RETURNS: 5412 * True if the port online status is available and online. 5413 */ 5414 bool ata_phys_link_online(struct ata_link *link) 5415 { 5416 u32 sstatus; 5417 5418 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5419 ata_sstatus_online(sstatus)) 5420 return true; 5421 return false; 5422 } 5423 5424 /** 5425 * ata_phys_link_offline - test whether the given link is offline 5426 * @link: ATA link to test 5427 * 5428 * Test whether @link is offline. Note that this function 5429 * returns 0 if offline status of @link cannot be obtained, so 5430 * ata_link_online(link) != !ata_link_offline(link). 5431 * 5432 * LOCKING: 5433 * None. 5434 * 5435 * RETURNS: 5436 * True if the port offline status is available and offline. 
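 *
 * Because a failed SCR read makes both the online and the offline
 * predicates return false, a caller that needs "known to be offline"
 * should use this helper directly rather than negate the online test,
 * e.g. (sketch mirroring the prereset logic earlier in this file):
 *
 *	if (ata_phys_link_offline(link))
 *		ehc->i.action &= ~ATA_EH_SOFTRESET;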
5437 */ 5438 bool ata_phys_link_offline(struct ata_link *link) 5439 { 5440 u32 sstatus; 5441 5442 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5443 !ata_sstatus_online(sstatus)) 5444 return true; 5445 return false; 5446 } 5447 5448 /** 5449 * ata_link_online - test whether the given link is online 5450 * @link: ATA link to test 5451 * 5452 * Test whether @link is online. This is identical to 5453 * ata_phys_link_online() when there's no slave link. When 5454 * there's a slave link, this function should only be called on 5455 * the master link and will return true if any of M/S links is 5456 * online. 5457 * 5458 * LOCKING: 5459 * None. 5460 * 5461 * RETURNS: 5462 * True if the port online status is available and online. 5463 */ 5464 bool ata_link_online(struct ata_link *link) 5465 { 5466 struct ata_link *slave = link->ap->slave_link; 5467 5468 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5469 5470 return ata_phys_link_online(link) || 5471 (slave && ata_phys_link_online(slave)); 5472 } 5473 5474 /** 5475 * ata_link_offline - test whether the given link is offline 5476 * @link: ATA link to test 5477 * 5478 * Test whether @link is offline. This is identical to 5479 * ata_phys_link_offline() when there's no slave link. When 5480 * there's a slave link, this function should only be called on 5481 * the master link and will return true if both M/S links are 5482 * offline. 5483 * 5484 * LOCKING: 5485 * None. 5486 * 5487 * RETURNS: 5488 * True if the port offline status is available and offline. 5489 */ 5490 bool ata_link_offline(struct ata_link *link) 5491 { 5492 struct ata_link *slave = link->ap->slave_link; 5493 5494 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5495 5496 return ata_phys_link_offline(link) && 5497 (!slave || ata_phys_link_offline(slave)); 5498 } 5499 5500 #ifdef CONFIG_PM 5501 static void ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, 5502 unsigned int action, unsigned int ehi_flags, 5503 bool async) 5504 { 5505 struct ata_link *link; 5506 unsigned long flags; 5507 5508 /* Previous resume operation might still be in 5509 * progress. Wait for PM_PENDING to clear. 5510 */ 5511 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5512 ata_port_wait_eh(ap); 5513 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5514 } 5515 5516 /* request PM ops to EH */ 5517 spin_lock_irqsave(ap->lock, flags); 5518 5519 ap->pm_mesg = mesg; 5520 ap->pflags |= ATA_PFLAG_PM_PENDING; 5521 ata_for_each_link(link, ap, HOST_FIRST) { 5522 link->eh_info.action |= action; 5523 link->eh_info.flags |= ehi_flags; 5524 } 5525 5526 ata_port_schedule_eh(ap); 5527 5528 spin_unlock_irqrestore(ap->lock, flags); 5529 5530 if (!async) { 5531 ata_port_wait_eh(ap); 5532 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5533 } 5534 } 5535 5536 /* 5537 * On some hardware, device fails to respond after spun down for suspend. As 5538 * the device won't be used before being resumed, we don't need to touch the 5539 * device. Ask EH to skip the usual stuff and proceed directly to suspend. 
5540 * 5541 * http://thread.gmane.org/gmane.linux.ide/46764 5542 */ 5543 static const unsigned int ata_port_suspend_ehi = ATA_EHI_QUIET 5544 | ATA_EHI_NO_AUTOPSY 5545 | ATA_EHI_NO_RECOVERY; 5546 5547 static void ata_port_suspend(struct ata_port *ap, pm_message_t mesg) 5548 { 5549 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, false); 5550 } 5551 5552 static void ata_port_suspend_async(struct ata_port *ap, pm_message_t mesg) 5553 { 5554 ata_port_request_pm(ap, mesg, 0, ata_port_suspend_ehi, true); 5555 } 5556 5557 static int ata_port_pm_suspend(struct device *dev) 5558 { 5559 struct ata_port *ap = to_ata_port(dev); 5560 5561 if (pm_runtime_suspended(dev)) 5562 return 0; 5563 5564 ata_port_suspend(ap, PMSG_SUSPEND); 5565 return 0; 5566 } 5567 5568 static int ata_port_pm_freeze(struct device *dev) 5569 { 5570 struct ata_port *ap = to_ata_port(dev); 5571 5572 if (pm_runtime_suspended(dev)) 5573 return 0; 5574 5575 ata_port_suspend(ap, PMSG_FREEZE); 5576 return 0; 5577 } 5578 5579 static int ata_port_pm_poweroff(struct device *dev) 5580 { 5581 ata_port_suspend(to_ata_port(dev), PMSG_HIBERNATE); 5582 return 0; 5583 } 5584 5585 static const unsigned int ata_port_resume_ehi = ATA_EHI_NO_AUTOPSY 5586 | ATA_EHI_QUIET; 5587 5588 static void ata_port_resume(struct ata_port *ap, pm_message_t mesg) 5589 { 5590 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, false); 5591 } 5592 5593 static void ata_port_resume_async(struct ata_port *ap, pm_message_t mesg) 5594 { 5595 ata_port_request_pm(ap, mesg, ATA_EH_RESET, ata_port_resume_ehi, true); 5596 } 5597 5598 static int ata_port_pm_resume(struct device *dev) 5599 { 5600 ata_port_resume_async(to_ata_port(dev), PMSG_RESUME); 5601 pm_runtime_disable(dev); 5602 pm_runtime_set_active(dev); 5603 pm_runtime_enable(dev); 5604 return 0; 5605 } 5606 5607 /* 5608 * For ODDs, the upper layer will poll for media change every few seconds, 5609 * which will make it enter and leave suspend state every few seconds. And 5610 * as each suspend will cause a hard/soft reset, the gain of runtime suspend 5611 * is very little and the ODD may malfunction after constantly being reset. 5612 * So the idle callback here will not proceed to suspend if a non-ZPODD capable 5613 * ODD is attached to the port. 
5614 */ 5615 static int ata_port_runtime_idle(struct device *dev) 5616 { 5617 struct ata_port *ap = to_ata_port(dev); 5618 struct ata_link *link; 5619 struct ata_device *adev; 5620 5621 ata_for_each_link(link, ap, HOST_FIRST) { 5622 ata_for_each_dev(adev, link, ENABLED) 5623 if (adev->class == ATA_DEV_ATAPI && 5624 !zpodd_dev_enabled(adev)) 5625 return -EBUSY; 5626 } 5627 5628 return 0; 5629 } 5630 5631 static int ata_port_runtime_suspend(struct device *dev) 5632 { 5633 ata_port_suspend(to_ata_port(dev), PMSG_AUTO_SUSPEND); 5634 return 0; 5635 } 5636 5637 static int ata_port_runtime_resume(struct device *dev) 5638 { 5639 ata_port_resume(to_ata_port(dev), PMSG_AUTO_RESUME); 5640 return 0; 5641 } 5642 5643 static const struct dev_pm_ops ata_port_pm_ops = { 5644 .suspend = ata_port_pm_suspend, 5645 .resume = ata_port_pm_resume, 5646 .freeze = ata_port_pm_freeze, 5647 .thaw = ata_port_pm_resume, 5648 .poweroff = ata_port_pm_poweroff, 5649 .restore = ata_port_pm_resume, 5650 5651 .runtime_suspend = ata_port_runtime_suspend, 5652 .runtime_resume = ata_port_runtime_resume, 5653 .runtime_idle = ata_port_runtime_idle, 5654 }; 5655 5656 /* sas ports don't participate in pm runtime management of ata_ports, 5657 * and need to resume ata devices at the domain level, not the per-port 5658 * level. sas suspend/resume is async to allow parallel port recovery 5659 * since sas has multiple ata_port instances per Scsi_Host. 5660 */ 5661 void ata_sas_port_suspend(struct ata_port *ap) 5662 { 5663 ata_port_suspend_async(ap, PMSG_SUSPEND); 5664 } 5665 EXPORT_SYMBOL_GPL(ata_sas_port_suspend); 5666 5667 void ata_sas_port_resume(struct ata_port *ap) 5668 { 5669 ata_port_resume_async(ap, PMSG_RESUME); 5670 } 5671 EXPORT_SYMBOL_GPL(ata_sas_port_resume); 5672 5673 /** 5674 * ata_host_suspend - suspend host 5675 * @host: host to suspend 5676 * @mesg: PM message 5677 * 5678 * Suspend @host. Actual operation is performed by port suspend. 5679 */ 5680 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5681 { 5682 host->dev->power.power_state = mesg; 5683 return 0; 5684 } 5685 5686 /** 5687 * ata_host_resume - resume host 5688 * @host: host to resume 5689 * 5690 * Resume @host. Actual operation is performed by port resume. 5691 */ 5692 void ata_host_resume(struct ata_host *host) 5693 { 5694 host->dev->power.power_state = PMSG_ON; 5695 } 5696 #endif 5697 5698 struct device_type ata_port_type = { 5699 .name = "ata_port", 5700 #ifdef CONFIG_PM 5701 .pm = &ata_port_pm_ops, 5702 #endif 5703 }; 5704 5705 /** 5706 * ata_dev_init - Initialize an ata_device structure 5707 * @dev: Device structure to initialize 5708 * 5709 * Initialize @dev in preparation for probing. 5710 * 5711 * LOCKING: 5712 * Inherited from caller. 5713 */ 5714 void ata_dev_init(struct ata_device *dev) 5715 { 5716 struct ata_link *link = ata_dev_phys_link(dev); 5717 struct ata_port *ap = link->ap; 5718 unsigned long flags; 5719 5720 /* SATA spd limit is bound to the attached device, reset together */ 5721 link->sata_spd_limit = link->hw_sata_spd_limit; 5722 link->sata_spd = 0; 5723 5724 /* High bits of dev->flags are used to record warm plug 5725 * requests which occur asynchronously. Synchronize using 5726 * host lock. 
5727 */ 5728 spin_lock_irqsave(ap->lock, flags); 5729 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5730 dev->horkage = 0; 5731 spin_unlock_irqrestore(ap->lock, flags); 5732 5733 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5734 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5735 dev->pio_mask = UINT_MAX; 5736 dev->mwdma_mask = UINT_MAX; 5737 dev->udma_mask = UINT_MAX; 5738 } 5739 5740 /** 5741 * ata_link_init - Initialize an ata_link structure 5742 * @ap: ATA port link is attached to 5743 * @link: Link structure to initialize 5744 * @pmp: Port multiplier port number 5745 * 5746 * Initialize @link. 5747 * 5748 * LOCKING: 5749 * Kernel thread context (may sleep) 5750 */ 5751 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5752 { 5753 int i; 5754 5755 /* clear everything except for devices */ 5756 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, 5757 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); 5758 5759 link->ap = ap; 5760 link->pmp = pmp; 5761 link->active_tag = ATA_TAG_POISON; 5762 link->hw_sata_spd_limit = UINT_MAX; 5763 5764 /* can't use iterator, ap isn't initialized yet */ 5765 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5766 struct ata_device *dev = &link->device[i]; 5767 5768 dev->link = link; 5769 dev->devno = dev - link->device; 5770 #ifdef CONFIG_ATA_ACPI 5771 dev->gtf_filter = ata_acpi_gtf_filter; 5772 #endif 5773 ata_dev_init(dev); 5774 } 5775 } 5776 5777 /** 5778 * sata_link_init_spd - Initialize link->sata_spd_limit 5779 * @link: Link to configure sata_spd_limit for 5780 * 5781 * Initialize @link->[hw_]sata_spd_limit to the currently 5782 * configured value. 5783 * 5784 * LOCKING: 5785 * Kernel thread context (may sleep). 5786 * 5787 * RETURNS: 5788 * 0 on success, -errno on failure. 5789 */ 5790 int sata_link_init_spd(struct ata_link *link) 5791 { 5792 u8 spd; 5793 int rc; 5794 5795 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5796 if (rc) 5797 return rc; 5798 5799 spd = (link->saved_scontrol >> 4) & 0xf; 5800 if (spd) 5801 link->hw_sata_spd_limit &= (1 << spd) - 1; 5802 5803 ata_force_link_limits(link); 5804 5805 link->sata_spd_limit = link->hw_sata_spd_limit; 5806 5807 return 0; 5808 } 5809 5810 /** 5811 * ata_port_alloc - allocate and initialize basic ATA port resources 5812 * @host: ATA host this allocated port belongs to 5813 * 5814 * Allocate and initialize basic ATA port resources. 5815 * 5816 * RETURNS: 5817 * Allocate ATA port on success, NULL on failure. 5818 * 5819 * LOCKING: 5820 * Inherited from calling layer (may sleep). 
5821 */ 5822 struct ata_port *ata_port_alloc(struct ata_host *host) 5823 { 5824 struct ata_port *ap; 5825 5826 DPRINTK("ENTER\n"); 5827 5828 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5829 if (!ap) 5830 return NULL; 5831 5832 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; 5833 ap->lock = &host->lock; 5834 ap->print_id = -1; 5835 ap->local_port_no = -1; 5836 ap->host = host; 5837 ap->dev = host->dev; 5838 5839 #if defined(ATA_VERBOSE_DEBUG) 5840 /* turn on all debugging levels */ 5841 ap->msg_enable = 0x00FF; 5842 #elif defined(ATA_DEBUG) 5843 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5844 #else 5845 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5846 #endif 5847 5848 mutex_init(&ap->scsi_scan_mutex); 5849 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5850 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5851 INIT_LIST_HEAD(&ap->eh_done_q); 5852 init_waitqueue_head(&ap->eh_wait_q); 5853 init_completion(&ap->park_req_pending); 5854 init_timer_deferrable(&ap->fastdrain_timer); 5855 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5856 ap->fastdrain_timer.data = (unsigned long)ap; 5857 5858 ap->cbl = ATA_CBL_NONE; 5859 5860 ata_link_init(ap, &ap->link, 0); 5861 5862 #ifdef ATA_IRQ_TRAP 5863 ap->stats.unhandled_irq = 1; 5864 ap->stats.idle_irq = 1; 5865 #endif 5866 ata_sff_port_init(ap); 5867 5868 return ap; 5869 } 5870 5871 static void ata_host_release(struct device *gendev, void *res) 5872 { 5873 struct ata_host *host = dev_get_drvdata(gendev); 5874 int i; 5875 5876 for (i = 0; i < host->n_ports; i++) { 5877 struct ata_port *ap = host->ports[i]; 5878 5879 if (!ap) 5880 continue; 5881 5882 if (ap->scsi_host) 5883 scsi_host_put(ap->scsi_host); 5884 5885 kfree(ap->pmp_link); 5886 kfree(ap->slave_link); 5887 kfree(ap); 5888 host->ports[i] = NULL; 5889 } 5890 5891 dev_set_drvdata(gendev, NULL); 5892 } 5893 5894 /** 5895 * ata_host_alloc - allocate and init basic ATA host resources 5896 * @dev: generic device this host is associated with 5897 * @max_ports: maximum number of ATA ports associated with this host 5898 * 5899 * Allocate and initialize basic ATA host resources. LLD calls 5900 * this function to allocate a host, initializes it fully and 5901 * attaches it using ata_host_register(). 5902 * 5903 * @max_ports ports are allocated and host->n_ports is 5904 * initialized to @max_ports. The caller is allowed to decrease 5905 * host->n_ports before calling ata_host_register(). The unused 5906 * ports will be automatically freed on registration. 5907 * 5908 * RETURNS: 5909 * Allocate ATA host on success, NULL on failure. 5910 * 5911 * LOCKING: 5912 * Inherited from calling layer (may sleep). 
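 *
 * A hedged sketch of the usual probe-time flow; error handling,
 * resource mapping and other driver-specific setup are omitted, and
 * n_ports and my_port_ops are hypothetical:
 *
 *	host = ata_host_alloc(dev, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	for (i = 0; i < host->n_ports; i++)
 *		host->ports[i]->ops = &my_port_ops;
 *
 * The fully initialized host is then attached with
 * ata_host_register() as described above.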
5913 */
5914 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5915 {
5916 struct ata_host *host;
5917 size_t sz;
5918 int i;
5919
5920 DPRINTK("ENTER\n");
5921
5922 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5923 return NULL;
5924
5925 /* alloc a container for our list of ATA ports (buses) */
5926 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5928 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5929 if (!host)
5930 goto err_out;
5931
5932 devres_add(dev, host);
5933 dev_set_drvdata(dev, host);
5934
5935 spin_lock_init(&host->lock);
5936 mutex_init(&host->eh_mutex);
5937 host->dev = dev;
5938 host->n_ports = max_ports;
5939
5940 /* allocate ports bound to this host */
5941 for (i = 0; i < max_ports; i++) {
5942 struct ata_port *ap;
5943
5944 ap = ata_port_alloc(host);
5945 if (!ap)
5946 goto err_out;
5947
5948 ap->port_no = i;
5949 host->ports[i] = ap;
5950 }
5951
5952 devres_remove_group(dev, NULL);
5953 return host;
5954
5955 err_out:
5956 devres_release_group(dev, NULL);
5957 return NULL;
5958 }
5959
5960 /**
5961 * ata_host_alloc_pinfo - alloc host and init with port_info array
5962 * @dev: generic device this host is associated with
5963 * @ppi: array of ATA port_info to initialize host with
5964 * @n_ports: number of ATA ports attached to this host
5965 *
5966 * Allocate ATA host and initialize it with info from @ppi. If NULL
5967 * terminated, @ppi may contain fewer entries than @n_ports. The
5968 * last entry will be used for the remaining ports.
5969 *
5970 * RETURNS:
5971 * Allocated ATA host on success, NULL on failure.
5972 *
5973 * LOCKING:
5974 * Inherited from calling layer (may sleep).
5975 */
5976 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5977 const struct ata_port_info * const * ppi,
5978 int n_ports)
5979 {
5980 const struct ata_port_info *pi;
5981 struct ata_host *host;
5982 int i, j;
5983
5984 host = ata_host_alloc(dev, n_ports);
5985 if (!host)
5986 return NULL;
5987
5988 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5989 struct ata_port *ap = host->ports[i];
5990
5991 if (ppi[j])
5992 pi = ppi[j++];
5993
5994 ap->pio_mask = pi->pio_mask;
5995 ap->mwdma_mask = pi->mwdma_mask;
5996 ap->udma_mask = pi->udma_mask;
5997 ap->flags |= pi->flags;
5998 ap->link.flags |= pi->link_flags;
5999 ap->ops = pi->port_ops;
6000
6001 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6002 host->ops = pi->port_ops;
6003 }
6004
6005 return host;
6006 }
6007
6008 /**
6009 * ata_slave_link_init - initialize slave link
6010 * @ap: port to initialize slave link for
6011 *
6012 * Create and initialize slave link for @ap. This enables slave
6013 * link handling on the port.
6014 *
6015 * In libata, a port contains links and a link contains devices.
6016 * There is a single host link but if a PMP is attached to it,
6017 * there can be multiple fan-out links. On SATA, there's usually
6018 * a single device connected to a link but PATA and SATA
6019 * controllers emulating a TF-based interface can have two - master
6020 * and slave.
6021 *
6022 * However, there are a few controllers which don't fit into this
6023 * abstraction too well - SATA controllers which emulate a TF
6024 * interface with both master and slave devices but also have
6025 * separate SCR register sets for each device. These controllers
6026 * need separate links for physical link handling
6027 * (e.g. onlineness, link speed) but should be treated like a
6028 * traditional M/S controller for everything else (e.g. command
6029 * issue, softreset).
6030 *
6031 * slave_link is libata's way of handling this class of
6032 * controllers without impacting the core layer too much. For
6033 * anything other than physical link handling, the default host
6034 * link is used for both master and slave. For physical link
6035 * handling, separate @ap->slave_link is used. All dirty details
6036 * are implemented inside the libata core layer. From the LLD's POV,
6037 * the only difference is that prereset, hardreset and postreset are
6038 * called once more for the slave link, so the reset sequence
6039 * looks like the following.
6040 *
6041 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
6042 * softreset(M) -> postreset(M) -> postreset(S)
6043 *
6044 * Note that softreset is called only for the master. Softreset
6045 * resets both M/S by definition, so SRST on the master should handle
6046 * both (the standard method will work just fine).
6047 *
6048 * LOCKING:
6049 * Should be called before host is registered.
6050 *
6051 * RETURNS:
6052 * 0 on success, -errno on failure.
6053 */
6054 int ata_slave_link_init(struct ata_port *ap)
6055 {
6056 struct ata_link *link;
6057
6058 WARN_ON(ap->slave_link);
6059 WARN_ON(ap->flags & ATA_FLAG_PMP);
6060
6061 link = kzalloc(sizeof(*link), GFP_KERNEL);
6062 if (!link)
6063 return -ENOMEM;
6064
6065 ata_link_init(ap, link, 1);
6066 ap->slave_link = link;
6067 return 0;
6068 }
6069
6070 static void ata_host_stop(struct device *gendev, void *res)
6071 {
6072 struct ata_host *host = dev_get_drvdata(gendev);
6073 int i;
6074
6075 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6076
6077 for (i = 0; i < host->n_ports; i++) {
6078 struct ata_port *ap = host->ports[i];
6079
6080 if (ap->ops->port_stop)
6081 ap->ops->port_stop(ap);
6082 }
6083
6084 if (host->ops->host_stop)
6085 host->ops->host_stop(host);
6086 }
6087
6088 /**
6089 * ata_finalize_port_ops - finalize ata_port_operations
6090 * @ops: ata_port_operations to finalize
6091 *
6092 * An ata_port_operations can inherit from another ops and that
6093 * ops can again inherit from another. This can go on as many
6094 * times as necessary as long as there is no loop in the
6095 * inheritance chain.
6096 *
6097 * Ops tables are finalized when the host is started. NULL or
6098 * unspecified entries are inherited from the closest ancestor
6099 * which has the method and the entry is populated with it.
6100 * After finalization, the ops table directly points to all the
6101 * methods and ->inherits is no longer necessary and cleared.
6102 *
6103 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
6104 *
6105 * LOCKING:
6106 * None.
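 *
 * Illustrative sketch only (the my_drv_* names are hypothetical): an
 * inheriting ops table normally fills in just the methods it
 * overrides and lets finalization copy the rest from its ancestors,
 * while ATA_OP_NULL explicitly masks a method an ancestor would
 * otherwise provide.
 *
 *	static struct ata_port_operations my_drv_port_ops = {
 *		.inherits	= &sata_port_ops,
 *		.qc_issue	= my_drv_qc_issue,
 *		.hardreset	= ATA_OP_NULL,
 *	};
 *
 * After finalization, every unset method points at the closest
 * ancestor's implementation, .hardreset ends up NULL, and
 * ->inherits is cleared.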
6107 */ 6108 static void ata_finalize_port_ops(struct ata_port_operations *ops) 6109 { 6110 static DEFINE_SPINLOCK(lock); 6111 const struct ata_port_operations *cur; 6112 void **begin = (void **)ops; 6113 void **end = (void **)&ops->inherits; 6114 void **pp; 6115 6116 if (!ops || !ops->inherits) 6117 return; 6118 6119 spin_lock(&lock); 6120 6121 for (cur = ops->inherits; cur; cur = cur->inherits) { 6122 void **inherit = (void **)cur; 6123 6124 for (pp = begin; pp < end; pp++, inherit++) 6125 if (!*pp) 6126 *pp = *inherit; 6127 } 6128 6129 for (pp = begin; pp < end; pp++) 6130 if (IS_ERR(*pp)) 6131 *pp = NULL; 6132 6133 ops->inherits = NULL; 6134 6135 spin_unlock(&lock); 6136 } 6137 6138 /** 6139 * ata_host_start - start and freeze ports of an ATA host 6140 * @host: ATA host to start ports for 6141 * 6142 * Start and then freeze ports of @host. Started status is 6143 * recorded in host->flags, so this function can be called 6144 * multiple times. Ports are guaranteed to get started only 6145 * once. If host->ops isn't initialized yet, its set to the 6146 * first non-dummy port ops. 6147 * 6148 * LOCKING: 6149 * Inherited from calling layer (may sleep). 6150 * 6151 * RETURNS: 6152 * 0 if all ports are started successfully, -errno otherwise. 6153 */ 6154 int ata_host_start(struct ata_host *host) 6155 { 6156 int have_stop = 0; 6157 void *start_dr = NULL; 6158 int i, rc; 6159 6160 if (host->flags & ATA_HOST_STARTED) 6161 return 0; 6162 6163 ata_finalize_port_ops(host->ops); 6164 6165 for (i = 0; i < host->n_ports; i++) { 6166 struct ata_port *ap = host->ports[i]; 6167 6168 ata_finalize_port_ops(ap->ops); 6169 6170 if (!host->ops && !ata_port_is_dummy(ap)) 6171 host->ops = ap->ops; 6172 6173 if (ap->ops->port_stop) 6174 have_stop = 1; 6175 } 6176 6177 if (host->ops->host_stop) 6178 have_stop = 1; 6179 6180 if (have_stop) { 6181 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 6182 if (!start_dr) 6183 return -ENOMEM; 6184 } 6185 6186 for (i = 0; i < host->n_ports; i++) { 6187 struct ata_port *ap = host->ports[i]; 6188 6189 if (ap->ops->port_start) { 6190 rc = ap->ops->port_start(ap); 6191 if (rc) { 6192 if (rc != -ENODEV) 6193 dev_err(host->dev, 6194 "failed to start port %d (errno=%d)\n", 6195 i, rc); 6196 goto err_out; 6197 } 6198 } 6199 ata_eh_freeze_port(ap); 6200 } 6201 6202 if (start_dr) 6203 devres_add(host->dev, start_dr); 6204 host->flags |= ATA_HOST_STARTED; 6205 return 0; 6206 6207 err_out: 6208 while (--i >= 0) { 6209 struct ata_port *ap = host->ports[i]; 6210 6211 if (ap->ops->port_stop) 6212 ap->ops->port_stop(ap); 6213 } 6214 devres_free(start_dr); 6215 return rc; 6216 } 6217 6218 /** 6219 * ata_sas_host_init - Initialize a host struct for sas (ipr, libsas) 6220 * @host: host to initialize 6221 * @dev: device host is attached to 6222 * @ops: port_ops 6223 * 6224 */ 6225 void ata_host_init(struct ata_host *host, struct device *dev, 6226 struct ata_port_operations *ops) 6227 { 6228 spin_lock_init(&host->lock); 6229 mutex_init(&host->eh_mutex); 6230 host->n_tags = ATA_MAX_QUEUE - 1; 6231 host->dev = dev; 6232 host->ops = ops; 6233 } 6234 6235 void __ata_port_probe(struct ata_port *ap) 6236 { 6237 struct ata_eh_info *ehi = &ap->link.eh_info; 6238 unsigned long flags; 6239 6240 /* kick EH for boot probing */ 6241 spin_lock_irqsave(ap->lock, flags); 6242 6243 ehi->probe_mask |= ATA_ALL_DEVICES; 6244 ehi->action |= ATA_EH_RESET; 6245 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6246 6247 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6248 ap->pflags |= ATA_PFLAG_LOADING; 6249 
ata_port_schedule_eh(ap); 6250 6251 spin_unlock_irqrestore(ap->lock, flags); 6252 } 6253 6254 int ata_port_probe(struct ata_port *ap) 6255 { 6256 int rc = 0; 6257 6258 if (ap->ops->error_handler) { 6259 __ata_port_probe(ap); 6260 ata_port_wait_eh(ap); 6261 } else { 6262 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6263 rc = ata_bus_probe(ap); 6264 DPRINTK("ata%u: bus probe end\n", ap->print_id); 6265 } 6266 return rc; 6267 } 6268 6269 6270 static void async_port_probe(void *data, async_cookie_t cookie) 6271 { 6272 struct ata_port *ap = data; 6273 6274 /* 6275 * If we're not allowed to scan this host in parallel, 6276 * we need to wait until all previous scans have completed 6277 * before going further. 6278 * Jeff Garzik says this is only within a controller, so we 6279 * don't need to wait for port 0, only for later ports. 6280 */ 6281 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 6282 async_synchronize_cookie(cookie); 6283 6284 (void)ata_port_probe(ap); 6285 6286 /* in order to keep device order, we need to synchronize at this point */ 6287 async_synchronize_cookie(cookie); 6288 6289 ata_scsi_scan_host(ap, 1); 6290 } 6291 6292 /** 6293 * ata_host_register - register initialized ATA host 6294 * @host: ATA host to register 6295 * @sht: template for SCSI host 6296 * 6297 * Register initialized ATA host. @host is allocated using 6298 * ata_host_alloc() and fully initialized by LLD. This function 6299 * starts ports, registers @host with ATA and SCSI layers and 6300 * probe registered devices. 6301 * 6302 * LOCKING: 6303 * Inherited from calling layer (may sleep). 6304 * 6305 * RETURNS: 6306 * 0 on success, -errno otherwise. 6307 */ 6308 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6309 { 6310 int i, rc; 6311 6312 host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1); 6313 6314 /* host must have been started */ 6315 if (!(host->flags & ATA_HOST_STARTED)) { 6316 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6317 WARN_ON(1); 6318 return -EINVAL; 6319 } 6320 6321 /* Blow away unused ports. This happens when LLD can't 6322 * determine the exact number of ports to allocate at 6323 * allocation time. 6324 */ 6325 for (i = host->n_ports; host->ports[i]; i++) 6326 kfree(host->ports[i]); 6327 6328 /* give ports names and add SCSI hosts */ 6329 for (i = 0; i < host->n_ports; i++) { 6330 host->ports[i]->print_id = atomic_inc_return(&ata_print_id); 6331 host->ports[i]->local_port_no = i + 1; 6332 } 6333 6334 /* Create associated sysfs transport objects */ 6335 for (i = 0; i < host->n_ports; i++) { 6336 rc = ata_tport_add(host->dev,host->ports[i]); 6337 if (rc) { 6338 goto err_tadd; 6339 } 6340 } 6341 6342 rc = ata_scsi_add_hosts(host, sht); 6343 if (rc) 6344 goto err_tadd; 6345 6346 /* set cable, sata_spd_limit and report */ 6347 for (i = 0; i < host->n_ports; i++) { 6348 struct ata_port *ap = host->ports[i]; 6349 unsigned long xfer_mask; 6350 6351 /* set SATA cable type if still unset */ 6352 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6353 ap->cbl = ATA_CBL_SATA; 6354 6355 /* init sata_spd_limit to the current value */ 6356 sata_link_init_spd(&ap->link); 6357 if (ap->slave_link) 6358 sata_link_init_spd(ap->slave_link); 6359 6360 /* print per-port info to dmesg */ 6361 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6362 ap->udma_mask); 6363 6364 if (!ata_port_is_dummy(ap)) { 6365 ata_port_info(ap, "%cATA max %s %s\n", 6366 (ap->flags & ATA_FLAG_SATA) ? 
'S' : 'P', 6367 ata_mode_string(xfer_mask), 6368 ap->link.eh_info.desc); 6369 ata_ehi_clear_desc(&ap->link.eh_info); 6370 } else 6371 ata_port_info(ap, "DUMMY\n"); 6372 } 6373 6374 /* perform each probe asynchronously */ 6375 for (i = 0; i < host->n_ports; i++) { 6376 struct ata_port *ap = host->ports[i]; 6377 async_schedule(async_port_probe, ap); 6378 } 6379 6380 return 0; 6381 6382 err_tadd: 6383 while (--i >= 0) { 6384 ata_tport_delete(host->ports[i]); 6385 } 6386 return rc; 6387 6388 } 6389 6390 /** 6391 * ata_host_activate - start host, request IRQ and register it 6392 * @host: target ATA host 6393 * @irq: IRQ to request 6394 * @irq_handler: irq_handler used when requesting IRQ 6395 * @irq_flags: irq_flags used when requesting IRQ 6396 * @sht: scsi_host_template to use when registering the host 6397 * 6398 * After allocating an ATA host and initializing it, most libata 6399 * LLDs perform three steps to activate the host - start host, 6400 * request IRQ and register it. This helper takes necessary 6401 * arguments and performs the three steps in one go. 6402 * 6403 * An invalid IRQ skips the IRQ registration and expects the host to 6404 * have set polling mode on the port. In this case, @irq_handler 6405 * should be NULL. 6406 * 6407 * LOCKING: 6408 * Inherited from calling layer (may sleep). 6409 * 6410 * RETURNS: 6411 * 0 on success, -errno otherwise. 6412 */ 6413 int ata_host_activate(struct ata_host *host, int irq, 6414 irq_handler_t irq_handler, unsigned long irq_flags, 6415 struct scsi_host_template *sht) 6416 { 6417 int i, rc; 6418 char *irq_desc; 6419 6420 rc = ata_host_start(host); 6421 if (rc) 6422 return rc; 6423 6424 /* Special case for polling mode */ 6425 if (!irq) { 6426 WARN_ON(irq_handler); 6427 return ata_host_register(host, sht); 6428 } 6429 6430 irq_desc = devm_kasprintf(host->dev, GFP_KERNEL, "%s[%s]", 6431 dev_driver_string(host->dev), 6432 dev_name(host->dev)); 6433 if (!irq_desc) 6434 return -ENOMEM; 6435 6436 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6437 irq_desc, host); 6438 if (rc) 6439 return rc; 6440 6441 for (i = 0; i < host->n_ports; i++) 6442 ata_port_desc(host->ports[i], "irq %d", irq); 6443 6444 rc = ata_host_register(host, sht); 6445 /* if failed, just free the IRQ and leave ports alone */ 6446 if (rc) 6447 devm_free_irq(host->dev, irq, host); 6448 6449 return rc; 6450 } 6451 6452 /** 6453 * ata_port_detach - Detach ATA port in preparation of device removal 6454 * @ap: ATA port to be detached 6455 * 6456 * Detach all ATA devices and the associated SCSI devices of @ap; 6457 * then, remove the associated SCSI host. @ap is guaranteed to 6458 * be quiescent on return from this function. 6459 * 6460 * LOCKING: 6461 * Kernel thread context (may sleep). 
6462 */ 6463 static void ata_port_detach(struct ata_port *ap) 6464 { 6465 unsigned long flags; 6466 struct ata_link *link; 6467 struct ata_device *dev; 6468 6469 if (!ap->ops->error_handler) 6470 goto skip_eh; 6471 6472 /* tell EH we're leaving & flush EH */ 6473 spin_lock_irqsave(ap->lock, flags); 6474 ap->pflags |= ATA_PFLAG_UNLOADING; 6475 ata_port_schedule_eh(ap); 6476 spin_unlock_irqrestore(ap->lock, flags); 6477 6478 /* wait till EH commits suicide */ 6479 ata_port_wait_eh(ap); 6480 6481 /* it better be dead now */ 6482 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6483 6484 cancel_delayed_work_sync(&ap->hotplug_task); 6485 6486 skip_eh: 6487 /* clean up zpodd on port removal */ 6488 ata_for_each_link(link, ap, HOST_FIRST) { 6489 ata_for_each_dev(dev, link, ALL) { 6490 if (zpodd_dev_enabled(dev)) 6491 zpodd_exit(dev); 6492 } 6493 } 6494 if (ap->pmp_link) { 6495 int i; 6496 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6497 ata_tlink_delete(&ap->pmp_link[i]); 6498 } 6499 /* remove the associated SCSI host */ 6500 scsi_remove_host(ap->scsi_host); 6501 ata_tport_delete(ap); 6502 } 6503 6504 /** 6505 * ata_host_detach - Detach all ports of an ATA host 6506 * @host: Host to detach 6507 * 6508 * Detach all ports of @host. 6509 * 6510 * LOCKING: 6511 * Kernel thread context (may sleep). 6512 */ 6513 void ata_host_detach(struct ata_host *host) 6514 { 6515 int i; 6516 6517 for (i = 0; i < host->n_ports; i++) 6518 ata_port_detach(host->ports[i]); 6519 6520 /* the host is dead now, dissociate ACPI */ 6521 ata_acpi_dissociate(host); 6522 } 6523 6524 #ifdef CONFIG_PCI 6525 6526 /** 6527 * ata_pci_remove_one - PCI layer callback for device removal 6528 * @pdev: PCI device that was removed 6529 * 6530 * PCI layer indicates to libata via this hook that hot-unplug or 6531 * module unload event has occurred. Detach all ports. Resource 6532 * release is handled via devres. 6533 * 6534 * LOCKING: 6535 * Inherited from PCI layer (may sleep). 6536 */ 6537 void ata_pci_remove_one(struct pci_dev *pdev) 6538 { 6539 struct ata_host *host = pci_get_drvdata(pdev); 6540 6541 ata_host_detach(host); 6542 } 6543 6544 /* move to PCI subsystem */ 6545 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6546 { 6547 unsigned long tmp = 0; 6548 6549 switch (bits->width) { 6550 case 1: { 6551 u8 tmp8 = 0; 6552 pci_read_config_byte(pdev, bits->reg, &tmp8); 6553 tmp = tmp8; 6554 break; 6555 } 6556 case 2: { 6557 u16 tmp16 = 0; 6558 pci_read_config_word(pdev, bits->reg, &tmp16); 6559 tmp = tmp16; 6560 break; 6561 } 6562 case 4: { 6563 u32 tmp32 = 0; 6564 pci_read_config_dword(pdev, bits->reg, &tmp32); 6565 tmp = tmp32; 6566 break; 6567 } 6568 6569 default: 6570 return -EINVAL; 6571 } 6572 6573 tmp &= bits->mask; 6574 6575 return (tmp == bits->val) ? 
1 : 0; 6576 } 6577 6578 #ifdef CONFIG_PM 6579 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6580 { 6581 pci_save_state(pdev); 6582 pci_disable_device(pdev); 6583 6584 if (mesg.event & PM_EVENT_SLEEP) 6585 pci_set_power_state(pdev, PCI_D3hot); 6586 } 6587 6588 int ata_pci_device_do_resume(struct pci_dev *pdev) 6589 { 6590 int rc; 6591 6592 pci_set_power_state(pdev, PCI_D0); 6593 pci_restore_state(pdev); 6594 6595 rc = pcim_enable_device(pdev); 6596 if (rc) { 6597 dev_err(&pdev->dev, 6598 "failed to enable device after resume (%d)\n", rc); 6599 return rc; 6600 } 6601 6602 pci_set_master(pdev); 6603 return 0; 6604 } 6605 6606 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6607 { 6608 struct ata_host *host = pci_get_drvdata(pdev); 6609 int rc = 0; 6610 6611 rc = ata_host_suspend(host, mesg); 6612 if (rc) 6613 return rc; 6614 6615 ata_pci_device_do_suspend(pdev, mesg); 6616 6617 return 0; 6618 } 6619 6620 int ata_pci_device_resume(struct pci_dev *pdev) 6621 { 6622 struct ata_host *host = pci_get_drvdata(pdev); 6623 int rc; 6624 6625 rc = ata_pci_device_do_resume(pdev); 6626 if (rc == 0) 6627 ata_host_resume(host); 6628 return rc; 6629 } 6630 #endif /* CONFIG_PM */ 6631 6632 #endif /* CONFIG_PCI */ 6633 6634 /** 6635 * ata_platform_remove_one - Platform layer callback for device removal 6636 * @pdev: Platform device that was removed 6637 * 6638 * Platform layer indicates to libata via this hook that hot-unplug or 6639 * module unload event has occurred. Detach all ports. Resource 6640 * release is handled via devres. 6641 * 6642 * LOCKING: 6643 * Inherited from platform layer (may sleep). 6644 */ 6645 int ata_platform_remove_one(struct platform_device *pdev) 6646 { 6647 struct ata_host *host = platform_get_drvdata(pdev); 6648 6649 ata_host_detach(host); 6650 6651 return 0; 6652 } 6653 6654 static int __init ata_parse_force_one(char **cur, 6655 struct ata_force_ent *force_ent, 6656 const char **reason) 6657 { 6658 static const struct ata_force_param force_tbl[] __initconst = { 6659 { "40c", .cbl = ATA_CBL_PATA40 }, 6660 { "80c", .cbl = ATA_CBL_PATA80 }, 6661 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 6662 { "unk", .cbl = ATA_CBL_PATA_UNK }, 6663 { "ign", .cbl = ATA_CBL_PATA_IGN }, 6664 { "sata", .cbl = ATA_CBL_SATA }, 6665 { "1.5Gbps", .spd_limit = 1 }, 6666 { "3.0Gbps", .spd_limit = 2 }, 6667 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6668 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6669 { "noncqtrim", .horkage_on = ATA_HORKAGE_NO_NCQ_TRIM }, 6670 { "ncqtrim", .horkage_off = ATA_HORKAGE_NO_NCQ_TRIM }, 6671 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, 6672 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6673 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6674 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6675 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 6676 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 6677 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 6678 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 6679 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 6680 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 6681 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 6682 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 6683 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 6684 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6685 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6686 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6687 { "udma1", .xfer_mask = 1 << 
(ATA_SHIFT_UDMA + 1) },
6688 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6689 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6690 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6691 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6692 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6693 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6694 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6695 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6696 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6697 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6698 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6699 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6700 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6701 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6702 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6703 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6704 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6705 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
6706 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6707 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6708 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
6709 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
6710 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
6711 { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
6712 };
6713 char *start = *cur, *p = *cur;
6714 char *id, *val, *endp;
6715 const struct ata_force_param *match_fp = NULL;
6716 int nr_matches = 0, i;
6717
6718 /* find where this param ends and update *cur */
6719 while (*p != '\0' && *p != ',')
6720 p++;
6721
6722 if (*p == '\0')
6723 *cur = p;
6724 else
6725 *cur = p + 1;
6726
6727 *p = '\0';
6728
6729 /* parse */
6730 p = strchr(start, ':');
6731 if (!p) {
6732 val = strstrip(start);
6733 goto parse_val;
6734 }
6735 *p = '\0';
6736
6737 id = strstrip(start);
6738 val = strstrip(p + 1);
6739
6740 /* parse id */
6741 p = strchr(id, '.');
6742 if (p) {
6743 *p++ = '\0';
6744 force_ent->device = simple_strtoul(p, &endp, 10);
6745 if (p == endp || *endp != '\0') {
6746 *reason = "invalid device";
6747 return -EINVAL;
6748 }
6749 }
6750
6751 force_ent->port = simple_strtoul(id, &endp, 10);
6752 if (id == endp || *endp != '\0') {
6753 *reason = "invalid port/link";
6754 return -EINVAL;
6755 }
6756
6757 parse_val:
6758 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6759 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6760 const struct ata_force_param *fp = &force_tbl[i];
6761
6762 if (strncasecmp(val, fp->name, strlen(val)))
6763 continue;
6764
6765 nr_matches++;
6766 match_fp = fp;
6767
6768 if (strcasecmp(val, fp->name) == 0) {
6769 nr_matches = 1;
6770 break;
6771 }
6772 }
6773
6774 if (!nr_matches) {
6775 *reason = "unknown value";
6776 return -EINVAL;
6777 }
6778 if (nr_matches > 1) {
6779 *reason = "ambiguous value";
6780 return -EINVAL;
6781 }
6782
6783 force_ent->param = *match_fp;
6784
6785 return 0;
6786 }
6787
6788 static void __init ata_parse_force_param(void)
6789 {
6790 int idx = 0, size = 1;
6791 int last_port = -1, last_device = -1;
6792 char *p, *cur, *next;
6793
6794 /* calculate maximum number of params and allocate force_tbl */
6795 for (p = ata_force_param_buf; *p; p++)
6796 if (*p == ',')
6797 size++;
6798
6799 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6800 if (!ata_force_tbl) {
6801 printk(KERN_WARNING "ata: failed to extend force table, "
6802 "libata.force ignored\n");
6803
return; 6804 } 6805 6806 /* parse and populate the table */ 6807 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 6808 const char *reason = ""; 6809 struct ata_force_ent te = { .port = -1, .device = -1 }; 6810 6811 next = cur; 6812 if (ata_parse_force_one(&next, &te, &reason)) { 6813 printk(KERN_WARNING "ata: failed to parse force " 6814 "parameter \"%s\" (%s)\n", 6815 cur, reason); 6816 continue; 6817 } 6818 6819 if (te.port == -1) { 6820 te.port = last_port; 6821 te.device = last_device; 6822 } 6823 6824 ata_force_tbl[idx++] = te; 6825 6826 last_port = te.port; 6827 last_device = te.device; 6828 } 6829 6830 ata_force_tbl_size = idx; 6831 } 6832 6833 static int __init ata_init(void) 6834 { 6835 int rc; 6836 6837 ata_parse_force_param(); 6838 6839 rc = ata_sff_init(); 6840 if (rc) { 6841 kfree(ata_force_tbl); 6842 return rc; 6843 } 6844 6845 libata_transport_init(); 6846 ata_scsi_transport_template = ata_attach_transport(); 6847 if (!ata_scsi_transport_template) { 6848 ata_sff_exit(); 6849 rc = -ENOMEM; 6850 goto err_out; 6851 } 6852 6853 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6854 return 0; 6855 6856 err_out: 6857 return rc; 6858 } 6859 6860 static void __exit ata_exit(void) 6861 { 6862 ata_release_transport(ata_scsi_transport_template); 6863 libata_transport_exit(); 6864 ata_sff_exit(); 6865 kfree(ata_force_tbl); 6866 } 6867 6868 subsys_initcall(ata_init); 6869 module_exit(ata_exit); 6870 6871 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); 6872 6873 int ata_ratelimit(void) 6874 { 6875 return __ratelimit(&ratelimit); 6876 } 6877 6878 /** 6879 * ata_msleep - ATA EH owner aware msleep 6880 * @ap: ATA port to attribute the sleep to 6881 * @msecs: duration to sleep in milliseconds 6882 * 6883 * Sleeps @msecs. If the current task is owner of @ap's EH, the 6884 * ownership is released before going to sleep and reacquired 6885 * after the sleep is complete. IOW, other ports sharing the 6886 * @ap->host will be allowed to own the EH while this task is 6887 * sleeping. 6888 * 6889 * LOCKING: 6890 * Might sleep. 6891 */ 6892 void ata_msleep(struct ata_port *ap, unsigned int msecs) 6893 { 6894 bool owns_eh = ap && ap->host->eh_owner == current; 6895 6896 if (owns_eh) 6897 ata_eh_release(ap); 6898 6899 if (msecs < 20) { 6900 unsigned long usecs = msecs * USEC_PER_MSEC; 6901 usleep_range(usecs, usecs + 50); 6902 } else { 6903 msleep(msecs); 6904 } 6905 6906 if (owns_eh) 6907 ata_eh_acquire(ap); 6908 } 6909 6910 /** 6911 * ata_wait_register - wait until register value changes 6912 * @ap: ATA port to wait register for, can be NULL 6913 * @reg: IO-mapped register 6914 * @mask: Mask to apply to read register value 6915 * @val: Wait condition 6916 * @interval: polling interval in milliseconds 6917 * @timeout: timeout in milliseconds 6918 * 6919 * Waiting for some bits of register to change is a common 6920 * operation for ATA controllers. This function reads 32bit LE 6921 * IO-mapped register @reg and tests for the following condition. 6922 * 6923 * (*@reg & mask) != val 6924 * 6925 * If the condition is met, it returns; otherwise, the process is 6926 * repeated after @interval_msec until timeout. 6927 * 6928 * LOCKING: 6929 * Kernel thread context (may sleep) 6930 * 6931 * RETURNS: 6932 * The final register value. 
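 *
 * Minimal usage sketch (MY_DRV_STATUS, MY_DRV_BSY and mmio are
 * made-up names for illustration only): poll a busy bit every 50ms
 * for up to 1s and give up if it never clears.
 *
 *	status = ata_wait_register(ap, mmio + MY_DRV_STATUS,
 *				   MY_DRV_BSY, MY_DRV_BSY, 50, 1000);
 *	if (status & MY_DRV_BSY)
 *		ata_port_warn(ap, "device did not become ready\n");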
6933 */
6934 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
6935 unsigned long interval, unsigned long timeout)
6936 {
6937 unsigned long deadline;
6938 u32 tmp;
6939
6940 tmp = ioread32(reg);
6941
6942 /* Calculate timeout _after_ the first read to make sure
6943 * preceding writes reach the controller before starting to
6944 * eat away the timeout.
6945 */
6946 deadline = ata_deadline(jiffies, timeout);
6947
6948 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6949 ata_msleep(ap, interval);
6950 tmp = ioread32(reg);
6951 }
6952
6953 return tmp;
6954 }
6955
6956 /**
6957 * sata_lpm_ignore_phy_events - test if PHY event should be ignored
6958 * @link: Link receiving the event
6959 *
6960 * Test whether the received PHY event has to be ignored or not.
6961 *
6962 * LOCKING:
6963 * None.
6964 *
6965 * RETURNS:
6966 * True if the event has to be ignored.
6967 */
6968 bool sata_lpm_ignore_phy_events(struct ata_link *link)
6969 {
6970 unsigned long lpm_timeout = link->last_lpm_change +
6971 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
6972
6973 /* if LPM is enabled, PHYRDY doesn't mean anything */
6974 if (link->lpm_policy > ATA_LPM_MAX_POWER)
6975 return true;
6976
6977 /* ignore the first PHY event after the LPM policy changed
6978 * as it might be spurious
6979 */
6980 if ((link->flags & ATA_LFLAG_CHANGED) &&
6981 time_before(jiffies, lpm_timeout))
6982 return true;
6983
6984 return false;
6985 }
6986 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
6987
6988 /*
6989 * Dummy port_ops
6990 */
6991 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6992 {
6993 return AC_ERR_SYSTEM;
6994 }
6995
6996 static void ata_dummy_error_handler(struct ata_port *ap)
6997 {
6998 /* truly dummy */
6999 }
7000
7001 struct ata_port_operations ata_dummy_port_ops = {
7002 .qc_prep = ata_noop_qc_prep,
7003 .qc_issue = ata_dummy_qc_issue,
7004 .error_handler = ata_dummy_error_handler,
7005 .sched_eh = ata_std_sched_eh,
7006 .end_eh = ata_std_end_eh,
7007 };
7008
7009 const struct ata_port_info ata_dummy_port_info = {
7010 .port_ops = &ata_dummy_port_ops,
7011 };
7012
7013 /*
7014 * Utility print functions
7015 */
7016 void ata_port_printk(const struct ata_port *ap, const char *level,
7017 const char *fmt, ...)
7018 {
7019 struct va_format vaf;
7020 va_list args;
7021
7022 va_start(args, fmt);
7023
7024 vaf.fmt = fmt;
7025 vaf.va = &args;
7026
7027 printk("%sata%u: %pV", level, ap->print_id, &vaf);
7028
7029 va_end(args);
7030 }
7031 EXPORT_SYMBOL(ata_port_printk);
7032
7033 void ata_link_printk(const struct ata_link *link, const char *level,
7034 const char *fmt, ...)
7035 {
7036 struct va_format vaf;
7037 va_list args;
7038
7039 va_start(args, fmt);
7040
7041 vaf.fmt = fmt;
7042 vaf.va = &args;
7043
7044 if (sata_pmp_attached(link->ap) || link->ap->slave_link)
7045 printk("%sata%u.%02u: %pV",
7046 level, link->ap->print_id, link->pmp, &vaf);
7047 else
7048 printk("%sata%u: %pV",
7049 level, link->ap->print_id, &vaf);
7050
7051 va_end(args);
7052 }
7053 EXPORT_SYMBOL(ata_link_printk);
7054
7055 void ata_dev_printk(const struct ata_device *dev, const char *level,
7056 const char *fmt, ...)
7057 { 7058 struct va_format vaf; 7059 va_list args; 7060 7061 va_start(args, fmt); 7062 7063 vaf.fmt = fmt; 7064 vaf.va = &args; 7065 7066 printk("%sata%u.%02u: %pV", 7067 level, dev->link->ap->print_id, dev->link->pmp + dev->devno, 7068 &vaf); 7069 7070 va_end(args); 7071 } 7072 EXPORT_SYMBOL(ata_dev_printk); 7073 7074 void ata_print_version(const struct device *dev, const char *version) 7075 { 7076 dev_printk(KERN_DEBUG, dev, "version %s\n", version); 7077 } 7078 EXPORT_SYMBOL(ata_print_version); 7079 7080 /* 7081 * libata is essentially a library of internal helper functions for 7082 * low-level ATA host controller drivers. As such, the API/ABI is 7083 * likely to change as new drivers are added and updated. 7084 * Do not depend on ABI/API stability. 7085 */ 7086 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 7087 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 7088 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 7089 EXPORT_SYMBOL_GPL(ata_base_port_ops); 7090 EXPORT_SYMBOL_GPL(sata_port_ops); 7091 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 7092 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 7093 EXPORT_SYMBOL_GPL(ata_link_next); 7094 EXPORT_SYMBOL_GPL(ata_dev_next); 7095 EXPORT_SYMBOL_GPL(ata_std_bios_param); 7096 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); 7097 EXPORT_SYMBOL_GPL(ata_host_init); 7098 EXPORT_SYMBOL_GPL(ata_host_alloc); 7099 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 7100 EXPORT_SYMBOL_GPL(ata_slave_link_init); 7101 EXPORT_SYMBOL_GPL(ata_host_start); 7102 EXPORT_SYMBOL_GPL(ata_host_register); 7103 EXPORT_SYMBOL_GPL(ata_host_activate); 7104 EXPORT_SYMBOL_GPL(ata_host_detach); 7105 EXPORT_SYMBOL_GPL(ata_sg_init); 7106 EXPORT_SYMBOL_GPL(ata_qc_complete); 7107 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 7108 EXPORT_SYMBOL_GPL(atapi_cmd_type); 7109 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 7110 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 7111 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 7112 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 7113 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 7114 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 7115 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 7116 EXPORT_SYMBOL_GPL(ata_mode_string); 7117 EXPORT_SYMBOL_GPL(ata_id_xfermask); 7118 EXPORT_SYMBOL_GPL(ata_do_set_mode); 7119 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 7120 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 7121 EXPORT_SYMBOL_GPL(ata_dev_disable); 7122 EXPORT_SYMBOL_GPL(sata_set_spd); 7123 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 7124 EXPORT_SYMBOL_GPL(sata_link_debounce); 7125 EXPORT_SYMBOL_GPL(sata_link_resume); 7126 EXPORT_SYMBOL_GPL(sata_link_scr_lpm); 7127 EXPORT_SYMBOL_GPL(ata_std_prereset); 7128 EXPORT_SYMBOL_GPL(sata_link_hardreset); 7129 EXPORT_SYMBOL_GPL(sata_std_hardreset); 7130 EXPORT_SYMBOL_GPL(ata_std_postreset); 7131 EXPORT_SYMBOL_GPL(ata_dev_classify); 7132 EXPORT_SYMBOL_GPL(ata_dev_pair); 7133 EXPORT_SYMBOL_GPL(ata_ratelimit); 7134 EXPORT_SYMBOL_GPL(ata_msleep); 7135 EXPORT_SYMBOL_GPL(ata_wait_register); 7136 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 7137 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 7138 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 7139 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 7140 EXPORT_SYMBOL_GPL(__ata_change_queue_depth); 7141 EXPORT_SYMBOL_GPL(sata_scr_valid); 7142 EXPORT_SYMBOL_GPL(sata_scr_read); 7143 EXPORT_SYMBOL_GPL(sata_scr_write); 7144 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 7145 EXPORT_SYMBOL_GPL(ata_link_online); 7146 EXPORT_SYMBOL_GPL(ata_link_offline); 7147 #ifdef CONFIG_PM 7148 EXPORT_SYMBOL_GPL(ata_host_suspend); 7149 EXPORT_SYMBOL_GPL(ata_host_resume); 7150 #endif /* CONFIG_PM */ 7151 
EXPORT_SYMBOL_GPL(ata_id_string); 7152 EXPORT_SYMBOL_GPL(ata_id_c_string); 7153 EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 7154 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 7155 7156 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 7157 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 7158 EXPORT_SYMBOL_GPL(ata_timing_compute); 7159 EXPORT_SYMBOL_GPL(ata_timing_merge); 7160 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 7161 7162 #ifdef CONFIG_PCI 7163 EXPORT_SYMBOL_GPL(pci_test_config_bits); 7164 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 7165 #ifdef CONFIG_PM 7166 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 7167 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 7168 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 7169 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 7170 #endif /* CONFIG_PM */ 7171 #endif /* CONFIG_PCI */ 7172 7173 EXPORT_SYMBOL_GPL(ata_platform_remove_one); 7174 7175 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 7176 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 7177 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 7178 EXPORT_SYMBOL_GPL(ata_port_desc); 7179 #ifdef CONFIG_PCI 7180 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 7181 #endif /* CONFIG_PCI */ 7182 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 7183 EXPORT_SYMBOL_GPL(ata_link_abort); 7184 EXPORT_SYMBOL_GPL(ata_port_abort); 7185 EXPORT_SYMBOL_GPL(ata_port_freeze); 7186 EXPORT_SYMBOL_GPL(sata_async_notification); 7187 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 7188 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 7189 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 7190 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 7191 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); 7192 EXPORT_SYMBOL_GPL(ata_do_eh); 7193 EXPORT_SYMBOL_GPL(ata_std_error_handler); 7194 7195 EXPORT_SYMBOL_GPL(ata_cable_40wire); 7196 EXPORT_SYMBOL_GPL(ata_cable_80wire); 7197 EXPORT_SYMBOL_GPL(ata_cable_unknown); 7198 EXPORT_SYMBOL_GPL(ata_cable_ignore); 7199 EXPORT_SYMBOL_GPL(ata_cable_sata); 7200