1 /* 2 * libata-core.c - helper library for ATA 3 * 4 * Maintained by: Tejun Heo <tj@kernel.org> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 9 * Copyright 2003-2004 Jeff Garzik 10 * 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; see the file COPYING. If not, write to 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Hardware documentation available from http://www.t13.org/ and 31 * http://www.sata-io.org/ 32 * 33 * Standards documents from: 34 * http://www.t13.org (ATA standards, PCI DMA IDE spec) 35 * http://www.t10.org (SCSI MMC - for ATAPI MMC) 36 * http://www.sata-io.org (SATA) 37 * http://www.compactflash.org (CF) 38 * http://www.qic.org (QIC157 - Tape and DSC) 39 * http://www.ce-ata.org (CE-ATA: not supported) 40 * 41 */ 42 43 #include <linux/kernel.h> 44 #include <linux/module.h> 45 #include <linux/pci.h> 46 #include <linux/init.h> 47 #include <linux/list.h> 48 #include <linux/mm.h> 49 #include <linux/spinlock.h> 50 #include <linux/blkdev.h> 51 #include <linux/delay.h> 52 #include <linux/timer.h> 53 #include <linux/interrupt.h> 54 #include <linux/completion.h> 55 #include <linux/suspend.h> 56 #include <linux/workqueue.h> 57 #include <linux/scatterlist.h> 58 #include <linux/io.h> 59 #include <linux/async.h> 60 #include <linux/log2.h> 61 #include <linux/slab.h> 62 #include <scsi/scsi.h> 63 #include <scsi/scsi_cmnd.h> 64 #include <scsi/scsi_host.h> 65 #include <linux/libata.h> 66 #include <asm/byteorder.h> 67 #include <linux/cdrom.h> 68 #include <linux/ratelimit.h> 69 #include <linux/pm_runtime.h> 70 #include <linux/platform_device.h> 71 72 #include "libata.h" 73 #include "libata-transport.h" 74 75 /* debounce timing parameters in msecs { interval, duration, timeout } */ 76 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; 77 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; 78 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; 79 80 const struct ata_port_operations ata_base_port_ops = { 81 .prereset = ata_std_prereset, 82 .postreset = ata_std_postreset, 83 .error_handler = ata_std_error_handler, 84 .sched_eh = ata_std_sched_eh, 85 .end_eh = ata_std_end_eh, 86 }; 87 88 const struct ata_port_operations sata_port_ops = { 89 .inherits = &ata_base_port_ops, 90 91 .qc_defer = ata_std_qc_defer, 92 .hardreset = sata_std_hardreset, 93 }; 94 95 static unsigned int ata_dev_init_params(struct ata_device *dev, 96 u16 heads, u16 sectors); 97 static unsigned int ata_dev_set_xfermode(struct ata_device *dev); 98 static void ata_dev_xfermask(struct ata_device *dev); 99 static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 100 101 atomic_t ata_print_id = ATOMIC_INIT(0); 102 103 struct ata_force_param { 104 const char *name; 105 unsigned int cbl; 106 int 
spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}

/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next link.
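 *
 * Illustrative usage sketch; callers normally go through the
 * ata_for_each_link() wrapper, which keeps feeding the previous result
 * back in until NULL is returned:
 *
 *	struct ata_link *link = NULL;
 *
 *	while ((link = ata_link_next(link, ap, ATA_LITER_EDGE)))
 *		ata_link_info(link, "considering this link\n");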
189 */ 190 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, 191 enum ata_link_iter_mode mode) 192 { 193 BUG_ON(mode != ATA_LITER_EDGE && 194 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST); 195 196 /* NULL link indicates start of iteration */ 197 if (!link) 198 switch (mode) { 199 case ATA_LITER_EDGE: 200 case ATA_LITER_PMP_FIRST: 201 if (sata_pmp_attached(ap)) 202 return ap->pmp_link; 203 /* fall through */ 204 case ATA_LITER_HOST_FIRST: 205 return &ap->link; 206 } 207 208 /* we just iterated over the host link, what's next? */ 209 if (link == &ap->link) 210 switch (mode) { 211 case ATA_LITER_HOST_FIRST: 212 if (sata_pmp_attached(ap)) 213 return ap->pmp_link; 214 /* fall through */ 215 case ATA_LITER_PMP_FIRST: 216 if (unlikely(ap->slave_link)) 217 return ap->slave_link; 218 /* fall through */ 219 case ATA_LITER_EDGE: 220 return NULL; 221 } 222 223 /* slave_link excludes PMP */ 224 if (unlikely(link == ap->slave_link)) 225 return NULL; 226 227 /* we were over a PMP link */ 228 if (++link < ap->pmp_link + ap->nr_pmp_links) 229 return link; 230 231 if (mode == ATA_LITER_PMP_FIRST) 232 return &ap->link; 233 234 return NULL; 235 } 236 237 /** 238 * ata_dev_next - device iteration helper 239 * @dev: the previous device, NULL to start 240 * @link: ATA link containing devices to iterate 241 * @mode: iteration mode, one of ATA_DITER_* 242 * 243 * LOCKING: 244 * Host lock or EH context. 245 * 246 * RETURNS: 247 * Pointer to the next device. 248 */ 249 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link, 250 enum ata_dev_iter_mode mode) 251 { 252 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE && 253 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE); 254 255 /* NULL dev indicates start of iteration */ 256 if (!dev) 257 switch (mode) { 258 case ATA_DITER_ENABLED: 259 case ATA_DITER_ALL: 260 dev = link->device; 261 goto check; 262 case ATA_DITER_ENABLED_REVERSE: 263 case ATA_DITER_ALL_REVERSE: 264 dev = link->device + ata_link_max_devices(link) - 1; 265 goto check; 266 } 267 268 next: 269 /* move to the next one */ 270 switch (mode) { 271 case ATA_DITER_ENABLED: 272 case ATA_DITER_ALL: 273 if (++dev < link->device + ata_link_max_devices(link)) 274 goto check; 275 return NULL; 276 case ATA_DITER_ENABLED_REVERSE: 277 case ATA_DITER_ALL_REVERSE: 278 if (--dev >= link->device) 279 goto check; 280 return NULL; 281 } 282 283 check: 284 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) && 285 !ata_dev_enabled(dev)) 286 goto next; 287 return dev; 288 } 289 290 /** 291 * ata_dev_phys_link - find physical link for a device 292 * @dev: ATA device to look up physical link for 293 * 294 * Look up physical link which @dev is attached to. Note that 295 * this is different from @dev->link only when @dev is on slave 296 * link. For all other cases, it's the same as @dev->link. 297 * 298 * LOCKING: 299 * Don't care. 300 * 301 * RETURNS: 302 * Pointer to the found physical link. 303 */ 304 struct ata_link *ata_dev_phys_link(struct ata_device *dev) 305 { 306 struct ata_port *ap = dev->link->ap; 307 308 if (!ap->slave_link) 309 return dev->link; 310 if (!dev->devno) 311 return &ap->link; 312 return ap->slave_link; 313 } 314 315 /** 316 * ata_force_cbl - force cable type according to libata.force 317 * @ap: ATA port of interest 318 * 319 * Force cable type according to libata.force and whine about it. 
320 * The last entry which has matching port number is used, so it 321 * can be specified as part of device force parameters. For 322 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the 323 * same effect. 324 * 325 * LOCKING: 326 * EH context. 327 */ 328 void ata_force_cbl(struct ata_port *ap) 329 { 330 int i; 331 332 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 333 const struct ata_force_ent *fe = &ata_force_tbl[i]; 334 335 if (fe->port != -1 && fe->port != ap->print_id) 336 continue; 337 338 if (fe->param.cbl == ATA_CBL_NONE) 339 continue; 340 341 ap->cbl = fe->param.cbl; 342 ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name); 343 return; 344 } 345 } 346 347 /** 348 * ata_force_link_limits - force link limits according to libata.force 349 * @link: ATA link of interest 350 * 351 * Force link flags and SATA spd limit according to libata.force 352 * and whine about it. When only the port part is specified 353 * (e.g. 1:), the limit applies to all links connected to both 354 * the host link and all fan-out ports connected via PMP. If the 355 * device part is specified as 0 (e.g. 1.00:), it specifies the 356 * first fan-out link not the host link. Device number 15 always 357 * points to the host link whether PMP is attached or not. If the 358 * controller has slave link, device number 16 points to it. 359 * 360 * LOCKING: 361 * EH context. 362 */ 363 static void ata_force_link_limits(struct ata_link *link) 364 { 365 bool did_spd = false; 366 int linkno = link->pmp; 367 int i; 368 369 if (ata_is_host_link(link)) 370 linkno += 15; 371 372 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 373 const struct ata_force_ent *fe = &ata_force_tbl[i]; 374 375 if (fe->port != -1 && fe->port != link->ap->print_id) 376 continue; 377 378 if (fe->device != -1 && fe->device != linkno) 379 continue; 380 381 /* only honor the first spd limit */ 382 if (!did_spd && fe->param.spd_limit) { 383 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; 384 ata_link_notice(link, "FORCE: PHY spd limit set to %s\n", 385 fe->param.name); 386 did_spd = true; 387 } 388 389 /* let lflags stack */ 390 if (fe->param.lflags) { 391 link->flags |= fe->param.lflags; 392 ata_link_notice(link, 393 "FORCE: link flag 0x%x forced -> 0x%x\n", 394 fe->param.lflags, link->flags); 395 } 396 } 397 } 398 399 /** 400 * ata_force_xfermask - force xfermask according to libata.force 401 * @dev: ATA device of interest 402 * 403 * Force xfer_mask according to libata.force and whine about it. 404 * For consistency with link selection, device number 15 selects 405 * the first device connected to the host link. 406 * 407 * LOCKING: 408 * EH context. 
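 *
 * For illustration, with the documented libata.force syntax a boot
 * parameter such as "libata.force=1.00:udma4" ends up here and clamps
 * device 1.00 to UDMA/66 by rewriting its udma_mask.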
409 */ 410 static void ata_force_xfermask(struct ata_device *dev) 411 { 412 int devno = dev->link->pmp + dev->devno; 413 int alt_devno = devno; 414 int i; 415 416 /* allow n.15/16 for devices attached to host port */ 417 if (ata_is_host_link(dev->link)) 418 alt_devno += 15; 419 420 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 421 const struct ata_force_ent *fe = &ata_force_tbl[i]; 422 unsigned long pio_mask, mwdma_mask, udma_mask; 423 424 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 425 continue; 426 427 if (fe->device != -1 && fe->device != devno && 428 fe->device != alt_devno) 429 continue; 430 431 if (!fe->param.xfer_mask) 432 continue; 433 434 ata_unpack_xfermask(fe->param.xfer_mask, 435 &pio_mask, &mwdma_mask, &udma_mask); 436 if (udma_mask) 437 dev->udma_mask = udma_mask; 438 else if (mwdma_mask) { 439 dev->udma_mask = 0; 440 dev->mwdma_mask = mwdma_mask; 441 } else { 442 dev->udma_mask = 0; 443 dev->mwdma_mask = 0; 444 dev->pio_mask = pio_mask; 445 } 446 447 ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n", 448 fe->param.name); 449 return; 450 } 451 } 452 453 /** 454 * ata_force_horkage - force horkage according to libata.force 455 * @dev: ATA device of interest 456 * 457 * Force horkage according to libata.force and whine about it. 458 * For consistency with link selection, device number 15 selects 459 * the first device connected to the host link. 460 * 461 * LOCKING: 462 * EH context. 463 */ 464 static void ata_force_horkage(struct ata_device *dev) 465 { 466 int devno = dev->link->pmp + dev->devno; 467 int alt_devno = devno; 468 int i; 469 470 /* allow n.15/16 for devices attached to host port */ 471 if (ata_is_host_link(dev->link)) 472 alt_devno += 15; 473 474 for (i = 0; i < ata_force_tbl_size; i++) { 475 const struct ata_force_ent *fe = &ata_force_tbl[i]; 476 477 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 478 continue; 479 480 if (fe->device != -1 && fe->device != devno && 481 fe->device != alt_devno) 482 continue; 483 484 if (!(~dev->horkage & fe->param.horkage_on) && 485 !(dev->horkage & fe->param.horkage_off)) 486 continue; 487 488 dev->horkage |= fe->param.horkage_on; 489 dev->horkage &= ~fe->param.horkage_off; 490 491 ata_dev_notice(dev, "FORCE: horkage modified (%s)\n", 492 fe->param.name); 493 } 494 } 495 496 /** 497 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode 498 * @opcode: SCSI opcode 499 * 500 * Determine ATAPI command type from @opcode. 501 * 502 * LOCKING: 503 * None. 504 * 505 * RETURNS: 506 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC} 507 */ 508 int atapi_cmd_type(u8 opcode) 509 { 510 switch (opcode) { 511 case GPCMD_READ_10: 512 case GPCMD_READ_12: 513 return ATAPI_READ; 514 515 case GPCMD_WRITE_10: 516 case GPCMD_WRITE_12: 517 case GPCMD_WRITE_AND_VERIFY_10: 518 return ATAPI_WRITE; 519 520 case GPCMD_READ_CD: 521 case GPCMD_READ_CD_MSF: 522 return ATAPI_READ_CD; 523 524 case ATA_16: 525 case ATA_12: 526 if (atapi_passthru16) 527 return ATAPI_PASS_THRU; 528 /* fall thru */ 529 default: 530 return ATAPI_MISC; 531 } 532 } 533 534 /** 535 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 536 * @tf: Taskfile to convert 537 * @pmp: Port multiplier port 538 * @is_cmd: This FIS is for command 539 * @fis: Buffer into which data will output 540 * 541 * Converts a standard ATA taskfile to a Serial ATA 542 * FIS structure (Register - Host to Device). 543 * 544 * LOCKING: 545 * Inherited from caller. 
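 *
 * Usage sketch (illustrative, assuming @qc is a prepared struct
 * ata_queued_cmd): emit the 20-byte Register - Host to Device FIS into
 * a driver-owned buffer:
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);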
546 */ 547 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) 548 { 549 fis[0] = 0x27; /* Register - Host to Device FIS */ 550 fis[1] = pmp & 0xf; /* Port multiplier number*/ 551 if (is_cmd) 552 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ 553 554 fis[2] = tf->command; 555 fis[3] = tf->feature; 556 557 fis[4] = tf->lbal; 558 fis[5] = tf->lbam; 559 fis[6] = tf->lbah; 560 fis[7] = tf->device; 561 562 fis[8] = tf->hob_lbal; 563 fis[9] = tf->hob_lbam; 564 fis[10] = tf->hob_lbah; 565 fis[11] = tf->hob_feature; 566 567 fis[12] = tf->nsect; 568 fis[13] = tf->hob_nsect; 569 fis[14] = 0; 570 fis[15] = tf->ctl; 571 572 fis[16] = 0; 573 fis[17] = 0; 574 fis[18] = 0; 575 fis[19] = 0; 576 } 577 578 /** 579 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile 580 * @fis: Buffer from which data will be input 581 * @tf: Taskfile to output 582 * 583 * Converts a serial ATA FIS structure to a standard ATA taskfile. 584 * 585 * LOCKING: 586 * Inherited from caller. 587 */ 588 589 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) 590 { 591 tf->command = fis[2]; /* status */ 592 tf->feature = fis[3]; /* error */ 593 594 tf->lbal = fis[4]; 595 tf->lbam = fis[5]; 596 tf->lbah = fis[6]; 597 tf->device = fis[7]; 598 599 tf->hob_lbal = fis[8]; 600 tf->hob_lbam = fis[9]; 601 tf->hob_lbah = fis[10]; 602 603 tf->nsect = fis[12]; 604 tf->hob_nsect = fis[13]; 605 } 606 607 static const u8 ata_rw_cmds[] = { 608 /* pio multi */ 609 ATA_CMD_READ_MULTI, 610 ATA_CMD_WRITE_MULTI, 611 ATA_CMD_READ_MULTI_EXT, 612 ATA_CMD_WRITE_MULTI_EXT, 613 0, 614 0, 615 0, 616 ATA_CMD_WRITE_MULTI_FUA_EXT, 617 /* pio */ 618 ATA_CMD_PIO_READ, 619 ATA_CMD_PIO_WRITE, 620 ATA_CMD_PIO_READ_EXT, 621 ATA_CMD_PIO_WRITE_EXT, 622 0, 623 0, 624 0, 625 0, 626 /* dma */ 627 ATA_CMD_READ, 628 ATA_CMD_WRITE, 629 ATA_CMD_READ_EXT, 630 ATA_CMD_WRITE_EXT, 631 0, 632 0, 633 0, 634 ATA_CMD_WRITE_FUA_EXT 635 }; 636 637 /** 638 * ata_rwcmd_protocol - set taskfile r/w commands and protocol 639 * @tf: command to examine and configure 640 * @dev: device tf belongs to 641 * 642 * Examine the device configuration and tf->flags to calculate 643 * the proper read/write commands and protocol to use. 644 * 645 * LOCKING: 646 * caller. 647 */ 648 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) 649 { 650 u8 cmd; 651 652 int index, fua, lba48, write; 653 654 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; 655 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; 656 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; 657 658 if (dev->flags & ATA_DFLAG_PIO) { 659 tf->protocol = ATA_PROT_PIO; 660 index = dev->multi_count ? 0 : 8; 661 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { 662 /* Unable to use DMA due to host limitation */ 663 tf->protocol = ATA_PROT_PIO; 664 index = dev->multi_count ? 0 : 8; 665 } else { 666 tf->protocol = ATA_PROT_DMA; 667 index = 16; 668 } 669 670 cmd = ata_rw_cmds[index + fua + lba48 + write]; 671 if (cmd) { 672 tf->command = cmd; 673 return 0; 674 } 675 return -1; 676 } 677 678 /** 679 * ata_tf_read_block - Read block address from ATA taskfile 680 * @tf: ATA taskfile of interest 681 * @dev: ATA device @tf belongs to 682 * 683 * LOCKING: 684 * None. 685 * 686 * Read block address from @tf. This function can handle all 687 * three address formats - LBA, LBA48 and CHS. tf->protocol and 688 * flags select the address format to use. 689 * 690 * RETURNS: 691 * Block address read from @tf. 
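 *
 * Worked CHS example (illustrative numbers): with dev->heads == 16 and
 * dev->sectors == 63, a taskfile addressing cyl 2, head 3, sect 4
 * decodes to block (2 * 16 + 3) * 63 + 4 - 1 == 2208.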
692 */ 693 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) 694 { 695 u64 block = 0; 696 697 if (tf->flags & ATA_TFLAG_LBA) { 698 if (tf->flags & ATA_TFLAG_LBA48) { 699 block |= (u64)tf->hob_lbah << 40; 700 block |= (u64)tf->hob_lbam << 32; 701 block |= (u64)tf->hob_lbal << 24; 702 } else 703 block |= (tf->device & 0xf) << 24; 704 705 block |= tf->lbah << 16; 706 block |= tf->lbam << 8; 707 block |= tf->lbal; 708 } else { 709 u32 cyl, head, sect; 710 711 cyl = tf->lbam | (tf->lbah << 8); 712 head = tf->device & 0xf; 713 sect = tf->lbal; 714 715 if (!sect) { 716 ata_dev_warn(dev, 717 "device reported invalid CHS sector 0\n"); 718 sect = 1; /* oh well */ 719 } 720 721 block = (cyl * dev->heads + head) * dev->sectors + sect - 1; 722 } 723 724 return block; 725 } 726 727 /** 728 * ata_build_rw_tf - Build ATA taskfile for given read/write request 729 * @tf: Target ATA taskfile 730 * @dev: ATA device @tf belongs to 731 * @block: Block address 732 * @n_block: Number of blocks 733 * @tf_flags: RW/FUA etc... 734 * @tag: tag 735 * 736 * LOCKING: 737 * None. 738 * 739 * Build ATA taskfile @tf for read/write request described by 740 * @block, @n_block, @tf_flags and @tag on @dev. 741 * 742 * RETURNS: 743 * 744 * 0 on success, -ERANGE if the request is too large for @dev, 745 * -EINVAL if the request is invalid. 746 */ 747 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 748 u64 block, u32 n_block, unsigned int tf_flags, 749 unsigned int tag) 750 { 751 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 752 tf->flags |= tf_flags; 753 754 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) { 755 /* yay, NCQ */ 756 if (!lba_48_ok(block, n_block)) 757 return -ERANGE; 758 759 tf->protocol = ATA_PROT_NCQ; 760 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 761 762 if (tf->flags & ATA_TFLAG_WRITE) 763 tf->command = ATA_CMD_FPDMA_WRITE; 764 else 765 tf->command = ATA_CMD_FPDMA_READ; 766 767 tf->nsect = tag << 3; 768 tf->hob_feature = (n_block >> 8) & 0xff; 769 tf->feature = n_block & 0xff; 770 771 tf->hob_lbah = (block >> 40) & 0xff; 772 tf->hob_lbam = (block >> 32) & 0xff; 773 tf->hob_lbal = (block >> 24) & 0xff; 774 tf->lbah = (block >> 16) & 0xff; 775 tf->lbam = (block >> 8) & 0xff; 776 tf->lbal = block & 0xff; 777 778 tf->device = ATA_LBA; 779 if (tf->flags & ATA_TFLAG_FUA) 780 tf->device |= 1 << 7; 781 } else if (dev->flags & ATA_DFLAG_LBA) { 782 tf->flags |= ATA_TFLAG_LBA; 783 784 if (lba_28_ok(block, n_block)) { 785 /* use LBA28 */ 786 tf->device |= (block >> 24) & 0xf; 787 } else if (lba_48_ok(block, n_block)) { 788 if (!(dev->flags & ATA_DFLAG_LBA48)) 789 return -ERANGE; 790 791 /* use LBA48 */ 792 tf->flags |= ATA_TFLAG_LBA48; 793 794 tf->hob_nsect = (n_block >> 8) & 0xff; 795 796 tf->hob_lbah = (block >> 40) & 0xff; 797 tf->hob_lbam = (block >> 32) & 0xff; 798 tf->hob_lbal = (block >> 24) & 0xff; 799 } else 800 /* request too large even for LBA48 */ 801 return -ERANGE; 802 803 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 804 return -EINVAL; 805 806 tf->nsect = n_block & 0xff; 807 808 tf->lbah = (block >> 16) & 0xff; 809 tf->lbam = (block >> 8) & 0xff; 810 tf->lbal = block & 0xff; 811 812 tf->device |= ATA_LBA; 813 } else { 814 /* CHS */ 815 u32 sect, head, cyl, track; 816 817 /* The request -may- be too large for CHS addressing. 
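		   lba_28_ok() is only a cheap upper bound here; the exact
		   cylinder/head/sector range check follows further down.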
*/ 818 if (!lba_28_ok(block, n_block)) 819 return -ERANGE; 820 821 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 822 return -EINVAL; 823 824 /* Convert LBA to CHS */ 825 track = (u32)block / dev->sectors; 826 cyl = track / dev->heads; 827 head = track % dev->heads; 828 sect = (u32)block % dev->sectors + 1; 829 830 DPRINTK("block %u track %u cyl %u head %u sect %u\n", 831 (u32)block, track, cyl, head, sect); 832 833 /* Check whether the converted CHS can fit. 834 Cylinder: 0-65535 835 Head: 0-15 836 Sector: 1-255*/ 837 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect)) 838 return -ERANGE; 839 840 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */ 841 tf->lbal = sect; 842 tf->lbam = cyl; 843 tf->lbah = cyl >> 8; 844 tf->device |= head; 845 } 846 847 return 0; 848 } 849 850 /** 851 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask 852 * @pio_mask: pio_mask 853 * @mwdma_mask: mwdma_mask 854 * @udma_mask: udma_mask 855 * 856 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single 857 * unsigned int xfer_mask. 858 * 859 * LOCKING: 860 * None. 861 * 862 * RETURNS: 863 * Packed xfer_mask. 864 */ 865 unsigned long ata_pack_xfermask(unsigned long pio_mask, 866 unsigned long mwdma_mask, 867 unsigned long udma_mask) 868 { 869 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) | 870 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) | 871 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA); 872 } 873 874 /** 875 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks 876 * @xfer_mask: xfer_mask to unpack 877 * @pio_mask: resulting pio_mask 878 * @mwdma_mask: resulting mwdma_mask 879 * @udma_mask: resulting udma_mask 880 * 881 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask. 882 * Any NULL distination masks will be ignored. 883 */ 884 void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask, 885 unsigned long *mwdma_mask, unsigned long *udma_mask) 886 { 887 if (pio_mask) 888 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO; 889 if (mwdma_mask) 890 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA; 891 if (udma_mask) 892 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA; 893 } 894 895 static const struct ata_xfer_ent { 896 int shift, bits; 897 u8 base; 898 } ata_xfer_tbl[] = { 899 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 }, 900 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 }, 901 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 }, 902 { -1, }, 903 }; 904 905 /** 906 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask 907 * @xfer_mask: xfer_mask of interest 908 * 909 * Return matching XFER_* value for @xfer_mask. Only the highest 910 * bit of @xfer_mask is considered. 911 * 912 * LOCKING: 913 * None. 914 * 915 * RETURNS: 916 * Matching XFER_* value, 0xff if no match found. 917 */ 918 u8 ata_xfer_mask2mode(unsigned long xfer_mask) 919 { 920 int highbit = fls(xfer_mask) - 1; 921 const struct ata_xfer_ent *ent; 922 923 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 924 if (highbit >= ent->shift && highbit < ent->shift + ent->bits) 925 return ent->base + highbit - ent->shift; 926 return 0xff; 927 } 928 929 /** 930 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_* 931 * @xfer_mode: XFER_* of interest 932 * 933 * Return matching xfer_mask for @xfer_mode. 934 * 935 * LOCKING: 936 * None. 937 * 938 * RETURNS: 939 * Matching xfer_mask, 0 if no match found. 
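 *
 * Example (illustrative): ata_xfer_mode2mask(XFER_UDMA_2) yields a mask
 * with the UDMA0, UDMA1 and UDMA2 bits set (0x7 << ATA_SHIFT_UDMA),
 * while an out-of-range mode simply yields 0.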
940 */ 941 unsigned long ata_xfer_mode2mask(u8 xfer_mode) 942 { 943 const struct ata_xfer_ent *ent; 944 945 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 946 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 947 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1) 948 & ~((1 << ent->shift) - 1); 949 return 0; 950 } 951 952 /** 953 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_* 954 * @xfer_mode: XFER_* of interest 955 * 956 * Return matching xfer_shift for @xfer_mode. 957 * 958 * LOCKING: 959 * None. 960 * 961 * RETURNS: 962 * Matching xfer_shift, -1 if no match found. 963 */ 964 int ata_xfer_mode2shift(unsigned long xfer_mode) 965 { 966 const struct ata_xfer_ent *ent; 967 968 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 969 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 970 return ent->shift; 971 return -1; 972 } 973 974 /** 975 * ata_mode_string - convert xfer_mask to string 976 * @xfer_mask: mask of bits supported; only highest bit counts. 977 * 978 * Determine string which represents the highest speed 979 * (highest bit in @modemask). 980 * 981 * LOCKING: 982 * None. 983 * 984 * RETURNS: 985 * Constant C string representing highest speed listed in 986 * @mode_mask, or the constant C string "<n/a>". 987 */ 988 const char *ata_mode_string(unsigned long xfer_mask) 989 { 990 static const char * const xfer_mode_str[] = { 991 "PIO0", 992 "PIO1", 993 "PIO2", 994 "PIO3", 995 "PIO4", 996 "PIO5", 997 "PIO6", 998 "MWDMA0", 999 "MWDMA1", 1000 "MWDMA2", 1001 "MWDMA3", 1002 "MWDMA4", 1003 "UDMA/16", 1004 "UDMA/25", 1005 "UDMA/33", 1006 "UDMA/44", 1007 "UDMA/66", 1008 "UDMA/100", 1009 "UDMA/133", 1010 "UDMA7", 1011 }; 1012 int highbit; 1013 1014 highbit = fls(xfer_mask) - 1; 1015 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str)) 1016 return xfer_mode_str[highbit]; 1017 return "<n/a>"; 1018 } 1019 1020 const char *sata_spd_string(unsigned int spd) 1021 { 1022 static const char * const spd_str[] = { 1023 "1.5 Gbps", 1024 "3.0 Gbps", 1025 "6.0 Gbps", 1026 }; 1027 1028 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) 1029 return "<unknown>"; 1030 return spd_str[spd - 1]; 1031 } 1032 1033 /** 1034 * ata_dev_classify - determine device type based on ATA-spec signature 1035 * @tf: ATA taskfile register set for device to be identified 1036 * 1037 * Determine from taskfile register contents whether a device is 1038 * ATA or ATAPI, as per "Signature and persistence" section 1039 * of ATA/PI spec (volume 1, sect 5.14). 1040 * 1041 * LOCKING: 1042 * None. 1043 * 1044 * RETURNS: 1045 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or 1046 * %ATA_DEV_UNKNOWN the event of failure. 1047 */ 1048 unsigned int ata_dev_classify(const struct ata_taskfile *tf) 1049 { 1050 /* Apple's open source Darwin code hints that some devices only 1051 * put a proper signature into the LBA mid/high registers, 1052 * So, we only check those. It's sufficient for uniqueness. 1053 * 1054 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate 1055 * signatures for ATA and ATAPI devices attached on SerialATA, 1056 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA 1057 * spec has never mentioned about using different signatures 1058 * for ATA/ATAPI devices. Then, Serial ATA II: Port 1059 * Multiplier specification began to use 0x69/0x96 to identify 1060 * port multpliers and 0x3c/0xc3 to identify SEMB device. 1061 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and 1062 * 0x69/0x96 shortly and described them as reserved for 1063 * SerialATA. 
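	 *
	 * For quick reference, the (lbam, lbah) signature pairs checked
	 * below are (0x00, 0x00) for ATA, (0x14, 0xeb) for ATAPI,
	 * (0x69, 0x96) for a port multiplier and (0x3c, 0xc3) for SEMB.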
1064 * 1065 * We follow the current spec and consider that 0x69/0x96 1066 * identifies a port multiplier and 0x3c/0xc3 a SEMB device. 1067 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports 1068 * SEMB signature. This is worked around in 1069 * ata_dev_read_id(). 1070 */ 1071 if ((tf->lbam == 0) && (tf->lbah == 0)) { 1072 DPRINTK("found ATA device by sig\n"); 1073 return ATA_DEV_ATA; 1074 } 1075 1076 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) { 1077 DPRINTK("found ATAPI device by sig\n"); 1078 return ATA_DEV_ATAPI; 1079 } 1080 1081 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) { 1082 DPRINTK("found PMP device by sig\n"); 1083 return ATA_DEV_PMP; 1084 } 1085 1086 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { 1087 DPRINTK("found SEMB device by sig (could be ATA device)\n"); 1088 return ATA_DEV_SEMB; 1089 } 1090 1091 DPRINTK("unknown device\n"); 1092 return ATA_DEV_UNKNOWN; 1093 } 1094 1095 /** 1096 * ata_id_string - Convert IDENTIFY DEVICE page into string 1097 * @id: IDENTIFY DEVICE results we will examine 1098 * @s: string into which data is output 1099 * @ofs: offset into identify device page 1100 * @len: length of string to return. must be an even number. 1101 * 1102 * The strings in the IDENTIFY DEVICE page are broken up into 1103 * 16-bit chunks. Run through the string, and output each 1104 * 8-bit chunk linearly, regardless of platform. 1105 * 1106 * LOCKING: 1107 * caller. 1108 */ 1109 1110 void ata_id_string(const u16 *id, unsigned char *s, 1111 unsigned int ofs, unsigned int len) 1112 { 1113 unsigned int c; 1114 1115 BUG_ON(len & 1); 1116 1117 while (len > 0) { 1118 c = id[ofs] >> 8; 1119 *s = c; 1120 s++; 1121 1122 c = id[ofs] & 0xff; 1123 *s = c; 1124 s++; 1125 1126 ofs++; 1127 len -= 2; 1128 } 1129 } 1130 1131 /** 1132 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string 1133 * @id: IDENTIFY DEVICE results we will examine 1134 * @s: string into which data is output 1135 * @ofs: offset into identify device page 1136 * @len: length of string to return. must be an odd number. 1137 * 1138 * This function is identical to ata_id_string except that it 1139 * trims trailing spaces and terminates the resulting string with 1140 * null. @len must be actual maximum length (even number) + 1. 1141 * 1142 * LOCKING: 1143 * caller. 
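 *
 * Usage sketch (illustrative; probe code in this file does the same to
 * pull out the model string):
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));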
1144 */ 1145 void ata_id_c_string(const u16 *id, unsigned char *s, 1146 unsigned int ofs, unsigned int len) 1147 { 1148 unsigned char *p; 1149 1150 ata_id_string(id, s, ofs, len - 1); 1151 1152 p = s + strnlen(s, len - 1); 1153 while (p > s && p[-1] == ' ') 1154 p--; 1155 *p = '\0'; 1156 } 1157 1158 static u64 ata_id_n_sectors(const u16 *id) 1159 { 1160 if (ata_id_has_lba(id)) { 1161 if (ata_id_has_lba48(id)) 1162 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); 1163 else 1164 return ata_id_u32(id, ATA_ID_LBA_CAPACITY); 1165 } else { 1166 if (ata_id_current_chs_valid(id)) 1167 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] * 1168 id[ATA_ID_CUR_SECTORS]; 1169 else 1170 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * 1171 id[ATA_ID_SECTORS]; 1172 } 1173 } 1174 1175 u64 ata_tf_to_lba48(const struct ata_taskfile *tf) 1176 { 1177 u64 sectors = 0; 1178 1179 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; 1180 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; 1181 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; 1182 sectors |= (tf->lbah & 0xff) << 16; 1183 sectors |= (tf->lbam & 0xff) << 8; 1184 sectors |= (tf->lbal & 0xff); 1185 1186 return sectors; 1187 } 1188 1189 u64 ata_tf_to_lba(const struct ata_taskfile *tf) 1190 { 1191 u64 sectors = 0; 1192 1193 sectors |= (tf->device & 0x0f) << 24; 1194 sectors |= (tf->lbah & 0xff) << 16; 1195 sectors |= (tf->lbam & 0xff) << 8; 1196 sectors |= (tf->lbal & 0xff); 1197 1198 return sectors; 1199 } 1200 1201 /** 1202 * ata_read_native_max_address - Read native max address 1203 * @dev: target device 1204 * @max_sectors: out parameter for the result native max address 1205 * 1206 * Perform an LBA48 or LBA28 native size query upon the device in 1207 * question. 1208 * 1209 * RETURNS: 1210 * 0 on success, -EACCES if command is aborted by the drive. 1211 * -EIO on other errors. 1212 */ 1213 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) 1214 { 1215 unsigned int err_mask; 1216 struct ata_taskfile tf; 1217 int lba48 = ata_id_has_lba48(dev->id); 1218 1219 ata_tf_init(dev, &tf); 1220 1221 /* always clear all address registers */ 1222 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 1223 1224 if (lba48) { 1225 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; 1226 tf.flags |= ATA_TFLAG_LBA48; 1227 } else 1228 tf.command = ATA_CMD_READ_NATIVE_MAX; 1229 1230 tf.protocol |= ATA_PROT_NODATA; 1231 tf.device |= ATA_LBA; 1232 1233 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1234 if (err_mask) { 1235 ata_dev_warn(dev, 1236 "failed to read native max address (err_mask=0x%x)\n", 1237 err_mask); 1238 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 1239 return -EACCES; 1240 return -EIO; 1241 } 1242 1243 if (lba48) 1244 *max_sectors = ata_tf_to_lba48(&tf) + 1; 1245 else 1246 *max_sectors = ata_tf_to_lba(&tf) + 1; 1247 if (dev->horkage & ATA_HORKAGE_HPA_SIZE) 1248 (*max_sectors)--; 1249 return 0; 1250 } 1251 1252 /** 1253 * ata_set_max_sectors - Set max sectors 1254 * @dev: target device 1255 * @new_sectors: new max sectors value to set for the device 1256 * 1257 * Set max sectors of @dev to @new_sectors. 1258 * 1259 * RETURNS: 1260 * 0 on success, -EACCES if command is aborted or denied (due to 1261 * previous non-volatile SET_MAX) by the drive. -EIO on other 1262 * errors. 
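 *
 * For illustration of the LBA48 split below: an already-decremented
 * address of 0x0123456789ab lands in the taskfile as
 * hob_lbah/hob_lbam/hob_lbal = 0x01/0x23/0x45 and
 * lbah/lbam/lbal = 0x67/0x89/0xab.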
1263 */ 1264 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) 1265 { 1266 unsigned int err_mask; 1267 struct ata_taskfile tf; 1268 int lba48 = ata_id_has_lba48(dev->id); 1269 1270 new_sectors--; 1271 1272 ata_tf_init(dev, &tf); 1273 1274 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 1275 1276 if (lba48) { 1277 tf.command = ATA_CMD_SET_MAX_EXT; 1278 tf.flags |= ATA_TFLAG_LBA48; 1279 1280 tf.hob_lbal = (new_sectors >> 24) & 0xff; 1281 tf.hob_lbam = (new_sectors >> 32) & 0xff; 1282 tf.hob_lbah = (new_sectors >> 40) & 0xff; 1283 } else { 1284 tf.command = ATA_CMD_SET_MAX; 1285 1286 tf.device |= (new_sectors >> 24) & 0xf; 1287 } 1288 1289 tf.protocol |= ATA_PROT_NODATA; 1290 tf.device |= ATA_LBA; 1291 1292 tf.lbal = (new_sectors >> 0) & 0xff; 1293 tf.lbam = (new_sectors >> 8) & 0xff; 1294 tf.lbah = (new_sectors >> 16) & 0xff; 1295 1296 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1297 if (err_mask) { 1298 ata_dev_warn(dev, 1299 "failed to set max address (err_mask=0x%x)\n", 1300 err_mask); 1301 if (err_mask == AC_ERR_DEV && 1302 (tf.feature & (ATA_ABORTED | ATA_IDNF))) 1303 return -EACCES; 1304 return -EIO; 1305 } 1306 1307 return 0; 1308 } 1309 1310 /** 1311 * ata_hpa_resize - Resize a device with an HPA set 1312 * @dev: Device to resize 1313 * 1314 * Read the size of an LBA28 or LBA48 disk with HPA features and resize 1315 * it if required to the full size of the media. The caller must check 1316 * the drive has the HPA feature set enabled. 1317 * 1318 * RETURNS: 1319 * 0 on success, -errno on failure. 1320 */ 1321 static int ata_hpa_resize(struct ata_device *dev) 1322 { 1323 struct ata_eh_context *ehc = &dev->link->eh_context; 1324 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 1325 bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA; 1326 u64 sectors = ata_id_n_sectors(dev->id); 1327 u64 native_sectors; 1328 int rc; 1329 1330 /* do we need to do it? */ 1331 if (dev->class != ATA_DEV_ATA || 1332 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || 1333 (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) 1334 return 0; 1335 1336 /* read native max address */ 1337 rc = ata_read_native_max_address(dev, &native_sectors); 1338 if (rc) { 1339 /* If device aborted the command or HPA isn't going to 1340 * be unlocked, skip HPA resizing. 1341 */ 1342 if (rc == -EACCES || !unlock_hpa) { 1343 ata_dev_warn(dev, 1344 "HPA support seems broken, skipping HPA handling\n"); 1345 dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 1346 1347 /* we can continue if device aborted the command */ 1348 if (rc == -EACCES) 1349 rc = 0; 1350 } 1351 1352 return rc; 1353 } 1354 dev->n_native_sectors = native_sectors; 1355 1356 /* nothing to do? 
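	   (the native size is not larger than the current size, or HPA
	   unlocking was not requested)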
*/ 1357 if (native_sectors <= sectors || !unlock_hpa) { 1358 if (!print_info || native_sectors == sectors) 1359 return 0; 1360 1361 if (native_sectors > sectors) 1362 ata_dev_info(dev, 1363 "HPA detected: current %llu, native %llu\n", 1364 (unsigned long long)sectors, 1365 (unsigned long long)native_sectors); 1366 else if (native_sectors < sectors) 1367 ata_dev_warn(dev, 1368 "native sectors (%llu) is smaller than sectors (%llu)\n", 1369 (unsigned long long)native_sectors, 1370 (unsigned long long)sectors); 1371 return 0; 1372 } 1373 1374 /* let's unlock HPA */ 1375 rc = ata_set_max_sectors(dev, native_sectors); 1376 if (rc == -EACCES) { 1377 /* if device aborted the command, skip HPA resizing */ 1378 ata_dev_warn(dev, 1379 "device aborted resize (%llu -> %llu), skipping HPA handling\n", 1380 (unsigned long long)sectors, 1381 (unsigned long long)native_sectors); 1382 dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 1383 return 0; 1384 } else if (rc) 1385 return rc; 1386 1387 /* re-read IDENTIFY data */ 1388 rc = ata_dev_reread_id(dev, 0); 1389 if (rc) { 1390 ata_dev_err(dev, 1391 "failed to re-read IDENTIFY data after HPA resizing\n"); 1392 return rc; 1393 } 1394 1395 if (print_info) { 1396 u64 new_sectors = ata_id_n_sectors(dev->id); 1397 ata_dev_info(dev, 1398 "HPA unlocked: %llu -> %llu, native %llu\n", 1399 (unsigned long long)sectors, 1400 (unsigned long long)new_sectors, 1401 (unsigned long long)native_sectors); 1402 } 1403 1404 return 0; 1405 } 1406 1407 /** 1408 * ata_dump_id - IDENTIFY DEVICE info debugging output 1409 * @id: IDENTIFY DEVICE page to dump 1410 * 1411 * Dump selected 16-bit words from the given IDENTIFY DEVICE 1412 * page. 1413 * 1414 * LOCKING: 1415 * caller. 1416 */ 1417 1418 static inline void ata_dump_id(const u16 *id) 1419 { 1420 DPRINTK("49==0x%04x " 1421 "53==0x%04x " 1422 "63==0x%04x " 1423 "64==0x%04x " 1424 "75==0x%04x \n", 1425 id[49], 1426 id[53], 1427 id[63], 1428 id[64], 1429 id[75]); 1430 DPRINTK("80==0x%04x " 1431 "81==0x%04x " 1432 "82==0x%04x " 1433 "83==0x%04x " 1434 "84==0x%04x \n", 1435 id[80], 1436 id[81], 1437 id[82], 1438 id[83], 1439 id[84]); 1440 DPRINTK("88==0x%04x " 1441 "93==0x%04x\n", 1442 id[88], 1443 id[93]); 1444 } 1445 1446 /** 1447 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data 1448 * @id: IDENTIFY data to compute xfer mask from 1449 * 1450 * Compute the xfermask for this device. This is not as trivial 1451 * as it seems if we must consider early devices correctly. 1452 * 1453 * FIXME: pre IDE drive timing (do we care ?). 1454 * 1455 * LOCKING: 1456 * None. 1457 * 1458 * RETURNS: 1459 * Computed xfermask 1460 */ 1461 unsigned long ata_id_xfermask(const u16 *id) 1462 { 1463 unsigned long pio_mask, mwdma_mask, udma_mask; 1464 1465 /* Usual case. Word 53 indicates word 64 is valid */ 1466 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) { 1467 pio_mask = id[ATA_ID_PIO_MODES] & 0x03; 1468 pio_mask <<= 3; 1469 pio_mask |= 0x7; 1470 } else { 1471 /* If word 64 isn't valid then Word 51 high byte holds 1472 * the PIO timing number for the maximum. Turn it into 1473 * a mask. 1474 */ 1475 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; 1476 if (mode < 5) /* Valid PIO range */ 1477 pio_mask = (2 << mode) - 1; 1478 else 1479 pio_mask = 1; 1480 1481 /* But wait.. there's more. Design your standards by 1482 * committee and you too can get a free iordy field to 1483 * process. However its the speeds not the modes that 1484 * are supported... 
Note drivers using the timing API 1485 * will get this right anyway 1486 */ 1487 } 1488 1489 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07; 1490 1491 if (ata_id_is_cfa(id)) { 1492 /* 1493 * Process compact flash extended modes 1494 */ 1495 int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7; 1496 int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7; 1497 1498 if (pio) 1499 pio_mask |= (1 << 5); 1500 if (pio > 1) 1501 pio_mask |= (1 << 6); 1502 if (dma) 1503 mwdma_mask |= (1 << 3); 1504 if (dma > 1) 1505 mwdma_mask |= (1 << 4); 1506 } 1507 1508 udma_mask = 0; 1509 if (id[ATA_ID_FIELD_VALID] & (1 << 2)) 1510 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff; 1511 1512 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 1513 } 1514 1515 static void ata_qc_complete_internal(struct ata_queued_cmd *qc) 1516 { 1517 struct completion *waiting = qc->private_data; 1518 1519 complete(waiting); 1520 } 1521 1522 /** 1523 * ata_exec_internal_sg - execute libata internal command 1524 * @dev: Device to which the command is sent 1525 * @tf: Taskfile registers for the command and the result 1526 * @cdb: CDB for packet command 1527 * @dma_dir: Data tranfer direction of the command 1528 * @sgl: sg list for the data buffer of the command 1529 * @n_elem: Number of sg entries 1530 * @timeout: Timeout in msecs (0 for default) 1531 * 1532 * Executes libata internal command with timeout. @tf contains 1533 * command on entry and result on return. Timeout and error 1534 * conditions are reported via return value. No recovery action 1535 * is taken after a command times out. It's caller's duty to 1536 * clean up after timeout. 1537 * 1538 * LOCKING: 1539 * None. Should be called with kernel context, might sleep. 1540 * 1541 * RETURNS: 1542 * Zero on success, AC_ERR_* mask on failure 1543 */ 1544 unsigned ata_exec_internal_sg(struct ata_device *dev, 1545 struct ata_taskfile *tf, const u8 *cdb, 1546 int dma_dir, struct scatterlist *sgl, 1547 unsigned int n_elem, unsigned long timeout) 1548 { 1549 struct ata_link *link = dev->link; 1550 struct ata_port *ap = link->ap; 1551 u8 command = tf->command; 1552 int auto_timeout = 0; 1553 struct ata_queued_cmd *qc; 1554 unsigned int tag, preempted_tag; 1555 u32 preempted_sactive, preempted_qc_active; 1556 int preempted_nr_active_links; 1557 DECLARE_COMPLETION_ONSTACK(wait); 1558 unsigned long flags; 1559 unsigned int err_mask; 1560 int rc; 1561 1562 spin_lock_irqsave(ap->lock, flags); 1563 1564 /* no internal command while frozen */ 1565 if (ap->pflags & ATA_PFLAG_FROZEN) { 1566 spin_unlock_irqrestore(ap->lock, flags); 1567 return AC_ERR_SYSTEM; 1568 } 1569 1570 /* initialize internal qc */ 1571 1572 /* XXX: Tag 0 is used for drivers with legacy EH as some 1573 * drivers choke if any other tag is given. This breaks 1574 * ata_tag_internal() test for those drivers. Don't use new 1575 * EH stuff without converting to it. 
1576 */ 1577 if (ap->ops->error_handler) 1578 tag = ATA_TAG_INTERNAL; 1579 else 1580 tag = 0; 1581 1582 if (test_and_set_bit(tag, &ap->qc_allocated)) 1583 BUG(); 1584 qc = __ata_qc_from_tag(ap, tag); 1585 1586 qc->tag = tag; 1587 qc->scsicmd = NULL; 1588 qc->ap = ap; 1589 qc->dev = dev; 1590 ata_qc_reinit(qc); 1591 1592 preempted_tag = link->active_tag; 1593 preempted_sactive = link->sactive; 1594 preempted_qc_active = ap->qc_active; 1595 preempted_nr_active_links = ap->nr_active_links; 1596 link->active_tag = ATA_TAG_POISON; 1597 link->sactive = 0; 1598 ap->qc_active = 0; 1599 ap->nr_active_links = 0; 1600 1601 /* prepare & issue qc */ 1602 qc->tf = *tf; 1603 if (cdb) 1604 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); 1605 1606 /* some SATA bridges need us to indicate data xfer direction */ 1607 if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && 1608 dma_dir == DMA_FROM_DEVICE) 1609 qc->tf.feature |= ATAPI_DMADIR; 1610 1611 qc->flags |= ATA_QCFLAG_RESULT_TF; 1612 qc->dma_dir = dma_dir; 1613 if (dma_dir != DMA_NONE) { 1614 unsigned int i, buflen = 0; 1615 struct scatterlist *sg; 1616 1617 for_each_sg(sgl, sg, n_elem, i) 1618 buflen += sg->length; 1619 1620 ata_sg_init(qc, sgl, n_elem); 1621 qc->nbytes = buflen; 1622 } 1623 1624 qc->private_data = &wait; 1625 qc->complete_fn = ata_qc_complete_internal; 1626 1627 ata_qc_issue(qc); 1628 1629 spin_unlock_irqrestore(ap->lock, flags); 1630 1631 if (!timeout) { 1632 if (ata_probe_timeout) 1633 timeout = ata_probe_timeout * 1000; 1634 else { 1635 timeout = ata_internal_cmd_timeout(dev, command); 1636 auto_timeout = 1; 1637 } 1638 } 1639 1640 if (ap->ops->error_handler) 1641 ata_eh_release(ap); 1642 1643 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); 1644 1645 if (ap->ops->error_handler) 1646 ata_eh_acquire(ap); 1647 1648 ata_sff_flush_pio_task(ap); 1649 1650 if (!rc) { 1651 spin_lock_irqsave(ap->lock, flags); 1652 1653 /* We're racing with irq here. If we lose, the 1654 * following test prevents us from completing the qc 1655 * twice. If we win, the port is frozen and will be 1656 * cleaned up by ->post_internal_cmd(). 
1657 */ 1658 if (qc->flags & ATA_QCFLAG_ACTIVE) { 1659 qc->err_mask |= AC_ERR_TIMEOUT; 1660 1661 if (ap->ops->error_handler) 1662 ata_port_freeze(ap); 1663 else 1664 ata_qc_complete(qc); 1665 1666 if (ata_msg_warn(ap)) 1667 ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n", 1668 command); 1669 } 1670 1671 spin_unlock_irqrestore(ap->lock, flags); 1672 } 1673 1674 /* do post_internal_cmd */ 1675 if (ap->ops->post_internal_cmd) 1676 ap->ops->post_internal_cmd(qc); 1677 1678 /* perform minimal error analysis */ 1679 if (qc->flags & ATA_QCFLAG_FAILED) { 1680 if (qc->result_tf.command & (ATA_ERR | ATA_DF)) 1681 qc->err_mask |= AC_ERR_DEV; 1682 1683 if (!qc->err_mask) 1684 qc->err_mask |= AC_ERR_OTHER; 1685 1686 if (qc->err_mask & ~AC_ERR_OTHER) 1687 qc->err_mask &= ~AC_ERR_OTHER; 1688 } 1689 1690 /* finish up */ 1691 spin_lock_irqsave(ap->lock, flags); 1692 1693 *tf = qc->result_tf; 1694 err_mask = qc->err_mask; 1695 1696 ata_qc_free(qc); 1697 link->active_tag = preempted_tag; 1698 link->sactive = preempted_sactive; 1699 ap->qc_active = preempted_qc_active; 1700 ap->nr_active_links = preempted_nr_active_links; 1701 1702 spin_unlock_irqrestore(ap->lock, flags); 1703 1704 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout) 1705 ata_internal_cmd_timed_out(dev, command); 1706 1707 return err_mask; 1708 } 1709 1710 /** 1711 * ata_exec_internal - execute libata internal command 1712 * @dev: Device to which the command is sent 1713 * @tf: Taskfile registers for the command and the result 1714 * @cdb: CDB for packet command 1715 * @dma_dir: Data tranfer direction of the command 1716 * @buf: Data buffer of the command 1717 * @buflen: Length of data buffer 1718 * @timeout: Timeout in msecs (0 for default) 1719 * 1720 * Wrapper around ata_exec_internal_sg() which takes simple 1721 * buffer instead of sg list. 1722 * 1723 * LOCKING: 1724 * None. Should be called with kernel context, might sleep. 1725 * 1726 * RETURNS: 1727 * Zero on success, AC_ERR_* mask on failure 1728 */ 1729 unsigned ata_exec_internal(struct ata_device *dev, 1730 struct ata_taskfile *tf, const u8 *cdb, 1731 int dma_dir, void *buf, unsigned int buflen, 1732 unsigned long timeout) 1733 { 1734 struct scatterlist *psg = NULL, sg; 1735 unsigned int n_elem = 0; 1736 1737 if (dma_dir != DMA_NONE) { 1738 WARN_ON(!buf); 1739 sg_init_one(&sg, buf, buflen); 1740 psg = &sg; 1741 n_elem++; 1742 } 1743 1744 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem, 1745 timeout); 1746 } 1747 1748 /** 1749 * ata_do_simple_cmd - execute simple internal command 1750 * @dev: Device to which the command is sent 1751 * @cmd: Opcode to execute 1752 * 1753 * Execute a 'simple' command, that only consists of the opcode 1754 * 'cmd' itself, without filling any other registers 1755 * 1756 * LOCKING: 1757 * Kernel thread context (may sleep). 1758 * 1759 * RETURNS: 1760 * Zero on success, AC_ERR_* mask on failure 1761 */ 1762 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) 1763 { 1764 struct ata_taskfile tf; 1765 1766 ata_tf_init(dev, &tf); 1767 1768 tf.command = cmd; 1769 tf.flags |= ATA_TFLAG_DEVICE; 1770 tf.protocol = ATA_PROT_NODATA; 1771 1772 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1773 } 1774 1775 /** 1776 * ata_pio_need_iordy - check if iordy needed 1777 * @adev: ATA device 1778 * 1779 * Check if the current speed of the device requires IORDY. Used 1780 * by various controllers for chip configuration. 
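 *
 * Illustrative outcomes, assuming the controller checks below pass: a
 * CFA card at XFER_PIO_6 gets 0, any device at XFER_PIO_3 or above
 * gets 1, and slower devices get 1 only if their IDENTIFY data
 * advertises IORDY.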
1781 */ 1782 unsigned int ata_pio_need_iordy(const struct ata_device *adev) 1783 { 1784 /* Don't set IORDY if we're preparing for reset. IORDY may 1785 * lead to controller lock up on certain controllers if the 1786 * port is not occupied. See bko#11703 for details. 1787 */ 1788 if (adev->link->ap->pflags & ATA_PFLAG_RESETTING) 1789 return 0; 1790 /* Controller doesn't support IORDY. Probably a pointless 1791 * check as the caller should know this. 1792 */ 1793 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) 1794 return 0; 1795 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */ 1796 if (ata_id_is_cfa(adev->id) 1797 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6)) 1798 return 0; 1799 /* PIO3 and higher it is mandatory */ 1800 if (adev->pio_mode > XFER_PIO_2) 1801 return 1; 1802 /* We turn it on when possible */ 1803 if (ata_id_has_iordy(adev->id)) 1804 return 1; 1805 return 0; 1806 } 1807 1808 /** 1809 * ata_pio_mask_no_iordy - Return the non IORDY mask 1810 * @adev: ATA device 1811 * 1812 * Compute the highest mode possible if we are not using iordy. Return 1813 * -1 if no iordy mode is available. 1814 */ 1815 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) 1816 { 1817 /* If we have no drive specific rule, then PIO 2 is non IORDY */ 1818 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ 1819 u16 pio = adev->id[ATA_ID_EIDE_PIO]; 1820 /* Is the speed faster than the drive allows non IORDY ? */ 1821 if (pio) { 1822 /* This is cycle times not frequency - watch the logic! */ 1823 if (pio > 240) /* PIO2 is 240nS per cycle */ 1824 return 3 << ATA_SHIFT_PIO; 1825 return 7 << ATA_SHIFT_PIO; 1826 } 1827 } 1828 return 3 << ATA_SHIFT_PIO; 1829 } 1830 1831 /** 1832 * ata_do_dev_read_id - default ID read method 1833 * @dev: device 1834 * @tf: proposed taskfile 1835 * @id: data buffer 1836 * 1837 * Issue the identify taskfile and hand back the buffer containing 1838 * identify data. For some RAID controllers and for pre ATA devices 1839 * this function is wrapped or replaced by the driver 1840 */ 1841 unsigned int ata_do_dev_read_id(struct ata_device *dev, 1842 struct ata_taskfile *tf, u16 *id) 1843 { 1844 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE, 1845 id, sizeof(id[0]) * ATA_ID_WORDS, 0); 1846 } 1847 1848 /** 1849 * ata_dev_read_id - Read ID data from the specified device 1850 * @dev: target device 1851 * @p_class: pointer to class of the target device (may be changed) 1852 * @flags: ATA_READID_* flags 1853 * @id: buffer to read IDENTIFY data into 1854 * 1855 * Read ID data from the specified device. ATA_CMD_ID_ATA is 1856 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI 1857 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS 1858 * for pre-ATA4 drives. 1859 * 1860 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right 1861 * now we abort if we hit that case. 1862 * 1863 * LOCKING: 1864 * Kernel thread context (may sleep) 1865 * 1866 * RETURNS: 1867 * 0 on success, -errno otherwise. 
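 *
 * Usage sketch (illustrative; EH revalidation does essentially this):
 *
 *	unsigned int class = dev->class;
 *	int rc;
 *
 *	rc = ata_dev_read_id(dev, &class, ATA_READID_POSTRESET, dev->id);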
1868 */ 1869 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 1870 unsigned int flags, u16 *id) 1871 { 1872 struct ata_port *ap = dev->link->ap; 1873 unsigned int class = *p_class; 1874 struct ata_taskfile tf; 1875 unsigned int err_mask = 0; 1876 const char *reason; 1877 bool is_semb = class == ATA_DEV_SEMB; 1878 int may_fallback = 1, tried_spinup = 0; 1879 int rc; 1880 1881 if (ata_msg_ctl(ap)) 1882 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 1883 1884 retry: 1885 ata_tf_init(dev, &tf); 1886 1887 switch (class) { 1888 case ATA_DEV_SEMB: 1889 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */ 1890 case ATA_DEV_ATA: 1891 tf.command = ATA_CMD_ID_ATA; 1892 break; 1893 case ATA_DEV_ATAPI: 1894 tf.command = ATA_CMD_ID_ATAPI; 1895 break; 1896 default: 1897 rc = -ENODEV; 1898 reason = "unsupported class"; 1899 goto err_out; 1900 } 1901 1902 tf.protocol = ATA_PROT_PIO; 1903 1904 /* Some devices choke if TF registers contain garbage. Make 1905 * sure those are properly initialized. 1906 */ 1907 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 1908 1909 /* Device presence detection is unreliable on some 1910 * controllers. Always poll IDENTIFY if available. 1911 */ 1912 tf.flags |= ATA_TFLAG_POLLING; 1913 1914 if (ap->ops->read_id) 1915 err_mask = ap->ops->read_id(dev, &tf, id); 1916 else 1917 err_mask = ata_do_dev_read_id(dev, &tf, id); 1918 1919 if (err_mask) { 1920 if (err_mask & AC_ERR_NODEV_HINT) { 1921 ata_dev_dbg(dev, "NODEV after polling detection\n"); 1922 return -ENOENT; 1923 } 1924 1925 if (is_semb) { 1926 ata_dev_info(dev, 1927 "IDENTIFY failed on device w/ SEMB sig, disabled\n"); 1928 /* SEMB is not supported yet */ 1929 *p_class = ATA_DEV_SEMB_UNSUP; 1930 return 0; 1931 } 1932 1933 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { 1934 /* Device or controller might have reported 1935 * the wrong device class. Give a shot at the 1936 * other IDENTIFY if the current one is 1937 * aborted by the device. 1938 */ 1939 if (may_fallback) { 1940 may_fallback = 0; 1941 1942 if (class == ATA_DEV_ATA) 1943 class = ATA_DEV_ATAPI; 1944 else 1945 class = ATA_DEV_ATA; 1946 goto retry; 1947 } 1948 1949 /* Control reaches here iff the device aborted 1950 * both flavors of IDENTIFYs which happens 1951 * sometimes with phantom devices. 1952 */ 1953 ata_dev_dbg(dev, 1954 "both IDENTIFYs aborted, assuming NODEV\n"); 1955 return -ENOENT; 1956 } 1957 1958 rc = -EIO; 1959 reason = "I/O error"; 1960 goto err_out; 1961 } 1962 1963 if (dev->horkage & ATA_HORKAGE_DUMP_ID) { 1964 ata_dev_dbg(dev, "dumping IDENTIFY data, " 1965 "class=%d may_fallback=%d tried_spinup=%d\n", 1966 class, may_fallback, tried_spinup); 1967 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 1968 16, 2, id, ATA_ID_WORDS * sizeof(*id), true); 1969 } 1970 1971 /* Falling back doesn't make sense if ID data was read 1972 * successfully at least once. 
1973 */ 1974 may_fallback = 0; 1975 1976 swap_buf_le16(id, ATA_ID_WORDS); 1977 1978 /* sanity check */ 1979 rc = -EINVAL; 1980 reason = "device reports invalid type"; 1981 1982 if (class == ATA_DEV_ATA) { 1983 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) 1984 goto err_out; 1985 if (ap->host->flags & ATA_HOST_IGNORE_ATA && 1986 ata_id_is_ata(id)) { 1987 ata_dev_dbg(dev, 1988 "host indicates ignore ATA devices, ignored\n"); 1989 return -ENOENT; 1990 } 1991 } else { 1992 if (ata_id_is_ata(id)) 1993 goto err_out; 1994 } 1995 1996 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) { 1997 tried_spinup = 1; 1998 /* 1999 * Drive powered-up in standby mode, and requires a specific 2000 * SET_FEATURES spin-up subcommand before it will accept 2001 * anything other than the original IDENTIFY command. 2002 */ 2003 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); 2004 if (err_mask && id[2] != 0x738c) { 2005 rc = -EIO; 2006 reason = "SPINUP failed"; 2007 goto err_out; 2008 } 2009 /* 2010 * If the drive initially returned incomplete IDENTIFY info, 2011 * we now must reissue the IDENTIFY command. 2012 */ 2013 if (id[2] == 0x37c8) 2014 goto retry; 2015 } 2016 2017 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { 2018 /* 2019 * The exact sequence expected by certain pre-ATA4 drives is: 2020 * SRST RESET 2021 * IDENTIFY (optional in early ATA) 2022 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) 2023 * anything else.. 2024 * Some drives were very specific about that exact sequence. 2025 * 2026 * Note that ATA4 says lba is mandatory so the second check 2027 * should never trigger. 2028 */ 2029 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 2030 err_mask = ata_dev_init_params(dev, id[3], id[6]); 2031 if (err_mask) { 2032 rc = -EIO; 2033 reason = "INIT_DEV_PARAMS failed"; 2034 goto err_out; 2035 } 2036 2037 /* current CHS translation info (id[53-58]) might be 2038 * changed. reread the identify device info. 2039 */ 2040 flags &= ~ATA_READID_POSTRESET; 2041 goto retry; 2042 } 2043 } 2044 2045 *p_class = class; 2046 2047 return 0; 2048 2049 err_out: 2050 if (ata_msg_warn(ap)) 2051 ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n", 2052 reason, err_mask); 2053 return rc; 2054 } 2055 2056 static int ata_do_link_spd_horkage(struct ata_device *dev) 2057 { 2058 struct ata_link *plink = ata_dev_phys_link(dev); 2059 u32 target, target_limit; 2060 2061 if (!sata_scr_valid(plink)) 2062 return 0; 2063 2064 if (dev->horkage & ATA_HORKAGE_1_5_GBPS) 2065 target = 1; 2066 else 2067 return 0; 2068 2069 target_limit = (1 << target) - 1; 2070 2071 /* if already on stricter limit, no need to push further */ 2072 if (plink->sata_spd_limit <= target_limit) 2073 return 0; 2074 2075 plink->sata_spd_limit = target_limit; 2076 2077 /* Request another EH round by returning -EAGAIN if link is 2078 * going faster than the target speed. Forward progress is 2079 * guaranteed by setting sata_spd_limit to target_limit above. 
2080 */ 2081 if (plink->sata_spd > target) { 2082 ata_dev_info(dev, "applying link speed limit horkage to %s\n", 2083 sata_spd_string(target)); 2084 return -EAGAIN; 2085 } 2086 return 0; 2087 } 2088 2089 static inline u8 ata_dev_knobble(struct ata_device *dev) 2090 { 2091 struct ata_port *ap = dev->link->ap; 2092 2093 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2094 return 0; 2095 2096 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2097 } 2098 2099 static int ata_dev_config_ncq(struct ata_device *dev, 2100 char *desc, size_t desc_sz) 2101 { 2102 struct ata_port *ap = dev->link->ap; 2103 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2104 unsigned int err_mask; 2105 char *aa_desc = ""; 2106 2107 if (!ata_id_has_ncq(dev->id)) { 2108 desc[0] = '\0'; 2109 return 0; 2110 } 2111 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2112 snprintf(desc, desc_sz, "NCQ (not used)"); 2113 return 0; 2114 } 2115 if (ap->flags & ATA_FLAG_NCQ) { 2116 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2117 dev->flags |= ATA_DFLAG_NCQ; 2118 } 2119 2120 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && 2121 (ap->flags & ATA_FLAG_FPDMA_AA) && 2122 ata_id_has_fpdma_aa(dev->id)) { 2123 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, 2124 SATA_FPDMA_AA); 2125 if (err_mask) { 2126 ata_dev_err(dev, 2127 "failed to enable AA (error_mask=0x%x)\n", 2128 err_mask); 2129 if (err_mask != AC_ERR_DEV) { 2130 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; 2131 return -EIO; 2132 } 2133 } else 2134 aa_desc = ", AA"; 2135 } 2136 2137 if (hdepth >= ddepth) 2138 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); 2139 else 2140 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, 2141 ddepth, aa_desc); 2142 return 0; 2143 } 2144 2145 /** 2146 * ata_dev_configure - Configure the specified ATA/ATAPI device 2147 * @dev: Target device to configure 2148 * 2149 * Configure @dev according to @dev->id. Generic and low-level 2150 * driver specific fixups are also applied. 2151 * 2152 * LOCKING: 2153 * Kernel thread context (may sleep) 2154 * 2155 * RETURNS: 2156 * 0 on success, -errno otherwise 2157 */ 2158 int ata_dev_configure(struct ata_device *dev) 2159 { 2160 struct ata_port *ap = dev->link->ap; 2161 struct ata_eh_context *ehc = &dev->link->eh_context; 2162 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2163 const u16 *id = dev->id; 2164 unsigned long xfer_mask; 2165 unsigned int err_mask; 2166 char revbuf[7]; /* XYZ-99\0 */ 2167 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2168 char modelbuf[ATA_ID_PROD_LEN+1]; 2169 int rc; 2170 2171 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2172 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); 2173 return 0; 2174 } 2175 2176 if (ata_msg_probe(ap)) 2177 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 2178 2179 /* set horkage */ 2180 dev->horkage |= ata_dev_blacklisted(dev); 2181 ata_force_horkage(dev); 2182 2183 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2184 ata_dev_info(dev, "unsupported device, disabling\n"); 2185 ata_dev_disable(dev); 2186 return 0; 2187 } 2188 2189 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2190 dev->class == ATA_DEV_ATAPI) { 2191 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", 2192 atapi_enabled ? 
"not supported with this driver" 2193 : "disabled"); 2194 ata_dev_disable(dev); 2195 return 0; 2196 } 2197 2198 rc = ata_do_link_spd_horkage(dev); 2199 if (rc) 2200 return rc; 2201 2202 /* let ACPI work its magic */ 2203 rc = ata_acpi_on_devcfg(dev); 2204 if (rc) 2205 return rc; 2206 2207 /* massage HPA, do it early as it might change IDENTIFY data */ 2208 rc = ata_hpa_resize(dev); 2209 if (rc) 2210 return rc; 2211 2212 /* print device capabilities */ 2213 if (ata_msg_probe(ap)) 2214 ata_dev_dbg(dev, 2215 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2216 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2217 __func__, 2218 id[49], id[82], id[83], id[84], 2219 id[85], id[86], id[87], id[88]); 2220 2221 /* initialize to-be-configured parameters */ 2222 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2223 dev->max_sectors = 0; 2224 dev->cdb_len = 0; 2225 dev->n_sectors = 0; 2226 dev->cylinders = 0; 2227 dev->heads = 0; 2228 dev->sectors = 0; 2229 dev->multi_count = 0; 2230 2231 /* 2232 * common ATA, ATAPI feature tests 2233 */ 2234 2235 /* find max transfer mode; for printk only */ 2236 xfer_mask = ata_id_xfermask(id); 2237 2238 if (ata_msg_probe(ap)) 2239 ata_dump_id(id); 2240 2241 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2242 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2243 sizeof(fwrevbuf)); 2244 2245 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2246 sizeof(modelbuf)); 2247 2248 /* ATA-specific feature tests */ 2249 if (dev->class == ATA_DEV_ATA) { 2250 if (ata_id_is_cfa(id)) { 2251 /* CPRM may make this media unusable */ 2252 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2253 ata_dev_warn(dev, 2254 "supports DRM functions and may not be fully accessible\n"); 2255 snprintf(revbuf, 7, "CFA"); 2256 } else { 2257 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2258 /* Warn the user if the device has TPM extensions */ 2259 if (ata_id_has_tpm(id)) 2260 ata_dev_warn(dev, 2261 "supports DRM functions and may not be fully accessible\n"); 2262 } 2263 2264 dev->n_sectors = ata_id_n_sectors(id); 2265 2266 /* get current R/W Multiple count setting */ 2267 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2268 unsigned int max = dev->id[47] & 0xff; 2269 unsigned int cnt = dev->id[59] & 0xff; 2270 /* only recognize/allow powers of two here */ 2271 if (is_power_of_2(max) && is_power_of_2(cnt)) 2272 if (cnt <= max) 2273 dev->multi_count = cnt; 2274 } 2275 2276 if (ata_id_has_lba(id)) { 2277 const char *lba_desc; 2278 char ncq_desc[24]; 2279 2280 lba_desc = "LBA"; 2281 dev->flags |= ATA_DFLAG_LBA; 2282 if (ata_id_has_lba48(id)) { 2283 dev->flags |= ATA_DFLAG_LBA48; 2284 lba_desc = "LBA48"; 2285 2286 if (dev->n_sectors >= (1UL << 28) && 2287 ata_id_has_flush_ext(id)) 2288 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2289 } 2290 2291 /* config NCQ */ 2292 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2293 if (rc) 2294 return rc; 2295 2296 /* print device info to dmesg */ 2297 if (ata_msg_drv(ap) && print_info) { 2298 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2299 revbuf, modelbuf, fwrevbuf, 2300 ata_mode_string(xfer_mask)); 2301 ata_dev_info(dev, 2302 "%llu sectors, multi %u: %s %s\n", 2303 (unsigned long long)dev->n_sectors, 2304 dev->multi_count, lba_desc, ncq_desc); 2305 } 2306 } else { 2307 /* CHS */ 2308 2309 /* Default translation */ 2310 dev->cylinders = id[1]; 2311 dev->heads = id[3]; 2312 dev->sectors = id[6]; 2313 2314 if (ata_id_current_chs_valid(id)) { 2315 /* Current CHS translation is valid. 
*/ 2316 dev->cylinders = id[54]; 2317 dev->heads = id[55]; 2318 dev->sectors = id[56]; 2319 } 2320 2321 /* print device info to dmesg */ 2322 if (ata_msg_drv(ap) && print_info) { 2323 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2324 revbuf, modelbuf, fwrevbuf, 2325 ata_mode_string(xfer_mask)); 2326 ata_dev_info(dev, 2327 "%llu sectors, multi %u, CHS %u/%u/%u\n", 2328 (unsigned long long)dev->n_sectors, 2329 dev->multi_count, dev->cylinders, 2330 dev->heads, dev->sectors); 2331 } 2332 } 2333 2334 /* Check and mark DevSlp capability. Get DevSlp timing variables 2335 * from SATA Settings page of Identify Device Data Log. 2336 */ 2337 if (ata_id_has_devslp(dev->id)) { 2338 u8 *sata_setting = ap->sector_buf; 2339 int i, j; 2340 2341 dev->flags |= ATA_DFLAG_DEVSLP; 2342 err_mask = ata_read_log_page(dev, 2343 ATA_LOG_SATA_ID_DEV_DATA, 2344 ATA_LOG_SATA_SETTINGS, 2345 sata_setting, 2346 1); 2347 if (err_mask) 2348 ata_dev_dbg(dev, 2349 "failed to get Identify Device Data, Emask 0x%x\n", 2350 err_mask); 2351 else 2352 for (i = 0; i < ATA_LOG_DEVSLP_SIZE; i++) { 2353 j = ATA_LOG_DEVSLP_OFFSET + i; 2354 dev->devslp_timing[i] = sata_setting[j]; 2355 } 2356 } 2357 2358 dev->cdb_len = 16; 2359 } 2360 2361 /* ATAPI-specific feature tests */ 2362 else if (dev->class == ATA_DEV_ATAPI) { 2363 const char *cdb_intr_string = ""; 2364 const char *atapi_an_string = ""; 2365 const char *dma_dir_string = ""; 2366 u32 sntf; 2367 2368 rc = atapi_cdb_len(id); 2369 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2370 if (ata_msg_warn(ap)) 2371 ata_dev_warn(dev, "unsupported CDB len\n"); 2372 rc = -EINVAL; 2373 goto err_out_nosup; 2374 } 2375 dev->cdb_len = (unsigned int) rc; 2376 2377 /* Enable ATAPI AN if both the host and device have 2378 * the support. If PMP is attached, SNTF is required 2379 * to enable ATAPI AN to discern between PHY status 2380 * changed notifications and ATAPI ANs. 
2381 */ 2382 if (atapi_an && 2383 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2384 (!sata_pmp_attached(ap) || 2385 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2386 /* issue SET feature command to turn this on */ 2387 err_mask = ata_dev_set_feature(dev, 2388 SETFEATURES_SATA_ENABLE, SATA_AN); 2389 if (err_mask) 2390 ata_dev_err(dev, 2391 "failed to enable ATAPI AN (err_mask=0x%x)\n", 2392 err_mask); 2393 else { 2394 dev->flags |= ATA_DFLAG_AN; 2395 atapi_an_string = ", ATAPI AN"; 2396 } 2397 } 2398 2399 if (ata_id_cdb_intr(dev->id)) { 2400 dev->flags |= ATA_DFLAG_CDB_INTR; 2401 cdb_intr_string = ", CDB intr"; 2402 } 2403 2404 if (atapi_dmadir || (dev->horkage & ATA_HORKAGE_ATAPI_DMADIR) || atapi_id_dmadir(dev->id)) { 2405 dev->flags |= ATA_DFLAG_DMADIR; 2406 dma_dir_string = ", DMADIR"; 2407 } 2408 2409 if (ata_id_has_da(dev->id)) { 2410 dev->flags |= ATA_DFLAG_DA; 2411 zpodd_init(dev); 2412 } 2413 2414 /* print device info to dmesg */ 2415 if (ata_msg_drv(ap) && print_info) 2416 ata_dev_info(dev, 2417 "ATAPI: %s, %s, max %s%s%s%s\n", 2418 modelbuf, fwrevbuf, 2419 ata_mode_string(xfer_mask), 2420 cdb_intr_string, atapi_an_string, 2421 dma_dir_string); 2422 } 2423 2424 /* determine max_sectors */ 2425 dev->max_sectors = ATA_MAX_SECTORS; 2426 if (dev->flags & ATA_DFLAG_LBA48) 2427 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2428 2429 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2430 200 sectors */ 2431 if (ata_dev_knobble(dev)) { 2432 if (ata_msg_drv(ap) && print_info) 2433 ata_dev_info(dev, "applying bridge limits\n"); 2434 dev->udma_mask &= ATA_UDMA5; 2435 dev->max_sectors = ATA_MAX_SECTORS; 2436 } 2437 2438 if ((dev->class == ATA_DEV_ATAPI) && 2439 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2440 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2441 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2442 } 2443 2444 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2445 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2446 dev->max_sectors); 2447 2448 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2449 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2450 2451 if (ap->ops->dev_config) 2452 ap->ops->dev_config(dev); 2453 2454 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2455 /* Let the user know. We don't want to disallow opens for 2456 rescue purposes, or in case the vendor is just a blithering 2457 idiot. Do this after the dev_config call as some controllers 2458 with buggy firmware may want to avoid reporting false device 2459 bugs */ 2460 2461 if (print_info) { 2462 ata_dev_warn(dev, 2463 "Drive reports diagnostics failure. This may indicate a drive\n"); 2464 ata_dev_warn(dev, 2465 "fault or invalid emulation. Contact drive vendor for information.\n"); 2466 } 2467 } 2468 2469 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2470 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); 2471 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); 2472 } 2473 2474 return 0; 2475 2476 err_out_nosup: 2477 if (ata_msg_probe(ap)) 2478 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); 2479 return rc; 2480 } 2481 2482 /** 2483 * ata_cable_40wire - return 40 wire cable type 2484 * @ap: port 2485 * 2486 * Helper method for drivers which want to hardwire 40 wire cable 2487 * detection. 
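 *
 * Illustrative use (hypothetical driver, not from this file): a PATA
 * LLD that knows its cable is always 40-wire would typically point its
 * cable_detect hook at this helper, roughly:
 *
 *	static struct ata_port_operations my_pata_ops = {
 *		.inherits	= &ata_sff_port_ops,
 *		.cable_detect	= ata_cable_40wire,
 *	};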
2488 */ 2489 2490 int ata_cable_40wire(struct ata_port *ap) 2491 { 2492 return ATA_CBL_PATA40; 2493 } 2494 2495 /** 2496 * ata_cable_80wire - return 80 wire cable type 2497 * @ap: port 2498 * 2499 * Helper method for drivers which want to hardwire 80 wire cable 2500 * detection. 2501 */ 2502 2503 int ata_cable_80wire(struct ata_port *ap) 2504 { 2505 return ATA_CBL_PATA80; 2506 } 2507 2508 /** 2509 * ata_cable_unknown - return unknown PATA cable. 2510 * @ap: port 2511 * 2512 * Helper method for drivers which have no PATA cable detection. 2513 */ 2514 2515 int ata_cable_unknown(struct ata_port *ap) 2516 { 2517 return ATA_CBL_PATA_UNK; 2518 } 2519 2520 /** 2521 * ata_cable_ignore - return ignored PATA cable. 2522 * @ap: port 2523 * 2524 * Helper method for drivers which don't use cable type to limit 2525 * transfer mode. 2526 */ 2527 int ata_cable_ignore(struct ata_port *ap) 2528 { 2529 return ATA_CBL_PATA_IGN; 2530 } 2531 2532 /** 2533 * ata_cable_sata - return SATA cable type 2534 * @ap: port 2535 * 2536 * Helper method for drivers which have SATA cables 2537 */ 2538 2539 int ata_cable_sata(struct ata_port *ap) 2540 { 2541 return ATA_CBL_SATA; 2542 } 2543 2544 /** 2545 * ata_bus_probe - Reset and probe ATA bus 2546 * @ap: Bus to probe 2547 * 2548 * Master ATA bus probing function. Initiates a hardware-dependent 2549 * bus reset, then attempts to identify any devices found on 2550 * the bus. 2551 * 2552 * LOCKING: 2553 * PCI/etc. bus probe sem. 2554 * 2555 * RETURNS: 2556 * Zero on success, negative errno otherwise. 2557 */ 2558 2559 int ata_bus_probe(struct ata_port *ap) 2560 { 2561 unsigned int classes[ATA_MAX_DEVICES]; 2562 int tries[ATA_MAX_DEVICES]; 2563 int rc; 2564 struct ata_device *dev; 2565 2566 ata_for_each_dev(dev, &ap->link, ALL) 2567 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2568 2569 retry: 2570 ata_for_each_dev(dev, &ap->link, ALL) { 2571 /* If we issue an SRST then an ATA drive (not ATAPI) 2572 * may change configuration and be in PIO0 timing. If 2573 * we do a hard reset (or are coming from power on) 2574 * this is true for ATA or ATAPI. Until we've set a 2575 * suitable controller mode we should not touch the 2576 * bus as we may be talking too fast. 2577 */ 2578 dev->pio_mode = XFER_PIO_0; 2579 dev->dma_mode = 0xff; 2580 2581 /* If the controller has a pio mode setup function 2582 * then use it to set the chipset to rights. Don't 2583 * touch the DMA setup as that will be dealt with when 2584 * configuring devices. 2585 */ 2586 if (ap->ops->set_piomode) 2587 ap->ops->set_piomode(ap, dev); 2588 } 2589 2590 /* reset and determine device classes */ 2591 ap->ops->phy_reset(ap); 2592 2593 ata_for_each_dev(dev, &ap->link, ALL) { 2594 if (dev->class != ATA_DEV_UNKNOWN) 2595 classes[dev->devno] = dev->class; 2596 else 2597 classes[dev->devno] = ATA_DEV_NONE; 2598 2599 dev->class = ATA_DEV_UNKNOWN; 2600 } 2601 2602 /* read IDENTIFY page and configure devices. 
We have to do the identify 2603 specific sequence bass-ackwards so that PDIAG- is released by 2604 the slave device */ 2605 2606 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2607 if (tries[dev->devno]) 2608 dev->class = classes[dev->devno]; 2609 2610 if (!ata_dev_enabled(dev)) 2611 continue; 2612 2613 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2614 dev->id); 2615 if (rc) 2616 goto fail; 2617 } 2618 2619 /* Now ask for the cable type as PDIAG- should have been released */ 2620 if (ap->ops->cable_detect) 2621 ap->cbl = ap->ops->cable_detect(ap); 2622 2623 /* We may have SATA bridge glue hiding here irrespective of 2624 * the reported cable types and sensed types. When SATA 2625 * drives indicate we have a bridge, we don't know which end 2626 * of the link the bridge is which is a problem. 2627 */ 2628 ata_for_each_dev(dev, &ap->link, ENABLED) 2629 if (ata_id_is_sata(dev->id)) 2630 ap->cbl = ATA_CBL_SATA; 2631 2632 /* After the identify sequence we can now set up the devices. We do 2633 this in the normal order so that the user doesn't get confused */ 2634 2635 ata_for_each_dev(dev, &ap->link, ENABLED) { 2636 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2637 rc = ata_dev_configure(dev); 2638 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2639 if (rc) 2640 goto fail; 2641 } 2642 2643 /* configure transfer mode */ 2644 rc = ata_set_mode(&ap->link, &dev); 2645 if (rc) 2646 goto fail; 2647 2648 ata_for_each_dev(dev, &ap->link, ENABLED) 2649 return 0; 2650 2651 return -ENODEV; 2652 2653 fail: 2654 tries[dev->devno]--; 2655 2656 switch (rc) { 2657 case -EINVAL: 2658 /* eeek, something went very wrong, give up */ 2659 tries[dev->devno] = 0; 2660 break; 2661 2662 case -ENODEV: 2663 /* give it just one more chance */ 2664 tries[dev->devno] = min(tries[dev->devno], 1); 2665 case -EIO: 2666 if (tries[dev->devno] == 1) { 2667 /* This is the last chance, better to slow 2668 * down than lose it. 2669 */ 2670 sata_down_spd_limit(&ap->link, 0); 2671 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2672 } 2673 } 2674 2675 if (!tries[dev->devno]) 2676 ata_dev_disable(dev); 2677 2678 goto retry; 2679 } 2680 2681 /** 2682 * sata_print_link_status - Print SATA link status 2683 * @link: SATA link to printk link status about 2684 * 2685 * This function prints link speed and status of a SATA link. 2686 * 2687 * LOCKING: 2688 * None. 
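 *
 * (The printed speed comes from the SPD field of SStatus, bits 7:4,
 * where 1 = 1.5 Gbps, 2 = 3.0 Gbps and 3 = 6.0 Gbps; the device
 * detection state lives in the DET field, bits 3:0.)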
2689 */ 2690 static void sata_print_link_status(struct ata_link *link) 2691 { 2692 u32 sstatus, scontrol, tmp; 2693 2694 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2695 return; 2696 sata_scr_read(link, SCR_CONTROL, &scontrol); 2697 2698 if (ata_phys_link_online(link)) { 2699 tmp = (sstatus >> 4) & 0xf; 2700 ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n", 2701 sata_spd_string(tmp), sstatus, scontrol); 2702 } else { 2703 ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n", 2704 sstatus, scontrol); 2705 } 2706 } 2707 2708 /** 2709 * ata_dev_pair - return other device on cable 2710 * @adev: device 2711 * 2712 * Obtain the other device on the same cable, or if none is 2713 * present NULL is returned 2714 */ 2715 2716 struct ata_device *ata_dev_pair(struct ata_device *adev) 2717 { 2718 struct ata_link *link = adev->link; 2719 struct ata_device *pair = &link->device[1 - adev->devno]; 2720 if (!ata_dev_enabled(pair)) 2721 return NULL; 2722 return pair; 2723 } 2724 2725 /** 2726 * sata_down_spd_limit - adjust SATA spd limit downward 2727 * @link: Link to adjust SATA spd limit for 2728 * @spd_limit: Additional limit 2729 * 2730 * Adjust SATA spd limit of @link downward. Note that this 2731 * function only adjusts the limit. The change must be applied 2732 * using sata_set_spd(). 2733 * 2734 * If @spd_limit is non-zero, the speed is limited to equal to or 2735 * lower than @spd_limit if such speed is supported. If 2736 * @spd_limit is slower than any supported speed, only the lowest 2737 * supported speed is allowed. 2738 * 2739 * LOCKING: 2740 * Inherited from caller. 2741 * 2742 * RETURNS: 2743 * 0 on success, negative errno on failure 2744 */ 2745 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 2746 { 2747 u32 sstatus, spd, mask; 2748 int rc, bit; 2749 2750 if (!sata_scr_valid(link)) 2751 return -EOPNOTSUPP; 2752 2753 /* If SCR can be read, use it to determine the current SPD. 2754 * If not, use cached value in link->sata_spd. 2755 */ 2756 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2757 if (rc == 0 && ata_sstatus_online(sstatus)) 2758 spd = (sstatus >> 4) & 0xf; 2759 else 2760 spd = link->sata_spd; 2761 2762 mask = link->sata_spd_limit; 2763 if (mask <= 1) 2764 return -EINVAL; 2765 2766 /* unconditionally mask off the highest bit */ 2767 bit = fls(mask) - 1; 2768 mask &= ~(1 << bit); 2769 2770 /* Mask off all speeds higher than or equal to the current 2771 * one. Force 1.5Gbps if current SPD is not available. 2772 */ 2773 if (spd > 1) 2774 mask &= (1 << (spd - 1)) - 1; 2775 else 2776 mask &= 1; 2777 2778 /* were we already at the bottom? */ 2779 if (!mask) 2780 return -EINVAL; 2781 2782 if (spd_limit) { 2783 if (mask & ((1 << spd_limit) - 1)) 2784 mask &= (1 << spd_limit) - 1; 2785 else { 2786 bit = ffs(mask) - 1; 2787 mask = 1 << bit; 2788 } 2789 } 2790 2791 link->sata_spd_limit = mask; 2792 2793 ata_link_warn(link, "limiting SATA link speed to %s\n", 2794 sata_spd_string(fls(mask))); 2795 2796 return 0; 2797 } 2798 2799 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2800 { 2801 struct ata_link *host_link = &link->ap->link; 2802 u32 limit, target, spd; 2803 2804 limit = link->sata_spd_limit; 2805 2806 /* Don't configure downstream link faster than upstream link. 2807 * It doesn't speed up anything and some PMPs choke on such 2808 * configuration. 
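 *
 * (sata_spd_limit is a bitmask: bit 0 = 1.5 Gbps, bit 1 = 3.0 Gbps,
 * bit 2 = 6.0 Gbps, so fls(limit) below yields the highest permitted
 * generation number, which is the value the SPD field of SControl
 * expects.)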
2809 */ 2810 if (!ata_is_host_link(link) && host_link->sata_spd) 2811 limit &= (1 << host_link->sata_spd) - 1; 2812 2813 if (limit == UINT_MAX) 2814 target = 0; 2815 else 2816 target = fls(limit); 2817 2818 spd = (*scontrol >> 4) & 0xf; 2819 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 2820 2821 return spd != target; 2822 } 2823 2824 /** 2825 * sata_set_spd_needed - is SATA spd configuration needed 2826 * @link: Link in question 2827 * 2828 * Test whether the spd limit in SControl matches 2829 * @link->sata_spd_limit. This function is used to determine 2830 * whether hardreset is necessary to apply SATA spd 2831 * configuration. 2832 * 2833 * LOCKING: 2834 * Inherited from caller. 2835 * 2836 * RETURNS: 2837 * 1 if SATA spd configuration is needed, 0 otherwise. 2838 */ 2839 static int sata_set_spd_needed(struct ata_link *link) 2840 { 2841 u32 scontrol; 2842 2843 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2844 return 1; 2845 2846 return __sata_set_spd_needed(link, &scontrol); 2847 } 2848 2849 /** 2850 * sata_set_spd - set SATA spd according to spd limit 2851 * @link: Link to set SATA spd for 2852 * 2853 * Set SATA spd of @link according to sata_spd_limit. 2854 * 2855 * LOCKING: 2856 * Inherited from caller. 2857 * 2858 * RETURNS: 2859 * 0 if spd doesn't need to be changed, 1 if spd has been 2860 * changed. Negative errno if SCR registers are inaccessible. 2861 */ 2862 int sata_set_spd(struct ata_link *link) 2863 { 2864 u32 scontrol; 2865 int rc; 2866 2867 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 2868 return rc; 2869 2870 if (!__sata_set_spd_needed(link, &scontrol)) 2871 return 0; 2872 2873 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 2874 return rc; 2875 2876 return 1; 2877 } 2878 2879 /* 2880 * This mode timing computation functionality is ported over from 2881 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 2882 */ 2883 /* 2884 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 2885 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 2886 * for UDMA6, which is currently supported only by Maxtor drives. 2887 * 2888 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
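 *
 * (Each row below lists, in order: mode, setup, act8b, rec8b, cyc8b,
 * active, recover, dmack_hold, cycle and udma, matching the field
 * order of struct ata_timing.)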
2889 */ 2890 2891 static const struct ata_timing ata_timing[] = { 2892 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 2893 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 2894 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 2895 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 2896 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 2897 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 2898 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 2899 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 2900 2901 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 2902 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 2903 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 2904 2905 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 2906 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 2907 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 2908 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 2909 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 2910 2911 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 2912 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 2913 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 2914 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 2915 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 2916 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 2917 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 2918 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 2919 2920 { 0xFF } 2921 }; 2922 2923 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 2924 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 2925 2926 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 2927 { 2928 q->setup = EZ(t->setup * 1000, T); 2929 q->act8b = EZ(t->act8b * 1000, T); 2930 q->rec8b = EZ(t->rec8b * 1000, T); 2931 q->cyc8b = EZ(t->cyc8b * 1000, T); 2932 q->active = EZ(t->active * 1000, T); 2933 q->recover = EZ(t->recover * 1000, T); 2934 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 2935 q->cycle = EZ(t->cycle * 1000, T); 2936 q->udma = EZ(t->udma * 1000, UT); 2937 } 2938 2939 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 2940 struct ata_timing *m, unsigned int what) 2941 { 2942 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 2943 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 2944 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 2945 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 2946 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 2947 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 2948 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 2949 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 2950 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 2951 } 2952 2953 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 2954 { 2955 const struct ata_timing *t = ata_timing; 2956 2957 while (xfer_mode > t->mode) 2958 t++; 2959 2960 if (xfer_mode == t->mode) 2961 return t; 2962 2963 WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n", 2964 __func__, xfer_mode); 2965 2966 return NULL; 2967 } 2968 2969 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 2970 struct ata_timing *t, int T, int UT) 2971 { 2972 const u16 *id = adev->id; 2973 const struct ata_timing *s; 2974 struct ata_timing p; 2975 2976 /* 2977 * Find the mode. 
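 * (What follows: the base timing for the requested speed is looked up,
 * optionally merged with the drive's EIDE-reported cycle times, then
 * quantized to clock ticks. Assuming T and UT are clock periods in
 * picoseconds, as the multiplication by 1000 in ata_timing_quantize()
 * implies, a 120 ns cycle on a 30000 ps (33 MHz) clock becomes
 * ENOUGH(120 * 1000, 30000) = 4 ticks.)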
2978 */ 2979 2980 if (!(s = ata_timing_find_mode(speed))) 2981 return -EINVAL; 2982 2983 memcpy(t, s, sizeof(*s)); 2984 2985 /* 2986 * If the drive is an EIDE drive, it can tell us it needs extended 2987 * PIO/MW_DMA cycle timing. 2988 */ 2989 2990 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 2991 memset(&p, 0, sizeof(p)); 2992 2993 if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) { 2994 if (speed <= XFER_PIO_2) 2995 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; 2996 else if ((speed <= XFER_PIO_4) || 2997 (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) 2998 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; 2999 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) 3000 p.cycle = id[ATA_ID_EIDE_DMA_MIN]; 3001 3002 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3003 } 3004 3005 /* 3006 * Convert the timing to bus clock counts. 3007 */ 3008 3009 ata_timing_quantize(t, t, T, UT); 3010 3011 /* 3012 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3013 * S.M.A.R.T. and some other commands. We have to ensure that the 3014 * DMA cycle timing is slower than or equal to the fastest PIO timing. 3015 */ 3016 3017 if (speed > XFER_PIO_6) { 3018 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3019 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3020 } 3021 3022 /* 3023 * Lengthen active & recovery time so that cycle time is correct. 3024 */ 3025 3026 if (t->act8b + t->rec8b < t->cyc8b) { 3027 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3028 t->rec8b = t->cyc8b - t->act8b; 3029 } 3030 3031 if (t->active + t->recover < t->cycle) { 3032 t->active += (t->cycle - (t->active + t->recover)) / 2; 3033 t->recover = t->cycle - t->active; 3034 } 3035 3036 /* In a few cases quantisation may produce enough errors to 3037 leave t->cycle too low for the sum of active and recovery; 3038 if so we must correct this */ 3039 if (t->active + t->recover > t->cycle) 3040 t->cycle = t->active + t->recover; 3041 3042 return 0; 3043 } 3044 3045 /** 3046 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3047 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3048 * @cycle: cycle duration in ns 3049 * 3050 * Return matching xfer mode for @cycle. The returned mode is of 3051 * the transfer type specified by @xfer_shift. If @cycle is too 3052 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3053 * than the fastest known mode, the fastest mode is returned. 3054 * 3055 * LOCKING: 3056 * None. 3057 * 3058 * RETURNS: 3059 * Matching xfer_mode, 0xff if no match found.
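 *
 * (Example: @xfer_shift == ATA_SHIFT_MWDMA with @cycle == 150 fits
 * MWDMA0 (480 ns) and MWDMA1 (150 ns) but not MWDMA2 (120 ns), so
 * XFER_MW_DMA_1 is returned.)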
3060 */ 3061 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3062 { 3063 u8 base_mode = 0xff, last_mode = 0xff; 3064 const struct ata_xfer_ent *ent; 3065 const struct ata_timing *t; 3066 3067 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3068 if (ent->shift == xfer_shift) 3069 base_mode = ent->base; 3070 3071 for (t = ata_timing_find_mode(base_mode); 3072 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3073 unsigned short this_cycle; 3074 3075 switch (xfer_shift) { 3076 case ATA_SHIFT_PIO: 3077 case ATA_SHIFT_MWDMA: 3078 this_cycle = t->cycle; 3079 break; 3080 case ATA_SHIFT_UDMA: 3081 this_cycle = t->udma; 3082 break; 3083 default: 3084 return 0xff; 3085 } 3086 3087 if (cycle > this_cycle) 3088 break; 3089 3090 last_mode = t->mode; 3091 } 3092 3093 return last_mode; 3094 } 3095 3096 /** 3097 * ata_down_xfermask_limit - adjust dev xfer masks downward 3098 * @dev: Device to adjust xfer masks 3099 * @sel: ATA_DNXFER_* selector 3100 * 3101 * Adjust xfer masks of @dev downward. Note that this function 3102 * does not apply the change. Invoking ata_set_mode() afterwards 3103 * will apply the limit. 3104 * 3105 * LOCKING: 3106 * Inherited from caller. 3107 * 3108 * RETURNS: 3109 * 0 on success, negative errno on failure 3110 */ 3111 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3112 { 3113 char buf[32]; 3114 unsigned long orig_mask, xfer_mask; 3115 unsigned long pio_mask, mwdma_mask, udma_mask; 3116 int quiet, highbit; 3117 3118 quiet = !!(sel & ATA_DNXFER_QUIET); 3119 sel &= ~ATA_DNXFER_QUIET; 3120 3121 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3122 dev->mwdma_mask, 3123 dev->udma_mask); 3124 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3125 3126 switch (sel) { 3127 case ATA_DNXFER_PIO: 3128 highbit = fls(pio_mask) - 1; 3129 pio_mask &= ~(1 << highbit); 3130 break; 3131 3132 case ATA_DNXFER_DMA: 3133 if (udma_mask) { 3134 highbit = fls(udma_mask) - 1; 3135 udma_mask &= ~(1 << highbit); 3136 if (!udma_mask) 3137 return -ENOENT; 3138 } else if (mwdma_mask) { 3139 highbit = fls(mwdma_mask) - 1; 3140 mwdma_mask &= ~(1 << highbit); 3141 if (!mwdma_mask) 3142 return -ENOENT; 3143 } 3144 break; 3145 3146 case ATA_DNXFER_40C: 3147 udma_mask &= ATA_UDMA_MASK_40C; 3148 break; 3149 3150 case ATA_DNXFER_FORCE_PIO0: 3151 pio_mask &= 1; 3152 case ATA_DNXFER_FORCE_PIO: 3153 mwdma_mask = 0; 3154 udma_mask = 0; 3155 break; 3156 3157 default: 3158 BUG(); 3159 } 3160 3161 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3162 3163 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3164 return -ENOENT; 3165 3166 if (!quiet) { 3167 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3168 snprintf(buf, sizeof(buf), "%s:%s", 3169 ata_mode_string(xfer_mask), 3170 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3171 else 3172 snprintf(buf, sizeof(buf), "%s", 3173 ata_mode_string(xfer_mask)); 3174 3175 ata_dev_warn(dev, "limiting speed to %s\n", buf); 3176 } 3177 3178 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3179 &dev->udma_mask); 3180 3181 return 0; 3182 } 3183 3184 static int ata_dev_set_mode(struct ata_device *dev) 3185 { 3186 struct ata_port *ap = dev->link->ap; 3187 struct ata_eh_context *ehc = &dev->link->eh_context; 3188 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3189 const char *dev_err_whine = ""; 3190 int ign_dev_err = 0; 3191 unsigned int err_mask = 0; 3192 int rc; 3193 3194 dev->flags &= ~ATA_DFLAG_PIO; 3195 if (dev->xfer_shift == ATA_SHIFT_PIO) 3196 dev->flags |= 
ATA_DFLAG_PIO; 3197 3198 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3199 dev_err_whine = " (SET_XFERMODE skipped)"; 3200 else { 3201 if (nosetxfer) 3202 ata_dev_warn(dev, 3203 "NOSETXFER but PATA detected - can't " 3204 "skip SETXFER, might malfunction\n"); 3205 err_mask = ata_dev_set_xfermode(dev); 3206 } 3207 3208 if (err_mask & ~AC_ERR_DEV) 3209 goto fail; 3210 3211 /* revalidate */ 3212 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3213 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3214 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3215 if (rc) 3216 return rc; 3217 3218 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3219 /* Old CFA may refuse this command, which is just fine */ 3220 if (ata_id_is_cfa(dev->id)) 3221 ign_dev_err = 1; 3222 /* Catch several broken garbage emulations plus some pre 3223 ATA devices */ 3224 if (ata_id_major_version(dev->id) == 0 && 3225 dev->pio_mode <= XFER_PIO_2) 3226 ign_dev_err = 1; 3227 /* Some very old devices and some bad newer ones fail 3228 any kind of SET_XFERMODE request but support PIO0-2 3229 timings and no IORDY */ 3230 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3231 ign_dev_err = 1; 3232 } 3233 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3234 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3235 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3236 dev->dma_mode == XFER_MW_DMA_0 && 3237 (dev->id[63] >> 8) & 1) 3238 ign_dev_err = 1; 3239 3240 /* if the device is actually configured correctly, ignore dev err */ 3241 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3242 ign_dev_err = 1; 3243 3244 if (err_mask & AC_ERR_DEV) { 3245 if (!ign_dev_err) 3246 goto fail; 3247 else 3248 dev_err_whine = " (device error ignored)"; 3249 } 3250 3251 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3252 dev->xfer_shift, (int)dev->xfer_mode); 3253 3254 ata_dev_info(dev, "configured for %s%s\n", 3255 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3256 dev_err_whine); 3257 3258 return 0; 3259 3260 fail: 3261 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask); 3262 return -EIO; 3263 } 3264 3265 /** 3266 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3267 * @link: link on which timings will be programmed 3268 * @r_failed_dev: out parameter for failed device 3269 * 3270 * Standard implementation of the function used to tune and set 3271 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3272 * ata_dev_set_mode() fails, pointer to the failing device is 3273 * returned in @r_failed_dev. 3274 * 3275 * LOCKING: 3276 * PCI/etc. bus probe sem. 
3277 * 3278 * RETURNS: 3279 * 0 on success, negative errno otherwise 3280 */ 3281 3282 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3283 { 3284 struct ata_port *ap = link->ap; 3285 struct ata_device *dev; 3286 int rc = 0, used_dma = 0, found = 0; 3287 3288 /* step 1: calculate xfer_mask */ 3289 ata_for_each_dev(dev, link, ENABLED) { 3290 unsigned long pio_mask, dma_mask; 3291 unsigned int mode_mask; 3292 3293 mode_mask = ATA_DMA_MASK_ATA; 3294 if (dev->class == ATA_DEV_ATAPI) 3295 mode_mask = ATA_DMA_MASK_ATAPI; 3296 else if (ata_id_is_cfa(dev->id)) 3297 mode_mask = ATA_DMA_MASK_CFA; 3298 3299 ata_dev_xfermask(dev); 3300 ata_force_xfermask(dev); 3301 3302 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3303 3304 if (libata_dma_mask & mode_mask) 3305 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, 3306 dev->udma_mask); 3307 else 3308 dma_mask = 0; 3309 3310 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3311 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3312 3313 found = 1; 3314 if (ata_dma_enabled(dev)) 3315 used_dma = 1; 3316 } 3317 if (!found) 3318 goto out; 3319 3320 /* step 2: always set host PIO timings */ 3321 ata_for_each_dev(dev, link, ENABLED) { 3322 if (dev->pio_mode == 0xff) { 3323 ata_dev_warn(dev, "no PIO support\n"); 3324 rc = -EINVAL; 3325 goto out; 3326 } 3327 3328 dev->xfer_mode = dev->pio_mode; 3329 dev->xfer_shift = ATA_SHIFT_PIO; 3330 if (ap->ops->set_piomode) 3331 ap->ops->set_piomode(ap, dev); 3332 } 3333 3334 /* step 3: set host DMA timings */ 3335 ata_for_each_dev(dev, link, ENABLED) { 3336 if (!ata_dma_enabled(dev)) 3337 continue; 3338 3339 dev->xfer_mode = dev->dma_mode; 3340 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3341 if (ap->ops->set_dmamode) 3342 ap->ops->set_dmamode(ap, dev); 3343 } 3344 3345 /* step 4: update devices' xfer mode */ 3346 ata_for_each_dev(dev, link, ENABLED) { 3347 rc = ata_dev_set_mode(dev); 3348 if (rc) 3349 goto out; 3350 } 3351 3352 /* Record simplex status. If we selected DMA then the other 3353 * host channels are not permitted to do so. 3354 */ 3355 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3356 ap->host->simplex_claimed = ap; 3357 3358 out: 3359 if (rc) 3360 *r_failed_dev = dev; 3361 return rc; 3362 } 3363 3364 /** 3365 * ata_wait_ready - wait for link to become ready 3366 * @link: link to be waited on 3367 * @deadline: deadline jiffies for the operation 3368 * @check_ready: callback to check link readiness 3369 * 3370 * Wait for @link to become ready. @check_ready should return a 3371 * positive number if @link is ready, 0 if it isn't, -ENODEV if 3372 * link doesn't seem to be occupied, other errno for other error 3373 * conditions. 3374 * 3375 * Transient -ENODEV conditions are allowed for 3376 * ATA_TMOUT_FF_WAIT. 3377 * 3378 * LOCKING: 3379 * EH context. 3380 * 3381 * RETURNS: 3382 * 0 if @link is ready before @deadline; otherwise, -errno. 3383 */ 3384 int ata_wait_ready(struct ata_link *link, unsigned long deadline, 3385 int (*check_ready)(struct ata_link *link)) 3386 { 3387 unsigned long start = jiffies; 3388 unsigned long nodev_deadline; 3389 int warned = 0; 3390 3391 /* choose which 0xff timeout to use, read comment in libata.h */ 3392 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) 3393 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); 3394 else 3395 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3396 3397 /* Slave readiness can't be tested separately from master.
On 3398 * M/S emulation configuration, this function should be called 3399 * only on the master and it will handle both master and slave. 3400 */ 3401 WARN_ON(link == link->ap->slave_link); 3402 3403 if (time_after(nodev_deadline, deadline)) 3404 nodev_deadline = deadline; 3405 3406 while (1) { 3407 unsigned long now = jiffies; 3408 int ready, tmp; 3409 3410 ready = tmp = check_ready(link); 3411 if (ready > 0) 3412 return 0; 3413 3414 /* 3415 * -ENODEV could be transient. Ignore -ENODEV if link 3416 * is online. Also, some SATA devices take a long 3417 * time to clear 0xff after reset. Wait for 3418 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't 3419 * offline. 3420 * 3421 * Note that some PATA controllers (pata_ali) explode 3422 * if the status register is read more than once when 3423 * there's no device attached. 3424 */ 3425 if (ready == -ENODEV) { 3426 if (ata_link_online(link)) 3427 ready = 0; 3428 else if ((link->ap->flags & ATA_FLAG_SATA) && 3429 !ata_link_offline(link) && 3430 time_before(now, nodev_deadline)) 3431 ready = 0; 3432 } 3433 3434 if (ready) 3435 return ready; 3436 if (time_after(now, deadline)) 3437 return -EBUSY; 3438 3439 if (!warned && time_after(now, start + 5 * HZ) && 3440 (deadline - now > 3 * HZ)) { 3441 ata_link_warn(link, 3442 "link is slow to respond, please be patient " 3443 "(ready=%d)\n", tmp); 3444 warned = 1; 3445 } 3446 3447 ata_msleep(link->ap, 50); 3448 } 3449 } 3450 3451 /** 3452 * ata_wait_after_reset - wait for link to become ready after reset 3453 * @link: link to be waited on 3454 * @deadline: deadline jiffies for the operation 3455 * @check_ready: callback to check link readiness 3456 * 3457 * Wait for @link to become ready after reset. 3458 * 3459 * LOCKING: 3460 * EH context. 3461 * 3462 * RETURNS: 3463 * 0 if @link is ready before @deadline; otherwise, -errno. 3464 */ 3465 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 3466 int (*check_ready)(struct ata_link *link)) 3467 { 3468 ata_msleep(link->ap, ATA_WAIT_AFTER_RESET); 3469 3470 return ata_wait_ready(link, deadline, check_ready); 3471 } 3472 3473 /** 3474 * sata_link_debounce - debounce SATA phy status 3475 * @link: ATA link to debounce SATA phy status for 3476 * @params: timing parameters { interval, duration, timeout } in msec 3477 * @deadline: deadline jiffies for the operation 3478 * 3479 * Make sure SStatus of @link reaches a stable state, determined by 3480 * holding the same value where DET is not 1 for @duration polled 3481 * every @interval, before @timeout. Timeout constrains the 3482 * beginning of the stable state. Because DET gets stuck at 1 on 3483 * some controllers after hot unplugging, this function waits 3484 * until timeout then returns 0 if DET is stable at 1. 3485 * 3486 * @timeout is further limited by @deadline. The sooner of the 3487 * two is used. 3488 * 3489 * LOCKING: 3490 * Kernel thread context (may sleep) 3491 * 3492 * RETURNS: 3493 * 0 on success, -errno on failure.
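 *
 * (For instance, an illustrative @params of { 10, 200, 1000 } polls
 * SStatus every 10 ms, reports success once DET has held the same
 * value other than 1 for 200 ms, and stops polling 1000 ms after the
 * call or at @deadline, whichever comes first.)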
3494 */ 3495 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3496 unsigned long deadline) 3497 { 3498 unsigned long interval = params[0]; 3499 unsigned long duration = params[1]; 3500 unsigned long last_jiffies, t; 3501 u32 last, cur; 3502 int rc; 3503 3504 t = ata_deadline(jiffies, params[2]); 3505 if (time_before(t, deadline)) 3506 deadline = t; 3507 3508 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3509 return rc; 3510 cur &= 0xf; 3511 3512 last = cur; 3513 last_jiffies = jiffies; 3514 3515 while (1) { 3516 ata_msleep(link->ap, interval); 3517 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3518 return rc; 3519 cur &= 0xf; 3520 3521 /* DET stable? */ 3522 if (cur == last) { 3523 if (cur == 1 && time_before(jiffies, deadline)) 3524 continue; 3525 if (time_after(jiffies, 3526 ata_deadline(last_jiffies, duration))) 3527 return 0; 3528 continue; 3529 } 3530 3531 /* unstable, start over */ 3532 last = cur; 3533 last_jiffies = jiffies; 3534 3535 /* Check deadline. If debouncing failed, return 3536 * -EPIPE to tell upper layer to lower link speed. 3537 */ 3538 if (time_after(jiffies, deadline)) 3539 return -EPIPE; 3540 } 3541 } 3542 3543 /** 3544 * sata_link_resume - resume SATA link 3545 * @link: ATA link to resume 3546 * @params: timing parameters { interval, duration, timeout } in msec 3547 * @deadline: deadline jiffies for the operation 3548 * 3549 * Resume SATA phy @link and debounce it. 3550 * 3551 * LOCKING: 3552 * Kernel thread context (may sleep) 3553 * 3554 * RETURNS: 3555 * 0 on success, -errno on failure. 3556 */ 3557 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3558 unsigned long deadline) 3559 { 3560 int tries = ATA_LINK_RESUME_TRIES; 3561 u32 scontrol, serror; 3562 int rc; 3563 3564 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3565 return rc; 3566 3567 /* 3568 * Writes to SControl sometimes get ignored under certain 3569 * controllers (ata_piix SIDPR). Make sure DET actually is 3570 * cleared. 3571 */ 3572 do { 3573 scontrol = (scontrol & 0x0f0) | 0x300; 3574 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3575 return rc; 3576 /* 3577 * Some PHYs react badly if SStatus is pounded 3578 * immediately after resuming. Delay 200ms before 3579 * debouncing. 3580 */ 3581 ata_msleep(link->ap, 200); 3582 3583 /* is SControl restored correctly? */ 3584 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3585 return rc; 3586 } while ((scontrol & 0xf0f) != 0x300 && --tries); 3587 3588 if ((scontrol & 0xf0f) != 0x300) { 3589 ata_link_warn(link, "failed to resume link (SControl %X)\n", 3590 scontrol); 3591 return 0; 3592 } 3593 3594 if (tries < ATA_LINK_RESUME_TRIES) 3595 ata_link_warn(link, "link resume succeeded after %d retries\n", 3596 ATA_LINK_RESUME_TRIES - tries); 3597 3598 if ((rc = sata_link_debounce(link, params, deadline))) 3599 return rc; 3600 3601 /* clear SError, some PHYs require this even for SRST to work */ 3602 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) 3603 rc = sata_scr_write(link, SCR_ERROR, serror); 3604 3605 return rc != -EINVAL ? rc : 0; 3606 } 3607 3608 /** 3609 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields 3610 * @link: ATA link to manipulate SControl for 3611 * @policy: LPM policy to configure 3612 * @spm_wakeup: initiate LPM transition to active state 3613 * 3614 * Manipulate the IPM field of the SControl register of @link 3615 * according to @policy.
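 * (The IPM field occupies bits 11:8 of SControl; each bit set there
 * disallows one of the interface power management states, which is why
 * ATA_LPM_MAX_POWER ORs in 0x7 << 8 below while ATA_LPM_MIN_POWER
 * clears those bits.)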
If @policy is ATA_LPM_MAX_POWER and 3616 * @spm_wakeup is %true, the SPM field is manipulated to wake up 3617 * the link. This function also clears PHYRDY_CHG before 3618 * returning. 3619 * 3620 * LOCKING: 3621 * EH context. 3622 * 3623 * RETURNS: 3624 * 0 on success, -errno otherwise. 3625 */ 3626 int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, 3627 bool spm_wakeup) 3628 { 3629 struct ata_eh_context *ehc = &link->eh_context; 3630 bool woken_up = false; 3631 u32 scontrol; 3632 int rc; 3633 3634 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 3635 if (rc) 3636 return rc; 3637 3638 switch (policy) { 3639 case ATA_LPM_MAX_POWER: 3640 /* disable all LPM transitions */ 3641 scontrol |= (0x7 << 8); 3642 /* initiate transition to active state */ 3643 if (spm_wakeup) { 3644 scontrol |= (0x4 << 12); 3645 woken_up = true; 3646 } 3647 break; 3648 case ATA_LPM_MED_POWER: 3649 /* allow LPM to PARTIAL */ 3650 scontrol &= ~(0x1 << 8); 3651 scontrol |= (0x6 << 8); 3652 break; 3653 case ATA_LPM_MIN_POWER: 3654 if (ata_link_nr_enabled(link) > 0) 3655 /* no restrictions on LPM transitions */ 3656 scontrol &= ~(0x7 << 8); 3657 else { 3658 /* empty port, power off */ 3659 scontrol &= ~0xf; 3660 scontrol |= (0x1 << 2); 3661 } 3662 break; 3663 default: 3664 WARN_ON(1); 3665 } 3666 3667 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 3668 if (rc) 3669 return rc; 3670 3671 /* give the link time to transit out of LPM state */ 3672 if (woken_up) 3673 msleep(10); 3674 3675 /* clear PHYRDY_CHG from SError */ 3676 ehc->i.serror &= ~SERR_PHYRDY_CHG; 3677 return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG); 3678 } 3679 3680 /** 3681 * ata_std_prereset - prepare for reset 3682 * @link: ATA link to be reset 3683 * @deadline: deadline jiffies for the operation 3684 * 3685 * @link is about to be reset. Initialize it. Failure from 3686 * prereset makes libata abort the whole reset sequence and give up 3687 * that port, so prereset should be best-effort. It does its 3688 * best to prepare for reset sequence but if things go wrong, it 3689 * should just whine, not fail. 3690 * 3691 * LOCKING: 3692 * Kernel thread context (may sleep) 3693 * 3694 * RETURNS: 3695 * 0 on success, -errno otherwise. 3696 */ 3697 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 3698 { 3699 struct ata_port *ap = link->ap; 3700 struct ata_eh_context *ehc = &link->eh_context; 3701 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3702 int rc; 3703 3704 /* if we're about to do hardreset, nothing more to do */ 3705 if (ehc->i.action & ATA_EH_HARDRESET) 3706 return 0; 3707 3708 /* if SATA, resume link */ 3709 if (ap->flags & ATA_FLAG_SATA) { 3710 rc = sata_link_resume(link, timing, deadline); 3711 /* whine about phy resume failure but proceed */ 3712 if (rc && rc != -EOPNOTSUPP) 3713 ata_link_warn(link, 3714 "failed to resume link for reset (errno=%d)\n", 3715 rc); 3716 } 3717 3718 /* no point in trying softreset on offline link */ 3719 if (ata_phys_link_offline(link)) 3720 ehc->i.action &= ~ATA_EH_SOFTRESET; 3721 3722 return 0; 3723 } 3724 3725 /** 3726 * sata_link_hardreset - reset link via SATA phy reset 3727 * @link: link to reset 3728 * @timing: timing parameters { interval, duration, timeout } in msec 3729 * @deadline: deadline jiffies for the operation 3730 * @online: optional out parameter indicating link onlineness 3731 * @check_ready: optional callback to check link readiness 3732 * 3733 * SATA phy-reset @link using DET bits of SControl register.
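 * (DET is the low nibble of SControl: the function writes 0x1 there,
 * the 0x301 value below, to issue COMRESET, and 0x4, the 0x304 value,
 * to hold the PHY offline while the speed limit is being
 * reconfigured.)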
3734 * After hardreset, link readiness is waited upon using 3735 * ata_wait_ready() if @check_ready is specified. LLDs are 3736 * allowed to not specify @check_ready and wait itself after this 3737 * function returns. Device classification is LLD's 3738 * responsibility. 3739 * 3740 * *@online is set to one iff reset succeeded and @link is online 3741 * after reset. 3742 * 3743 * LOCKING: 3744 * Kernel thread context (may sleep) 3745 * 3746 * RETURNS: 3747 * 0 on success, -errno otherwise. 3748 */ 3749 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 3750 unsigned long deadline, 3751 bool *online, int (*check_ready)(struct ata_link *)) 3752 { 3753 u32 scontrol; 3754 int rc; 3755 3756 DPRINTK("ENTER\n"); 3757 3758 if (online) 3759 *online = false; 3760 3761 if (sata_set_spd_needed(link)) { 3762 /* SATA spec says nothing about how to reconfigure 3763 * spd. To be on the safe side, turn off phy during 3764 * reconfiguration. This works for at least ICH7 AHCI 3765 * and Sil3124. 3766 */ 3767 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3768 goto out; 3769 3770 scontrol = (scontrol & 0x0f0) | 0x304; 3771 3772 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3773 goto out; 3774 3775 sata_set_spd(link); 3776 } 3777 3778 /* issue phy wake/reset */ 3779 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3780 goto out; 3781 3782 scontrol = (scontrol & 0x0f0) | 0x301; 3783 3784 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 3785 goto out; 3786 3787 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3788 * 10.4.2 says at least 1 ms. 3789 */ 3790 ata_msleep(link->ap, 1); 3791 3792 /* bring link back */ 3793 rc = sata_link_resume(link, timing, deadline); 3794 if (rc) 3795 goto out; 3796 /* if link is offline nothing more to do */ 3797 if (ata_phys_link_offline(link)) 3798 goto out; 3799 3800 /* Link is online. From this point, -ENODEV too is an error. */ 3801 if (online) 3802 *online = true; 3803 3804 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { 3805 /* If PMP is supported, we have to do follow-up SRST. 3806 * Some PMPs don't send D2H Reg FIS after hardreset if 3807 * the first port is empty. Wait only for 3808 * ATA_TMOUT_PMP_SRST_WAIT. 3809 */ 3810 if (check_ready) { 3811 unsigned long pmp_deadline; 3812 3813 pmp_deadline = ata_deadline(jiffies, 3814 ATA_TMOUT_PMP_SRST_WAIT); 3815 if (time_after(pmp_deadline, deadline)) 3816 pmp_deadline = deadline; 3817 ata_wait_ready(link, pmp_deadline, check_ready); 3818 } 3819 rc = -EAGAIN; 3820 goto out; 3821 } 3822 3823 rc = 0; 3824 if (check_ready) 3825 rc = ata_wait_ready(link, deadline, check_ready); 3826 out: 3827 if (rc && rc != -EAGAIN) { 3828 /* online is set iff link is online && reset succeeded */ 3829 if (online) 3830 *online = false; 3831 ata_link_err(link, "COMRESET failed (errno=%d)\n", rc); 3832 } 3833 DPRINTK("EXIT, rc=%d\n", rc); 3834 return rc; 3835 } 3836 3837 /** 3838 * sata_std_hardreset - COMRESET w/o waiting or classification 3839 * @link: link to reset 3840 * @class: resulting class of attached device 3841 * @deadline: deadline jiffies for the operation 3842 * 3843 * Standard SATA COMRESET w/o waiting or classification. 3844 * 3845 * LOCKING: 3846 * Kernel thread context (may sleep) 3847 * 3848 * RETURNS: 3849 * 0 if link offline, -EAGAIN if link online, -errno on errors. 
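 *
 * (Illustration, not a requirement: a SATA LLD that needs no
 * controller-specific reset sequence typically points the hardreset
 * slot of its ata_port_operations at this function and lets libata EH
 * handle the follow-up wait and device classification.)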
3850 */ 3851 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 3852 unsigned long deadline) 3853 { 3854 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 3855 bool online; 3856 int rc; 3857 3858 /* do hardreset */ 3859 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 3860 return online ? -EAGAIN : rc; 3861 } 3862 3863 /** 3864 * ata_std_postreset - standard postreset callback 3865 * @link: the target ata_link 3866 * @classes: classes of attached devices 3867 * 3868 * This function is invoked after a successful reset. Note that 3869 * the device might have been reset more than once using 3870 * different reset methods before postreset is invoked. 3871 * 3872 * LOCKING: 3873 * Kernel thread context (may sleep) 3874 */ 3875 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 3876 { 3877 u32 serror; 3878 3879 DPRINTK("ENTER\n"); 3880 3881 /* reset complete, clear SError */ 3882 if (!sata_scr_read(link, SCR_ERROR, &serror)) 3883 sata_scr_write(link, SCR_ERROR, serror); 3884 3885 /* print link status */ 3886 sata_print_link_status(link); 3887 3888 DPRINTK("EXIT\n"); 3889 } 3890 3891 /** 3892 * ata_dev_same_device - Determine whether new ID matches configured device 3893 * @dev: device to compare against 3894 * @new_class: class of the new device 3895 * @new_id: IDENTIFY page of the new device 3896 * 3897 * Compare @new_class and @new_id against @dev and determine 3898 * whether @dev is the device indicated by @new_class and 3899 * @new_id. 3900 * 3901 * LOCKING: 3902 * None. 3903 * 3904 * RETURNS: 3905 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 3906 */ 3907 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 3908 const u16 *new_id) 3909 { 3910 const u16 *old_id = dev->id; 3911 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 3912 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 3913 3914 if (dev->class != new_class) { 3915 ata_dev_info(dev, "class mismatch %d != %d\n", 3916 dev->class, new_class); 3917 return 0; 3918 } 3919 3920 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 3921 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 3922 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 3923 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 3924 3925 if (strcmp(model[0], model[1])) { 3926 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", 3927 model[0], model[1]); 3928 return 0; 3929 } 3930 3931 if (strcmp(serial[0], serial[1])) { 3932 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", 3933 serial[0], serial[1]); 3934 return 0; 3935 } 3936 3937 return 1; 3938 } 3939 3940 /** 3941 * ata_dev_reread_id - Re-read IDENTIFY data 3942 * @dev: target ATA device 3943 * @readid_flags: read ID flags 3944 * 3945 * Re-read IDENTIFY page and make sure @dev is still attached to 3946 * the port. 3947 * 3948 * LOCKING: 3949 * Kernel thread context (may sleep) 3950 * 3951 * RETURNS: 3952 * 0 on success, negative errno otherwise 3953 */ 3954 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 3955 { 3956 unsigned int class = dev->class; 3957 u16 *id = (void *)dev->link->ap->sector_buf; 3958 int rc; 3959 3960 /* read ID data */ 3961 rc = ata_dev_read_id(dev, &class, readid_flags, id); 3962 if (rc) 3963 return rc; 3964 3965 /* is the device still there? 
*/ 3966 if (!ata_dev_same_device(dev, class, id)) 3967 return -ENODEV; 3968 3969 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 3970 return 0; 3971 } 3972 3973 /** 3974 * ata_dev_revalidate - Revalidate ATA device 3975 * @dev: device to revalidate 3976 * @new_class: new class code 3977 * @readid_flags: read ID flags 3978 * 3979 * Re-read IDENTIFY page, make sure @dev is still attached to the 3980 * port and reconfigure it according to the new IDENTIFY page. 3981 * 3982 * LOCKING: 3983 * Kernel thread context (may sleep) 3984 * 3985 * RETURNS: 3986 * 0 on success, negative errno otherwise 3987 */ 3988 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 3989 unsigned int readid_flags) 3990 { 3991 u64 n_sectors = dev->n_sectors; 3992 u64 n_native_sectors = dev->n_native_sectors; 3993 int rc; 3994 3995 if (!ata_dev_enabled(dev)) 3996 return -ENODEV; 3997 3998 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 3999 if (ata_class_enabled(new_class) && 4000 new_class != ATA_DEV_ATA && 4001 new_class != ATA_DEV_ATAPI && 4002 new_class != ATA_DEV_SEMB) { 4003 ata_dev_info(dev, "class mismatch %u != %u\n", 4004 dev->class, new_class); 4005 rc = -ENODEV; 4006 goto fail; 4007 } 4008 4009 /* re-read ID */ 4010 rc = ata_dev_reread_id(dev, readid_flags); 4011 if (rc) 4012 goto fail; 4013 4014 /* configure device according to the new ID */ 4015 rc = ata_dev_configure(dev); 4016 if (rc) 4017 goto fail; 4018 4019 /* verify n_sectors hasn't changed */ 4020 if (dev->class != ATA_DEV_ATA || !n_sectors || 4021 dev->n_sectors == n_sectors) 4022 return 0; 4023 4024 /* n_sectors has changed */ 4025 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", 4026 (unsigned long long)n_sectors, 4027 (unsigned long long)dev->n_sectors); 4028 4029 /* 4030 * Something could have caused HPA to be unlocked 4031 * involuntarily. If n_native_sectors hasn't changed and the 4032 * new size matches it, keep the device. 4033 */ 4034 if (dev->n_native_sectors == n_native_sectors && 4035 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 4036 ata_dev_warn(dev, 4037 "new n_sectors matches native, probably " 4038 "late HPA unlock, n_sectors updated\n"); 4039 /* use the larger n_sectors */ 4040 return 0; 4041 } 4042 4043 /* 4044 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try 4045 * unlocking HPA in those cases. 
4046 * 4047 * https://bugzilla.kernel.org/show_bug.cgi?id=15396 4048 */ 4049 if (dev->n_native_sectors == n_native_sectors && 4050 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && 4051 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { 4052 ata_dev_warn(dev, 4053 "old n_sectors matches native, probably " 4054 "late HPA lock, will try to unlock HPA\n"); 4055 /* try unlocking HPA */ 4056 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 4057 rc = -EIO; 4058 } else 4059 rc = -ENODEV; 4060 4061 /* restore original n_[native_]sectors and fail */ 4062 dev->n_native_sectors = n_native_sectors; 4063 dev->n_sectors = n_sectors; 4064 fail: 4065 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); 4066 return rc; 4067 } 4068 4069 struct ata_blacklist_entry { 4070 const char *model_num; 4071 const char *model_rev; 4072 unsigned long horkage; 4073 }; 4074 4075 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4076 /* Devices with DMA related problems under Linux */ 4077 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4078 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4079 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4080 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4081 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4082 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4083 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4084 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4085 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4086 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, 4087 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4088 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4089 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4090 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4091 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4092 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, 4093 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4094 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4095 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4096 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4097 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4098 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4099 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4100 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4101 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4102 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4103 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4104 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4105 { " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4106 /* Odd clown on sil3726/4726 PMPs */ 4107 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4108 4109 /* Weird ATAPI devices */ 4110 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4111 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4112 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4113 4114 /* Devices we expect to fail diagnostics */ 4115 4116 /* Devices where NCQ should be avoided */ 4117 /* NCQ is slow */ 4118 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4119 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4120 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4121 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4122 /* NCQ is broken */ 4123 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4124 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4125 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4126 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4127 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4128 4129 /* Seagate NCQ + FLUSH 
CACHE firmware bug */ 4130 { "ST31500341AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4131 ATA_HORKAGE_FIRMWARE_WARN }, 4132 4133 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4134 ATA_HORKAGE_FIRMWARE_WARN }, 4135 4136 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4137 ATA_HORKAGE_FIRMWARE_WARN }, 4138 4139 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4140 ATA_HORKAGE_FIRMWARE_WARN }, 4141 4142 /* Blacklist entries taken from Silicon Image 3124/3132 4143 Windows driver .inf file - also several Linux problem reports */ 4144 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4145 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4146 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4147 4148 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4149 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4150 4151 /* devices which puke on READ_NATIVE_MAX */ 4152 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4153 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4154 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4155 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4156 4157 /* this one allows HPA unlocking but fails IOs on the area */ 4158 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, 4159 4160 /* Devices which report 1 sector over size HPA */ 4161 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4162 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4163 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4164 4165 /* Devices which get the IVB wrong */ 4166 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4167 /* Maybe we should just blacklist TSSTcorp... */ 4168 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, }, 4169 4170 /* Devices that do not need bridging limits applied */ 4171 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4172 { "BUFFALO HD-QSU2/R5", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4173 4174 /* Devices which aren't very happy with higher link speeds */ 4175 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4176 { "Seagate FreeAgent GoFlex", NULL, ATA_HORKAGE_1_5_GBPS, }, 4177 4178 /* 4179 * Devices which choke on SETXFER. Applies only if both the 4180 * device and controller are SATA. 4181 */ 4182 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, 4183 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, 4184 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, 4185 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4186 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4187 4188 /* End Marker */ 4189 { } 4190 }; 4191 4192 /** 4193 * glob_match - match a text string against a glob-style pattern 4194 * @text: the string to be examined 4195 * @pattern: the glob-style pattern to be matched against 4196 * 4197 * Either/both of text and pattern can be empty strings. 4198 * 4199 * Match text against a glob-style pattern, with wildcards and simple sets: 4200 * 4201 * ? matches any single character. 4202 * * matches any run of characters. 4203 * [xyz] matches a single character from the set: x, y, or z. 4204 * [a-d] matches a single character from the range: a, b, c, or d. 4205 * [a-d0-9] matches a single character from either range. 4206 * 4207 * The special characters ?, [, -, or *, can be matched using a set, eg. [*] 4208 * Behaviour with malformed patterns is undefined, though generally reasonable. 
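 *
 * Illustrative calls (example added here, not part of the original
 * comment; the model strings are made up, return convention as below):
 *
 *	glob_match("ST3320613AS", "ST3320[68]13AS")	returns 0 (match)
 *	glob_match("ST3320613AS", "ST3320*")		returns 0 (match)
 *	glob_match("ST3320613AS", "WDC *")		returns 1 (no match)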
4209 * 4210 * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx" 4211 * 4212 * This function uses one level of recursion per '*' in pattern. 4213 * Since it calls _nothing_ else, and has _no_ explicit local variables, 4214 * this will not cause stack problems for any reasonable use here. 4215 * 4216 * RETURNS: 4217 * 0 on match, 1 otherwise. 4218 */ 4219 static int glob_match (const char *text, const char *pattern) 4220 { 4221 do { 4222 /* Match single character or a '?' wildcard */ 4223 if (*text == *pattern || *pattern == '?') { 4224 if (!*pattern++) 4225 return 0; /* End of both strings: match */ 4226 } else { 4227 /* Match single char against a '[' bracketed ']' pattern set */ 4228 if (!*text || *pattern != '[') 4229 break; /* Not a pattern set */ 4230 while (*++pattern && *pattern != ']' && *text != *pattern) { 4231 if (*pattern == '-' && *(pattern - 1) != '[') 4232 if (*text > *(pattern - 1) && *text < *(pattern + 1)) { 4233 ++pattern; 4234 break; 4235 } 4236 } 4237 if (!*pattern || *pattern == ']') 4238 return 1; /* No match */ 4239 while (*pattern && *pattern++ != ']'); 4240 } 4241 } while (*++text && *pattern); 4242 4243 /* Match any run of chars against a '*' wildcard */ 4244 if (*pattern == '*') { 4245 if (!*++pattern) 4246 return 0; /* Match: avoid recursion at end of pattern */ 4247 /* Loop to handle additional pattern chars after the wildcard */ 4248 while (*text) { 4249 if (glob_match(text, pattern) == 0) 4250 return 0; /* Remainder matched */ 4251 ++text; /* Absorb (match) this char and try again */ 4252 } 4253 } 4254 if (!*text && !*pattern) 4255 return 0; /* End of both strings: match */ 4256 return 1; /* No match */ 4257 } 4258 4259 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4260 { 4261 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4262 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4263 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4264 4265 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4266 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4267 4268 while (ad->model_num) { 4269 if (!glob_match(model_num, ad->model_num)) { 4270 if (ad->model_rev == NULL) 4271 return ad->horkage; 4272 if (!glob_match(model_rev, ad->model_rev)) 4273 return ad->horkage; 4274 } 4275 ad++; 4276 } 4277 return 0; 4278 } 4279 4280 static int ata_dma_blacklisted(const struct ata_device *dev) 4281 { 4282 /* We don't support polling DMA. 4283 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4284 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4285 */ 4286 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4287 (dev->flags & ATA_DFLAG_CDB_INTR)) 4288 return 1; 4289 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4290 } 4291 4292 /** 4293 * ata_is_40wire - check drive side detection 4294 * @dev: device 4295 * 4296 * Perform drive side detection decoding, allowing for device vendors 4297 * who can't follow the documentation. 4298 */ 4299 4300 static int ata_is_40wire(struct ata_device *dev) 4301 { 4302 if (dev->horkage & ATA_HORKAGE_IVB) 4303 return ata_drive_40wire_relaxed(dev->id); 4304 return ata_drive_40wire(dev->id); 4305 } 4306 4307 /** 4308 * cable_is_40wire - 40/80/SATA decider 4309 * @ap: port to consider 4310 * 4311 * This function encapsulates the policy for speed management 4312 * in one place. 
At the moment we don't cache the result but 4313 * there is a good case for setting ap->cbl to the result when 4314 * we are called with unknown cables (and figuring out if it 4315 * impacts hotplug at all). 4316 * 4317 * Return 1 if the cable appears to be 40 wire. 4318 */ 4319 4320 static int cable_is_40wire(struct ata_port *ap) 4321 { 4322 struct ata_link *link; 4323 struct ata_device *dev; 4324 4325 /* If the controller thinks we are 40 wire, we are. */ 4326 if (ap->cbl == ATA_CBL_PATA40) 4327 return 1; 4328 4329 /* If the controller thinks we are 80 wire, we are. */ 4330 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4331 return 0; 4332 4333 /* If the system is known to be 40 wire short cable (eg 4334 * laptop), then we allow 80 wire modes even if the drive 4335 * isn't sure. 4336 */ 4337 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4338 return 0; 4339 4340 /* If the controller doesn't know, we scan. 4341 * 4342 * Note: We look for all 40 wire detects at this point. Any 4343 * 80 wire detect is taken to be 80 wire cable because 4344 * - in many setups only the one drive (slave if present) will 4345 * give a valid detect 4346 * - if you have a non detect capable drive you don't want it 4347 * to colour the choice 4348 */ 4349 ata_for_each_link(link, ap, EDGE) { 4350 ata_for_each_dev(dev, link, ENABLED) { 4351 if (!ata_is_40wire(dev)) 4352 return 0; 4353 } 4354 } 4355 return 1; 4356 } 4357 4358 /** 4359 * ata_dev_xfermask - Compute supported xfermask of the given device 4360 * @dev: Device to compute xfermask for 4361 * 4362 * Compute supported xfermask of @dev and store it in 4363 * dev->*_mask. This function is responsible for applying all 4364 * known limits including host controller limits, device 4365 * blacklist, etc... 4366 * 4367 * LOCKING: 4368 * None. 4369 */ 4370 static void ata_dev_xfermask(struct ata_device *dev) 4371 { 4372 struct ata_link *link = dev->link; 4373 struct ata_port *ap = link->ap; 4374 struct ata_host *host = ap->host; 4375 unsigned long xfer_mask; 4376 4377 /* controller modes available */ 4378 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4379 ap->mwdma_mask, ap->udma_mask); 4380 4381 /* drive modes available */ 4382 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4383 dev->mwdma_mask, dev->udma_mask); 4384 xfer_mask &= ata_id_xfermask(dev->id); 4385 4386 /* 4387 * CFA Advanced TrueIDE timings are not allowed on a shared 4388 * cable 4389 */ 4390 if (ata_dev_pair(dev)) { 4391 /* No PIO5 or PIO6 */ 4392 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4393 /* No MWDMA3 or MWDMA 4 */ 4394 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4395 } 4396 4397 if (ata_dma_blacklisted(dev)) { 4398 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4399 ata_dev_warn(dev, 4400 "device is on DMA blacklist, disabling DMA\n"); 4401 } 4402 4403 if ((host->flags & ATA_HOST_SIMPLEX) && 4404 host->simplex_claimed && host->simplex_claimed != ap) { 4405 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4406 ata_dev_warn(dev, 4407 "simplex DMA is claimed by other device, disabling DMA\n"); 4408 } 4409 4410 if (ap->flags & ATA_FLAG_NO_IORDY) 4411 xfer_mask &= ata_pio_mask_no_iordy(dev); 4412 4413 if (ap->ops->mode_filter) 4414 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4415 4416 /* Apply cable rule here. Don't apply it early because when 4417 * we handle hot plug the cable type can itself change. 4418 * Check this last so that we know if the transfer rate was 4419 * solely limited by the cable. 4420 * Unknown or 80 wire cables reported host side are checked 4421 * drive side as well. 
Cases where we know a 40wire cable 4422 * is used safely for 80 are not checked here. 4423 */ 4424 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4425 /* UDMA/44 or higher would be available */ 4426 if (cable_is_40wire(ap)) { 4427 ata_dev_warn(dev, 4428 "limited to UDMA/33 due to 40-wire cable\n"); 4429 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4430 } 4431 4432 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4433 &dev->mwdma_mask, &dev->udma_mask); 4434 } 4435 4436 /** 4437 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4438 * @dev: Device to which command will be sent 4439 * 4440 * Issue SET FEATURES - XFER MODE command to device @dev 4441 * on port @ap. 4442 * 4443 * LOCKING: 4444 * PCI/etc. bus probe sem. 4445 * 4446 * RETURNS: 4447 * 0 on success, AC_ERR_* mask otherwise. 4448 */ 4449 4450 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4451 { 4452 struct ata_taskfile tf; 4453 unsigned int err_mask; 4454 4455 /* set up set-features taskfile */ 4456 DPRINTK("set features - xfer mode\n"); 4457 4458 /* Some controllers and ATAPI devices show flaky interrupt 4459 * behavior after setting xfer mode. Use polling instead. 4460 */ 4461 ata_tf_init(dev, &tf); 4462 tf.command = ATA_CMD_SET_FEATURES; 4463 tf.feature = SETFEATURES_XFER; 4464 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4465 tf.protocol = ATA_PROT_NODATA; 4466 /* If we are using IORDY we must send the mode setting command */ 4467 if (ata_pio_need_iordy(dev)) 4468 tf.nsect = dev->xfer_mode; 4469 /* If the device has IORDY and the controller does not - turn it off */ 4470 else if (ata_id_has_iordy(dev->id)) 4471 tf.nsect = 0x01; 4472 else /* In the ancient relic department - skip all of this */ 4473 return 0; 4474 4475 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4476 4477 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4478 return err_mask; 4479 } 4480 4481 /** 4482 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4483 * @dev: Device to which command will be sent 4484 * @enable: Whether to enable or disable the feature 4485 * @feature: The sector count represents the feature to set 4486 * 4487 * Issue SET FEATURES - SATA FEATURES command to device @dev 4488 * on port @ap with sector count 4489 * 4490 * LOCKING: 4491 * PCI/etc. bus probe sem. 4492 * 4493 * RETURNS: 4494 * 0 on success, AC_ERR_* mask otherwise. 4495 */ 4496 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature) 4497 { 4498 struct ata_taskfile tf; 4499 unsigned int err_mask; 4500 4501 /* set up set-features taskfile */ 4502 DPRINTK("set features - SATA features\n"); 4503 4504 ata_tf_init(dev, &tf); 4505 tf.command = ATA_CMD_SET_FEATURES; 4506 tf.feature = enable; 4507 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4508 tf.protocol = ATA_PROT_NODATA; 4509 tf.nsect = feature; 4510 4511 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4512 4513 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4514 return err_mask; 4515 } 4516 EXPORT_SYMBOL_GPL(ata_dev_set_feature); 4517 4518 /** 4519 * ata_dev_init_params - Issue INIT DEV PARAMS command 4520 * @dev: Device to which command will be sent 4521 * @heads: Number of heads (taskfile parameter) 4522 * @sectors: Number of sectors (taskfile parameter) 4523 * 4524 * LOCKING: 4525 * Kernel thread context (may sleep) 4526 * 4527 * RETURNS: 4528 * 0 on success, AC_ERR_* mask otherwise. 
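 *
 * Illustrative call (added example): during device configuration the
 * CHS geometry is normally taken from IDENTIFY words 3 and 6, roughly:
 *
 *	err_mask = ata_dev_init_params(dev, id[3], id[6]);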
4529 */ 4530 static unsigned int ata_dev_init_params(struct ata_device *dev, 4531 u16 heads, u16 sectors) 4532 { 4533 struct ata_taskfile tf; 4534 unsigned int err_mask; 4535 4536 /* Number of sectors per track 1-255. Number of heads 1-16 */ 4537 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4538 return AC_ERR_INVALID; 4539 4540 /* set up init dev params taskfile */ 4541 DPRINTK("init dev params \n"); 4542 4543 ata_tf_init(dev, &tf); 4544 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4545 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4546 tf.protocol = ATA_PROT_NODATA; 4547 tf.nsect = sectors; 4548 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 4549 4550 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4551 /* A clean abort indicates an original or just out of spec drive 4552 and we should continue as we issue the setup based on the 4553 drive reported working geometry */ 4554 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4555 err_mask = 0; 4556 4557 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4558 return err_mask; 4559 } 4560 4561 /** 4562 * ata_sg_clean - Unmap DMA memory associated with command 4563 * @qc: Command containing DMA memory to be released 4564 * 4565 * Unmap all mapped DMA memory associated with this command. 4566 * 4567 * LOCKING: 4568 * spin_lock_irqsave(host lock) 4569 */ 4570 void ata_sg_clean(struct ata_queued_cmd *qc) 4571 { 4572 struct ata_port *ap = qc->ap; 4573 struct scatterlist *sg = qc->sg; 4574 int dir = qc->dma_dir; 4575 4576 WARN_ON_ONCE(sg == NULL); 4577 4578 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4579 4580 if (qc->n_elem) 4581 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4582 4583 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4584 qc->sg = NULL; 4585 } 4586 4587 /** 4588 * atapi_check_dma - Check whether ATAPI DMA can be supported 4589 * @qc: Metadata associated with taskfile to check 4590 * 4591 * Allow low-level driver to filter ATA PACKET commands, returning 4592 * a status indicating whether or not it is OK to use DMA for the 4593 * supplied PACKET command. 4594 * 4595 * LOCKING: 4596 * spin_lock_irqsave(host lock) 4597 * 4598 * RETURNS: 0 when ATAPI DMA can be used 4599 * nonzero otherwise 4600 */ 4601 int atapi_check_dma(struct ata_queued_cmd *qc) 4602 { 4603 struct ata_port *ap = qc->ap; 4604 4605 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4606 * few ATAPI devices choke on such DMA requests. 4607 */ 4608 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4609 unlikely(qc->nbytes & 15)) 4610 return 1; 4611 4612 if (ap->ops->check_atapi_dma) 4613 return ap->ops->check_atapi_dma(qc); 4614 4615 return 0; 4616 } 4617 4618 /** 4619 * ata_std_qc_defer - Check whether a qc needs to be deferred 4620 * @qc: ATA command in question 4621 * 4622 * Non-NCQ commands cannot run with any other command, NCQ or 4623 * not. As upper layer only knows the queue depth, we are 4624 * responsible for maintaining exclusion. This function checks 4625 * whether a new command @qc can be issued. 4626 * 4627 * LOCKING: 4628 * spin_lock_irqsave(host lock) 4629 * 4630 * RETURNS: 4631 * ATA_DEFER_* if deferring is needed, 0 otherwise. 
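 *
 * Illustrative sketch of a hypothetical LLD ->qc_defer hook layering a
 * controller-specific check on top of this helper (example only;
 * my_qc_defer() and my_controller_busy() are made-up names):
 *
 *	static int my_qc_defer(struct ata_queued_cmd *qc)
 *	{
 *		if (my_controller_busy(qc->ap))
 *			return ATA_DEFER_PORT;
 *		return ata_std_qc_defer(qc);
 *	}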
4632 */ 4633 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4634 { 4635 struct ata_link *link = qc->dev->link; 4636 4637 if (qc->tf.protocol == ATA_PROT_NCQ) { 4638 if (!ata_tag_valid(link->active_tag)) 4639 return 0; 4640 } else { 4641 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4642 return 0; 4643 } 4644 4645 return ATA_DEFER_LINK; 4646 } 4647 4648 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4649 4650 /** 4651 * ata_sg_init - Associate command with scatter-gather table. 4652 * @qc: Command to be associated 4653 * @sg: Scatter-gather table. 4654 * @n_elem: Number of elements in s/g table. 4655 * 4656 * Initialize the data-related elements of queued_cmd @qc 4657 * to point to a scatter-gather table @sg, containing @n_elem 4658 * elements. 4659 * 4660 * LOCKING: 4661 * spin_lock_irqsave(host lock) 4662 */ 4663 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4664 unsigned int n_elem) 4665 { 4666 qc->sg = sg; 4667 qc->n_elem = n_elem; 4668 qc->cursg = qc->sg; 4669 } 4670 4671 /** 4672 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4673 * @qc: Command with scatter-gather table to be mapped. 4674 * 4675 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4676 * 4677 * LOCKING: 4678 * spin_lock_irqsave(host lock) 4679 * 4680 * RETURNS: 4681 * Zero on success, negative on error. 4682 * 4683 */ 4684 static int ata_sg_setup(struct ata_queued_cmd *qc) 4685 { 4686 struct ata_port *ap = qc->ap; 4687 unsigned int n_elem; 4688 4689 VPRINTK("ENTER, ata%u\n", ap->print_id); 4690 4691 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4692 if (n_elem < 1) 4693 return -1; 4694 4695 DPRINTK("%d sg elements mapped\n", n_elem); 4696 qc->orig_n_elem = qc->n_elem; 4697 qc->n_elem = n_elem; 4698 qc->flags |= ATA_QCFLAG_DMAMAP; 4699 4700 return 0; 4701 } 4702 4703 /** 4704 * swap_buf_le16 - swap halves of 16-bit words in place 4705 * @buf: Buffer to swap 4706 * @buf_words: Number of 16-bit words in buffer. 4707 * 4708 * Swap halves of 16-bit words if needed to convert from 4709 * little-endian byte order to native cpu byte order, or 4710 * vice-versa. 4711 * 4712 * LOCKING: 4713 * Inherited from caller. 4714 */ 4715 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4716 { 4717 #ifdef __BIG_ENDIAN 4718 unsigned int i; 4719 4720 for (i = 0; i < buf_words; i++) 4721 buf[i] = le16_to_cpu(buf[i]); 4722 #endif /* __BIG_ENDIAN */ 4723 } 4724 4725 /** 4726 * ata_qc_new - Request an available ATA command, for queueing 4727 * @ap: target port 4728 * 4729 * LOCKING: 4730 * None. 4731 */ 4732 4733 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4734 { 4735 struct ata_queued_cmd *qc = NULL; 4736 unsigned int i; 4737 4738 /* no command while frozen */ 4739 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4740 return NULL; 4741 4742 /* the last tag is reserved for internal command. */ 4743 for (i = 0; i < ATA_MAX_QUEUE - 1; i++) 4744 if (!test_and_set_bit(i, &ap->qc_allocated)) { 4745 qc = __ata_qc_from_tag(ap, i); 4746 break; 4747 } 4748 4749 if (qc) 4750 qc->tag = i; 4751 4752 return qc; 4753 } 4754 4755 /** 4756 * ata_qc_new_init - Request an available ATA command, and initialize it 4757 * @dev: Device from whom we request an available command structure 4758 * 4759 * LOCKING: 4760 * None. 
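 *
 * Illustrative caller sketch (added example, roughly how the SCSI
 * translation layer obtains a qc; cmd is a hypothetical scsi_cmnd):
 *
 *	qc = ata_qc_new_init(dev);
 *	if (!qc)
 *		return NULL;		-- port frozen or no free tag
 *	qc->scsicmd = cmd;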
4761 */ 4762 4763 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) 4764 { 4765 struct ata_port *ap = dev->link->ap; 4766 struct ata_queued_cmd *qc; 4767 4768 qc = ata_qc_new(ap); 4769 if (qc) { 4770 qc->scsicmd = NULL; 4771 qc->ap = ap; 4772 qc->dev = dev; 4773 4774 ata_qc_reinit(qc); 4775 } 4776 4777 return qc; 4778 } 4779 4780 /** 4781 * ata_qc_free - free unused ata_queued_cmd 4782 * @qc: Command to complete 4783 * 4784 * Designed to free unused ata_queued_cmd object 4785 * in case something prevents using it. 4786 * 4787 * LOCKING: 4788 * spin_lock_irqsave(host lock) 4789 */ 4790 void ata_qc_free(struct ata_queued_cmd *qc) 4791 { 4792 struct ata_port *ap; 4793 unsigned int tag; 4794 4795 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4796 ap = qc->ap; 4797 4798 qc->flags = 0; 4799 tag = qc->tag; 4800 if (likely(ata_tag_valid(tag))) { 4801 qc->tag = ATA_TAG_POISON; 4802 clear_bit(tag, &ap->qc_allocated); 4803 } 4804 } 4805 4806 void __ata_qc_complete(struct ata_queued_cmd *qc) 4807 { 4808 struct ata_port *ap; 4809 struct ata_link *link; 4810 4811 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4812 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4813 ap = qc->ap; 4814 link = qc->dev->link; 4815 4816 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4817 ata_sg_clean(qc); 4818 4819 /* command should be marked inactive atomically with qc completion */ 4820 if (qc->tf.protocol == ATA_PROT_NCQ) { 4821 link->sactive &= ~(1 << qc->tag); 4822 if (!link->sactive) 4823 ap->nr_active_links--; 4824 } else { 4825 link->active_tag = ATA_TAG_POISON; 4826 ap->nr_active_links--; 4827 } 4828 4829 /* clear exclusive status */ 4830 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 4831 ap->excl_link == link)) 4832 ap->excl_link = NULL; 4833 4834 /* atapi: mark qc as inactive to prevent the interrupt handler 4835 * from completing the command twice later, before the error handler 4836 * is called. (when rc != 0 and atapi request sense is needed) 4837 */ 4838 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4839 ap->qc_active &= ~(1 << qc->tag); 4840 4841 /* call completion callback */ 4842 qc->complete_fn(qc); 4843 } 4844 4845 static void fill_result_tf(struct ata_queued_cmd *qc) 4846 { 4847 struct ata_port *ap = qc->ap; 4848 4849 qc->result_tf.flags = qc->tf.flags; 4850 ap->ops->qc_fill_rtf(qc); 4851 } 4852 4853 static void ata_verify_xfer(struct ata_queued_cmd *qc) 4854 { 4855 struct ata_device *dev = qc->dev; 4856 4857 if (ata_is_nodata(qc->tf.protocol)) 4858 return; 4859 4860 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 4861 return; 4862 4863 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 4864 } 4865 4866 /** 4867 * ata_qc_complete - Complete an active ATA command 4868 * @qc: Command to complete 4869 * 4870 * Indicate to the mid and upper layers that an ATA command has 4871 * completed, with either an ok or not-ok status. 4872 * 4873 * Refrain from calling this function multiple times when 4874 * successfully completing multiple NCQ commands. 4875 * ata_qc_complete_multiple() should be used instead, which will 4876 * properly update IRQ expect state. 4877 * 4878 * LOCKING: 4879 * spin_lock_irqsave(host lock) 4880 */ 4881 void ata_qc_complete(struct ata_queued_cmd *qc) 4882 { 4883 struct ata_port *ap = qc->ap; 4884 4885 /* XXX: New EH and old EH use different mechanisms to 4886 * synchronize EH with regular execution path. 4887 * 4888 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 
4889 * Normal execution path is responsible for not accessing a 4890 * failed qc. libata core enforces the rule by returning NULL 4891 * from ata_qc_from_tag() for failed qcs. 4892 * 4893 * Old EH depends on ata_qc_complete() nullifying completion 4894 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 4895 * not synchronize with interrupt handler. Only PIO task is 4896 * taken care of. 4897 */ 4898 if (ap->ops->error_handler) { 4899 struct ata_device *dev = qc->dev; 4900 struct ata_eh_info *ehi = &dev->link->eh_info; 4901 4902 if (unlikely(qc->err_mask)) 4903 qc->flags |= ATA_QCFLAG_FAILED; 4904 4905 /* 4906 * Finish internal commands without any further processing 4907 * and always with the result TF filled. 4908 */ 4909 if (unlikely(ata_tag_internal(qc->tag))) { 4910 fill_result_tf(qc); 4911 __ata_qc_complete(qc); 4912 return; 4913 } 4914 4915 /* 4916 * Non-internal qc has failed. Fill the result TF and 4917 * summon EH. 4918 */ 4919 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4920 fill_result_tf(qc); 4921 ata_qc_schedule_eh(qc); 4922 return; 4923 } 4924 4925 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 4926 4927 /* read result TF if requested */ 4928 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4929 fill_result_tf(qc); 4930 4931 /* Some commands need post-processing after successful 4932 * completion. 4933 */ 4934 switch (qc->tf.command) { 4935 case ATA_CMD_SET_FEATURES: 4936 if (qc->tf.feature != SETFEATURES_WC_ON && 4937 qc->tf.feature != SETFEATURES_WC_OFF) 4938 break; 4939 /* fall through */ 4940 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 4941 case ATA_CMD_SET_MULTI: /* multi_count changed */ 4942 /* revalidate device */ 4943 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 4944 ata_port_schedule_eh(ap); 4945 break; 4946 4947 case ATA_CMD_SLEEP: 4948 dev->flags |= ATA_DFLAG_SLEEPING; 4949 break; 4950 } 4951 4952 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 4953 ata_verify_xfer(qc); 4954 4955 __ata_qc_complete(qc); 4956 } else { 4957 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 4958 return; 4959 4960 /* read result TF if failed or requested */ 4961 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 4962 fill_result_tf(qc); 4963 4964 __ata_qc_complete(qc); 4965 } 4966 } 4967 4968 /** 4969 * ata_qc_complete_multiple - Complete multiple qcs successfully 4970 * @ap: port in question 4971 * @qc_active: new qc_active mask 4972 * 4973 * Complete in-flight commands. This function is meant to be 4974 * called from low-level driver's interrupt routine to complete 4975 * requests normally. ap->qc_active and @qc_active are compared 4976 * and commands are completed accordingly. 4977 * 4978 * Always use this function when completing multiple NCQ commands 4979 * from IRQ handlers instead of calling ata_qc_complete() 4980 * multiple times to keep IRQ expect status properly in sync. 4981 * 4982 * LOCKING: 4983 * spin_lock_irqsave(host lock) 4984 * 4985 * RETURNS: 4986 * Number of completed commands on success, -errno otherwise.
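 *
 * Illustrative call pattern from a hypothetical NCQ-capable LLD
 * interrupt handler (example only; port_mmio and MY_SACTIVE are
 * made-up names standing for whatever register reports the commands
 * the controller still owns):
 *
 *	u32 still_active = readl(port_mmio + MY_SACTIVE);
 *
 *	ata_qc_complete_multiple(ap, still_active);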
4987 */ 4988 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 4989 { 4990 int nr_done = 0; 4991 u32 done_mask; 4992 4993 done_mask = ap->qc_active ^ qc_active; 4994 4995 if (unlikely(done_mask & qc_active)) { 4996 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", 4997 ap->qc_active, qc_active); 4998 return -EINVAL; 4999 } 5000 5001 while (done_mask) { 5002 struct ata_queued_cmd *qc; 5003 unsigned int tag = __ffs(done_mask); 5004 5005 qc = ata_qc_from_tag(ap, tag); 5006 if (qc) { 5007 ata_qc_complete(qc); 5008 nr_done++; 5009 } 5010 done_mask &= ~(1 << tag); 5011 } 5012 5013 return nr_done; 5014 } 5015 5016 /** 5017 * ata_qc_issue - issue taskfile to device 5018 * @qc: command to issue to device 5019 * 5020 * Prepare an ATA command to submission to device. 5021 * This includes mapping the data into a DMA-able 5022 * area, filling in the S/G table, and finally 5023 * writing the taskfile to hardware, starting the command. 5024 * 5025 * LOCKING: 5026 * spin_lock_irqsave(host lock) 5027 */ 5028 void ata_qc_issue(struct ata_queued_cmd *qc) 5029 { 5030 struct ata_port *ap = qc->ap; 5031 struct ata_link *link = qc->dev->link; 5032 u8 prot = qc->tf.protocol; 5033 5034 /* Make sure only one non-NCQ command is outstanding. The 5035 * check is skipped for old EH because it reuses active qc to 5036 * request ATAPI sense. 5037 */ 5038 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5039 5040 if (ata_is_ncq(prot)) { 5041 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 5042 5043 if (!link->sactive) 5044 ap->nr_active_links++; 5045 link->sactive |= 1 << qc->tag; 5046 } else { 5047 WARN_ON_ONCE(link->sactive); 5048 5049 ap->nr_active_links++; 5050 link->active_tag = qc->tag; 5051 } 5052 5053 qc->flags |= ATA_QCFLAG_ACTIVE; 5054 ap->qc_active |= 1 << qc->tag; 5055 5056 /* 5057 * We guarantee to LLDs that they will have at least one 5058 * non-zero sg if the command is a data command. 5059 */ 5060 if (WARN_ON_ONCE(ata_is_data(prot) && 5061 (!qc->sg || !qc->n_elem || !qc->nbytes))) 5062 goto sys_err; 5063 5064 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5065 (ap->flags & ATA_FLAG_PIO_DMA))) 5066 if (ata_sg_setup(qc)) 5067 goto sys_err; 5068 5069 /* if device is sleeping, schedule reset and abort the link */ 5070 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5071 link->eh_info.action |= ATA_EH_RESET; 5072 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5073 ata_link_abort(link); 5074 return; 5075 } 5076 5077 ap->ops->qc_prep(qc); 5078 5079 qc->err_mask |= ap->ops->qc_issue(qc); 5080 if (unlikely(qc->err_mask)) 5081 goto err; 5082 return; 5083 5084 sys_err: 5085 qc->err_mask |= AC_ERR_SYSTEM; 5086 err: 5087 ata_qc_complete(qc); 5088 } 5089 5090 /** 5091 * sata_scr_valid - test whether SCRs are accessible 5092 * @link: ATA link to test SCR accessibility for 5093 * 5094 * Test whether SCRs are accessible for @link. 5095 * 5096 * LOCKING: 5097 * None. 5098 * 5099 * RETURNS: 5100 * 1 if SCRs are accessible, 0 otherwise. 5101 */ 5102 int sata_scr_valid(struct ata_link *link) 5103 { 5104 struct ata_port *ap = link->ap; 5105 5106 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5107 } 5108 5109 /** 5110 * sata_scr_read - read SCR register of the specified port 5111 * @link: ATA link to read SCR for 5112 * @reg: SCR to read 5113 * @val: Place to store read value 5114 * 5115 * Read SCR register @reg of @link into *@val. 
This function is 5116 * guaranteed to succeed if @link is ap->link, the cable type of 5117 * the port is SATA and the port implements ->scr_read. 5118 * 5119 * LOCKING: 5120 * None if @link is ap->link. Kernel thread context otherwise. 5121 * 5122 * RETURNS: 5123 * 0 on success, negative errno on failure. 5124 */ 5125 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5126 { 5127 if (ata_is_host_link(link)) { 5128 if (sata_scr_valid(link)) 5129 return link->ap->ops->scr_read(link, reg, val); 5130 return -EOPNOTSUPP; 5131 } 5132 5133 return sata_pmp_scr_read(link, reg, val); 5134 } 5135 5136 /** 5137 * sata_scr_write - write SCR register of the specified port 5138 * @link: ATA link to write SCR for 5139 * @reg: SCR to write 5140 * @val: value to write 5141 * 5142 * Write @val to SCR register @reg of @link. This function is 5143 * guaranteed to succeed if @link is ap->link, the cable type of 5144 * the port is SATA and the port implements ->scr_read. 5145 * 5146 * LOCKING: 5147 * None if @link is ap->link. Kernel thread context otherwise. 5148 * 5149 * RETURNS: 5150 * 0 on success, negative errno on failure. 5151 */ 5152 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5153 { 5154 if (ata_is_host_link(link)) { 5155 if (sata_scr_valid(link)) 5156 return link->ap->ops->scr_write(link, reg, val); 5157 return -EOPNOTSUPP; 5158 } 5159 5160 return sata_pmp_scr_write(link, reg, val); 5161 } 5162 5163 /** 5164 * sata_scr_write_flush - write SCR register of the specified port and flush 5165 * @link: ATA link to write SCR for 5166 * @reg: SCR to write 5167 * @val: value to write 5168 * 5169 * This function is identical to sata_scr_write() except that this 5170 * function performs flush after writing to the register. 5171 * 5172 * LOCKING: 5173 * None if @link is ap->link. Kernel thread context otherwise. 5174 * 5175 * RETURNS: 5176 * 0 on success, negative errno on failure. 5177 */ 5178 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5179 { 5180 if (ata_is_host_link(link)) { 5181 int rc; 5182 5183 if (sata_scr_valid(link)) { 5184 rc = link->ap->ops->scr_write(link, reg, val); 5185 if (rc == 0) 5186 rc = link->ap->ops->scr_read(link, reg, &val); 5187 return rc; 5188 } 5189 return -EOPNOTSUPP; 5190 } 5191 5192 return sata_pmp_scr_write(link, reg, val); 5193 } 5194 5195 /** 5196 * ata_phys_link_online - test whether the given link is online 5197 * @link: ATA link to test 5198 * 5199 * Test whether @link is online. Note that this function returns 5200 * 0 if online status of @link cannot be obtained, so 5201 * ata_link_online(link) != !ata_link_offline(link). 5202 * 5203 * LOCKING: 5204 * None. 5205 * 5206 * RETURNS: 5207 * True if the port online status is available and online. 5208 */ 5209 bool ata_phys_link_online(struct ata_link *link) 5210 { 5211 u32 sstatus; 5212 5213 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5214 ata_sstatus_online(sstatus)) 5215 return true; 5216 return false; 5217 } 5218 5219 /** 5220 * ata_phys_link_offline - test whether the given link is offline 5221 * @link: ATA link to test 5222 * 5223 * Test whether @link is offline. Note that this function 5224 * returns 0 if offline status of @link cannot be obtained, so 5225 * ata_link_online(link) != !ata_link_offline(link). 5226 * 5227 * LOCKING: 5228 * None. 5229 * 5230 * RETURNS: 5231 * True if the port offline status is available and offline. 
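 *
 * Illustrative note (added example): when the SCR read fails, both
 * ata_phys_link_online() and ata_phys_link_offline() return false,
 * so a caller that must distinguish "unknown" checks both:
 *
 *	if (ata_phys_link_offline(link))
 *		...			-- known to be offline
 *	else if (ata_phys_link_online(link))
 *		...			-- known to be online
 *	else
 *		...			-- status unavailable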
5232 */ 5233 bool ata_phys_link_offline(struct ata_link *link) 5234 { 5235 u32 sstatus; 5236 5237 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5238 !ata_sstatus_online(sstatus)) 5239 return true; 5240 return false; 5241 } 5242 5243 /** 5244 * ata_link_online - test whether the given link is online 5245 * @link: ATA link to test 5246 * 5247 * Test whether @link is online. This is identical to 5248 * ata_phys_link_online() when there's no slave link. When 5249 * there's a slave link, this function should only be called on 5250 * the master link and will return true if any of M/S links is 5251 * online. 5252 * 5253 * LOCKING: 5254 * None. 5255 * 5256 * RETURNS: 5257 * True if the port online status is available and online. 5258 */ 5259 bool ata_link_online(struct ata_link *link) 5260 { 5261 struct ata_link *slave = link->ap->slave_link; 5262 5263 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5264 5265 return ata_phys_link_online(link) || 5266 (slave && ata_phys_link_online(slave)); 5267 } 5268 5269 /** 5270 * ata_link_offline - test whether the given link is offline 5271 * @link: ATA link to test 5272 * 5273 * Test whether @link is offline. This is identical to 5274 * ata_phys_link_offline() when there's no slave link. When 5275 * there's a slave link, this function should only be called on 5276 * the master link and will return true if both M/S links are 5277 * offline. 5278 * 5279 * LOCKING: 5280 * None. 5281 * 5282 * RETURNS: 5283 * True if the port offline status is available and offline. 5284 */ 5285 bool ata_link_offline(struct ata_link *link) 5286 { 5287 struct ata_link *slave = link->ap->slave_link; 5288 5289 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5290 5291 return ata_phys_link_offline(link) && 5292 (!slave || ata_phys_link_offline(slave)); 5293 } 5294 5295 #ifdef CONFIG_PM 5296 static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, 5297 unsigned int action, unsigned int ehi_flags, 5298 int *async) 5299 { 5300 struct ata_link *link; 5301 unsigned long flags; 5302 int rc = 0; 5303 5304 /* Previous resume operation might still be in 5305 * progress. Wait for PM_PENDING to clear. 5306 */ 5307 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5308 if (async) { 5309 *async = -EAGAIN; 5310 return 0; 5311 } 5312 ata_port_wait_eh(ap); 5313 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5314 } 5315 5316 /* request PM ops to EH */ 5317 spin_lock_irqsave(ap->lock, flags); 5318 5319 ap->pm_mesg = mesg; 5320 if (async) 5321 ap->pm_result = async; 5322 else 5323 ap->pm_result = &rc; 5324 5325 ap->pflags |= ATA_PFLAG_PM_PENDING; 5326 ata_for_each_link(link, ap, HOST_FIRST) { 5327 link->eh_info.action |= action; 5328 link->eh_info.flags |= ehi_flags; 5329 } 5330 5331 ata_port_schedule_eh(ap); 5332 5333 spin_unlock_irqrestore(ap->lock, flags); 5334 5335 /* wait and check result */ 5336 if (!async) { 5337 ata_port_wait_eh(ap); 5338 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5339 } 5340 5341 return rc; 5342 } 5343 5344 static int __ata_port_suspend_common(struct ata_port *ap, pm_message_t mesg, int *async) 5345 { 5346 /* 5347 * On some hardware, device fails to respond after spun down 5348 * for suspend. As the device won't be used before being 5349 * resumed, we don't need to touch the device. Ask EH to skip 5350 * the usual stuff and proceed directly to suspend. 
5351 * 5352 * http://thread.gmane.org/gmane.linux.ide/46764 5353 */ 5354 unsigned int ehi_flags = ATA_EHI_QUIET | ATA_EHI_NO_AUTOPSY | 5355 ATA_EHI_NO_RECOVERY; 5356 return ata_port_request_pm(ap, mesg, 0, ehi_flags, async); 5357 } 5358 5359 static int ata_port_suspend_common(struct device *dev, pm_message_t mesg) 5360 { 5361 struct ata_port *ap = to_ata_port(dev); 5362 5363 return __ata_port_suspend_common(ap, mesg, NULL); 5364 } 5365 5366 static int ata_port_suspend(struct device *dev) 5367 { 5368 if (pm_runtime_suspended(dev)) 5369 return 0; 5370 5371 return ata_port_suspend_common(dev, PMSG_SUSPEND); 5372 } 5373 5374 static int ata_port_do_freeze(struct device *dev) 5375 { 5376 if (pm_runtime_suspended(dev)) 5377 return 0; 5378 5379 return ata_port_suspend_common(dev, PMSG_FREEZE); 5380 } 5381 5382 static int ata_port_poweroff(struct device *dev) 5383 { 5384 return ata_port_suspend_common(dev, PMSG_HIBERNATE); 5385 } 5386 5387 static int __ata_port_resume_common(struct ata_port *ap, pm_message_t mesg, 5388 int *async) 5389 { 5390 int rc; 5391 5392 rc = ata_port_request_pm(ap, mesg, ATA_EH_RESET, 5393 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, async); 5394 return rc; 5395 } 5396 5397 static int ata_port_resume_common(struct device *dev, pm_message_t mesg) 5398 { 5399 struct ata_port *ap = to_ata_port(dev); 5400 5401 return __ata_port_resume_common(ap, mesg, NULL); 5402 } 5403 5404 static int ata_port_resume(struct device *dev) 5405 { 5406 int rc; 5407 5408 rc = ata_port_resume_common(dev, PMSG_RESUME); 5409 if (!rc) { 5410 pm_runtime_disable(dev); 5411 pm_runtime_set_active(dev); 5412 pm_runtime_enable(dev); 5413 } 5414 5415 return rc; 5416 } 5417 5418 /* 5419 * For ODDs, the upper layer will poll for media change every few seconds, 5420 * which will make it enter and leave suspend state every few seconds. And 5421 * as each suspend will cause a hard/soft reset, the gain of runtime suspend 5422 * is very little and the ODD may malfunction after constantly being reset. 5423 * So the idle callback here will not proceed to suspend if a non-ZPODD capable 5424 * ODD is attached to the port. 5425 */ 5426 static int ata_port_runtime_idle(struct device *dev) 5427 { 5428 struct ata_port *ap = to_ata_port(dev); 5429 struct ata_link *link; 5430 struct ata_device *adev; 5431 5432 ata_for_each_link(link, ap, HOST_FIRST) { 5433 ata_for_each_dev(adev, link, ENABLED) 5434 if (adev->class == ATA_DEV_ATAPI && 5435 !zpodd_dev_enabled(adev)) 5436 return -EBUSY; 5437 } 5438 5439 return 0; 5440 } 5441 5442 static int ata_port_runtime_suspend(struct device *dev) 5443 { 5444 return ata_port_suspend_common(dev, PMSG_AUTO_SUSPEND); 5445 } 5446 5447 static int ata_port_runtime_resume(struct device *dev) 5448 { 5449 return ata_port_resume_common(dev, PMSG_AUTO_RESUME); 5450 } 5451 5452 static const struct dev_pm_ops ata_port_pm_ops = { 5453 .suspend = ata_port_suspend, 5454 .resume = ata_port_resume, 5455 .freeze = ata_port_do_freeze, 5456 .thaw = ata_port_resume, 5457 .poweroff = ata_port_poweroff, 5458 .restore = ata_port_resume, 5459 5460 .runtime_suspend = ata_port_runtime_suspend, 5461 .runtime_resume = ata_port_runtime_resume, 5462 .runtime_idle = ata_port_runtime_idle, 5463 }; 5464 5465 /* sas ports don't participate in pm runtime management of ata_ports, 5466 * and need to resume ata devices at the domain level, not the per-port 5467 * level. sas suspend/resume is async to allow parallel port recovery 5468 * since sas has multiple ata_port instances per Scsi_Host. 
5469 */ 5470 int ata_sas_port_async_suspend(struct ata_port *ap, int *async) 5471 { 5472 return __ata_port_suspend_common(ap, PMSG_SUSPEND, async); 5473 } 5474 EXPORT_SYMBOL_GPL(ata_sas_port_async_suspend); 5475 5476 int ata_sas_port_async_resume(struct ata_port *ap, int *async) 5477 { 5478 return __ata_port_resume_common(ap, PMSG_RESUME, async); 5479 } 5480 EXPORT_SYMBOL_GPL(ata_sas_port_async_resume); 5481 5482 5483 /** 5484 * ata_host_suspend - suspend host 5485 * @host: host to suspend 5486 * @mesg: PM message 5487 * 5488 * Suspend @host. Actual operation is performed by port suspend. 5489 */ 5490 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5491 { 5492 host->dev->power.power_state = mesg; 5493 return 0; 5494 } 5495 5496 /** 5497 * ata_host_resume - resume host 5498 * @host: host to resume 5499 * 5500 * Resume @host. Actual operation is performed by port resume. 5501 */ 5502 void ata_host_resume(struct ata_host *host) 5503 { 5504 host->dev->power.power_state = PMSG_ON; 5505 } 5506 #endif 5507 5508 struct device_type ata_port_type = { 5509 .name = "ata_port", 5510 #ifdef CONFIG_PM 5511 .pm = &ata_port_pm_ops, 5512 #endif 5513 }; 5514 5515 /** 5516 * ata_dev_init - Initialize an ata_device structure 5517 * @dev: Device structure to initialize 5518 * 5519 * Initialize @dev in preparation for probing. 5520 * 5521 * LOCKING: 5522 * Inherited from caller. 5523 */ 5524 void ata_dev_init(struct ata_device *dev) 5525 { 5526 struct ata_link *link = ata_dev_phys_link(dev); 5527 struct ata_port *ap = link->ap; 5528 unsigned long flags; 5529 5530 /* SATA spd limit is bound to the attached device, reset together */ 5531 link->sata_spd_limit = link->hw_sata_spd_limit; 5532 link->sata_spd = 0; 5533 5534 /* High bits of dev->flags are used to record warm plug 5535 * requests which occur asynchronously. Synchronize using 5536 * host lock. 5537 */ 5538 spin_lock_irqsave(ap->lock, flags); 5539 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5540 dev->horkage = 0; 5541 spin_unlock_irqrestore(ap->lock, flags); 5542 5543 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5544 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5545 dev->pio_mask = UINT_MAX; 5546 dev->mwdma_mask = UINT_MAX; 5547 dev->udma_mask = UINT_MAX; 5548 } 5549 5550 /** 5551 * ata_link_init - Initialize an ata_link structure 5552 * @ap: ATA port link is attached to 5553 * @link: Link structure to initialize 5554 * @pmp: Port multiplier port number 5555 * 5556 * Initialize @link. 5557 * 5558 * LOCKING: 5559 * Kernel thread context (may sleep) 5560 */ 5561 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5562 { 5563 int i; 5564 5565 /* clear everything except for devices */ 5566 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, 5567 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); 5568 5569 link->ap = ap; 5570 link->pmp = pmp; 5571 link->active_tag = ATA_TAG_POISON; 5572 link->hw_sata_spd_limit = UINT_MAX; 5573 5574 /* can't use iterator, ap isn't initialized yet */ 5575 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5576 struct ata_device *dev = &link->device[i]; 5577 5578 dev->link = link; 5579 dev->devno = dev - link->device; 5580 #ifdef CONFIG_ATA_ACPI 5581 dev->gtf_filter = ata_acpi_gtf_filter; 5582 #endif 5583 ata_dev_init(dev); 5584 } 5585 } 5586 5587 /** 5588 * sata_link_init_spd - Initialize link->sata_spd_limit 5589 * @link: Link to configure sata_spd_limit for 5590 * 5591 * Initialize @link->[hw_]sata_spd_limit to the currently 5592 * configured value. 5593 * 5594 * LOCKING: 5595 * Kernel thread context (may sleep). 
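 *
 * Worked example (added illustration): if the SPD field of the saved
 * SControl value is 2 (limit to Gen2), hw_sata_spd_limit is masked
 * with (1 << 2) - 1 == 0x3, leaving only the 1.5 and 3.0 Gbps bits.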
5596 * 5597 * RETURNS: 5598 * 0 on success, -errno on failure. 5599 */ 5600 int sata_link_init_spd(struct ata_link *link) 5601 { 5602 u8 spd; 5603 int rc; 5604 5605 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5606 if (rc) 5607 return rc; 5608 5609 spd = (link->saved_scontrol >> 4) & 0xf; 5610 if (spd) 5611 link->hw_sata_spd_limit &= (1 << spd) - 1; 5612 5613 ata_force_link_limits(link); 5614 5615 link->sata_spd_limit = link->hw_sata_spd_limit; 5616 5617 return 0; 5618 } 5619 5620 /** 5621 * ata_port_alloc - allocate and initialize basic ATA port resources 5622 * @host: ATA host this allocated port belongs to 5623 * 5624 * Allocate and initialize basic ATA port resources. 5625 * 5626 * RETURNS: 5627 * Allocate ATA port on success, NULL on failure. 5628 * 5629 * LOCKING: 5630 * Inherited from calling layer (may sleep). 5631 */ 5632 struct ata_port *ata_port_alloc(struct ata_host *host) 5633 { 5634 struct ata_port *ap; 5635 5636 DPRINTK("ENTER\n"); 5637 5638 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5639 if (!ap) 5640 return NULL; 5641 5642 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; 5643 ap->lock = &host->lock; 5644 ap->print_id = -1; 5645 ap->local_port_no = -1; 5646 ap->host = host; 5647 ap->dev = host->dev; 5648 5649 #if defined(ATA_VERBOSE_DEBUG) 5650 /* turn on all debugging levels */ 5651 ap->msg_enable = 0x00FF; 5652 #elif defined(ATA_DEBUG) 5653 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5654 #else 5655 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5656 #endif 5657 5658 mutex_init(&ap->scsi_scan_mutex); 5659 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5660 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5661 INIT_LIST_HEAD(&ap->eh_done_q); 5662 init_waitqueue_head(&ap->eh_wait_q); 5663 init_completion(&ap->park_req_pending); 5664 init_timer_deferrable(&ap->fastdrain_timer); 5665 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5666 ap->fastdrain_timer.data = (unsigned long)ap; 5667 5668 ap->cbl = ATA_CBL_NONE; 5669 5670 ata_link_init(ap, &ap->link, 0); 5671 5672 #ifdef ATA_IRQ_TRAP 5673 ap->stats.unhandled_irq = 1; 5674 ap->stats.idle_irq = 1; 5675 #endif 5676 ata_sff_port_init(ap); 5677 5678 return ap; 5679 } 5680 5681 static void ata_host_release(struct device *gendev, void *res) 5682 { 5683 struct ata_host *host = dev_get_drvdata(gendev); 5684 int i; 5685 5686 for (i = 0; i < host->n_ports; i++) { 5687 struct ata_port *ap = host->ports[i]; 5688 5689 if (!ap) 5690 continue; 5691 5692 if (ap->scsi_host) 5693 scsi_host_put(ap->scsi_host); 5694 5695 kfree(ap->pmp_link); 5696 kfree(ap->slave_link); 5697 kfree(ap); 5698 host->ports[i] = NULL; 5699 } 5700 5701 dev_set_drvdata(gendev, NULL); 5702 } 5703 5704 /** 5705 * ata_host_alloc - allocate and init basic ATA host resources 5706 * @dev: generic device this host is associated with 5707 * @max_ports: maximum number of ATA ports associated with this host 5708 * 5709 * Allocate and initialize basic ATA host resources. LLD calls 5710 * this function to allocate a host, initializes it fully and 5711 * attaches it using ata_host_register(). 5712 * 5713 * @max_ports ports are allocated and host->n_ports is 5714 * initialized to @max_ports. The caller is allowed to decrease 5715 * host->n_ports before calling ata_host_register(). The unused 5716 * ports will be automatically freed on registration. 5717 * 5718 * RETURNS: 5719 * Allocate ATA host on success, NULL on failure. 
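 *
 * Illustrative LLD probe sketch (hypothetical driver code; many real
 * drivers use ata_host_activate() instead of open-coding the
 * start/register steps, and my_sht is a made-up name):
 *
 *	host = ata_host_alloc(&pdev->dev, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	... set up host->ports[i]->ops, transfer masks, iomap ...
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	rc = ata_host_register(host, &my_sht);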
5720 * 5721 * LOCKING: 5722 * Inherited from calling layer (may sleep). 5723 */ 5724 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 5725 { 5726 struct ata_host *host; 5727 size_t sz; 5728 int i; 5729 5730 DPRINTK("ENTER\n"); 5731 5732 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 5733 return NULL; 5734 5735 /* alloc a container for our list of ATA ports (buses) */ 5736 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 5737 /* alloc a container for our list of ATA ports (buses) */ 5738 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 5739 if (!host) 5740 goto err_out; 5741 5742 devres_add(dev, host); 5743 dev_set_drvdata(dev, host); 5744 5745 spin_lock_init(&host->lock); 5746 mutex_init(&host->eh_mutex); 5747 host->dev = dev; 5748 host->n_ports = max_ports; 5749 5750 /* allocate ports bound to this host */ 5751 for (i = 0; i < max_ports; i++) { 5752 struct ata_port *ap; 5753 5754 ap = ata_port_alloc(host); 5755 if (!ap) 5756 goto err_out; 5757 5758 ap->port_no = i; 5759 host->ports[i] = ap; 5760 } 5761 5762 devres_remove_group(dev, NULL); 5763 return host; 5764 5765 err_out: 5766 devres_release_group(dev, NULL); 5767 return NULL; 5768 } 5769 5770 /** 5771 * ata_host_alloc_pinfo - alloc host and init with port_info array 5772 * @dev: generic device this host is associated with 5773 * @ppi: array of ATA port_info to initialize host with 5774 * @n_ports: number of ATA ports attached to this host 5775 * 5776 * Allocate ATA host and initialize with info from @ppi. If NULL 5777 * terminated, @ppi may contain fewer entries than @n_ports. The 5778 * last entry will be used for the remaining ports. 5779 * 5780 * RETURNS: 5781 * Allocate ATA host on success, NULL on failure. 5782 * 5783 * LOCKING: 5784 * Inherited from calling layer (may sleep). 5785 */ 5786 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 5787 const struct ata_port_info * const * ppi, 5788 int n_ports) 5789 { 5790 const struct ata_port_info *pi; 5791 struct ata_host *host; 5792 int i, j; 5793 5794 host = ata_host_alloc(dev, n_ports); 5795 if (!host) 5796 return NULL; 5797 5798 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 5799 struct ata_port *ap = host->ports[i]; 5800 5801 if (ppi[j]) 5802 pi = ppi[j++]; 5803 5804 ap->pio_mask = pi->pio_mask; 5805 ap->mwdma_mask = pi->mwdma_mask; 5806 ap->udma_mask = pi->udma_mask; 5807 ap->flags |= pi->flags; 5808 ap->link.flags |= pi->link_flags; 5809 ap->ops = pi->port_ops; 5810 5811 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 5812 host->ops = pi->port_ops; 5813 } 5814 5815 return host; 5816 } 5817 5818 /** 5819 * ata_slave_link_init - initialize slave link 5820 * @ap: port to initialize slave link for 5821 * 5822 * Create and initialize slave link for @ap. This enables slave 5823 * link handling on the port. 5824 * 5825 * In libata, a port contains links and a link contains devices. 5826 * There is single host link but if a PMP is attached to it, 5827 * there can be multiple fan-out links. On SATA, there's usually 5828 * a single device connected to a link but PATA and SATA 5829 * controllers emulating TF based interface can have two - master 5830 * and slave. 5831 * 5832 * However, there are a few controllers which don't fit into this 5833 * abstraction too well - SATA controllers which emulate TF 5834 * interface with both master and slave devices but also have 5835 * separate SCR register sets for each device. These controllers 5836 * need separate links for physical link handling 5837 * (e.g. 
onlineness, link speed) but should be treated like a 5838 * traditional M/S controller for everything else (e.g. command 5839 * issue, softreset). 5840 * 5841 * slave_link is libata's way of handling this class of 5842 * controllers without impacting core layer too much. For 5843 * anything other than physical link handling, the default host 5844 * link is used for both master and slave. For physical link 5845 * handling, separate @ap->slave_link is used. All dirty details 5846 * are implemented inside libata core layer. From LLD's POV, the 5847 * only difference is that prereset, hardreset and postreset are 5848 * called once more for the slave link, so the reset sequence 5849 * looks like the following. 5850 * 5851 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> 5852 * softreset(M) -> postreset(M) -> postreset(S) 5853 * 5854 * Note that softreset is called only for the master. Softreset 5855 * resets both M/S by definition, so SRST on master should handle 5856 * both (the standard method will work just fine). 5857 * 5858 * LOCKING: 5859 * Should be called before host is registered. 5860 * 5861 * RETURNS: 5862 * 0 on success, -errno on failure. 5863 */ 5864 int ata_slave_link_init(struct ata_port *ap) 5865 { 5866 struct ata_link *link; 5867 5868 WARN_ON(ap->slave_link); 5869 WARN_ON(ap->flags & ATA_FLAG_PMP); 5870 5871 link = kzalloc(sizeof(*link), GFP_KERNEL); 5872 if (!link) 5873 return -ENOMEM; 5874 5875 ata_link_init(ap, link, 1); 5876 ap->slave_link = link; 5877 return 0; 5878 } 5879 5880 static void ata_host_stop(struct device *gendev, void *res) 5881 { 5882 struct ata_host *host = dev_get_drvdata(gendev); 5883 int i; 5884 5885 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 5886 5887 for (i = 0; i < host->n_ports; i++) { 5888 struct ata_port *ap = host->ports[i]; 5889 5890 if (ap->ops->port_stop) 5891 ap->ops->port_stop(ap); 5892 } 5893 5894 if (host->ops->host_stop) 5895 host->ops->host_stop(host); 5896 } 5897 5898 /** 5899 * ata_finalize_port_ops - finalize ata_port_operations 5900 * @ops: ata_port_operations to finalize 5901 * 5902 * An ata_port_operations can inherit from another ops and that 5903 * ops can again inherit from another. This can go on as many 5904 * times as necessary as long as there is no loop in the 5905 * inheritance chain. 5906 * 5907 * Ops tables are finalized when the host is started. NULL or 5908 * unspecified entries are inherited from the closest ancestor 5909 * which has the method and the entry is populated with it. 5910 * After finalization, the ops table directly points to all the 5911 * methods and ->inherits is no longer necessary and cleared. 5912 * 5913 * Using ATA_OP_NULL, inheriting ops can force a method to NULL. 5914 * 5915 * LOCKING: 5916 * None.
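 *
 * Illustrative example (added; my_ops and my_hardreset are made-up
 * names): given
 *
 *	struct ata_port_operations my_ops = {
 *		.inherits	= &ata_base_port_ops,
 *		.hardreset	= my_hardreset,
 *	};
 *
 * finalization fills every hook my_ops leaves NULL from the closest
 * ancestor that defines it, keeps my_hardreset as the override and
 * then clears ->inherits.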
5917 */ 5918 static void ata_finalize_port_ops(struct ata_port_operations *ops) 5919 { 5920 static DEFINE_SPINLOCK(lock); 5921 const struct ata_port_operations *cur; 5922 void **begin = (void **)ops; 5923 void **end = (void **)&ops->inherits; 5924 void **pp; 5925 5926 if (!ops || !ops->inherits) 5927 return; 5928 5929 spin_lock(&lock); 5930 5931 for (cur = ops->inherits; cur; cur = cur->inherits) { 5932 void **inherit = (void **)cur; 5933 5934 for (pp = begin; pp < end; pp++, inherit++) 5935 if (!*pp) 5936 *pp = *inherit; 5937 } 5938 5939 for (pp = begin; pp < end; pp++) 5940 if (IS_ERR(*pp)) 5941 *pp = NULL; 5942 5943 ops->inherits = NULL; 5944 5945 spin_unlock(&lock); 5946 } 5947 5948 /** 5949 * ata_host_start - start and freeze ports of an ATA host 5950 * @host: ATA host to start ports for 5951 * 5952 * Start and then freeze ports of @host. Started status is 5953 * recorded in host->flags, so this function can be called 5954 * multiple times. Ports are guaranteed to get started only 5955 * once. If host->ops isn't initialized yet, it's set to the 5956 * first non-dummy port ops. 5957 * 5958 * LOCKING: 5959 * Inherited from calling layer (may sleep). 5960 * 5961 * RETURNS: 5962 * 0 if all ports are started successfully, -errno otherwise. 5963 */ 5964 int ata_host_start(struct ata_host *host) 5965 { 5966 int have_stop = 0; 5967 void *start_dr = NULL; 5968 int i, rc; 5969 5970 if (host->flags & ATA_HOST_STARTED) 5971 return 0; 5972 5973 ata_finalize_port_ops(host->ops); 5974 5975 for (i = 0; i < host->n_ports; i++) { 5976 struct ata_port *ap = host->ports[i]; 5977 5978 ata_finalize_port_ops(ap->ops); 5979 5980 if (!host->ops && !ata_port_is_dummy(ap)) 5981 host->ops = ap->ops; 5982 5983 if (ap->ops->port_stop) 5984 have_stop = 1; 5985 } 5986 5987 if (host->ops->host_stop) 5988 have_stop = 1; 5989 5990 if (have_stop) { 5991 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 5992 if (!start_dr) 5993 return -ENOMEM; 5994 } 5995 5996 for (i = 0; i < host->n_ports; i++) { 5997 struct ata_port *ap = host->ports[i]; 5998 5999 if (ap->ops->port_start) { 6000 rc = ap->ops->port_start(ap); 6001 if (rc) { 6002 if (rc != -ENODEV) 6003 dev_err(host->dev, 6004 "failed to start port %d (errno=%d)\n", 6005 i, rc); 6006 goto err_out; 6007 } 6008 } 6009 ata_eh_freeze_port(ap); 6010 } 6011 6012 if (start_dr) 6013 devres_add(host->dev, start_dr); 6014 host->flags |= ATA_HOST_STARTED; 6015 return 0; 6016 6017 err_out: 6018 while (--i >= 0) { 6019 struct ata_port *ap = host->ports[i]; 6020 6021 if (ap->ops->port_stop) 6022 ap->ops->port_stop(ap); 6023 } 6024 devres_free(start_dr); 6025 return rc; 6026 } 6027 6028 /** 6029 * ata_host_init - Initialize a host struct for sas (ipr, libsas) 6030 * @host: host to initialize 6031 * @dev: device host is attached to 6032 * @ops: port_ops 6033 * 6034 */ 6035 void ata_host_init(struct ata_host *host, struct device *dev, 6036 struct ata_port_operations *ops) 6037 { 6038 spin_lock_init(&host->lock); 6039 mutex_init(&host->eh_mutex); 6040 host->dev = dev; 6041 host->ops = ops; 6042 } 6043 6044 void __ata_port_probe(struct ata_port *ap) 6045 { 6046 struct ata_eh_info *ehi = &ap->link.eh_info; 6047 unsigned long flags; 6048 6049 /* kick EH for boot probing */ 6050 spin_lock_irqsave(ap->lock, flags); 6051 6052 ehi->probe_mask |= ATA_ALL_DEVICES; 6053 ehi->action |= ATA_EH_RESET; 6054 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6055 6056 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6057 ap->pflags |= ATA_PFLAG_LOADING; 6058 ata_port_schedule_eh(ap); 6059 6060
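/* EH has been scheduled above; releasing ap->lock lets it begin the probe */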
spin_unlock_irqrestore(ap->lock, flags); 6061 } 6062 6063 int ata_port_probe(struct ata_port *ap) 6064 { 6065 int rc = 0; 6066 6067 if (ap->ops->error_handler) { 6068 __ata_port_probe(ap); 6069 ata_port_wait_eh(ap); 6070 } else { 6071 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6072 rc = ata_bus_probe(ap); 6073 DPRINTK("ata%u: bus probe end\n", ap->print_id); 6074 } 6075 return rc; 6076 } 6077 6078 6079 static void async_port_probe(void *data, async_cookie_t cookie) 6080 { 6081 struct ata_port *ap = data; 6082 6083 /* 6084 * If we're not allowed to scan this host in parallel, 6085 * we need to wait until all previous scans have completed 6086 * before going further. 6087 * Jeff Garzik says this is only within a controller, so we 6088 * don't need to wait for port 0, only for later ports. 6089 */ 6090 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 6091 async_synchronize_cookie(cookie); 6092 6093 (void)ata_port_probe(ap); 6094 6095 /* in order to keep device order, we need to synchronize at this point */ 6096 async_synchronize_cookie(cookie); 6097 6098 ata_scsi_scan_host(ap, 1); 6099 } 6100 6101 /** 6102 * ata_host_register - register initialized ATA host 6103 * @host: ATA host to register 6104 * @sht: template for SCSI host 6105 * 6106 * Register an initialized ATA host. @host is allocated using 6107 * ata_host_alloc() and fully initialized by LLD. This function 6108 * starts ports, registers @host with ATA and SCSI layers and 6109 * probes registered devices. 6110 * 6111 * LOCKING: 6112 * Inherited from calling layer (may sleep). 6113 * 6114 * RETURNS: 6115 * 0 on success, -errno otherwise. 6116 */ 6117 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6118 { 6119 int i, rc; 6120 6121 /* host must have been started */ 6122 if (!(host->flags & ATA_HOST_STARTED)) { 6123 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6124 WARN_ON(1); 6125 return -EINVAL; 6126 } 6127 6128 /* Blow away unused ports. This happens when LLD can't 6129 * determine the exact number of ports to allocate at 6130 * allocation time. 6131 */ 6132 for (i = host->n_ports; host->ports[i]; i++) 6133 kfree(host->ports[i]); 6134 6135 /* give ports names and add SCSI hosts */ 6136 for (i = 0; i < host->n_ports; i++) { 6137 host->ports[i]->print_id = atomic_inc_return(&ata_print_id); 6138 host->ports[i]->local_port_no = i + 1; 6139 } 6140 6141 /* Create associated sysfs transport objects */ 6142 for (i = 0; i < host->n_ports; i++) { 6143 rc = ata_tport_add(host->dev, host->ports[i]); 6144 if (rc) { 6145 goto err_tadd; 6146 } 6147 } 6148 6149 rc = ata_scsi_add_hosts(host, sht); 6150 if (rc) 6151 goto err_tadd; 6152 6153 ata_acpi_hotplug_init(host); 6154 6155 /* set cable, sata_spd_limit and report */ 6156 for (i = 0; i < host->n_ports; i++) { 6157 struct ata_port *ap = host->ports[i]; 6158 unsigned long xfer_mask; 6159 6160 /* set SATA cable type if still unset */ 6161 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6162 ap->cbl = ATA_CBL_SATA; 6163 6164 /* init sata_spd_limit to the current value */ 6165 sata_link_init_spd(&ap->link); 6166 if (ap->slave_link) 6167 sata_link_init_spd(ap->slave_link); 6168 6169 /* print per-port info to dmesg */ 6170 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6171 ap->udma_mask); 6172 6173 if (!ata_port_is_dummy(ap)) { 6174 ata_port_info(ap, "%cATA max %s %s\n", 6175 (ap->flags & ATA_FLAG_SATA) ?
'S' : 'P', 6176 ata_mode_string(xfer_mask), 6177 ap->link.eh_info.desc); 6178 ata_ehi_clear_desc(&ap->link.eh_info); 6179 } else 6180 ata_port_info(ap, "DUMMY\n"); 6181 } 6182 6183 /* perform each probe asynchronously */ 6184 for (i = 0; i < host->n_ports; i++) { 6185 struct ata_port *ap = host->ports[i]; 6186 async_schedule(async_port_probe, ap); 6187 } 6188 6189 return 0; 6190 6191 err_tadd: 6192 while (--i >= 0) { 6193 ata_tport_delete(host->ports[i]); 6194 } 6195 return rc; 6196 6197 } 6198 6199 /** 6200 * ata_host_activate - start host, request IRQ and register it 6201 * @host: target ATA host 6202 * @irq: IRQ to request 6203 * @irq_handler: irq_handler used when requesting IRQ 6204 * @irq_flags: irq_flags used when requesting IRQ 6205 * @sht: scsi_host_template to use when registering the host 6206 * 6207 * After allocating an ATA host and initializing it, most libata 6208 * LLDs perform three steps to activate the host - start host, 6209 * request IRQ and register it. This helper takes the necessary 6210 * arguments and performs the three steps in one go. An illustrative bring-up sketch appears at the end of this file. 6211 * 6212 * An invalid IRQ skips the IRQ registration and expects the host to 6213 * have set polling mode on the port. In this case, @irq_handler 6214 * should be NULL. 6215 * 6216 * LOCKING: 6217 * Inherited from calling layer (may sleep). 6218 * 6219 * RETURNS: 6220 * 0 on success, -errno otherwise. 6221 */ 6222 int ata_host_activate(struct ata_host *host, int irq, 6223 irq_handler_t irq_handler, unsigned long irq_flags, 6224 struct scsi_host_template *sht) 6225 { 6226 int i, rc; 6227 6228 rc = ata_host_start(host); 6229 if (rc) 6230 return rc; 6231 6232 /* Special case for polling mode */ 6233 if (!irq) { 6234 WARN_ON(irq_handler); 6235 return ata_host_register(host, sht); 6236 } 6237 6238 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6239 dev_driver_string(host->dev), host); 6240 if (rc) 6241 return rc; 6242 6243 for (i = 0; i < host->n_ports; i++) 6244 ata_port_desc(host->ports[i], "irq %d", irq); 6245 6246 rc = ata_host_register(host, sht); 6247 /* if failed, just free the IRQ and leave ports alone */ 6248 if (rc) 6249 devm_free_irq(host->dev, irq, host); 6250 6251 return rc; 6252 } 6253 6254 /** 6255 * ata_port_detach - Detach ATA port in preparation of device removal 6256 * @ap: ATA port to be detached 6257 * 6258 * Detach all ATA devices and the associated SCSI devices of @ap; 6259 * then, remove the associated SCSI host. @ap is guaranteed to 6260 * be quiescent on return from this function. 6261 * 6262 * LOCKING: 6263 * Kernel thread context (may sleep).
6264 */ 6265 static void ata_port_detach(struct ata_port *ap) 6266 { 6267 unsigned long flags; 6268 6269 if (!ap->ops->error_handler) 6270 goto skip_eh; 6271 6272 /* tell EH we're leaving & flush EH */ 6273 spin_lock_irqsave(ap->lock, flags); 6274 ap->pflags |= ATA_PFLAG_UNLOADING; 6275 ata_port_schedule_eh(ap); 6276 spin_unlock_irqrestore(ap->lock, flags); 6277 6278 /* wait till EH commits suicide */ 6279 ata_port_wait_eh(ap); 6280 6281 /* it better be dead now */ 6282 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6283 6284 cancel_delayed_work_sync(&ap->hotplug_task); 6285 6286 skip_eh: 6287 if (ap->pmp_link) { 6288 int i; 6289 for (i = 0; i < SATA_PMP_MAX_PORTS; i++) 6290 ata_tlink_delete(&ap->pmp_link[i]); 6291 } 6292 ata_tport_delete(ap); 6293 6294 /* remove the associated SCSI host */ 6295 scsi_remove_host(ap->scsi_host); 6296 } 6297 6298 /** 6299 * ata_host_detach - Detach all ports of an ATA host 6300 * @host: Host to detach 6301 * 6302 * Detach all ports of @host. 6303 * 6304 * LOCKING: 6305 * Kernel thread context (may sleep). 6306 */ 6307 void ata_host_detach(struct ata_host *host) 6308 { 6309 int i; 6310 6311 for (i = 0; i < host->n_ports; i++) 6312 ata_port_detach(host->ports[i]); 6313 6314 /* the host is dead now, dissociate ACPI */ 6315 ata_acpi_dissociate(host); 6316 } 6317 6318 #ifdef CONFIG_PCI 6319 6320 /** 6321 * ata_pci_remove_one - PCI layer callback for device removal 6322 * @pdev: PCI device that was removed 6323 * 6324 * PCI layer indicates to libata via this hook that hot-unplug or 6325 * module unload event has occurred. Detach all ports. Resource 6326 * release is handled via devres. 6327 * 6328 * LOCKING: 6329 * Inherited from PCI layer (may sleep). 6330 */ 6331 void ata_pci_remove_one(struct pci_dev *pdev) 6332 { 6333 struct ata_host *host = pci_get_drvdata(pdev); 6334 6335 ata_host_detach(host); 6336 } 6337 6338 /* move to PCI subsystem */ 6339 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6340 { 6341 unsigned long tmp = 0; 6342 6343 switch (bits->width) { 6344 case 1: { 6345 u8 tmp8 = 0; 6346 pci_read_config_byte(pdev, bits->reg, &tmp8); 6347 tmp = tmp8; 6348 break; 6349 } 6350 case 2: { 6351 u16 tmp16 = 0; 6352 pci_read_config_word(pdev, bits->reg, &tmp16); 6353 tmp = tmp16; 6354 break; 6355 } 6356 case 4: { 6357 u32 tmp32 = 0; 6358 pci_read_config_dword(pdev, bits->reg, &tmp32); 6359 tmp = tmp32; 6360 break; 6361 } 6362 6363 default: 6364 return -EINVAL; 6365 } 6366 6367 tmp &= bits->mask; 6368 6369 return (tmp == bits->val) ? 
1 : 0; 6370 } 6371 6372 #ifdef CONFIG_PM 6373 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6374 { 6375 pci_save_state(pdev); 6376 pci_disable_device(pdev); 6377 6378 if (mesg.event & PM_EVENT_SLEEP) 6379 pci_set_power_state(pdev, PCI_D3hot); 6380 } 6381 6382 int ata_pci_device_do_resume(struct pci_dev *pdev) 6383 { 6384 int rc; 6385 6386 pci_set_power_state(pdev, PCI_D0); 6387 pci_restore_state(pdev); 6388 6389 rc = pcim_enable_device(pdev); 6390 if (rc) { 6391 dev_err(&pdev->dev, 6392 "failed to enable device after resume (%d)\n", rc); 6393 return rc; 6394 } 6395 6396 pci_set_master(pdev); 6397 return 0; 6398 } 6399 6400 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6401 { 6402 struct ata_host *host = pci_get_drvdata(pdev); 6403 int rc = 0; 6404 6405 rc = ata_host_suspend(host, mesg); 6406 if (rc) 6407 return rc; 6408 6409 ata_pci_device_do_suspend(pdev, mesg); 6410 6411 return 0; 6412 } 6413 6414 int ata_pci_device_resume(struct pci_dev *pdev) 6415 { 6416 struct ata_host *host = pci_get_drvdata(pdev); 6417 int rc; 6418 6419 rc = ata_pci_device_do_resume(pdev); 6420 if (rc == 0) 6421 ata_host_resume(host); 6422 return rc; 6423 } 6424 #endif /* CONFIG_PM */ 6425 6426 #endif /* CONFIG_PCI */ 6427 6428 /** 6429 * ata_platform_remove_one - Platform layer callback for device removal 6430 * @pdev: Platform device that was removed 6431 * 6432 * Platform layer indicates to libata via this hook that hot-unplug or 6433 * module unload event has occurred. Detach all ports. Resource 6434 * release is handled via devres. 6435 * 6436 * LOCKING: 6437 * Inherited from platform layer (may sleep). 6438 */ 6439 int ata_platform_remove_one(struct platform_device *pdev) 6440 { 6441 struct ata_host *host = platform_get_drvdata(pdev); 6442 6443 ata_host_detach(host); 6444 6445 return 0; 6446 } 6447 6448 static int __init ata_parse_force_one(char **cur, 6449 struct ata_force_ent *force_ent, 6450 const char **reason) 6451 { 6452 /* FIXME: Currently, there's no way to tag init const data and 6453 * using __initdata causes build failure on some versions of 6454 * gcc. Once __initdataconst is implemented, add const to the 6455 * following structure. 
6456 */ 6457 static struct ata_force_param force_tbl[] __initdata = { 6458 { "40c", .cbl = ATA_CBL_PATA40 }, 6459 { "80c", .cbl = ATA_CBL_PATA80 }, 6460 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 6461 { "unk", .cbl = ATA_CBL_PATA_UNK }, 6462 { "ign", .cbl = ATA_CBL_PATA_IGN }, 6463 { "sata", .cbl = ATA_CBL_SATA }, 6464 { "1.5Gbps", .spd_limit = 1 }, 6465 { "3.0Gbps", .spd_limit = 2 }, 6466 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6467 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6468 { "dump_id", .horkage_on = ATA_HORKAGE_DUMP_ID }, 6469 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6470 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6471 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6472 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 6473 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 6474 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 6475 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 6476 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 6477 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 6478 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 6479 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 6480 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 6481 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6482 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6483 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6484 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6485 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6486 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6487 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6488 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6489 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6490 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6491 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6492 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6493 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6494 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6495 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6496 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6497 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6498 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6499 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6500 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6501 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6502 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 6503 { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 6504 { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 6505 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6506 { "rstonce", .lflags = ATA_LFLAG_RST_ONCE }, 6507 { "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR }, 6508 }; 6509 char *start = *cur, *p = *cur; 6510 char *id, *val, *endp; 6511 const struct ata_force_param *match_fp = NULL; 6512 int nr_matches = 0, i; 6513 6514 /* find where this param ends and update *cur */ 6515 while (*p != '\0' && *p != ',') 6516 p++; 6517 6518 if (*p == '\0') 6519 *cur = p; 6520 else 6521 *cur = p + 1; 6522 6523 *p = '\0'; 6524 6525 /* parse */ 6526 p = strchr(start, ':'); 6527 if (!p) { 6528 val = strstrip(start); 6529 goto parse_val; 6530 } 6531 *p = '\0'; 6532 6533 id = strstrip(start); 6534 val = strstrip(p + 1); 6535 6536 /* parse id */ 6537 p = strchr(id, '.'); 6538 if (p) { 6539 *p++ = '\0'; 6540 force_ent->device = simple_strtoul(p, &endp, 10); 6541 if (p == endp || *endp != '\0') { 6542 *reason 
= "invalid device"; 6543 return -EINVAL; 6544 } 6545 } 6546 6547 force_ent->port = simple_strtoul(id, &endp, 10); 6548 if (p == endp || *endp != '\0') { 6549 *reason = "invalid port/link"; 6550 return -EINVAL; 6551 } 6552 6553 parse_val: 6554 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 6555 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 6556 const struct ata_force_param *fp = &force_tbl[i]; 6557 6558 if (strncasecmp(val, fp->name, strlen(val))) 6559 continue; 6560 6561 nr_matches++; 6562 match_fp = fp; 6563 6564 if (strcasecmp(val, fp->name) == 0) { 6565 nr_matches = 1; 6566 break; 6567 } 6568 } 6569 6570 if (!nr_matches) { 6571 *reason = "unknown value"; 6572 return -EINVAL; 6573 } 6574 if (nr_matches > 1) { 6575 *reason = "ambigious value"; 6576 return -EINVAL; 6577 } 6578 6579 force_ent->param = *match_fp; 6580 6581 return 0; 6582 } 6583 6584 static void __init ata_parse_force_param(void) 6585 { 6586 int idx = 0, size = 1; 6587 int last_port = -1, last_device = -1; 6588 char *p, *cur, *next; 6589 6590 /* calculate maximum number of params and allocate force_tbl */ 6591 for (p = ata_force_param_buf; *p; p++) 6592 if (*p == ',') 6593 size++; 6594 6595 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); 6596 if (!ata_force_tbl) { 6597 printk(KERN_WARNING "ata: failed to extend force table, " 6598 "libata.force ignored\n"); 6599 return; 6600 } 6601 6602 /* parse and populate the table */ 6603 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 6604 const char *reason = ""; 6605 struct ata_force_ent te = { .port = -1, .device = -1 }; 6606 6607 next = cur; 6608 if (ata_parse_force_one(&next, &te, &reason)) { 6609 printk(KERN_WARNING "ata: failed to parse force " 6610 "parameter \"%s\" (%s)\n", 6611 cur, reason); 6612 continue; 6613 } 6614 6615 if (te.port == -1) { 6616 te.port = last_port; 6617 te.device = last_device; 6618 } 6619 6620 ata_force_tbl[idx++] = te; 6621 6622 last_port = te.port; 6623 last_device = te.device; 6624 } 6625 6626 ata_force_tbl_size = idx; 6627 } 6628 6629 static int __init ata_init(void) 6630 { 6631 int rc; 6632 6633 ata_parse_force_param(); 6634 6635 ata_acpi_register(); 6636 6637 rc = ata_sff_init(); 6638 if (rc) { 6639 kfree(ata_force_tbl); 6640 return rc; 6641 } 6642 6643 libata_transport_init(); 6644 ata_scsi_transport_template = ata_attach_transport(); 6645 if (!ata_scsi_transport_template) { 6646 ata_sff_exit(); 6647 rc = -ENOMEM; 6648 goto err_out; 6649 } 6650 6651 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6652 return 0; 6653 6654 err_out: 6655 return rc; 6656 } 6657 6658 static void __exit ata_exit(void) 6659 { 6660 ata_release_transport(ata_scsi_transport_template); 6661 libata_transport_exit(); 6662 ata_sff_exit(); 6663 ata_acpi_unregister(); 6664 kfree(ata_force_tbl); 6665 } 6666 6667 subsys_initcall(ata_init); 6668 module_exit(ata_exit); 6669 6670 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); 6671 6672 int ata_ratelimit(void) 6673 { 6674 return __ratelimit(&ratelimit); 6675 } 6676 6677 /** 6678 * ata_msleep - ATA EH owner aware msleep 6679 * @ap: ATA port to attribute the sleep to 6680 * @msecs: duration to sleep in milliseconds 6681 * 6682 * Sleeps @msecs. If the current task is owner of @ap's EH, the 6683 * ownership is released before going to sleep and reacquired 6684 * after the sleep is complete. IOW, other ports sharing the 6685 * @ap->host will be allowed to own the EH while this task is 6686 * sleeping. 6687 * 6688 * LOCKING: 6689 * Might sleep. 
6690 */ 6691 void ata_msleep(struct ata_port *ap, unsigned int msecs) 6692 { 6693 bool owns_eh = ap && ap->host->eh_owner == current; 6694 6695 if (owns_eh) 6696 ata_eh_release(ap); 6697 6698 msleep(msecs); 6699 6700 if (owns_eh) 6701 ata_eh_acquire(ap); 6702 } 6703 6704 /** 6705 * ata_wait_register - wait until register value changes 6706 * @ap: ATA port to wait register for, can be NULL 6707 * @reg: IO-mapped register 6708 * @mask: Mask to apply to read register value 6709 * @val: Wait condition 6710 * @interval: polling interval in milliseconds 6711 * @timeout: timeout in milliseconds 6712 * 6713 * Waiting for some bits of a register to change is a common 6714 * operation for ATA controllers. This function reads 32bit LE 6715 * IO-mapped register @reg and tests for the following condition. 6716 * 6717 * (*@reg & @mask) != @val 6718 * 6719 * If the condition is met, it returns; otherwise, the process is 6720 * repeated after @interval until timeout. An illustrative call appears at the end of this file. 6721 * 6722 * LOCKING: 6723 * Kernel thread context (may sleep) 6724 * 6725 * RETURNS: 6726 * The final register value. 6727 */ 6728 u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val, 6729 unsigned long interval, unsigned long timeout) 6730 { 6731 unsigned long deadline; 6732 u32 tmp; 6733 6734 tmp = ioread32(reg); 6735 6736 /* Calculate timeout _after_ the first read to make sure 6737 * preceding writes reach the controller before starting to 6738 * eat away the timeout. 6739 */ 6740 deadline = ata_deadline(jiffies, timeout); 6741 6742 while ((tmp & mask) == val && time_before(jiffies, deadline)) { 6743 ata_msleep(ap, interval); 6744 tmp = ioread32(reg); 6745 } 6746 6747 return tmp; 6748 } 6749 6750 /* 6751 * Dummy port_ops 6752 */ 6753 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 6754 { 6755 return AC_ERR_SYSTEM; 6756 } 6757 6758 static void ata_dummy_error_handler(struct ata_port *ap) 6759 { 6760 /* truly dummy */ 6761 } 6762 6763 struct ata_port_operations ata_dummy_port_ops = { 6764 .qc_prep = ata_noop_qc_prep, 6765 .qc_issue = ata_dummy_qc_issue, 6766 .error_handler = ata_dummy_error_handler, 6767 .sched_eh = ata_std_sched_eh, 6768 .end_eh = ata_std_end_eh, 6769 }; 6770 6771 const struct ata_port_info ata_dummy_port_info = { 6772 .port_ops = &ata_dummy_port_ops, 6773 }; 6774 6775 /* 6776 * Utility print functions 6777 */ 6778 int ata_port_printk(const struct ata_port *ap, const char *level, 6779 const char *fmt, ...) 6780 { 6781 struct va_format vaf; 6782 va_list args; 6783 int r; 6784 6785 va_start(args, fmt); 6786 6787 vaf.fmt = fmt; 6788 vaf.va = &args; 6789 6790 r = printk("%sata%u: %pV", level, ap->print_id, &vaf); 6791 6792 va_end(args); 6793 6794 return r; 6795 } 6796 EXPORT_SYMBOL(ata_port_printk); 6797 6798 int ata_link_printk(const struct ata_link *link, const char *level, 6799 const char *fmt, ...) 6800 { 6801 struct va_format vaf; 6802 va_list args; 6803 int r; 6804 6805 va_start(args, fmt); 6806 6807 vaf.fmt = fmt; 6808 vaf.va = &args; 6809 6810 if (sata_pmp_attached(link->ap) || link->ap->slave_link) 6811 r = printk("%sata%u.%02u: %pV", 6812 level, link->ap->print_id, link->pmp, &vaf); 6813 else 6814 r = printk("%sata%u: %pV", 6815 level, link->ap->print_id, &vaf); 6816 6817 va_end(args); 6818 6819 return r; 6820 } 6821 EXPORT_SYMBOL(ata_link_printk); 6822 6823 int ata_dev_printk(const struct ata_device *dev, const char *level, 6824 const char *fmt, ...)
6825 { 6826 struct va_format vaf; 6827 va_list args; 6828 int r; 6829 6830 va_start(args, fmt); 6831 6832 vaf.fmt = fmt; 6833 vaf.va = &args; 6834 6835 r = printk("%sata%u.%02u: %pV", 6836 level, dev->link->ap->print_id, dev->link->pmp + dev->devno, 6837 &vaf); 6838 6839 va_end(args); 6840 6841 return r; 6842 } 6843 EXPORT_SYMBOL(ata_dev_printk); 6844 6845 void ata_print_version(const struct device *dev, const char *version) 6846 { 6847 dev_printk(KERN_DEBUG, dev, "version %s\n", version); 6848 } 6849 EXPORT_SYMBOL(ata_print_version); 6850 6851 /* 6852 * libata is essentially a library of internal helper functions for 6853 * low-level ATA host controller drivers. As such, the API/ABI is 6854 * likely to change as new drivers are added and updated. 6855 * Do not depend on ABI/API stability. 6856 */ 6857 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 6858 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 6859 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 6860 EXPORT_SYMBOL_GPL(ata_base_port_ops); 6861 EXPORT_SYMBOL_GPL(sata_port_ops); 6862 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 6863 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 6864 EXPORT_SYMBOL_GPL(ata_link_next); 6865 EXPORT_SYMBOL_GPL(ata_dev_next); 6866 EXPORT_SYMBOL_GPL(ata_std_bios_param); 6867 EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity); 6868 EXPORT_SYMBOL_GPL(ata_host_init); 6869 EXPORT_SYMBOL_GPL(ata_host_alloc); 6870 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 6871 EXPORT_SYMBOL_GPL(ata_slave_link_init); 6872 EXPORT_SYMBOL_GPL(ata_host_start); 6873 EXPORT_SYMBOL_GPL(ata_host_register); 6874 EXPORT_SYMBOL_GPL(ata_host_activate); 6875 EXPORT_SYMBOL_GPL(ata_host_detach); 6876 EXPORT_SYMBOL_GPL(ata_sg_init); 6877 EXPORT_SYMBOL_GPL(ata_qc_complete); 6878 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 6879 EXPORT_SYMBOL_GPL(atapi_cmd_type); 6880 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 6881 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 6882 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 6883 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 6884 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 6885 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 6886 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 6887 EXPORT_SYMBOL_GPL(ata_mode_string); 6888 EXPORT_SYMBOL_GPL(ata_id_xfermask); 6889 EXPORT_SYMBOL_GPL(ata_do_set_mode); 6890 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 6891 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 6892 EXPORT_SYMBOL_GPL(ata_dev_disable); 6893 EXPORT_SYMBOL_GPL(sata_set_spd); 6894 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 6895 EXPORT_SYMBOL_GPL(sata_link_debounce); 6896 EXPORT_SYMBOL_GPL(sata_link_resume); 6897 EXPORT_SYMBOL_GPL(sata_link_scr_lpm); 6898 EXPORT_SYMBOL_GPL(ata_std_prereset); 6899 EXPORT_SYMBOL_GPL(sata_link_hardreset); 6900 EXPORT_SYMBOL_GPL(sata_std_hardreset); 6901 EXPORT_SYMBOL_GPL(ata_std_postreset); 6902 EXPORT_SYMBOL_GPL(ata_dev_classify); 6903 EXPORT_SYMBOL_GPL(ata_dev_pair); 6904 EXPORT_SYMBOL_GPL(ata_ratelimit); 6905 EXPORT_SYMBOL_GPL(ata_msleep); 6906 EXPORT_SYMBOL_GPL(ata_wait_register); 6907 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6908 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 6909 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 6910 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 6911 EXPORT_SYMBOL_GPL(__ata_change_queue_depth); 6912 EXPORT_SYMBOL_GPL(sata_scr_valid); 6913 EXPORT_SYMBOL_GPL(sata_scr_read); 6914 EXPORT_SYMBOL_GPL(sata_scr_write); 6915 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 6916 EXPORT_SYMBOL_GPL(ata_link_online); 6917 EXPORT_SYMBOL_GPL(ata_link_offline); 6918 #ifdef CONFIG_PM 6919 EXPORT_SYMBOL_GPL(ata_host_suspend); 6920 EXPORT_SYMBOL_GPL(ata_host_resume); 6921 #endif /* 
CONFIG_PM */ 6922 EXPORT_SYMBOL_GPL(ata_id_string); 6923 EXPORT_SYMBOL_GPL(ata_id_c_string); 6924 EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 6925 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6926 6927 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6928 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 6929 EXPORT_SYMBOL_GPL(ata_timing_compute); 6930 EXPORT_SYMBOL_GPL(ata_timing_merge); 6931 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 6932 6933 #ifdef CONFIG_PCI 6934 EXPORT_SYMBOL_GPL(pci_test_config_bits); 6935 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 6936 #ifdef CONFIG_PM 6937 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 6938 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 6939 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 6940 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 6941 #endif /* CONFIG_PM */ 6942 #endif /* CONFIG_PCI */ 6943 6944 EXPORT_SYMBOL_GPL(ata_platform_remove_one); 6945 6946 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 6947 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 6948 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 6949 EXPORT_SYMBOL_GPL(ata_port_desc); 6950 #ifdef CONFIG_PCI 6951 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 6952 #endif /* CONFIG_PCI */ 6953 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 6954 EXPORT_SYMBOL_GPL(ata_link_abort); 6955 EXPORT_SYMBOL_GPL(ata_port_abort); 6956 EXPORT_SYMBOL_GPL(ata_port_freeze); 6957 EXPORT_SYMBOL_GPL(sata_async_notification); 6958 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 6959 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 6960 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 6961 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 6962 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); 6963 EXPORT_SYMBOL_GPL(ata_do_eh); 6964 EXPORT_SYMBOL_GPL(ata_std_error_handler); 6965 6966 EXPORT_SYMBOL_GPL(ata_cable_40wire); 6967 EXPORT_SYMBOL_GPL(ata_cable_80wire); 6968 EXPORT_SYMBOL_GPL(ata_cable_unknown); 6969 EXPORT_SYMBOL_GPL(ata_cable_ignore); 6970 EXPORT_SYMBOL_GPL(ata_cable_sata); 6971
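/*
 * Illustrative sketch (not part of libata): how a low-level driver might
 * bring a host up with ata_host_alloc_pinfo() and ata_host_activate()
 * documented above.  The "foo" driver name, foo_interrupt() and the
 * platform/resource handling are hypothetical; the libata structures and
 * helpers are the real ones.  Any method left unspecified in foo_port_ops
 * is filled in from sata_port_ops when ata_host_start() finalizes the ops
 * table.
 *
 *	static struct ata_port_operations foo_port_ops = {
 *		.inherits	= &sata_port_ops,
 *	};
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= ATA_PIO4,
 *		.udma_mask	= ATA_UDMA6,
 *		.port_ops	= &foo_port_ops,
 *	};
 *
 *	static struct scsi_host_template foo_sht = {
 *		ATA_BASE_SHT("foo"),
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *		struct ata_host *host;
 *		int irq = platform_get_irq(pdev, 0);
 *
 *		if (irq < 0)
 *			return irq;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		(set up host->ports[0]->ioaddr, private_data etc. here)
 *
 *		return ata_host_activate(host, irq, foo_interrupt,
 *					 IRQF_SHARED, &foo_sht);
 *	}
 */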
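/*
 * Illustrative note on the libata.force syntax handled by
 * ata_parse_force_one() and ata_parse_force_param() above: the module
 * parameter is a comma-separated list of "[PORT[.DEVICE]:]VALUE" entries.
 * VALUE must match one of the names in force_tbl; a unique prefix is
 * enough (e.g. "1.5" for "1.5Gbps").  When the PORT part is omitted, the
 * entry applies to the port/device of the previous entry.  A hypothetical
 * example:
 *
 *	libata.force=1:sata,1.00:noncq,2:1.5Gbps
 *
 * forces the cable type of port 1 to SATA, disables NCQ for device 0 on
 * port 1 and limits the link of port 2 to 1.5Gbps.
 */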
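/*
 * Illustrative use of ata_wait_register() (the mmio pointer and the
 * FOO_STATUS offset are hypothetical): poll a 32-bit status register
 * until the BSY bit clears, sleeping 50ms between reads and giving up
 * after one second.  The returned value is the last register read, so a
 * still-set BSY bit indicates a timeout.
 *
 *	u32 status;
 *
 *	status = ata_wait_register(ap, mmio + FOO_STATUS, ATA_BUSY,
 *				   ATA_BUSY, 50, 1000);
 *	if (status & ATA_BUSY)
 *		return -EBUSY;
 */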