/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
 *                Please ALWAYS copy linux-ide@vger.kernel.org
 *                on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 * Standards documents from:
 *      http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *      http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *      http://www.sata-io.org (SATA)
 *      http://www.compactflash.org (CF)
 *      http://www.qic.org (QIC157 - Tape and DSC)
 *      http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>
#include <linux/pm_runtime.h>

#include "libata.h"
#include "libata-transport.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]  = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]    = { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
        .prereset       = ata_std_prereset,
        .postreset      = ata_std_postreset,
        .error_handler  = ata_std_error_handler,
};

const struct ata_port_operations sata_port_ops = {
        .inherits       = &ata_base_port_ops,

        .qc_defer       = ata_std_qc_defer,
        .hardreset      = sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

atomic_t ata_print_id = ATOMIC_INIT(0);
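/*
 * Editorial example (not part of the original file): the debounce
 * tables above are meant to be handed to PHY helpers such as
 * sata_link_resume()/sata_link_debounce(); the signature used below is
 * an assumption based on <linux/libata.h>. A minimal sketch of a reset
 * path picking a timing table:
 */
#if 0
static int example_resume_link(struct ata_link *link, unsigned long deadline)
{
        /* { interval, duration, timeout } in msecs; the hotplug/long
         * variants exist for flakier links */
        const unsigned long *timing = sata_deb_timing_normal;

        /* bring the PHY back up and wait for SStatus to stop bouncing */
        return sata_link_resume(link, timing, deadline);
}
#endif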
struct ata_force_param {
        const char      *name;
        unsigned int    cbl;
        int             spd_limit;
        unsigned long   xfer_mask;
        unsigned int    horkage_on;
        unsigned int    horkage_off;
        unsigned int    lflags;
};

struct ata_force_ent {
        int                     port;
        int                     device;
        struct ata_force_param  param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

static int atapi_an;
module_param(atapi_an, int, 0444);
MODULE_PARM_DESC(atapi_an, "Enable ATAPI AN media presence notification (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
        return (sstatus & 0xf) == 0x3;
}

/**
 * ata_link_next - link iteration helper
 * @link: the previous link, NULL to start
 * @ap: ATA port containing links to iterate
 * @mode: iteration mode, one of ATA_LITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
                               enum ata_link_iter_mode mode)
{
        BUG_ON(mode != ATA_LITER_EDGE &&
               mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

        /* NULL link indicates start of iteration */
        if (!link)
                switch (mode) {
                case ATA_LITER_EDGE:
                case ATA_LITER_PMP_FIRST:
                        if (sata_pmp_attached(ap))
                                return ap->pmp_link;
                        /* fall through */
                case ATA_LITER_HOST_FIRST:
                        return &ap->link;
                }

        /* we just iterated over the host link, what's next? */
        if (link == &ap->link)
                switch (mode) {
                case ATA_LITER_HOST_FIRST:
                        if (sata_pmp_attached(ap))
                                return ap->pmp_link;
                        /* fall through */
                case ATA_LITER_PMP_FIRST:
                        if (unlikely(ap->slave_link))
                                return ap->slave_link;
                        /* fall through */
                case ATA_LITER_EDGE:
                        return NULL;
                }

        /* slave_link excludes PMP */
        if (unlikely(link == ap->slave_link))
                return NULL;

        /* we were over a PMP link */
        if (++link < ap->pmp_link + ap->nr_pmp_links)
                return link;

        if (mode == ATA_LITER_PMP_FIRST)
                return &ap->link;

        return NULL;
}

/**
 * ata_dev_next - device iteration helper
 * @dev: the previous device, NULL to start
 * @link: ATA link containing devices to iterate
 * @mode: iteration mode, one of ATA_DITER_*
 *
 * LOCKING:
 * Host lock or EH context.
 *
 * RETURNS:
 * Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
                                enum ata_dev_iter_mode mode)
{
        BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
               mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

        /* NULL dev indicates start of iteration */
        if (!dev)
                switch (mode) {
                case ATA_DITER_ENABLED:
                case ATA_DITER_ALL:
                        dev = link->device;
                        goto check;
                case ATA_DITER_ENABLED_REVERSE:
                case ATA_DITER_ALL_REVERSE:
                        dev = link->device + ata_link_max_devices(link) - 1;
                        goto check;
                }

 next:
        /* move to the next one */
        switch (mode) {
        case ATA_DITER_ENABLED:
        case ATA_DITER_ALL:
                if (++dev < link->device + ata_link_max_devices(link))
                        goto check;
                return NULL;
        case ATA_DITER_ENABLED_REVERSE:
        case ATA_DITER_ALL_REVERSE:
                if (--dev >= link->device)
                        goto check;
                return NULL;
        }

 check:
        if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
            !ata_dev_enabled(dev))
                goto next;
        return dev;
}

/**
 * ata_dev_phys_link - find physical link for a device
 * @dev: ATA device to look up physical link for
 *
 * Look up physical link which @dev is attached to. Note that
 * this is different from @dev->link only when @dev is on slave
 * link. For all other cases, it's the same as @dev->link.
 *
 * LOCKING:
 * Don't care.
 *
 * RETURNS:
 * Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;

        if (!ap->slave_link)
                return dev->link;
        if (!dev->devno)
                return &ap->link;
        return ap->slave_link;
}
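/*
 * Editorial example (not part of the original file): walking every
 * enabled device behind a port with the two iterators above. In-tree
 * callers normally use the ata_for_each_link()/ata_for_each_dev()
 * wrappers from <linux/libata.h>, which expand to loops like this one.
 */
#if 0
static void example_count_enabled_devices(struct ata_port *ap)
{
        struct ata_link *link = NULL;
        struct ata_device *dev;
        int count = 0;

        /* host link first, then PMP fan-out links (slave_link, when
         * present, excludes PMP) */
        while ((link = ata_link_next(link, ap, ATA_LITER_HOST_FIRST))) {
                dev = NULL;
                while ((dev = ata_dev_next(dev, link, ATA_DITER_ENABLED)))
                        count++;
        }

        ata_port_info(ap, "%d enabled device(s)\n", count);
}
#endif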
/**
 * ata_force_cbl - force cable type according to libata.force
 * @ap: ATA port of interest
 *
 * Force cable type according to libata.force and whine about it.
 * The last entry which has a matching port number is used, so it
 * can be specified as part of device force parameters. For
 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 * same effect.
 *
 * LOCKING:
 * EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
        int i;

        for (i = ata_force_tbl_size - 1; i >= 0; i--) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];

                if (fe->port != -1 && fe->port != ap->print_id)
                        continue;

                if (fe->param.cbl == ATA_CBL_NONE)
                        continue;

                ap->cbl = fe->param.cbl;
                ata_port_notice(ap, "FORCE: cable set to %s\n", fe->param.name);
                return;
        }
}

/**
 * ata_force_link_limits - force link limits according to libata.force
 * @link: ATA link of interest
 *
 * Force link flags and SATA spd limit according to libata.force
 * and whine about it. When only the port part is specified
 * (e.g. 1:), the limit applies to all links connected to both
 * the host link and all fan-out ports connected via PMP. If the
 * device part is specified as 0 (e.g. 1.00:), it specifies the
 * first fan-out link not the host link. Device number 15 always
 * points to the host link whether PMP is attached or not. If the
 * controller has a slave link, device number 16 points to it.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
        bool did_spd = false;
        int linkno = link->pmp;
        int i;

        if (ata_is_host_link(link))
                linkno += 15;

        for (i = ata_force_tbl_size - 1; i >= 0; i--) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];

                if (fe->port != -1 && fe->port != link->ap->print_id)
                        continue;

                if (fe->device != -1 && fe->device != linkno)
                        continue;

                /* only honor the first spd limit */
                if (!did_spd && fe->param.spd_limit) {
                        link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
                        ata_link_notice(link, "FORCE: PHY spd limit set to %s\n",
                                        fe->param.name);
                        did_spd = true;
                }

                /* let lflags stack */
                if (fe->param.lflags) {
                        link->flags |= fe->param.lflags;
                        ata_link_notice(link,
                                        "FORCE: link flag 0x%x forced -> 0x%x\n",
                                        fe->param.lflags, link->flags);
                }
        }
}
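/*
 * Editorial note (not in the original file): the entries consumed by
 * the ata_force_* helpers above come from the libata.force= kernel
 * parameter. Following the conventions described in the comments
 * above, a few representative settings:
 *
 *      libata.force=1:noncq            apply a horkage to all of port 1
 *      libata.force=1.00:udma4         first device on port 1's first link
 *      libata.force=a:40c,1.00:udma4   cable type plus transfer mode
 *
 * See Documentation/kernel-parameters.txt for the full grammar.
 */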
/**
 * ata_force_xfermask - force xfermask according to libata.force
 * @dev: ATA device of interest
 *
 * Force xfer_mask according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
        int devno = dev->link->pmp + dev->devno;
        int alt_devno = devno;
        int i;

        /* allow n.15/16 for devices attached to host port */
        if (ata_is_host_link(dev->link))
                alt_devno += 15;

        for (i = ata_force_tbl_size - 1; i >= 0; i--) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];
                unsigned long pio_mask, mwdma_mask, udma_mask;

                if (fe->port != -1 && fe->port != dev->link->ap->print_id)
                        continue;

                if (fe->device != -1 && fe->device != devno &&
                    fe->device != alt_devno)
                        continue;

                if (!fe->param.xfer_mask)
                        continue;

                ata_unpack_xfermask(fe->param.xfer_mask,
                                    &pio_mask, &mwdma_mask, &udma_mask);
                if (udma_mask)
                        dev->udma_mask = udma_mask;
                else if (mwdma_mask) {
                        dev->udma_mask = 0;
                        dev->mwdma_mask = mwdma_mask;
                } else {
                        dev->udma_mask = 0;
                        dev->mwdma_mask = 0;
                        dev->pio_mask = pio_mask;
                }

                ata_dev_notice(dev, "FORCE: xfer_mask set to %s\n",
                               fe->param.name);
                return;
        }
}

/**
 * ata_force_horkage - force horkage according to libata.force
 * @dev: ATA device of interest
 *
 * Force horkage according to libata.force and whine about it.
 * For consistency with link selection, device number 15 selects
 * the first device connected to the host link.
 *
 * LOCKING:
 * EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
        int devno = dev->link->pmp + dev->devno;
        int alt_devno = devno;
        int i;

        /* allow n.15/16 for devices attached to host port */
        if (ata_is_host_link(dev->link))
                alt_devno += 15;

        for (i = 0; i < ata_force_tbl_size; i++) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];

                if (fe->port != -1 && fe->port != dev->link->ap->print_id)
                        continue;

                if (fe->device != -1 && fe->device != devno &&
                    fe->device != alt_devno)
                        continue;

                if (!(~dev->horkage & fe->param.horkage_on) &&
                    !(dev->horkage & fe->param.horkage_off))
                        continue;

                dev->horkage |= fe->param.horkage_on;
                dev->horkage &= ~fe->param.horkage_off;

                ata_dev_notice(dev, "FORCE: horkage modified (%s)\n",
                               fe->param.name);
        }
}

/**
 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 * @opcode: SCSI opcode
 *
 * Determine ATAPI command type from @opcode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
        switch (opcode) {
        case GPCMD_READ_10:
        case GPCMD_READ_12:
                return ATAPI_READ;

        case GPCMD_WRITE_10:
        case GPCMD_WRITE_12:
        case GPCMD_WRITE_AND_VERIFY_10:
                return ATAPI_WRITE;

        case GPCMD_READ_CD:
        case GPCMD_READ_CD_MSF:
                return ATAPI_READ_CD;

        case ATA_16:
        case ATA_12:
                if (atapi_passthru16)
                        return ATAPI_PASS_THRU;
                /* fall thru */
        default:
                return ATAPI_MISC;
        }
}

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will be output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
        fis[0] = 0x27;                  /* Register - Host to Device FIS */
        fis[1] = pmp & 0xf;             /* Port multiplier number */
        if (is_cmd)
                fis[1] |= (1 << 7);     /* bit 7 indicates Command FIS */

        fis[2] = tf->command;
        fis[3] = tf->feature;

        fis[4] = tf->lbal;
        fis[5] = tf->lbam;
        fis[6] = tf->lbah;
        fis[7] = tf->device;

        fis[8] = tf->hob_lbal;
        fis[9] = tf->hob_lbam;
        fis[10] = tf->hob_lbah;
        fis[11] = tf->hob_feature;

        fis[12] = tf->nsect;
        fis[13] = tf->hob_nsect;
        fis[14] = 0;
        fis[15] = tf->ctl;

        fis[16] = 0;
        fis[17] = 0;
        fis[18] = 0;
        fis[19] = 0;
}

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        tf->command     = fis[2];       /* status */
        tf->feature     = fis[3];       /* error */

        tf->lbal        = fis[4];
        tf->lbam        = fis[5];
        tf->lbah        = fis[6];
        tf->device      = fis[7];

        tf->hob_lbal    = fis[8];
        tf->hob_lbam    = fis[9];
        tf->hob_lbah    = fis[10];

        tf->nsect       = fis[12];
        tf->hob_nsect   = fis[13];
}

static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
        u8 cmd;

        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (cmd) {
                tf->command = cmd;
                return 0;
        }
        return -1;
}
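/*
 * Editorial example (not part of the original file): building a
 * Register - Host to Device FIS from a taskfile and decoding a received
 * FIS back, using the two converters above. AHCI-like drivers do
 * essentially this when filling their command tables.
 */
#if 0
static void example_fis_round_trip(struct ata_device *dev)
{
        struct ata_taskfile tf;
        u8 fis[20];     /* ata_tf_to_fis() fills fis[0..19] */

        ata_tf_init(dev, &tf);
        tf.command = ATA_CMD_ID_ATA;
        tf.protocol = ATA_PROT_PIO;

        /* pmp 0, is_cmd=1 sets bit 7 to mark this as a Command FIS */
        ata_tf_to_fis(&tf, 0, 1, fis);

        /* after completion, a received D2H FIS can be decoded back */
        ata_tf_from_fis(fis, &tf);
}
#endif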
/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf. This function can handle all
 * three address formats - LBA, LBA48 and CHS. tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
        u64 block = 0;

        if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
                        block |= (u64)tf->hob_lbal << 24;
                } else
                        block |= (tf->device & 0xf) << 24;

                block |= tf->lbah << 16;
                block |= tf->lbam << 8;
                block |= tf->lbal;
        } else {
                u32 cyl, head, sect;

                cyl = tf->lbam | (tf->lbah << 8);
                head = tf->device & 0xf;
                sect = tf->lbal;

                if (!sect) {
                        ata_dev_warn(dev,
                                     "device reported invalid CHS sector 0\n");
                        sect = 1; /* oh well */
                }

                block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
        }

        return block;
}

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                    u64 block, u32 n_block, unsigned int tf_flags,
                    unsigned int tag)
{
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf->flags |= tf_flags;

        if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
                /* yay, NCQ */
                if (!lba_48_ok(block, n_block))
                        return -ERANGE;

                tf->protocol = ATA_PROT_NCQ;
                tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

                if (tf->flags & ATA_TFLAG_WRITE)
                        tf->command = ATA_CMD_FPDMA_WRITE;
                else
                        tf->command = ATA_CMD_FPDMA_READ;

                tf->nsect = tag << 3;
                tf->hob_feature = (n_block >> 8) & 0xff;
                tf->feature = n_block & 0xff;

                tf->hob_lbah = (block >> 40) & 0xff;
                tf->hob_lbam = (block >> 32) & 0xff;
                tf->hob_lbal = (block >> 24) & 0xff;
                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device = 1 << 6;
                if (tf->flags & ATA_TFLAG_FUA)
                        tf->device |= 1 << 7;
        } else if (dev->flags & ATA_DFLAG_LBA) {
                tf->flags |= ATA_TFLAG_LBA;

                if (lba_28_ok(block, n_block)) {
                        /* use LBA28 */
                        tf->device |= (block >> 24) & 0xf;
                } else if (lba_48_ok(block, n_block)) {
                        if (!(dev->flags & ATA_DFLAG_LBA48))
                                return -ERANGE;

                        /* use LBA48 */
                        tf->flags |= ATA_TFLAG_LBA48;

                        tf->hob_nsect = (n_block >> 8) & 0xff;

                        tf->hob_lbah = (block >> 40) & 0xff;
                        tf->hob_lbam = (block >> 32) & 0xff;
                        tf->hob_lbal = (block >> 24) & 0xff;
                } else
                        /* request too large even for LBA48 */
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                tf->nsect = n_block & 0xff;

                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device |= ATA_LBA;
        } else {
                /* CHS */
                u32 sect, head, cyl, track;

                /* The request -may- be too large for CHS addressing. */
                if (!lba_28_ok(block, n_block))
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                /* Convert LBA to CHS */
                track = (u32)block / dev->sectors;
                cyl   = track / dev->heads;
                head  = track % dev->heads;
                sect  = (u32)block % dev->sectors + 1;

                DPRINTK("block %u track %u cyl %u head %u sect %u\n",
                        (u32)block, track, cyl, head, sect);

                /* Check whether the converted CHS can fit.
                   Cylinder: 0-65535
                   Head: 0-15
                   Sector: 1-255 */
                if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
                        return -ERANGE;

                tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
                tf->lbal = sect;
                tf->lbam = cyl;
                tf->lbah = cyl >> 8;
                tf->device |= head;
        }

        return 0;
}

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
                                unsigned long mwdma_mask,
                                unsigned long udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
               ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
               ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
                         unsigned long *mwdma_mask, unsigned long *udma_mask)
{
        if (pio_mask)
                *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
        if (mwdma_mask)
                *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        if (udma_mask)
                *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
        { -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask. Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0xff;
}
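/*
 * Editorial example (not part of the original file): the mask helpers
 * above combine per-type masks into one xfer_mask and split it apart
 * again; ata_xfer_mask2mode() then maps the highest bit to an XFER_*
 * value.
 */
#if 0
static void example_xfermask(void)
{
        unsigned long xfer_mask, pio, mwdma, udma;

        /* PIO0-4, MWDMA0-2, UDMA0-5 (UDMA/100) */
        xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);

        ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
        /* pio == 0x1f, mwdma == 0x07, udma == 0x3f again */

        /* the highest set bit wins: reports XFER_UDMA_5 (0x45) */
        pr_info("best xfer mode: 0x%x\n", ata_xfer_mask2mode(xfer_mask));
}
#endif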
/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
                                & ~((1 << ent->shift) - 1);
        return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "PIO5",
                "PIO6",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "MWDMA3",
                "MWDMA4",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}

const char *sata_spd_string(unsigned int spd)
{
        static const char * const spd_str[] = {
                "1.5 Gbps",
                "3.0 Gbps",
                "6.0 Gbps",
        };

        if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
                return "<unknown>";
        return spd_str[spd - 1];
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
 * %ATA_DEV_SEMB or %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * so we only check those. It's sufficient for uniqueness.
         *
         * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
         * signatures for ATA and ATAPI devices attached on SerialATA,
         * 0x3c/0xc3 and 0x69/0x96 respectively. However, the SerialATA
         * spec has never mentioned using different signatures
         * for ATA/ATAPI devices. Then, the Serial ATA II: Port
         * Multiplier specification began to use 0x69/0x96 to identify
         * port multipliers and 0x3c/0xc3 to identify SEMB devices.
         * ATA/ATAPI-7 soon dropped the descriptions of 0x3c/0xc3 and
         * 0x69/0x96 and described them as reserved for
         * SerialATA.
         *
         * We follow the current spec and consider that 0x69/0x96
         * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
         * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
         * SEMB signature. This is worked around in
         * ata_dev_read_id().
         */
        if ((tf->lbam == 0) && (tf->lbah == 0)) {
                DPRINTK("found ATA device by sig\n");
                return ATA_DEV_ATA;
        }

        if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
                DPRINTK("found ATAPI device by sig\n");
                return ATA_DEV_ATAPI;
        }

        if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
                DPRINTK("found PMP device by sig\n");
                return ATA_DEV_PMP;
        }

        if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
                DPRINTK("found SEMB device by sig (could be ATA device)\n");
                return ATA_DEV_SEMB;
        }

        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
}
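/*
 * Editorial example (not part of the original file): a reset path in an
 * SFF-style driver might classify the attached device from the
 * signature left in the shadow registers. The ->sff_tf_read() hook is
 * an assumption here; non-SFF drivers read the signature FIS instead.
 */
#if 0
static unsigned int example_classify_sig(struct ata_port *ap)
{
        struct ata_taskfile tf;

        /* fetch the signature the device wrote after reset */
        ap->ops->sff_tf_read(ap, &tf);

        /* e.g. lbam/lbah == 0x14/0xeb identifies an ATAPI device */
        return ata_dev_classify(&tf);
}
#endif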
/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks. Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        BUG_ON(len & 1);

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null. @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}
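/*
 * Editorial example (not part of the original file): pulling the
 * fixed-width model and firmware strings out of IDENTIFY data. This is
 * the same pattern ata_dev_configure() uses further down.
 */
#if 0
static void example_id_strings(struct ata_device *dev)
{
        char model[ATA_ID_PROD_LEN + 1];
        char fwrev[ATA_ID_FW_REV_LEN + 1];

        /* @len must be the even field width plus 1 for the terminator */
        ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
        ata_id_c_string(dev->id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));

        ata_dev_info(dev, "model '%s', firmware '%s'\n", model, fwrev);
}
#endif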
static u64 ata_id_n_sectors(const u16 *id)
{
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
                        return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
                else
                        return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
        } else {
                if (ata_id_current_chs_valid(id))
                        return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
                               id[ATA_ID_CUR_SECTORS];
                else
                        return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
                               id[ATA_ID_SECTORS];
        }
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
        u64 sectors = 0;

        sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
        sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
        sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
        sectors |= (tf->lbah & 0xff) << 16;
        sectors |= (tf->lbam & 0xff) << 8;
        sectors |= (tf->lbal & 0xff);

        return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
        u64 sectors = 0;

        sectors |= (tf->device & 0x0f) << 24;
        sectors |= (tf->lbah & 0xff) << 16;
        sectors |= (tf->lbam & 0xff) << 8;
        sectors |= (tf->lbal & 0xff);

        return sectors;
}

/**
 * ata_read_native_max_address - Read native max address
 * @dev: target device
 * @max_sectors: out parameter for the result native max address
 *
 * Perform an LBA48 or LBA28 native size query upon the device in
 * question.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted by the drive.
 * -EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
        unsigned int err_mask;
        struct ata_taskfile tf;
        int lba48 = ata_id_has_lba48(dev->id);

        ata_tf_init(dev, &tf);

        /* always clear all address registers */
        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

        if (lba48) {
                tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
                tf.flags |= ATA_TFLAG_LBA48;
        } else
                tf.command = ATA_CMD_READ_NATIVE_MAX;

        tf.protocol |= ATA_PROT_NODATA;
        tf.device |= ATA_LBA;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
        if (err_mask) {
                ata_dev_warn(dev,
                             "failed to read native max address (err_mask=0x%x)\n",
                             err_mask);
                if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
                        return -EACCES;
                return -EIO;
        }

        if (lba48)
                *max_sectors = ata_tf_to_lba48(&tf) + 1;
        else
                *max_sectors = ata_tf_to_lba(&tf) + 1;
        if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
                (*max_sectors)--;
        return 0;
}

/**
 * ata_set_max_sectors - Set max sectors
 * @dev: target device
 * @new_sectors: new max sectors value to set for the device
 *
 * Set max sectors of @dev to @new_sectors.
 *
 * RETURNS:
 * 0 on success, -EACCES if command is aborted or denied (due to
 * previous non-volatile SET_MAX) by the drive. -EIO on other
 * errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
        unsigned int err_mask;
        struct ata_taskfile tf;
        int lba48 = ata_id_has_lba48(dev->id);

        new_sectors--;

        ata_tf_init(dev, &tf);

        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

        if (lba48) {
                tf.command = ATA_CMD_SET_MAX_EXT;
                tf.flags |= ATA_TFLAG_LBA48;

                tf.hob_lbal = (new_sectors >> 24) & 0xff;
                tf.hob_lbam = (new_sectors >> 32) & 0xff;
                tf.hob_lbah = (new_sectors >> 40) & 0xff;
        } else {
                tf.command = ATA_CMD_SET_MAX;

                tf.device |= (new_sectors >> 24) & 0xf;
        }

        tf.protocol |= ATA_PROT_NODATA;
        tf.device |= ATA_LBA;

        tf.lbal = (new_sectors >> 0) & 0xff;
        tf.lbam = (new_sectors >> 8) & 0xff;
        tf.lbah = (new_sectors >> 16) & 0xff;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
        if (err_mask) {
                ata_dev_warn(dev,
                             "failed to set max address (err_mask=0x%x)\n",
                             err_mask);
                if (err_mask == AC_ERR_DEV &&
                    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
                        return -EACCES;
                return -EIO;
        }

        return 0;
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
        struct ata_eh_context *ehc = &dev->link->eh_context;
        int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
        bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
        u64 sectors = ata_id_n_sectors(dev->id);
        u64 native_sectors;
        int rc;

        /* do we need to do it? */
        if (dev->class != ATA_DEV_ATA ||
            !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
            (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
                return 0;

        /* read native max address */
        rc = ata_read_native_max_address(dev, &native_sectors);
        if (rc) {
                /* If device aborted the command or HPA isn't going to
                 * be unlocked, skip HPA resizing.
                 */
                if (rc == -EACCES || !unlock_hpa) {
                        ata_dev_warn(dev,
                                     "HPA support seems broken, skipping HPA handling\n");
                        dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

                        /* we can continue if device aborted the command */
                        if (rc == -EACCES)
                                rc = 0;
                }

                return rc;
        }
        dev->n_native_sectors = native_sectors;

        /* nothing to do? */
        if (native_sectors <= sectors || !unlock_hpa) {
                if (!print_info || native_sectors == sectors)
                        return 0;

                if (native_sectors > sectors)
                        ata_dev_info(dev,
                                     "HPA detected: current %llu, native %llu\n",
                                     (unsigned long long)sectors,
                                     (unsigned long long)native_sectors);
                else if (native_sectors < sectors)
                        ata_dev_warn(dev,
                                     "native sectors (%llu) is smaller than sectors (%llu)\n",
                                     (unsigned long long)native_sectors,
                                     (unsigned long long)sectors);
                return 0;
        }

        /* let's unlock HPA */
        rc = ata_set_max_sectors(dev, native_sectors);
        if (rc == -EACCES) {
                /* if device aborted the command, skip HPA resizing */
                ata_dev_warn(dev,
                             "device aborted resize (%llu -> %llu), skipping HPA handling\n",
                             (unsigned long long)sectors,
                             (unsigned long long)native_sectors);
                dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
                return 0;
        } else if (rc)
                return rc;

        /* re-read IDENTIFY data */
        rc = ata_dev_reread_id(dev, 0);
        if (rc) {
                ata_dev_err(dev,
                            "failed to re-read IDENTIFY data after HPA resizing\n");
                return rc;
        }

        if (print_info) {
                u64 new_sectors = ata_id_n_sectors(dev->id);
                ata_dev_info(dev,
                             "HPA unlocked: %llu -> %llu, native %llu\n",
                             (unsigned long long)sectors,
                             (unsigned long long)new_sectors,
                             (unsigned long long)native_sectors);
        }

        return 0;
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
        DPRINTK("49==0x%04x "
                "53==0x%04x "
                "63==0x%04x "
                "64==0x%04x "
                "75==0x%04x \n",
                id[49],
                id[53],
                id[63],
                id[64],
                id[75]);
        DPRINTK("80==0x%04x "
                "81==0x%04x "
                "82==0x%04x "
                "83==0x%04x "
                "84==0x%04x \n",
                id[80],
                id[81],
                id[82],
                id[83],
                id[84]);
        DPRINTK("88==0x%04x "
                "93==0x%04x\n",
                id[88],
                id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
        unsigned long pio_mask, mwdma_mask, udma_mask;

        /* Usual case. Word 53 indicates word 64 is valid */
        if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
                pio_mask <<= 3;
                pio_mask |= 0x7;
        } else {
                /* If word 64 isn't valid then Word 51 high byte holds
                 * the PIO timing number for the maximum. Turn it into
                 * a mask.
                 */
                u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
                if (mode < 5)   /* Valid PIO range */
                        pio_mask = (2 << mode) - 1;
                else
                        pio_mask = 1;

                /* But wait.. there's more. Design your standards by
                 * committee and you too can get a free iordy field to
                 * process. However it's the speeds not the modes that
                 * are supported...
                 * Note drivers using the timing API will get this
                 * right anyway
                 */
        }

        mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

        if (ata_id_is_cfa(id)) {
                /*
                 * Process compact flash extended modes
                 */
                int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
                int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

                if (pio)
                        pio_mask |= (1 << 5);
                if (pio > 1)
                        pio_mask |= (1 << 6);
                if (dma)
                        mwdma_mask |= (1 << 3);
                if (dma > 1)
                        mwdma_mask |= (1 << 4);
        }

        udma_mask = 0;
        if (id[ATA_ID_FIELD_VALID] & (1 << 2))
                udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
        struct completion *waiting = qc->private_data;

        complete(waiting);
}
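/*
 * Editorial example (not part of the original file): combining
 * ata_id_xfermask() above with ata_mode_string() to report the fastest
 * transfer mode a device claims to support.
 */
#if 0
static void example_report_xfermask(struct ata_device *dev)
{
        unsigned long xfer_mask = ata_id_xfermask(dev->id);

        /* the highest set bit decides the reported mode string */
        ata_dev_info(dev, "max mode: %s\n", ata_mode_string(xfer_mask));
}
#endif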
/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout. @tf contains
 * command on entry and result on return. Timeout and error
 * conditions are reported via return value. No recovery action
 * is taken after a command times out. It's the caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None. Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
                              struct ata_taskfile *tf, const u8 *cdb,
                              int dma_dir, struct scatterlist *sgl,
                              unsigned int n_elem, unsigned long timeout)
{
        struct ata_link *link = dev->link;
        struct ata_port *ap = link->ap;
        u8 command = tf->command;
        int auto_timeout = 0;
        struct ata_queued_cmd *qc;
        unsigned int tag, preempted_tag;
        u32 preempted_sactive, preempted_qc_active;
        int preempted_nr_active_links;
        DECLARE_COMPLETION_ONSTACK(wait);
        unsigned long flags;
        unsigned int err_mask;
        int rc;

        spin_lock_irqsave(ap->lock, flags);

        /* no internal command while frozen */
        if (ap->pflags & ATA_PFLAG_FROZEN) {
                spin_unlock_irqrestore(ap->lock, flags);
                return AC_ERR_SYSTEM;
        }

        /* initialize internal qc */

        /* XXX: Tag 0 is used for drivers with legacy EH as some
         * drivers choke if any other tag is given. This breaks
         * ata_tag_internal() test for those drivers. Don't use new
         * EH stuff without converting to it.
         */
        if (ap->ops->error_handler)
                tag = ATA_TAG_INTERNAL;
        else
                tag = 0;

        if (test_and_set_bit(tag, &ap->qc_allocated))
                BUG();
        qc = __ata_qc_from_tag(ap, tag);

        qc->tag = tag;
        qc->scsicmd = NULL;
        qc->ap = ap;
        qc->dev = dev;
        ata_qc_reinit(qc);

        preempted_tag = link->active_tag;
        preempted_sactive = link->sactive;
        preempted_qc_active = ap->qc_active;
        preempted_nr_active_links = ap->nr_active_links;
        link->active_tag = ATA_TAG_POISON;
        link->sactive = 0;
        ap->qc_active = 0;
        ap->nr_active_links = 0;

        /* prepare & issue qc */
        qc->tf = *tf;
        if (cdb)
                memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
        qc->flags |= ATA_QCFLAG_RESULT_TF;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                unsigned int i, buflen = 0;
                struct scatterlist *sg;

                for_each_sg(sgl, sg, n_elem, i)
                        buflen += sg->length;

                ata_sg_init(qc, sgl, n_elem);
                qc->nbytes = buflen;
        }

        qc->private_data = &wait;
        qc->complete_fn = ata_qc_complete_internal;

        ata_qc_issue(qc);

        spin_unlock_irqrestore(ap->lock, flags);

        if (!timeout) {
                if (ata_probe_timeout)
                        timeout = ata_probe_timeout * 1000;
                else {
                        timeout = ata_internal_cmd_timeout(dev, command);
                        auto_timeout = 1;
                }
        }

        if (ap->ops->error_handler)
                ata_eh_release(ap);

        rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

        if (ap->ops->error_handler)
                ata_eh_acquire(ap);

        ata_sff_flush_pio_task(ap);

        if (!rc) {
                spin_lock_irqsave(ap->lock, flags);

                /* We're racing with irq here. If we lose, the
                 * following test prevents us from completing the qc
                 * twice. If we win, the port is frozen and will be
                 * cleaned up by ->post_internal_cmd().
                 */
                if (qc->flags & ATA_QCFLAG_ACTIVE) {
                        qc->err_mask |= AC_ERR_TIMEOUT;

                        if (ap->ops->error_handler)
                                ata_port_freeze(ap);
                        else
                                ata_qc_complete(qc);

                        if (ata_msg_warn(ap))
                                ata_dev_warn(dev, "qc timeout (cmd 0x%x)\n",
                                             command);
                }

                spin_unlock_irqrestore(ap->lock, flags);
        }

        /* do post_internal_cmd */
        if (ap->ops->post_internal_cmd)
                ap->ops->post_internal_cmd(qc);

        /* perform minimal error analysis */
        if (qc->flags & ATA_QCFLAG_FAILED) {
                if (qc->result_tf.command & (ATA_ERR | ATA_DF))
                        qc->err_mask |= AC_ERR_DEV;

                if (!qc->err_mask)
                        qc->err_mask |= AC_ERR_OTHER;

                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;
        }

        /* finish up */
        spin_lock_irqsave(ap->lock, flags);

        *tf = qc->result_tf;
        err_mask = qc->err_mask;

        ata_qc_free(qc);
        link->active_tag = preempted_tag;
        link->sactive = preempted_sactive;
        ap->qc_active = preempted_qc_active;
        ap->nr_active_links = preempted_nr_active_links;

        spin_unlock_irqrestore(ap->lock, flags);

        if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
                ata_internal_cmd_timed_out(dev, command);

        return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 * @timeout: Timeout in msecs (0 for default)
 *
 * Wrapper around ata_exec_internal_sg() which takes a simple
 * buffer instead of an sg list.
 *
 * LOCKING:
 * None. Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
                           struct ata_taskfile *tf, const u8 *cdb,
                           int dma_dir, void *buf, unsigned int buflen,
                           unsigned long timeout)
{
        struct scatterlist *psg = NULL, sg;
        unsigned int n_elem = 0;

        if (dma_dir != DMA_NONE) {
                WARN_ON(!buf);
                sg_init_one(&sg, buf, buflen);
                psg = &sg;
                n_elem++;
        }

        return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
                                    timeout);
}

/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);

        tf.command = cmd;
        tf.flags |= ATA_TFLAG_DEVICE;
        tf.protocol = ATA_PROT_NODATA;

        return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}
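/*
 * Editorial example (not part of the original file): issuing a no-data
 * command through the helper above, e.g. flushing the write cache on a
 * suspend path. ATA_DFLAG_FLUSH_EXT is set by ata_dev_configure()
 * further down for large LBA48 drives.
 */
#if 0
static int example_flush_cache(struct ata_device *dev)
{
        unsigned int err_mask;

        /* pick the 48-bit flavor when the device runs with LBA48 */
        u8 cmd = (dev->flags & ATA_DFLAG_FLUSH_EXT) ?
                        ATA_CMD_FLUSH_EXT : ATA_CMD_FLUSH;

        err_mask = ata_do_simple_cmd(dev, cmd);
        if (err_mask) {
                ata_dev_err(dev, "FLUSH failed (err_mask=0x%x)\n", err_mask);
                return -EIO;
        }
        return 0;
}
#endif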
/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
        /* Don't set IORDY if we're preparing for reset. IORDY may
         * lead to controller lock up on certain controllers if the
         * port is not occupied. See bko#11703 for details.
         */
        if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
                return 0;
        /* Controller doesn't support IORDY. Probably a pointless
         * check as the caller should know this.
         */
        if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
                return 0;
        /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */
        if (ata_id_is_cfa(adev->id)
            && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
                return 0;
        /* PIO3 and higher it is mandatory */
        if (adev->pio_mode > XFER_PIO_2)
                return 1;
        /* We turn it on when possible */
        if (ata_id_has_iordy(adev->id))
                return 1;
        return 0;
}

/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. Return
 * -1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
        /* If we have no drive specific rule, then PIO 2 is non IORDY */
        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
                u16 pio = adev->id[ATA_ID_EIDE_PIO];
                /* Is the speed faster than the drive allows non IORDY ? */
                if (pio) {
                        /* This is cycle times not frequency - watch the logic! */
                        if (pio > 240)  /* PIO2 is 240nS per cycle */
                                return 3 << ATA_SHIFT_PIO;
                        return 7 << ATA_SHIFT_PIO;
                }
        }
        return 3 << ATA_SHIFT_PIO;
}

/**
 * ata_do_dev_read_id - default ID read method
 * @dev: device
 * @tf: proposed taskfile
 * @id: data buffer
 *
 * Issue the identify taskfile and hand back the buffer containing
 * identify data. For some RAID controllers and for pre ATA devices
 * this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
                                struct ata_taskfile *tf, u16 *id)
{
        return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
                                 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}
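/*
 * Editorial example (not part of the original file): a low-level driver
 * overriding ->read_id() while reusing the default implementation
 * above, as the kernel-doc suggests RAID bridges do. The specific
 * fixup (zeroing word 217, nominal media rotation rate) is purely
 * hypothetical.
 */
#if 0
static unsigned int example_read_id(struct ata_device *dev,
                                    struct ata_taskfile *tf, u16 *id)
{
        unsigned int err_mask = ata_do_dev_read_id(dev, tf, id);

        if (!err_mask) {
                /* hypothetical fixup: hide a field the bridge cannot
                 * pass through (0 is byte-order neutral) */
                id[217] = 0;
        }
        return err_mask;
}
#endif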
/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device. ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 * now we abort if we hit that case.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
                    unsigned int flags, u16 *id)
{
        struct ata_port *ap = dev->link->ap;
        unsigned int class = *p_class;
        struct ata_taskfile tf;
        unsigned int err_mask = 0;
        const char *reason;
        bool is_semb = class == ATA_DEV_SEMB;
        int may_fallback = 1, tried_spinup = 0;
        int rc;

        if (ata_msg_ctl(ap))
                ata_dev_dbg(dev, "%s: ENTER\n", __func__);

 retry:
        ata_tf_init(dev, &tf);

        switch (class) {
        case ATA_DEV_SEMB:
                class = ATA_DEV_ATA;    /* some hard drives report SEMB sig */
        case ATA_DEV_ATA:
                tf.command = ATA_CMD_ID_ATA;
                break;
        case ATA_DEV_ATAPI:
                tf.command = ATA_CMD_ID_ATAPI;
                break;
        default:
                rc = -ENODEV;
                reason = "unsupported class";
                goto err_out;
        }

        tf.protocol = ATA_PROT_PIO;

        /* Some devices choke if TF registers contain garbage. Make
         * sure those are properly initialized.
         */
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

        /* Device presence detection is unreliable on some
         * controllers. Always poll IDENTIFY if available.
         */
        tf.flags |= ATA_TFLAG_POLLING;

        if (ap->ops->read_id)
                err_mask = ap->ops->read_id(dev, &tf, id);
        else
                err_mask = ata_do_dev_read_id(dev, &tf, id);

        if (err_mask) {
                if (err_mask & AC_ERR_NODEV_HINT) {
                        ata_dev_dbg(dev, "NODEV after polling detection\n");
                        return -ENOENT;
                }

                if (is_semb) {
                        ata_dev_info(dev,
                                     "IDENTIFY failed on device w/ SEMB sig, disabled\n");
                        /* SEMB is not supported yet */
                        *p_class = ATA_DEV_SEMB_UNSUP;
                        return 0;
                }

                if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
                        /* Device or controller might have reported
                         * the wrong device class. Give a shot at the
                         * other IDENTIFY if the current one is
                         * aborted by the device.
                         */
                        if (may_fallback) {
                                may_fallback = 0;

                                if (class == ATA_DEV_ATA)
                                        class = ATA_DEV_ATAPI;
                                else
                                        class = ATA_DEV_ATA;
                                goto retry;
                        }

                        /* Control reaches here iff the device aborted
                         * both flavors of IDENTIFYs which happens
                         * sometimes with phantom devices.
                         */
                        ata_dev_dbg(dev,
                                    "both IDENTIFYs aborted, assuming NODEV\n");
                        return -ENOENT;
                }

                rc = -EIO;
                reason = "I/O error";
                goto err_out;
        }

        if (dev->horkage & ATA_HORKAGE_DUMP_ID) {
                ata_dev_dbg(dev, "dumping IDENTIFY data, "
                            "class=%d may_fallback=%d tried_spinup=%d\n",
                            class, may_fallback, tried_spinup);
                print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET,
                               16, 2, id, ATA_ID_WORDS * sizeof(*id), true);
        }

        /* Falling back doesn't make sense if ID data was read
         * successfully at least once.
         */
        may_fallback = 0;

        swap_buf_le16(id, ATA_ID_WORDS);

        /* sanity check */
        rc = -EINVAL;
        reason = "device reports invalid type";

        if (class == ATA_DEV_ATA) {
                if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
                        goto err_out;
                if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
                    ata_id_is_ata(id)) {
                        ata_dev_dbg(dev,
                                    "host indicates ignore ATA devices, ignored\n");
                        return -ENOENT;
                }
        } else {
                if (ata_id_is_ata(id))
                        goto err_out;
        }

        if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
                tried_spinup = 1;
                /*
                 * Drive powered-up in standby mode, and requires a specific
                 * SET_FEATURES spin-up subcommand before it will accept
                 * anything other than the original IDENTIFY command.
                 */
                err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
                if (err_mask && id[2] != 0x738c) {
                        rc = -EIO;
                        reason = "SPINUP failed";
                        goto err_out;
                }
                /*
                 * If the drive initially returned incomplete IDENTIFY info,
                 * we now must reissue the IDENTIFY command.
                 */
                if (id[2] == 0x37c8)
                        goto retry;
        }

        if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
                /*
                 * The exact sequence expected by certain pre-ATA4 drives is:
                 * SRST RESET
                 * IDENTIFY (optional in early ATA)
                 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
                 * anything else..
                 * Some drives were very specific about that exact sequence.
                 *
                 * Note that ATA4 says lba is mandatory so the second check
                 * should never trigger.
                 */
                if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                        err_mask = ata_dev_init_params(dev, id[3], id[6]);
                        if (err_mask) {
                                rc = -EIO;
                                reason = "INIT_DEV_PARAMS failed";
                                goto err_out;
                        }

                        /* current CHS translation info (id[53-58]) might be
                         * changed. reread the identify device info.
                         */
                        flags &= ~ATA_READID_POSTRESET;
                        goto retry;
                }
        }

        *p_class = class;

        return 0;

 err_out:
        if (ata_msg_warn(ap))
                ata_dev_warn(dev, "failed to IDENTIFY (%s, err_mask=0x%x)\n",
                             reason, err_mask);
        return rc;
}

static int ata_do_link_spd_horkage(struct ata_device *dev)
{
        struct ata_link *plink = ata_dev_phys_link(dev);
        u32 target, target_limit;

        if (!sata_scr_valid(plink))
                return 0;

        if (dev->horkage & ATA_HORKAGE_1_5_GBPS)
                target = 1;
        else
                return 0;

        target_limit = (1 << target) - 1;

        /* if already on stricter limit, no need to push further */
        if (plink->sata_spd_limit <= target_limit)
                return 0;

        plink->sata_spd_limit = target_limit;

        /* Request another EH round by returning -EAGAIN if link is
         * going faster than the target speed. Forward progress is
         * guaranteed by setting sata_spd_limit to target_limit above.
         */
2071 */ 2072 if (plink->sata_spd > target) { 2073 ata_dev_info(dev, "applying link speed limit horkage to %s\n", 2074 sata_spd_string(target)); 2075 return -EAGAIN; 2076 } 2077 return 0; 2078 } 2079 2080 static inline u8 ata_dev_knobble(struct ata_device *dev) 2081 { 2082 struct ata_port *ap = dev->link->ap; 2083 2084 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2085 return 0; 2086 2087 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2088 } 2089 2090 static int ata_dev_config_ncq(struct ata_device *dev, 2091 char *desc, size_t desc_sz) 2092 { 2093 struct ata_port *ap = dev->link->ap; 2094 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2095 unsigned int err_mask; 2096 char *aa_desc = ""; 2097 2098 if (!ata_id_has_ncq(dev->id)) { 2099 desc[0] = '\0'; 2100 return 0; 2101 } 2102 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2103 snprintf(desc, desc_sz, "NCQ (not used)"); 2104 return 0; 2105 } 2106 if (ap->flags & ATA_FLAG_NCQ) { 2107 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2108 dev->flags |= ATA_DFLAG_NCQ; 2109 } 2110 2111 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && 2112 (ap->flags & ATA_FLAG_FPDMA_AA) && 2113 ata_id_has_fpdma_aa(dev->id)) { 2114 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, 2115 SATA_FPDMA_AA); 2116 if (err_mask) { 2117 ata_dev_err(dev, 2118 "failed to enable AA (error_mask=0x%x)\n", 2119 err_mask); 2120 if (err_mask != AC_ERR_DEV) { 2121 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; 2122 return -EIO; 2123 } 2124 } else 2125 aa_desc = ", AA"; 2126 } 2127 2128 if (hdepth >= ddepth) 2129 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); 2130 else 2131 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, 2132 ddepth, aa_desc); 2133 return 0; 2134 } 2135 2136 /** 2137 * ata_dev_configure - Configure the specified ATA/ATAPI device 2138 * @dev: Target device to configure 2139 * 2140 * Configure @dev according to @dev->id. Generic and low-level 2141 * driver specific fixups are also applied. 2142 * 2143 * LOCKING: 2144 * Kernel thread context (may sleep) 2145 * 2146 * RETURNS: 2147 * 0 on success, -errno otherwise 2148 */ 2149 int ata_dev_configure(struct ata_device *dev) 2150 { 2151 struct ata_port *ap = dev->link->ap; 2152 struct ata_eh_context *ehc = &dev->link->eh_context; 2153 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2154 const u16 *id = dev->id; 2155 unsigned long xfer_mask; 2156 char revbuf[7]; /* XYZ-99\0 */ 2157 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2158 char modelbuf[ATA_ID_PROD_LEN+1]; 2159 int rc; 2160 2161 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2162 ata_dev_info(dev, "%s: ENTER/EXIT -- nodev\n", __func__); 2163 return 0; 2164 } 2165 2166 if (ata_msg_probe(ap)) 2167 ata_dev_dbg(dev, "%s: ENTER\n", __func__); 2168 2169 /* set horkage */ 2170 dev->horkage |= ata_dev_blacklisted(dev); 2171 ata_force_horkage(dev); 2172 2173 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2174 ata_dev_info(dev, "unsupported device, disabling\n"); 2175 ata_dev_disable(dev); 2176 return 0; 2177 } 2178 2179 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2180 dev->class == ATA_DEV_ATAPI) { 2181 ata_dev_warn(dev, "WARNING: ATAPI is %s, device ignored\n", 2182 atapi_enabled ? 
"not supported with this driver" 2183 : "disabled"); 2184 ata_dev_disable(dev); 2185 return 0; 2186 } 2187 2188 rc = ata_do_link_spd_horkage(dev); 2189 if (rc) 2190 return rc; 2191 2192 /* let ACPI work its magic */ 2193 rc = ata_acpi_on_devcfg(dev); 2194 if (rc) 2195 return rc; 2196 2197 /* massage HPA, do it early as it might change IDENTIFY data */ 2198 rc = ata_hpa_resize(dev); 2199 if (rc) 2200 return rc; 2201 2202 /* print device capabilities */ 2203 if (ata_msg_probe(ap)) 2204 ata_dev_dbg(dev, 2205 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2206 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2207 __func__, 2208 id[49], id[82], id[83], id[84], 2209 id[85], id[86], id[87], id[88]); 2210 2211 /* initialize to-be-configured parameters */ 2212 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2213 dev->max_sectors = 0; 2214 dev->cdb_len = 0; 2215 dev->n_sectors = 0; 2216 dev->cylinders = 0; 2217 dev->heads = 0; 2218 dev->sectors = 0; 2219 dev->multi_count = 0; 2220 2221 /* 2222 * common ATA, ATAPI feature tests 2223 */ 2224 2225 /* find max transfer mode; for printk only */ 2226 xfer_mask = ata_id_xfermask(id); 2227 2228 if (ata_msg_probe(ap)) 2229 ata_dump_id(id); 2230 2231 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2232 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2233 sizeof(fwrevbuf)); 2234 2235 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2236 sizeof(modelbuf)); 2237 2238 /* ATA-specific feature tests */ 2239 if (dev->class == ATA_DEV_ATA) { 2240 if (ata_id_is_cfa(id)) { 2241 /* CPRM may make this media unusable */ 2242 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2243 ata_dev_warn(dev, 2244 "supports DRM functions and may not be fully accessible\n"); 2245 snprintf(revbuf, 7, "CFA"); 2246 } else { 2247 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2248 /* Warn the user if the device has TPM extensions */ 2249 if (ata_id_has_tpm(id)) 2250 ata_dev_warn(dev, 2251 "supports DRM functions and may not be fully accessible\n"); 2252 } 2253 2254 dev->n_sectors = ata_id_n_sectors(id); 2255 2256 /* get current R/W Multiple count setting */ 2257 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2258 unsigned int max = dev->id[47] & 0xff; 2259 unsigned int cnt = dev->id[59] & 0xff; 2260 /* only recognize/allow powers of two here */ 2261 if (is_power_of_2(max) && is_power_of_2(cnt)) 2262 if (cnt <= max) 2263 dev->multi_count = cnt; 2264 } 2265 2266 if (ata_id_has_lba(id)) { 2267 const char *lba_desc; 2268 char ncq_desc[24]; 2269 2270 lba_desc = "LBA"; 2271 dev->flags |= ATA_DFLAG_LBA; 2272 if (ata_id_has_lba48(id)) { 2273 dev->flags |= ATA_DFLAG_LBA48; 2274 lba_desc = "LBA48"; 2275 2276 if (dev->n_sectors >= (1UL << 28) && 2277 ata_id_has_flush_ext(id)) 2278 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2279 } 2280 2281 /* config NCQ */ 2282 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2283 if (rc) 2284 return rc; 2285 2286 /* print device info to dmesg */ 2287 if (ata_msg_drv(ap) && print_info) { 2288 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2289 revbuf, modelbuf, fwrevbuf, 2290 ata_mode_string(xfer_mask)); 2291 ata_dev_info(dev, 2292 "%llu sectors, multi %u: %s %s\n", 2293 (unsigned long long)dev->n_sectors, 2294 dev->multi_count, lba_desc, ncq_desc); 2295 } 2296 } else { 2297 /* CHS */ 2298 2299 /* Default translation */ 2300 dev->cylinders = id[1]; 2301 dev->heads = id[3]; 2302 dev->sectors = id[6]; 2303 2304 if (ata_id_current_chs_valid(id)) { 2305 /* Current CHS translation is valid. 
*/ 2306 dev->cylinders = id[54]; 2307 dev->heads = id[55]; 2308 dev->sectors = id[56]; 2309 } 2310 2311 /* print device info to dmesg */ 2312 if (ata_msg_drv(ap) && print_info) { 2313 ata_dev_info(dev, "%s: %s, %s, max %s\n", 2314 revbuf, modelbuf, fwrevbuf, 2315 ata_mode_string(xfer_mask)); 2316 ata_dev_info(dev, 2317 "%llu sectors, multi %u, CHS %u/%u/%u\n", 2318 (unsigned long long)dev->n_sectors, 2319 dev->multi_count, dev->cylinders, 2320 dev->heads, dev->sectors); 2321 } 2322 } 2323 2324 dev->cdb_len = 16; 2325 } 2326 2327 /* ATAPI-specific feature tests */ 2328 else if (dev->class == ATA_DEV_ATAPI) { 2329 const char *cdb_intr_string = ""; 2330 const char *atapi_an_string = ""; 2331 const char *dma_dir_string = ""; 2332 u32 sntf; 2333 2334 rc = atapi_cdb_len(id); 2335 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2336 if (ata_msg_warn(ap)) 2337 ata_dev_warn(dev, "unsupported CDB len\n"); 2338 rc = -EINVAL; 2339 goto err_out_nosup; 2340 } 2341 dev->cdb_len = (unsigned int) rc; 2342 2343 /* Enable ATAPI AN if both the host and device have 2344 * the support. If PMP is attached, SNTF is required 2345 * to enable ATAPI AN to discern between PHY status 2346 * changed notifications and ATAPI ANs. 2347 */ 2348 if (atapi_an && 2349 (ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2350 (!sata_pmp_attached(ap) || 2351 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2352 unsigned int err_mask; 2353 2354 /* issue SET feature command to turn this on */ 2355 err_mask = ata_dev_set_feature(dev, 2356 SETFEATURES_SATA_ENABLE, SATA_AN); 2357 if (err_mask) 2358 ata_dev_err(dev, 2359 "failed to enable ATAPI AN (err_mask=0x%x)\n", 2360 err_mask); 2361 else { 2362 dev->flags |= ATA_DFLAG_AN; 2363 atapi_an_string = ", ATAPI AN"; 2364 } 2365 } 2366 2367 if (ata_id_cdb_intr(dev->id)) { 2368 dev->flags |= ATA_DFLAG_CDB_INTR; 2369 cdb_intr_string = ", CDB intr"; 2370 } 2371 2372 if (atapi_dmadir || atapi_id_dmadir(dev->id)) { 2373 dev->flags |= ATA_DFLAG_DMADIR; 2374 dma_dir_string = ", DMADIR"; 2375 } 2376 2377 /* print device info to dmesg */ 2378 if (ata_msg_drv(ap) && print_info) 2379 ata_dev_info(dev, 2380 "ATAPI: %s, %s, max %s%s%s%s\n", 2381 modelbuf, fwrevbuf, 2382 ata_mode_string(xfer_mask), 2383 cdb_intr_string, atapi_an_string, 2384 dma_dir_string); 2385 } 2386 2387 /* determine max_sectors */ 2388 dev->max_sectors = ATA_MAX_SECTORS; 2389 if (dev->flags & ATA_DFLAG_LBA48) 2390 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2391 2392 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2393 200 sectors */ 2394 if (ata_dev_knobble(dev)) { 2395 if (ata_msg_drv(ap) && print_info) 2396 ata_dev_info(dev, "applying bridge limits\n"); 2397 dev->udma_mask &= ATA_UDMA5; 2398 dev->max_sectors = ATA_MAX_SECTORS; 2399 } 2400 2401 if ((dev->class == ATA_DEV_ATAPI) && 2402 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2403 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2404 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2405 } 2406 2407 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2408 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2409 dev->max_sectors); 2410 2411 if (ap->ops->dev_config) 2412 ap->ops->dev_config(dev); 2413 2414 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2415 /* Let the user know. We don't want to disallow opens for 2416 rescue purposes, or in case the vendor is just a blithering 2417 idiot. 
Do this after the dev_config call as some controllers 2418 with buggy firmware may want to avoid reporting false device 2419 bugs */ 2420 2421 if (print_info) { 2422 ata_dev_warn(dev, 2423 "Drive reports diagnostics failure. This may indicate a drive\n"); 2424 ata_dev_warn(dev, 2425 "fault or invalid emulation. Contact drive vendor for information.\n"); 2426 } 2427 } 2428 2429 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2430 ata_dev_warn(dev, "WARNING: device requires firmware update to be fully functional\n"); 2431 ata_dev_warn(dev, " contact the vendor or visit http://ata.wiki.kernel.org\n"); 2432 } 2433 2434 return 0; 2435 2436 err_out_nosup: 2437 if (ata_msg_probe(ap)) 2438 ata_dev_dbg(dev, "%s: EXIT, err\n", __func__); 2439 return rc; 2440 } 2441 2442 /** 2443 * ata_cable_40wire - return 40 wire cable type 2444 * @ap: port 2445 * 2446 * Helper method for drivers which want to hardwire 40 wire cable 2447 * detection. 2448 */ 2449 2450 int ata_cable_40wire(struct ata_port *ap) 2451 { 2452 return ATA_CBL_PATA40; 2453 } 2454 2455 /** 2456 * ata_cable_80wire - return 80 wire cable type 2457 * @ap: port 2458 * 2459 * Helper method for drivers which want to hardwire 80 wire cable 2460 * detection. 2461 */ 2462 2463 int ata_cable_80wire(struct ata_port *ap) 2464 { 2465 return ATA_CBL_PATA80; 2466 } 2467 2468 /** 2469 * ata_cable_unknown - return unknown PATA cable. 2470 * @ap: port 2471 * 2472 * Helper method for drivers which have no PATA cable detection. 2473 */ 2474 2475 int ata_cable_unknown(struct ata_port *ap) 2476 { 2477 return ATA_CBL_PATA_UNK; 2478 } 2479 2480 /** 2481 * ata_cable_ignore - return ignored PATA cable. 2482 * @ap: port 2483 * 2484 * Helper method for drivers which don't use cable type to limit 2485 * transfer mode. 2486 */ 2487 int ata_cable_ignore(struct ata_port *ap) 2488 { 2489 return ATA_CBL_PATA_IGN; 2490 } 2491 2492 /** 2493 * ata_cable_sata - return SATA cable type 2494 * @ap: port 2495 * 2496 * Helper method for drivers which have SATA cables 2497 */ 2498 2499 int ata_cable_sata(struct ata_port *ap) 2500 { 2501 return ATA_CBL_SATA; 2502 } 2503 2504 /** 2505 * ata_bus_probe - Reset and probe ATA bus 2506 * @ap: Bus to probe 2507 * 2508 * Master ATA bus probing function. Initiates a hardware-dependent 2509 * bus reset, then attempts to identify any devices found on 2510 * the bus. 2511 * 2512 * LOCKING: 2513 * PCI/etc. bus probe sem. 2514 * 2515 * RETURNS: 2516 * Zero on success, negative errno otherwise. 2517 */ 2518 2519 int ata_bus_probe(struct ata_port *ap) 2520 { 2521 unsigned int classes[ATA_MAX_DEVICES]; 2522 int tries[ATA_MAX_DEVICES]; 2523 int rc; 2524 struct ata_device *dev; 2525 2526 ata_for_each_dev(dev, &ap->link, ALL) 2527 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2528 2529 retry: 2530 ata_for_each_dev(dev, &ap->link, ALL) { 2531 /* If we issue an SRST then an ATA drive (not ATAPI) 2532 * may change configuration and be in PIO0 timing. If 2533 * we do a hard reset (or are coming from power on) 2534 * this is true for ATA or ATAPI. Until we've set a 2535 * suitable controller mode we should not touch the 2536 * bus as we may be talking too fast. 2537 */ 2538 dev->pio_mode = XFER_PIO_0; 2539 2540 /* If the controller has a pio mode setup function 2541 * then use it to set the chipset to rights. Don't 2542 * touch the DMA setup as that will be dealt with when 2543 * configuring devices. 
2544 */ 2545 if (ap->ops->set_piomode) 2546 ap->ops->set_piomode(ap, dev); 2547 } 2548 2549 /* reset and determine device classes */ 2550 ap->ops->phy_reset(ap); 2551 2552 ata_for_each_dev(dev, &ap->link, ALL) { 2553 if (dev->class != ATA_DEV_UNKNOWN) 2554 classes[dev->devno] = dev->class; 2555 else 2556 classes[dev->devno] = ATA_DEV_NONE; 2557 2558 dev->class = ATA_DEV_UNKNOWN; 2559 } 2560 2561 /* read IDENTIFY page and configure devices. We have to do the identify 2562 specific sequence bass-ackwards so that PDIAG- is released by 2563 the slave device */ 2564 2565 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2566 if (tries[dev->devno]) 2567 dev->class = classes[dev->devno]; 2568 2569 if (!ata_dev_enabled(dev)) 2570 continue; 2571 2572 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2573 dev->id); 2574 if (rc) 2575 goto fail; 2576 } 2577 2578 /* Now ask for the cable type as PDIAG- should have been released */ 2579 if (ap->ops->cable_detect) 2580 ap->cbl = ap->ops->cable_detect(ap); 2581 2582 /* We may have SATA bridge glue hiding here irrespective of 2583 * the reported cable types and sensed types. When SATA 2584 * drives indicate we have a bridge, we don't know which end 2585 * of the link the bridge is which is a problem. 2586 */ 2587 ata_for_each_dev(dev, &ap->link, ENABLED) 2588 if (ata_id_is_sata(dev->id)) 2589 ap->cbl = ATA_CBL_SATA; 2590 2591 /* After the identify sequence we can now set up the devices. We do 2592 this in the normal order so that the user doesn't get confused */ 2593 2594 ata_for_each_dev(dev, &ap->link, ENABLED) { 2595 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2596 rc = ata_dev_configure(dev); 2597 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2598 if (rc) 2599 goto fail; 2600 } 2601 2602 /* configure transfer mode */ 2603 rc = ata_set_mode(&ap->link, &dev); 2604 if (rc) 2605 goto fail; 2606 2607 ata_for_each_dev(dev, &ap->link, ENABLED) 2608 return 0; 2609 2610 return -ENODEV; 2611 2612 fail: 2613 tries[dev->devno]--; 2614 2615 switch (rc) { 2616 case -EINVAL: 2617 /* eeek, something went very wrong, give up */ 2618 tries[dev->devno] = 0; 2619 break; 2620 2621 case -ENODEV: 2622 /* give it just one more chance */ 2623 tries[dev->devno] = min(tries[dev->devno], 1); 2624 case -EIO: 2625 if (tries[dev->devno] == 1) { 2626 /* This is the last chance, better to slow 2627 * down than lose it. 2628 */ 2629 sata_down_spd_limit(&ap->link, 0); 2630 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2631 } 2632 } 2633 2634 if (!tries[dev->devno]) 2635 ata_dev_disable(dev); 2636 2637 goto retry; 2638 } 2639 2640 /** 2641 * sata_print_link_status - Print SATA link status 2642 * @link: SATA link to printk link status about 2643 * 2644 * This function prints link speed and status of a SATA link. 2645 * 2646 * LOCKING: 2647 * None. 
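 *
 *	The SStatus decoding used below follows the SATA SCR0 layout:
 *	DET is sstatus & 0xf (0x3 means device present and PHY online)
 *	and SPD is (sstatus >> 4) & 0xf (1/2/3 for 1.5/3.0/6.0 Gbps).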
 */
static void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_phys_link_online(link)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_link_info(link, "SATA link up %s (SStatus %X SControl %X)\n",
			      sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_info(link, "SATA link down (SStatus %X SControl %X)\n",
			      sstatus, scontrol);
	}
}

/**
 *	ata_dev_pair - return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or NULL if none is
 *	present.
 */

struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_link *link = adev->link;
	struct ata_device *pair = &link->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}

/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *	@spd_limit: Additional limit
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	If @spd_limit is non-zero, the speed is limited to a speed
 *	equal to or lower than @spd_limit if such a speed is
 *	supported.  If @spd_limit is slower than any supported speed,
 *	only the lowest supported speed is allowed.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link, u32 spd_limit)
{
	u32 sstatus, spd, mask;
	int rc, bit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0 && ata_sstatus_online(sstatus))
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	bit = fls(mask) - 1;
	mask &= ~(1 << bit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	if (spd_limit) {
		if (mask & ((1 << spd_limit) - 1))
			mask &= (1 << spd_limit) - 1;
		else {
			bit = ffs(mask) - 1;
			mask = 1 << bit;
		}
	}

	link->sata_spd_limit = mask;

	ata_link_warn(link, "limiting SATA link speed to %s\n",
		      sata_spd_string(fls(mask)));

	return 0;
}

static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
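	 *
	 * (Bit n-1 of the limit mask corresponds to SATA generation n:
	 * e.g. a limit of 0x3 allows 1.5 and 3.0 Gbps, and fls(0x3) == 2
	 * makes 3.0 Gbps the target below.)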
2768 */ 2769 if (!ata_is_host_link(link) && host_link->sata_spd) 2770 limit &= (1 << host_link->sata_spd) - 1; 2771 2772 if (limit == UINT_MAX) 2773 target = 0; 2774 else 2775 target = fls(limit); 2776 2777 spd = (*scontrol >> 4) & 0xf; 2778 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 2779 2780 return spd != target; 2781 } 2782 2783 /** 2784 * sata_set_spd_needed - is SATA spd configuration needed 2785 * @link: Link in question 2786 * 2787 * Test whether the spd limit in SControl matches 2788 * @link->sata_spd_limit. This function is used to determine 2789 * whether hardreset is necessary to apply SATA spd 2790 * configuration. 2791 * 2792 * LOCKING: 2793 * Inherited from caller. 2794 * 2795 * RETURNS: 2796 * 1 if SATA spd configuration is needed, 0 otherwise. 2797 */ 2798 static int sata_set_spd_needed(struct ata_link *link) 2799 { 2800 u32 scontrol; 2801 2802 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2803 return 1; 2804 2805 return __sata_set_spd_needed(link, &scontrol); 2806 } 2807 2808 /** 2809 * sata_set_spd - set SATA spd according to spd limit 2810 * @link: Link to set SATA spd for 2811 * 2812 * Set SATA spd of @link according to sata_spd_limit. 2813 * 2814 * LOCKING: 2815 * Inherited from caller. 2816 * 2817 * RETURNS: 2818 * 0 if spd doesn't need to be changed, 1 if spd has been 2819 * changed. Negative errno if SCR registers are inaccessible. 2820 */ 2821 int sata_set_spd(struct ata_link *link) 2822 { 2823 u32 scontrol; 2824 int rc; 2825 2826 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 2827 return rc; 2828 2829 if (!__sata_set_spd_needed(link, &scontrol)) 2830 return 0; 2831 2832 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 2833 return rc; 2834 2835 return 1; 2836 } 2837 2838 /* 2839 * This mode timing computation functionality is ported over from 2840 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 2841 */ 2842 /* 2843 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 2844 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 2845 * for UDMA6, which is currently supported only by Maxtor drives. 2846 * 2847 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
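 *
 * Columns are { mode, setup, act8b, rec8b, cyc8b, active, recover,
 * dmack_hold, cycle, udma }, matching the field order used by
 * ata_timing_quantize() below.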
2848 */ 2849 2850 static const struct ata_timing ata_timing[] = { 2851 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 2852 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 2853 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 2854 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 2855 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 2856 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 2857 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 2858 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 2859 2860 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 2861 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 2862 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 2863 2864 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 2865 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 2866 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 2867 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 2868 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 2869 2870 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 2871 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 2872 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 2873 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 2874 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 2875 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 2876 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 2877 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 2878 2879 { 0xFF } 2880 }; 2881 2882 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 2883 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 2884 2885 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 2886 { 2887 q->setup = EZ(t->setup * 1000, T); 2888 q->act8b = EZ(t->act8b * 1000, T); 2889 q->rec8b = EZ(t->rec8b * 1000, T); 2890 q->cyc8b = EZ(t->cyc8b * 1000, T); 2891 q->active = EZ(t->active * 1000, T); 2892 q->recover = EZ(t->recover * 1000, T); 2893 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 2894 q->cycle = EZ(t->cycle * 1000, T); 2895 q->udma = EZ(t->udma * 1000, UT); 2896 } 2897 2898 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 2899 struct ata_timing *m, unsigned int what) 2900 { 2901 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 2902 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 2903 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 2904 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 2905 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 2906 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 2907 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 2908 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 2909 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 2910 } 2911 2912 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 2913 { 2914 const struct ata_timing *t = ata_timing; 2915 2916 while (xfer_mode > t->mode) 2917 t++; 2918 2919 if (xfer_mode == t->mode) 2920 return t; 2921 return NULL; 2922 } 2923 2924 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 2925 struct ata_timing *t, int T, int UT) 2926 { 2927 const u16 *id = adev->id; 2928 const struct ata_timing *s; 2929 struct ata_timing p; 2930 2931 /* 2932 * Find the mode. 
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));

		if (speed >= XFER_PIO_0 && speed < XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO];
			else if ((speed <= XFER_PIO_4) ||
				 (speed == XFER_PIO_5 && !ata_id_is_cfa(id)))
				p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
			p.cycle = id[ATA_ID_EIDE_DMA_MIN];

		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery;
	   if so, we must correct this. */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}

/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
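 *
 *	For instance, with the table above a @cycle of 30 and
 *	@xfer_shift == ATA_SHIFT_UDMA yields XFER_UDMA_4.  A sketch of
 *	a hypothetical caller turning a cycle time into a packed xfer
 *	mask:
 *
 *		u8 mode = ata_timing_cycle2mode(ATA_SHIFT_UDMA, cycle);
 *
 *		if (mode != 0xff)
 *			xfer_mask |= ata_xfer_mode2mask(mode);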
3015 */ 3016 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3017 { 3018 u8 base_mode = 0xff, last_mode = 0xff; 3019 const struct ata_xfer_ent *ent; 3020 const struct ata_timing *t; 3021 3022 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3023 if (ent->shift == xfer_shift) 3024 base_mode = ent->base; 3025 3026 for (t = ata_timing_find_mode(base_mode); 3027 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3028 unsigned short this_cycle; 3029 3030 switch (xfer_shift) { 3031 case ATA_SHIFT_PIO: 3032 case ATA_SHIFT_MWDMA: 3033 this_cycle = t->cycle; 3034 break; 3035 case ATA_SHIFT_UDMA: 3036 this_cycle = t->udma; 3037 break; 3038 default: 3039 return 0xff; 3040 } 3041 3042 if (cycle > this_cycle) 3043 break; 3044 3045 last_mode = t->mode; 3046 } 3047 3048 return last_mode; 3049 } 3050 3051 /** 3052 * ata_down_xfermask_limit - adjust dev xfer masks downward 3053 * @dev: Device to adjust xfer masks 3054 * @sel: ATA_DNXFER_* selector 3055 * 3056 * Adjust xfer masks of @dev downward. Note that this function 3057 * does not apply the change. Invoking ata_set_mode() afterwards 3058 * will apply the limit. 3059 * 3060 * LOCKING: 3061 * Inherited from caller. 3062 * 3063 * RETURNS: 3064 * 0 on success, negative errno on failure 3065 */ 3066 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3067 { 3068 char buf[32]; 3069 unsigned long orig_mask, xfer_mask; 3070 unsigned long pio_mask, mwdma_mask, udma_mask; 3071 int quiet, highbit; 3072 3073 quiet = !!(sel & ATA_DNXFER_QUIET); 3074 sel &= ~ATA_DNXFER_QUIET; 3075 3076 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3077 dev->mwdma_mask, 3078 dev->udma_mask); 3079 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3080 3081 switch (sel) { 3082 case ATA_DNXFER_PIO: 3083 highbit = fls(pio_mask) - 1; 3084 pio_mask &= ~(1 << highbit); 3085 break; 3086 3087 case ATA_DNXFER_DMA: 3088 if (udma_mask) { 3089 highbit = fls(udma_mask) - 1; 3090 udma_mask &= ~(1 << highbit); 3091 if (!udma_mask) 3092 return -ENOENT; 3093 } else if (mwdma_mask) { 3094 highbit = fls(mwdma_mask) - 1; 3095 mwdma_mask &= ~(1 << highbit); 3096 if (!mwdma_mask) 3097 return -ENOENT; 3098 } 3099 break; 3100 3101 case ATA_DNXFER_40C: 3102 udma_mask &= ATA_UDMA_MASK_40C; 3103 break; 3104 3105 case ATA_DNXFER_FORCE_PIO0: 3106 pio_mask &= 1; 3107 case ATA_DNXFER_FORCE_PIO: 3108 mwdma_mask = 0; 3109 udma_mask = 0; 3110 break; 3111 3112 default: 3113 BUG(); 3114 } 3115 3116 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3117 3118 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3119 return -ENOENT; 3120 3121 if (!quiet) { 3122 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3123 snprintf(buf, sizeof(buf), "%s:%s", 3124 ata_mode_string(xfer_mask), 3125 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3126 else 3127 snprintf(buf, sizeof(buf), "%s", 3128 ata_mode_string(xfer_mask)); 3129 3130 ata_dev_warn(dev, "limiting speed to %s\n", buf); 3131 } 3132 3133 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3134 &dev->udma_mask); 3135 3136 return 0; 3137 } 3138 3139 static int ata_dev_set_mode(struct ata_device *dev) 3140 { 3141 struct ata_port *ap = dev->link->ap; 3142 struct ata_eh_context *ehc = &dev->link->eh_context; 3143 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3144 const char *dev_err_whine = ""; 3145 int ign_dev_err = 0; 3146 unsigned int err_mask = 0; 3147 int rc; 3148 3149 dev->flags &= ~ATA_DFLAG_PIO; 3150 if (dev->xfer_shift == ATA_SHIFT_PIO) 3151 dev->flags |= 
ATA_DFLAG_PIO; 3152 3153 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3154 dev_err_whine = " (SET_XFERMODE skipped)"; 3155 else { 3156 if (nosetxfer) 3157 ata_dev_warn(dev, 3158 "NOSETXFER but PATA detected - can't " 3159 "skip SETXFER, might malfunction\n"); 3160 err_mask = ata_dev_set_xfermode(dev); 3161 } 3162 3163 if (err_mask & ~AC_ERR_DEV) 3164 goto fail; 3165 3166 /* revalidate */ 3167 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3168 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3169 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3170 if (rc) 3171 return rc; 3172 3173 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3174 /* Old CFA may refuse this command, which is just fine */ 3175 if (ata_id_is_cfa(dev->id)) 3176 ign_dev_err = 1; 3177 /* Catch several broken garbage emulations plus some pre 3178 ATA devices */ 3179 if (ata_id_major_version(dev->id) == 0 && 3180 dev->pio_mode <= XFER_PIO_2) 3181 ign_dev_err = 1; 3182 /* Some very old devices and some bad newer ones fail 3183 any kind of SET_XFERMODE request but support PIO0-2 3184 timings and no IORDY */ 3185 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3186 ign_dev_err = 1; 3187 } 3188 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3189 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3190 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3191 dev->dma_mode == XFER_MW_DMA_0 && 3192 (dev->id[63] >> 8) & 1) 3193 ign_dev_err = 1; 3194 3195 /* if the device is actually configured correctly, ignore dev err */ 3196 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3197 ign_dev_err = 1; 3198 3199 if (err_mask & AC_ERR_DEV) { 3200 if (!ign_dev_err) 3201 goto fail; 3202 else 3203 dev_err_whine = " (device error ignored)"; 3204 } 3205 3206 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3207 dev->xfer_shift, (int)dev->xfer_mode); 3208 3209 ata_dev_info(dev, "configured for %s%s\n", 3210 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3211 dev_err_whine); 3212 3213 return 0; 3214 3215 fail: 3216 ata_dev_err(dev, "failed to set xfermode (err_mask=0x%x)\n", err_mask); 3217 return -EIO; 3218 } 3219 3220 /** 3221 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3222 * @link: link on which timings will be programmed 3223 * @r_failed_dev: out parameter for failed device 3224 * 3225 * Standard implementation of the function used to tune and set 3226 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3227 * ata_dev_set_mode() fails, pointer to the failing device is 3228 * returned in @r_failed_dev. 3229 * 3230 * LOCKING: 3231 * PCI/etc. bus probe sem. 
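 *
 *	A hypothetical LLD needing extra chipset setup around the
 *	standard sequence can wrap this helper as its ->set_mode
 *	method; foo_program_clocks() below is a made-up helper:
 *
 *		static int foo_set_mode(struct ata_link *link,
 *					struct ata_device **r_failed)
 *		{
 *			foo_program_clocks(link->ap);
 *			return ata_do_set_mode(link, r_failed);
 *		}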
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
{
	struct ata_port *ap = link->ap;
	struct ata_device *dev;
	int rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	ata_for_each_dev(dev, link, ENABLED) {
		unsigned long pio_mask, dma_mask;
		unsigned int mode_mask;

		mode_mask = ATA_DMA_MASK_ATA;
		if (dev->class == ATA_DEV_ATAPI)
			mode_mask = ATA_DMA_MASK_ATAPI;
		else if (ata_id_is_cfa(dev->id))
			mode_mask = ATA_DMA_MASK_CFA;

		ata_dev_xfermask(dev);
		ata_force_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);

		if (libata_dma_mask & mode_mask)
			dma_mask = ata_pack_xfermask(0, dev->mwdma_mask,
						     dev->udma_mask);
		else
			dma_mask = 0;

		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (ata_dma_enabled(dev))
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (dev->pio_mode == 0xff) {
			ata_dev_warn(dev, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	ata_for_each_dev(dev, link, ENABLED) {
		if (!ata_dma_enabled(dev))
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	ata_for_each_dev(dev, link, ENABLED) {
		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}

/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return a
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline;
	int warned = 0;

	/* choose which 0xff timeout to use, read comment in libata.h */
	if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN)
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG);
	else
		nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);

	/* Slave readiness can't be tested separately from master.
	 * On M/S emulation configuration, this function should be
	 * called only on the master and it will handle both master
	 * and slave.
	 */
	WARN_ON(link == link->ap->slave_link);

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/*
		 * -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  Wait for
		 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't
		 * offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_warn(link,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		ata_msleep(link->ap, 50);
	}
}

/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	ata_msleep(link->ap, ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}

/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  @timeout constrains the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
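 *
 *	A minimal caller sketch (EH context assumed; the timing table
 *	is the one hung off the link's EH context):
 *
 *		const unsigned long *timing =
 *				sata_ehc_deb_timing(&link->eh_context);
 *
 *		rc = sata_link_debounce(link, timing, deadline);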
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}

/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controllers (ata_piix SIDPR).  Make sure DET actually is
	 * cleared.
	 */
	do {
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;
		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		ata_msleep(link->ap, 200);

		/* is SControl restored correctly? */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			      scontrol);
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	return rc != -EINVAL ? rc : 0;
}

/**
 *	sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 *	@link: ATA link to manipulate SControl for
 *	@policy: LPM policy to configure
 *	@spm_wakeup: initiate LPM transition to active state
 *
 *	Manipulate the IPM field of the SControl register of @link
 *	according to @policy.
 *	If @policy is ATA_LPM_MAX_POWER and @spm_wakeup is %true, the
 *	SPM field is manipulated to wake up the link.  This function
 *	also clears PHYRDY_CHG before returning.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x3 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		break;
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions */
			scontrol &= ~(0x3 << 8);
		else {
			/* empty port, power off */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}

/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_warn(link,
				"failed to resume link for reset (errno=%d)\n",
				rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_phys_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}

/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to not specify @check_ready and wait themselves after
 *	this function returns.  Device classification is the LLD's
 *	responsibility.
 *
 *	*@online is set to one iff reset succeeded and @link is online
 *	after reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}

/**
 *	sata_std_hardreset - COMRESET w/o waiting or classification
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	Standard SATA COMRESET w/o waiting or classification.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 if link offline, -EAGAIN if link online, -errno on errors.
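 *
 *	A hypothetical LLD normally plugs this in as its hardreset
 *	method instead of calling it directly:
 *
 *		static struct ata_port_operations foo_port_ops = {
 *			.inherits	= &ata_base_port_ops,
 *			.hardreset	= sata_std_hardreset,
 *		};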
3805 */ 3806 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 3807 unsigned long deadline) 3808 { 3809 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 3810 bool online; 3811 int rc; 3812 3813 /* do hardreset */ 3814 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 3815 return online ? -EAGAIN : rc; 3816 } 3817 3818 /** 3819 * ata_std_postreset - standard postreset callback 3820 * @link: the target ata_link 3821 * @classes: classes of attached devices 3822 * 3823 * This function is invoked after a successful reset. Note that 3824 * the device might have been reset more than once using 3825 * different reset methods before postreset is invoked. 3826 * 3827 * LOCKING: 3828 * Kernel thread context (may sleep) 3829 */ 3830 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 3831 { 3832 u32 serror; 3833 3834 DPRINTK("ENTER\n"); 3835 3836 /* reset complete, clear SError */ 3837 if (!sata_scr_read(link, SCR_ERROR, &serror)) 3838 sata_scr_write(link, SCR_ERROR, serror); 3839 3840 /* print link status */ 3841 sata_print_link_status(link); 3842 3843 DPRINTK("EXIT\n"); 3844 } 3845 3846 /** 3847 * ata_dev_same_device - Determine whether new ID matches configured device 3848 * @dev: device to compare against 3849 * @new_class: class of the new device 3850 * @new_id: IDENTIFY page of the new device 3851 * 3852 * Compare @new_class and @new_id against @dev and determine 3853 * whether @dev is the device indicated by @new_class and 3854 * @new_id. 3855 * 3856 * LOCKING: 3857 * None. 3858 * 3859 * RETURNS: 3860 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 3861 */ 3862 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 3863 const u16 *new_id) 3864 { 3865 const u16 *old_id = dev->id; 3866 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 3867 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 3868 3869 if (dev->class != new_class) { 3870 ata_dev_info(dev, "class mismatch %d != %d\n", 3871 dev->class, new_class); 3872 return 0; 3873 } 3874 3875 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 3876 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 3877 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 3878 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 3879 3880 if (strcmp(model[0], model[1])) { 3881 ata_dev_info(dev, "model number mismatch '%s' != '%s'\n", 3882 model[0], model[1]); 3883 return 0; 3884 } 3885 3886 if (strcmp(serial[0], serial[1])) { 3887 ata_dev_info(dev, "serial number mismatch '%s' != '%s'\n", 3888 serial[0], serial[1]); 3889 return 0; 3890 } 3891 3892 return 1; 3893 } 3894 3895 /** 3896 * ata_dev_reread_id - Re-read IDENTIFY data 3897 * @dev: target ATA device 3898 * @readid_flags: read ID flags 3899 * 3900 * Re-read IDENTIFY page and make sure @dev is still attached to 3901 * the port. 3902 * 3903 * LOCKING: 3904 * Kernel thread context (may sleep) 3905 * 3906 * RETURNS: 3907 * 0 on success, negative errno otherwise 3908 */ 3909 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 3910 { 3911 unsigned int class = dev->class; 3912 u16 *id = (void *)dev->link->ap->sector_buf; 3913 int rc; 3914 3915 /* read ID data */ 3916 rc = ata_dev_read_id(dev, &class, readid_flags, id); 3917 if (rc) 3918 return rc; 3919 3920 /* is the device still there? 
*/ 3921 if (!ata_dev_same_device(dev, class, id)) 3922 return -ENODEV; 3923 3924 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 3925 return 0; 3926 } 3927 3928 /** 3929 * ata_dev_revalidate - Revalidate ATA device 3930 * @dev: device to revalidate 3931 * @new_class: new class code 3932 * @readid_flags: read ID flags 3933 * 3934 * Re-read IDENTIFY page, make sure @dev is still attached to the 3935 * port and reconfigure it according to the new IDENTIFY page. 3936 * 3937 * LOCKING: 3938 * Kernel thread context (may sleep) 3939 * 3940 * RETURNS: 3941 * 0 on success, negative errno otherwise 3942 */ 3943 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 3944 unsigned int readid_flags) 3945 { 3946 u64 n_sectors = dev->n_sectors; 3947 u64 n_native_sectors = dev->n_native_sectors; 3948 int rc; 3949 3950 if (!ata_dev_enabled(dev)) 3951 return -ENODEV; 3952 3953 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 3954 if (ata_class_enabled(new_class) && 3955 new_class != ATA_DEV_ATA && 3956 new_class != ATA_DEV_ATAPI && 3957 new_class != ATA_DEV_SEMB) { 3958 ata_dev_info(dev, "class mismatch %u != %u\n", 3959 dev->class, new_class); 3960 rc = -ENODEV; 3961 goto fail; 3962 } 3963 3964 /* re-read ID */ 3965 rc = ata_dev_reread_id(dev, readid_flags); 3966 if (rc) 3967 goto fail; 3968 3969 /* configure device according to the new ID */ 3970 rc = ata_dev_configure(dev); 3971 if (rc) 3972 goto fail; 3973 3974 /* verify n_sectors hasn't changed */ 3975 if (dev->class != ATA_DEV_ATA || !n_sectors || 3976 dev->n_sectors == n_sectors) 3977 return 0; 3978 3979 /* n_sectors has changed */ 3980 ata_dev_warn(dev, "n_sectors mismatch %llu != %llu\n", 3981 (unsigned long long)n_sectors, 3982 (unsigned long long)dev->n_sectors); 3983 3984 /* 3985 * Something could have caused HPA to be unlocked 3986 * involuntarily. If n_native_sectors hasn't changed and the 3987 * new size matches it, keep the device. 3988 */ 3989 if (dev->n_native_sectors == n_native_sectors && 3990 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 3991 ata_dev_warn(dev, 3992 "new n_sectors matches native, probably " 3993 "late HPA unlock, n_sectors updated\n"); 3994 /* use the larger n_sectors */ 3995 return 0; 3996 } 3997 3998 /* 3999 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try 4000 * unlocking HPA in those cases. 
4001 * 4002 * https://bugzilla.kernel.org/show_bug.cgi?id=15396 4003 */ 4004 if (dev->n_native_sectors == n_native_sectors && 4005 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && 4006 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { 4007 ata_dev_warn(dev, 4008 "old n_sectors matches native, probably " 4009 "late HPA lock, will try to unlock HPA\n"); 4010 /* try unlocking HPA */ 4011 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 4012 rc = -EIO; 4013 } else 4014 rc = -ENODEV; 4015 4016 /* restore original n_[native_]sectors and fail */ 4017 dev->n_native_sectors = n_native_sectors; 4018 dev->n_sectors = n_sectors; 4019 fail: 4020 ata_dev_err(dev, "revalidation failed (errno=%d)\n", rc); 4021 return rc; 4022 } 4023 4024 struct ata_blacklist_entry { 4025 const char *model_num; 4026 const char *model_rev; 4027 unsigned long horkage; 4028 }; 4029 4030 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4031 /* Devices with DMA related problems under Linux */ 4032 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4033 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4034 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4035 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4036 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4037 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4038 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4039 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4040 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4041 { "CRD-848[02]B", NULL, ATA_HORKAGE_NODMA }, 4042 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4043 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4044 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4045 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4046 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4047 { "HITACHI CDR-8[34]35",NULL, ATA_HORKAGE_NODMA }, 4048 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4049 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4050 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4051 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4052 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4053 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4054 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4055 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4056 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4057 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4058 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4059 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4060 { "2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA }, 4061 /* Odd clown on sil3726/4726 PMPs */ 4062 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4063 4064 /* Weird ATAPI devices */ 4065 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4066 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4067 4068 /* Devices we expect to fail diagnostics */ 4069 4070 /* Devices where NCQ should be avoided */ 4071 /* NCQ is slow */ 4072 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4073 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4074 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4075 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4076 /* NCQ is broken */ 4077 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4078 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4079 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4080 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4081 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4082 4083 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4084 { "ST31500341AS", "SD1[5-9]", 
ATA_HORKAGE_NONCQ | 4085 ATA_HORKAGE_FIRMWARE_WARN }, 4086 4087 { "ST31000333AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4088 ATA_HORKAGE_FIRMWARE_WARN }, 4089 4090 { "ST3640[36]23AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4091 ATA_HORKAGE_FIRMWARE_WARN }, 4092 4093 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4094 ATA_HORKAGE_FIRMWARE_WARN }, 4095 4096 /* Blacklist entries taken from Silicon Image 3124/3132 4097 Windows driver .inf file - also several Linux problem reports */ 4098 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4099 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4100 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4101 4102 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4103 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4104 4105 /* devices which puke on READ_NATIVE_MAX */ 4106 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4107 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4108 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4109 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4110 4111 /* this one allows HPA unlocking but fails IOs on the area */ 4112 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, 4113 4114 /* Devices which report 1 sector over size HPA */ 4115 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4116 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4117 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4118 4119 /* Devices which get the IVB wrong */ 4120 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4121 /* Maybe we should just blacklist TSSTcorp... */ 4122 { "TSSTcorp CDDVDW SH-S202[HJN]", "SB0[01]", ATA_HORKAGE_IVB, }, 4123 4124 /* Devices that do not need bridging limits applied */ 4125 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4126 4127 /* Devices which aren't very happy with higher link speeds */ 4128 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4129 4130 /* 4131 * Devices which choke on SETXFER. Applies only if both the 4132 * device and controller are SATA. 4133 */ 4134 { "PIONEER DVD-RW DVRTD08", NULL, ATA_HORKAGE_NOSETXFER }, 4135 { "PIONEER DVD-RW DVRTD08A", NULL, ATA_HORKAGE_NOSETXFER }, 4136 { "PIONEER DVD-RW DVR-215", NULL, ATA_HORKAGE_NOSETXFER }, 4137 { "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER }, 4138 { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, 4139 4140 /* End Marker */ 4141 { } 4142 }; 4143 4144 /** 4145 * glob_match - match a text string against a glob-style pattern 4146 * @text: the string to be examined 4147 * @pattern: the glob-style pattern to be matched against 4148 * 4149 * Either/both of text and pattern can be empty strings. 4150 * 4151 * Match text against a glob-style pattern, with wildcards and simple sets: 4152 * 4153 * ? matches any single character. 4154 * * matches any run of characters. 4155 * [xyz] matches a single character from the set: x, y, or z. 4156 * [a-d] matches a single character from the range: a, b, c, or d. 4157 * [a-d0-9] matches a single character from either range. 4158 * 4159 * The special characters ?, [, -, or *, can be matched using a set, eg. [*] 4160 * Behaviour with malformed patterns is undefined, though generally reasonable. 4161 * 4162 * Sample patterns: "SD1?", "SD1[0-5]", "*R0", "SD*1?[012]*xx" 4163 * 4164 * This function uses one level of recursion per '*' in pattern. 4165 * Since it calls _nothing_ else, and has _no_ explicit local variables, 4166 * this will not cause stack problems for any reasonable use here. 
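*
* Example (illustrative, using patterns from the blacklist above):
* glob_match("ST3320613AS", "ST3320[68]13AS") returns 0 (match),
* while glob_match("ST3320613AS", "SD1[5-9]") returns 1 (no match).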
4167 * 4168 * RETURNS: 4169 * 0 on match, 1 otherwise. 4170 */ 4171 static int glob_match (const char *text, const char *pattern) 4172 { 4173 do { 4174 /* Match single character or a '?' wildcard */ 4175 if (*text == *pattern || *pattern == '?') { 4176 if (!*pattern++) 4177 return 0; /* End of both strings: match */ 4178 } else { 4179 /* Match single char against a '[' bracketed ']' pattern set */ 4180 if (!*text || *pattern != '[') 4181 break; /* Not a pattern set */ 4182 while (*++pattern && *pattern != ']' && *text != *pattern) { 4183 if (*pattern == '-' && *(pattern - 1) != '[') 4184 if (*text > *(pattern - 1) && *text < *(pattern + 1)) { 4185 ++pattern; 4186 break; 4187 } 4188 } 4189 if (!*pattern || *pattern == ']') 4190 return 1; /* No match */ 4191 while (*pattern && *pattern++ != ']'); 4192 } 4193 } while (*++text && *pattern); 4194 4195 /* Match any run of chars against a '*' wildcard */ 4196 if (*pattern == '*') { 4197 if (!*++pattern) 4198 return 0; /* Match: avoid recursion at end of pattern */ 4199 /* Loop to handle additional pattern chars after the wildcard */ 4200 while (*text) { 4201 if (glob_match(text, pattern) == 0) 4202 return 0; /* Remainder matched */ 4203 ++text; /* Absorb (match) this char and try again */ 4204 } 4205 } 4206 if (!*text && !*pattern) 4207 return 0; /* End of both strings: match */ 4208 return 1; /* No match */ 4209 } 4210 4211 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4212 { 4213 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4214 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4215 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4216 4217 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4218 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4219 4220 while (ad->model_num) { 4221 if (!glob_match(model_num, ad->model_num)) { 4222 if (ad->model_rev == NULL) 4223 return ad->horkage; 4224 if (!glob_match(model_rev, ad->model_rev)) 4225 return ad->horkage; 4226 } 4227 ad++; 4228 } 4229 return 0; 4230 } 4231 4232 static int ata_dma_blacklisted(const struct ata_device *dev) 4233 { 4234 /* We don't support polling DMA. 4235 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4236 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4237 */ 4238 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4239 (dev->flags & ATA_DFLAG_CDB_INTR)) 4240 return 1; 4241 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4242 } 4243 4244 /** 4245 * ata_is_40wire - check drive side detection 4246 * @dev: device 4247 * 4248 * Perform drive side detection decoding, allowing for device vendors 4249 * who can't follow the documentation. 4250 */ 4251 4252 static int ata_is_40wire(struct ata_device *dev) 4253 { 4254 if (dev->horkage & ATA_HORKAGE_IVB) 4255 return ata_drive_40wire_relaxed(dev->id); 4256 return ata_drive_40wire(dev->id); 4257 } 4258 4259 /** 4260 * cable_is_40wire - 40/80/SATA decider 4261 * @ap: port to consider 4262 * 4263 * This function encapsulates the policy for speed management 4264 * in one place. At the moment we don't cache the result but 4265 * there is a good case for setting ap->cbl to the result when 4266 * we are called with unknown cables (and figuring out if it 4267 * impacts hotplug at all). 4268 * 4269 * Return 1 if the cable appears to be 40 wire. 4270 */ 4271 4272 static int cable_is_40wire(struct ata_port *ap) 4273 { 4274 struct ata_link *link; 4275 struct ata_device *dev; 4276 4277 /* If the controller thinks we are 40 wire, we are. 
*/ 4278 if (ap->cbl == ATA_CBL_PATA40) 4279 return 1; 4280 4281 /* If the controller thinks we are 80 wire, we are. */ 4282 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4283 return 0; 4284 4285 /* If the system is known to be 40 wire short cable (eg 4286 * laptop), then we allow 80 wire modes even if the drive 4287 * isn't sure. 4288 */ 4289 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4290 return 0; 4291 4292 /* If the controller doesn't know, we scan. 4293 * 4294 * Note: We look for all 40 wire detects at this point. Any 4295 * 80 wire detect is taken to be 80 wire cable because 4296 * - in many setups only the one drive (slave if present) will 4297 * give a valid detect 4298 * - if you have a non detect capable drive you don't want it 4299 * to colour the choice 4300 */ 4301 ata_for_each_link(link, ap, EDGE) { 4302 ata_for_each_dev(dev, link, ENABLED) { 4303 if (!ata_is_40wire(dev)) 4304 return 0; 4305 } 4306 } 4307 return 1; 4308 } 4309 4310 /** 4311 * ata_dev_xfermask - Compute supported xfermask of the given device 4312 * @dev: Device to compute xfermask for 4313 * 4314 * Compute supported xfermask of @dev and store it in 4315 * dev->*_mask. This function is responsible for applying all 4316 * known limits including host controller limits, device 4317 * blacklist, etc... 4318 * 4319 * LOCKING: 4320 * None. 4321 */ 4322 static void ata_dev_xfermask(struct ata_device *dev) 4323 { 4324 struct ata_link *link = dev->link; 4325 struct ata_port *ap = link->ap; 4326 struct ata_host *host = ap->host; 4327 unsigned long xfer_mask; 4328 4329 /* controller modes available */ 4330 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4331 ap->mwdma_mask, ap->udma_mask); 4332 4333 /* drive modes available */ 4334 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4335 dev->mwdma_mask, dev->udma_mask); 4336 xfer_mask &= ata_id_xfermask(dev->id); 4337 4338 /* 4339 * CFA Advanced TrueIDE timings are not allowed on a shared 4340 * cable 4341 */ 4342 if (ata_dev_pair(dev)) { 4343 /* No PIO5 or PIO6 */ 4344 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4345 /* No MWDMA3 or MWDMA 4 */ 4346 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4347 } 4348 4349 if (ata_dma_blacklisted(dev)) { 4350 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4351 ata_dev_warn(dev, 4352 "device is on DMA blacklist, disabling DMA\n"); 4353 } 4354 4355 if ((host->flags & ATA_HOST_SIMPLEX) && 4356 host->simplex_claimed && host->simplex_claimed != ap) { 4357 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4358 ata_dev_warn(dev, 4359 "simplex DMA is claimed by other device, disabling DMA\n"); 4360 } 4361 4362 if (ap->flags & ATA_FLAG_NO_IORDY) 4363 xfer_mask &= ata_pio_mask_no_iordy(dev); 4364 4365 if (ap->ops->mode_filter) 4366 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4367 4368 /* Apply cable rule here. Don't apply it early because when 4369 * we handle hot plug the cable type can itself change. 4370 * Check this last so that we know if the transfer rate was 4371 * solely limited by the cable. 4372 * Unknown or 80 wire cables reported host side are checked 4373 * drive side as well. Cases where we know a 40wire cable 4374 * is used safely for 80 are not checked here. 
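*
* As a sketch of the arithmetic below (not normative): the mask
* 0xF8 << ATA_SHIFT_UDMA covers UDMA/44 and above, so clearing
* those bits caps a device behind a 40-wire cable at UDMA/33.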
4375 */
4376 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4377 /* UDMA/44 or higher would be available */
4378 if (cable_is_40wire(ap)) {
4379 ata_dev_warn(dev,
4380 "limited to UDMA/33 due to 40-wire cable\n");
4381 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4382 }
4383
4384 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4385 &dev->mwdma_mask, &dev->udma_mask);
4386 }
4387
4388 /**
4389 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
4390 * @dev: Device to which command will be sent
4391 *
4392 * Issue SET FEATURES - XFER MODE command to device @dev
4393 * on port @ap.
4394 *
4395 * LOCKING:
4396 * PCI/etc. bus probe sem.
4397 *
4398 * RETURNS:
4399 * 0 on success, AC_ERR_* mask otherwise.
4400 */
4401
4402 static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4403 {
4404 struct ata_taskfile tf;
4405 unsigned int err_mask;
4406
4407 /* set up set-features taskfile */
4408 DPRINTK("set features - xfer mode\n");
4409
4410 /* Some controllers and ATAPI devices show flaky interrupt
4411 * behavior after setting xfer mode. Use polling instead.
4412 */
4413 ata_tf_init(dev, &tf);
4414 tf.command = ATA_CMD_SET_FEATURES;
4415 tf.feature = SETFEATURES_XFER;
4416 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
4417 tf.protocol = ATA_PROT_NODATA;
4418 /* If we are using IORDY we must send the mode setting command */
4419 if (ata_pio_need_iordy(dev))
4420 tf.nsect = dev->xfer_mode;
4421 /* If the device has IORDY and the controller does not - turn it off */
4422 else if (ata_id_has_iordy(dev->id))
4423 tf.nsect = 0x01;
4424 else /* In the ancient relic department - skip all of this */
4425 return 0;
4426
4427 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4428
4429 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4430 return err_mask;
4431 }
4432
4433 /**
4434 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
4435 * @dev: Device to which command will be sent
4436 * @enable: Whether to enable or disable the feature
4437 * @feature: feature to set, passed in the sector count field
4438 *
4439 * Issue SET FEATURES - SATA FEATURES command to device @dev
4440 * on port @ap with the sector count set to @feature.
4441 *
4442 * LOCKING:
4443 * PCI/etc. bus probe sem.
4444 *
4445 * RETURNS:
4446 * 0 on success, AC_ERR_* mask otherwise.
4447 */
4448 unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, u8 feature)
4449 {
4450 struct ata_taskfile tf;
4451 unsigned int err_mask;
4452
4453 /* set up set-features taskfile */
4454 DPRINTK("set features - SATA features\n");
4455
4456 ata_tf_init(dev, &tf);
4457 tf.command = ATA_CMD_SET_FEATURES;
4458 tf.feature = enable;
4459 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4460 tf.protocol = ATA_PROT_NODATA;
4461 tf.nsect = feature;
4462
4463 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
4464
4465 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4466 return err_mask;
4467 }
4468
4469 /**
4470 * ata_dev_init_params - Issue INIT DEV PARAMS command
4471 * @dev: Device to which command will be sent
4472 * @heads: Number of heads (taskfile parameter)
4473 * @sectors: Number of sectors (taskfile parameter)
4474 *
4475 * LOCKING:
4476 * Kernel thread context (may sleep)
4477 *
4478 * RETURNS:
4479 * 0 on success, AC_ERR_* mask otherwise.
4480 */
4481 static unsigned int ata_dev_init_params(struct ata_device *dev,
4482 u16 heads, u16 sectors)
4483 {
4484 struct ata_taskfile tf;
4485 unsigned int err_mask;
4486
4487 /* Number of sectors per track 1-255.
Number of heads 1-16 */ 4488 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4489 return AC_ERR_INVALID; 4490 4491 /* set up init dev params taskfile */ 4492 DPRINTK("init dev params \n"); 4493 4494 ata_tf_init(dev, &tf); 4495 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4496 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4497 tf.protocol = ATA_PROT_NODATA; 4498 tf.nsect = sectors; 4499 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 4500 4501 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4502 /* A clean abort indicates an original or just out of spec drive 4503 and we should continue as we issue the setup based on the 4504 drive reported working geometry */ 4505 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4506 err_mask = 0; 4507 4508 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4509 return err_mask; 4510 } 4511 4512 /** 4513 * ata_sg_clean - Unmap DMA memory associated with command 4514 * @qc: Command containing DMA memory to be released 4515 * 4516 * Unmap all mapped DMA memory associated with this command. 4517 * 4518 * LOCKING: 4519 * spin_lock_irqsave(host lock) 4520 */ 4521 void ata_sg_clean(struct ata_queued_cmd *qc) 4522 { 4523 struct ata_port *ap = qc->ap; 4524 struct scatterlist *sg = qc->sg; 4525 int dir = qc->dma_dir; 4526 4527 WARN_ON_ONCE(sg == NULL); 4528 4529 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4530 4531 if (qc->n_elem) 4532 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4533 4534 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4535 qc->sg = NULL; 4536 } 4537 4538 /** 4539 * atapi_check_dma - Check whether ATAPI DMA can be supported 4540 * @qc: Metadata associated with taskfile to check 4541 * 4542 * Allow low-level driver to filter ATA PACKET commands, returning 4543 * a status indicating whether or not it is OK to use DMA for the 4544 * supplied PACKET command. 4545 * 4546 * LOCKING: 4547 * spin_lock_irqsave(host lock) 4548 * 4549 * RETURNS: 0 when ATAPI DMA can be used 4550 * nonzero otherwise 4551 */ 4552 int atapi_check_dma(struct ata_queued_cmd *qc) 4553 { 4554 struct ata_port *ap = qc->ap; 4555 4556 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4557 * few ATAPI devices choke on such DMA requests. 4558 */ 4559 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4560 unlikely(qc->nbytes & 15)) 4561 return 1; 4562 4563 if (ap->ops->check_atapi_dma) 4564 return ap->ops->check_atapi_dma(qc); 4565 4566 return 0; 4567 } 4568 4569 /** 4570 * ata_std_qc_defer - Check whether a qc needs to be deferred 4571 * @qc: ATA command in question 4572 * 4573 * Non-NCQ commands cannot run with any other command, NCQ or 4574 * not. As upper layer only knows the queue depth, we are 4575 * responsible for maintaining exclusion. This function checks 4576 * whether a new command @qc can be issued. 4577 * 4578 * LOCKING: 4579 * spin_lock_irqsave(host lock) 4580 * 4581 * RETURNS: 4582 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4583 */ 4584 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4585 { 4586 struct ata_link *link = qc->dev->link; 4587 4588 if (qc->tf.protocol == ATA_PROT_NCQ) { 4589 if (!ata_tag_valid(link->active_tag)) 4590 return 0; 4591 } else { 4592 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4593 return 0; 4594 } 4595 4596 return ATA_DEFER_LINK; 4597 } 4598 4599 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4600 4601 /** 4602 * ata_sg_init - Associate command with scatter-gather table. 4603 * @qc: Command to be associated 4604 * @sg: Scatter-gather table. 
4605 * @n_elem: Number of elements in s/g table. 4606 * 4607 * Initialize the data-related elements of queued_cmd @qc 4608 * to point to a scatter-gather table @sg, containing @n_elem 4609 * elements. 4610 * 4611 * LOCKING: 4612 * spin_lock_irqsave(host lock) 4613 */ 4614 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4615 unsigned int n_elem) 4616 { 4617 qc->sg = sg; 4618 qc->n_elem = n_elem; 4619 qc->cursg = qc->sg; 4620 } 4621 4622 /** 4623 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4624 * @qc: Command with scatter-gather table to be mapped. 4625 * 4626 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4627 * 4628 * LOCKING: 4629 * spin_lock_irqsave(host lock) 4630 * 4631 * RETURNS: 4632 * Zero on success, negative on error. 4633 * 4634 */ 4635 static int ata_sg_setup(struct ata_queued_cmd *qc) 4636 { 4637 struct ata_port *ap = qc->ap; 4638 unsigned int n_elem; 4639 4640 VPRINTK("ENTER, ata%u\n", ap->print_id); 4641 4642 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4643 if (n_elem < 1) 4644 return -1; 4645 4646 DPRINTK("%d sg elements mapped\n", n_elem); 4647 qc->orig_n_elem = qc->n_elem; 4648 qc->n_elem = n_elem; 4649 qc->flags |= ATA_QCFLAG_DMAMAP; 4650 4651 return 0; 4652 } 4653 4654 /** 4655 * swap_buf_le16 - swap halves of 16-bit words in place 4656 * @buf: Buffer to swap 4657 * @buf_words: Number of 16-bit words in buffer. 4658 * 4659 * Swap halves of 16-bit words if needed to convert from 4660 * little-endian byte order to native cpu byte order, or 4661 * vice-versa. 4662 * 4663 * LOCKING: 4664 * Inherited from caller. 4665 */ 4666 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4667 { 4668 #ifdef __BIG_ENDIAN 4669 unsigned int i; 4670 4671 for (i = 0; i < buf_words; i++) 4672 buf[i] = le16_to_cpu(buf[i]); 4673 #endif /* __BIG_ENDIAN */ 4674 } 4675 4676 /** 4677 * ata_qc_new - Request an available ATA command, for queueing 4678 * @ap: target port 4679 * 4680 * LOCKING: 4681 * None. 4682 */ 4683 4684 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4685 { 4686 struct ata_queued_cmd *qc = NULL; 4687 unsigned int i; 4688 4689 /* no command while frozen */ 4690 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4691 return NULL; 4692 4693 /* the last tag is reserved for internal command. */ 4694 for (i = 0; i < ATA_MAX_QUEUE - 1; i++) 4695 if (!test_and_set_bit(i, &ap->qc_allocated)) { 4696 qc = __ata_qc_from_tag(ap, i); 4697 break; 4698 } 4699 4700 if (qc) 4701 qc->tag = i; 4702 4703 return qc; 4704 } 4705 4706 /** 4707 * ata_qc_new_init - Request an available ATA command, and initialize it 4708 * @dev: Device from whom we request an available command structure 4709 * 4710 * LOCKING: 4711 * None. 4712 */ 4713 4714 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) 4715 { 4716 struct ata_port *ap = dev->link->ap; 4717 struct ata_queued_cmd *qc; 4718 4719 qc = ata_qc_new(ap); 4720 if (qc) { 4721 qc->scsicmd = NULL; 4722 qc->ap = ap; 4723 qc->dev = dev; 4724 4725 ata_qc_reinit(qc); 4726 } 4727 4728 return qc; 4729 } 4730 4731 /** 4732 * ata_qc_free - free unused ata_queued_cmd 4733 * @qc: Command to complete 4734 * 4735 * Designed to free unused ata_queued_cmd object 4736 * in case something prevents using it. 
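*
* For example (illustrative), a caller that obtained a qc via
* ata_qc_new_init() but then failed command setup before
* ata_qc_issue() uses this function to release the tag.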
4737 * 4738 * LOCKING: 4739 * spin_lock_irqsave(host lock) 4740 */ 4741 void ata_qc_free(struct ata_queued_cmd *qc) 4742 { 4743 struct ata_port *ap; 4744 unsigned int tag; 4745 4746 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4747 ap = qc->ap; 4748 4749 qc->flags = 0; 4750 tag = qc->tag; 4751 if (likely(ata_tag_valid(tag))) { 4752 qc->tag = ATA_TAG_POISON; 4753 clear_bit(tag, &ap->qc_allocated); 4754 } 4755 } 4756 4757 void __ata_qc_complete(struct ata_queued_cmd *qc) 4758 { 4759 struct ata_port *ap; 4760 struct ata_link *link; 4761 4762 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4763 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4764 ap = qc->ap; 4765 link = qc->dev->link; 4766 4767 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4768 ata_sg_clean(qc); 4769 4770 /* command should be marked inactive atomically with qc completion */ 4771 if (qc->tf.protocol == ATA_PROT_NCQ) { 4772 link->sactive &= ~(1 << qc->tag); 4773 if (!link->sactive) 4774 ap->nr_active_links--; 4775 } else { 4776 link->active_tag = ATA_TAG_POISON; 4777 ap->nr_active_links--; 4778 } 4779 4780 /* clear exclusive status */ 4781 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 4782 ap->excl_link == link)) 4783 ap->excl_link = NULL; 4784 4785 /* atapi: mark qc as inactive to prevent the interrupt handler 4786 * from completing the command twice later, before the error handler 4787 * is called. (when rc != 0 and atapi request sense is needed) 4788 */ 4789 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4790 ap->qc_active &= ~(1 << qc->tag); 4791 4792 /* call completion callback */ 4793 qc->complete_fn(qc); 4794 } 4795 4796 static void fill_result_tf(struct ata_queued_cmd *qc) 4797 { 4798 struct ata_port *ap = qc->ap; 4799 4800 qc->result_tf.flags = qc->tf.flags; 4801 ap->ops->qc_fill_rtf(qc); 4802 } 4803 4804 static void ata_verify_xfer(struct ata_queued_cmd *qc) 4805 { 4806 struct ata_device *dev = qc->dev; 4807 4808 if (ata_is_nodata(qc->tf.protocol)) 4809 return; 4810 4811 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 4812 return; 4813 4814 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 4815 } 4816 4817 /** 4818 * ata_qc_complete - Complete an active ATA command 4819 * @qc: Command to complete 4820 * 4821 * Indicate to the mid and upper layers that an ATA command has 4822 * completed, with either an ok or not-ok status. 4823 * 4824 * Refrain from calling this function multiple times when 4825 * successfully completing multiple NCQ commands. 4826 * ata_qc_complete_multiple() should be used instead, which will 4827 * properly update IRQ expect state. 4828 * 4829 * LOCKING: 4830 * spin_lock_irqsave(host lock) 4831 */ 4832 void ata_qc_complete(struct ata_queued_cmd *qc) 4833 { 4834 struct ata_port *ap = qc->ap; 4835 4836 /* XXX: New EH and old EH use different mechanisms to 4837 * synchronize EH with regular execution path. 4838 * 4839 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 4840 * Normal execution path is responsible for not accessing a 4841 * failed qc. libata core enforces the rule by returning NULL 4842 * from ata_qc_from_tag() for failed qcs. 4843 * 4844 * Old EH depends on ata_qc_complete() nullifying completion 4845 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 4846 * not synchronize with interrupt handler. Only PIO task is 4847 * taken care of. 
4848 */ 4849 if (ap->ops->error_handler) { 4850 struct ata_device *dev = qc->dev; 4851 struct ata_eh_info *ehi = &dev->link->eh_info; 4852 4853 if (unlikely(qc->err_mask)) 4854 qc->flags |= ATA_QCFLAG_FAILED; 4855 4856 /* 4857 * Finish internal commands without any further processing 4858 * and always with the result TF filled. 4859 */ 4860 if (unlikely(ata_tag_internal(qc->tag))) { 4861 fill_result_tf(qc); 4862 __ata_qc_complete(qc); 4863 return; 4864 } 4865 4866 /* 4867 * Non-internal qc has failed. Fill the result TF and 4868 * summon EH. 4869 */ 4870 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4871 fill_result_tf(qc); 4872 ata_qc_schedule_eh(qc); 4873 return; 4874 } 4875 4876 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 4877 4878 /* read result TF if requested */ 4879 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4880 fill_result_tf(qc); 4881 4882 /* Some commands need post-processing after successful 4883 * completion. 4884 */ 4885 switch (qc->tf.command) { 4886 case ATA_CMD_SET_FEATURES: 4887 if (qc->tf.feature != SETFEATURES_WC_ON && 4888 qc->tf.feature != SETFEATURES_WC_OFF) 4889 break; 4890 /* fall through */ 4891 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 4892 case ATA_CMD_SET_MULTI: /* multi_count changed */ 4893 /* revalidate device */ 4894 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 4895 ata_port_schedule_eh(ap); 4896 break; 4897 4898 case ATA_CMD_SLEEP: 4899 dev->flags |= ATA_DFLAG_SLEEPING; 4900 break; 4901 } 4902 4903 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 4904 ata_verify_xfer(qc); 4905 4906 __ata_qc_complete(qc); 4907 } else { 4908 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 4909 return; 4910 4911 /* read result TF if failed or requested */ 4912 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 4913 fill_result_tf(qc); 4914 4915 __ata_qc_complete(qc); 4916 } 4917 } 4918 4919 /** 4920 * ata_qc_complete_multiple - Complete multiple qcs successfully 4921 * @ap: port in question 4922 * @qc_active: new qc_active mask 4923 * 4924 * Complete in-flight commands. This functions is meant to be 4925 * called from low-level driver's interrupt routine to complete 4926 * requests normally. ap->qc_active and @qc_active is compared 4927 * and commands are completed accordingly. 4928 * 4929 * Always use this function when completing multiple NCQ commands 4930 * from IRQ handlers instead of calling ata_qc_complete() 4931 * multiple times to keep IRQ expect status properly in sync. 4932 * 4933 * LOCKING: 4934 * spin_lock_irqsave(host lock) 4935 * 4936 * RETURNS: 4937 * Number of completed commands on success, -errno otherwise. 4938 */ 4939 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 4940 { 4941 int nr_done = 0; 4942 u32 done_mask; 4943 4944 done_mask = ap->qc_active ^ qc_active; 4945 4946 if (unlikely(done_mask & qc_active)) { 4947 ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n", 4948 ap->qc_active, qc_active); 4949 return -EINVAL; 4950 } 4951 4952 while (done_mask) { 4953 struct ata_queued_cmd *qc; 4954 unsigned int tag = __ffs(done_mask); 4955 4956 qc = ata_qc_from_tag(ap, tag); 4957 if (qc) { 4958 ata_qc_complete(qc); 4959 nr_done++; 4960 } 4961 done_mask &= ~(1 << tag); 4962 } 4963 4964 return nr_done; 4965 } 4966 4967 /** 4968 * ata_qc_issue - issue taskfile to device 4969 * @qc: command to issue to device 4970 * 4971 * Prepare an ATA command to submission to device. 
4972 * This includes mapping the data into a DMA-able 4973 * area, filling in the S/G table, and finally 4974 * writing the taskfile to hardware, starting the command. 4975 * 4976 * LOCKING: 4977 * spin_lock_irqsave(host lock) 4978 */ 4979 void ata_qc_issue(struct ata_queued_cmd *qc) 4980 { 4981 struct ata_port *ap = qc->ap; 4982 struct ata_link *link = qc->dev->link; 4983 u8 prot = qc->tf.protocol; 4984 4985 /* Make sure only one non-NCQ command is outstanding. The 4986 * check is skipped for old EH because it reuses active qc to 4987 * request ATAPI sense. 4988 */ 4989 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 4990 4991 if (ata_is_ncq(prot)) { 4992 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 4993 4994 if (!link->sactive) 4995 ap->nr_active_links++; 4996 link->sactive |= 1 << qc->tag; 4997 } else { 4998 WARN_ON_ONCE(link->sactive); 4999 5000 ap->nr_active_links++; 5001 link->active_tag = qc->tag; 5002 } 5003 5004 qc->flags |= ATA_QCFLAG_ACTIVE; 5005 ap->qc_active |= 1 << qc->tag; 5006 5007 /* 5008 * We guarantee to LLDs that they will have at least one 5009 * non-zero sg if the command is a data command. 5010 */ 5011 if (WARN_ON_ONCE(ata_is_data(prot) && 5012 (!qc->sg || !qc->n_elem || !qc->nbytes))) 5013 goto sys_err; 5014 5015 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5016 (ap->flags & ATA_FLAG_PIO_DMA))) 5017 if (ata_sg_setup(qc)) 5018 goto sys_err; 5019 5020 /* if device is sleeping, schedule reset and abort the link */ 5021 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5022 link->eh_info.action |= ATA_EH_RESET; 5023 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5024 ata_link_abort(link); 5025 return; 5026 } 5027 5028 ap->ops->qc_prep(qc); 5029 5030 qc->err_mask |= ap->ops->qc_issue(qc); 5031 if (unlikely(qc->err_mask)) 5032 goto err; 5033 return; 5034 5035 sys_err: 5036 qc->err_mask |= AC_ERR_SYSTEM; 5037 err: 5038 ata_qc_complete(qc); 5039 } 5040 5041 /** 5042 * sata_scr_valid - test whether SCRs are accessible 5043 * @link: ATA link to test SCR accessibility for 5044 * 5045 * Test whether SCRs are accessible for @link. 5046 * 5047 * LOCKING: 5048 * None. 5049 * 5050 * RETURNS: 5051 * 1 if SCRs are accessible, 0 otherwise. 5052 */ 5053 int sata_scr_valid(struct ata_link *link) 5054 { 5055 struct ata_port *ap = link->ap; 5056 5057 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5058 } 5059 5060 /** 5061 * sata_scr_read - read SCR register of the specified port 5062 * @link: ATA link to read SCR for 5063 * @reg: SCR to read 5064 * @val: Place to store read value 5065 * 5066 * Read SCR register @reg of @link into *@val. This function is 5067 * guaranteed to succeed if @link is ap->link, the cable type of 5068 * the port is SATA and the port implements ->scr_read. 5069 * 5070 * LOCKING: 5071 * None if @link is ap->link. Kernel thread context otherwise. 5072 * 5073 * RETURNS: 5074 * 0 on success, negative errno on failure. 5075 */ 5076 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5077 { 5078 if (ata_is_host_link(link)) { 5079 if (sata_scr_valid(link)) 5080 return link->ap->ops->scr_read(link, reg, val); 5081 return -EOPNOTSUPP; 5082 } 5083 5084 return sata_pmp_scr_read(link, reg, val); 5085 } 5086 5087 /** 5088 * sata_scr_write - write SCR register of the specified port 5089 * @link: ATA link to write SCR for 5090 * @reg: SCR to write 5091 * @val: value to write 5092 * 5093 * Write @val to SCR register @reg of @link. 
This function is 5094 * guaranteed to succeed if @link is ap->link, the cable type of 5095 * the port is SATA and the port implements ->scr_read. 5096 * 5097 * LOCKING: 5098 * None if @link is ap->link. Kernel thread context otherwise. 5099 * 5100 * RETURNS: 5101 * 0 on success, negative errno on failure. 5102 */ 5103 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5104 { 5105 if (ata_is_host_link(link)) { 5106 if (sata_scr_valid(link)) 5107 return link->ap->ops->scr_write(link, reg, val); 5108 return -EOPNOTSUPP; 5109 } 5110 5111 return sata_pmp_scr_write(link, reg, val); 5112 } 5113 5114 /** 5115 * sata_scr_write_flush - write SCR register of the specified port and flush 5116 * @link: ATA link to write SCR for 5117 * @reg: SCR to write 5118 * @val: value to write 5119 * 5120 * This function is identical to sata_scr_write() except that this 5121 * function performs flush after writing to the register. 5122 * 5123 * LOCKING: 5124 * None if @link is ap->link. Kernel thread context otherwise. 5125 * 5126 * RETURNS: 5127 * 0 on success, negative errno on failure. 5128 */ 5129 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5130 { 5131 if (ata_is_host_link(link)) { 5132 int rc; 5133 5134 if (sata_scr_valid(link)) { 5135 rc = link->ap->ops->scr_write(link, reg, val); 5136 if (rc == 0) 5137 rc = link->ap->ops->scr_read(link, reg, &val); 5138 return rc; 5139 } 5140 return -EOPNOTSUPP; 5141 } 5142 5143 return sata_pmp_scr_write(link, reg, val); 5144 } 5145 5146 /** 5147 * ata_phys_link_online - test whether the given link is online 5148 * @link: ATA link to test 5149 * 5150 * Test whether @link is online. Note that this function returns 5151 * 0 if online status of @link cannot be obtained, so 5152 * ata_link_online(link) != !ata_link_offline(link). 5153 * 5154 * LOCKING: 5155 * None. 5156 * 5157 * RETURNS: 5158 * True if the port online status is available and online. 5159 */ 5160 bool ata_phys_link_online(struct ata_link *link) 5161 { 5162 u32 sstatus; 5163 5164 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5165 ata_sstatus_online(sstatus)) 5166 return true; 5167 return false; 5168 } 5169 5170 /** 5171 * ata_phys_link_offline - test whether the given link is offline 5172 * @link: ATA link to test 5173 * 5174 * Test whether @link is offline. Note that this function 5175 * returns 0 if offline status of @link cannot be obtained, so 5176 * ata_link_online(link) != !ata_link_offline(link). 5177 * 5178 * LOCKING: 5179 * None. 5180 * 5181 * RETURNS: 5182 * True if the port offline status is available and offline. 5183 */ 5184 bool ata_phys_link_offline(struct ata_link *link) 5185 { 5186 u32 sstatus; 5187 5188 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5189 !ata_sstatus_online(sstatus)) 5190 return true; 5191 return false; 5192 } 5193 5194 /** 5195 * ata_link_online - test whether the given link is online 5196 * @link: ATA link to test 5197 * 5198 * Test whether @link is online. This is identical to 5199 * ata_phys_link_online() when there's no slave link. When 5200 * there's a slave link, this function should only be called on 5201 * the master link and will return true if any of M/S links is 5202 * online. 5203 * 5204 * LOCKING: 5205 * None. 5206 * 5207 * RETURNS: 5208 * True if the port online status is available and online. 
5209 */ 5210 bool ata_link_online(struct ata_link *link) 5211 { 5212 struct ata_link *slave = link->ap->slave_link; 5213 5214 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5215 5216 return ata_phys_link_online(link) || 5217 (slave && ata_phys_link_online(slave)); 5218 } 5219 5220 /** 5221 * ata_link_offline - test whether the given link is offline 5222 * @link: ATA link to test 5223 * 5224 * Test whether @link is offline. This is identical to 5225 * ata_phys_link_offline() when there's no slave link. When 5226 * there's a slave link, this function should only be called on 5227 * the master link and will return true if both M/S links are 5228 * offline. 5229 * 5230 * LOCKING: 5231 * None. 5232 * 5233 * RETURNS: 5234 * True if the port offline status is available and offline. 5235 */ 5236 bool ata_link_offline(struct ata_link *link) 5237 { 5238 struct ata_link *slave = link->ap->slave_link; 5239 5240 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5241 5242 return ata_phys_link_offline(link) && 5243 (!slave || ata_phys_link_offline(slave)); 5244 } 5245 5246 #ifdef CONFIG_PM 5247 static int ata_port_request_pm(struct ata_port *ap, pm_message_t mesg, 5248 unsigned int action, unsigned int ehi_flags, 5249 int wait) 5250 { 5251 struct ata_link *link; 5252 unsigned long flags; 5253 int rc; 5254 5255 /* Previous resume operation might still be in 5256 * progress. Wait for PM_PENDING to clear. 5257 */ 5258 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5259 ata_port_wait_eh(ap); 5260 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5261 } 5262 5263 /* request PM ops to EH */ 5264 spin_lock_irqsave(ap->lock, flags); 5265 5266 ap->pm_mesg = mesg; 5267 if (wait) { 5268 rc = 0; 5269 ap->pm_result = &rc; 5270 } 5271 5272 ap->pflags |= ATA_PFLAG_PM_PENDING; 5273 ata_for_each_link(link, ap, HOST_FIRST) { 5274 link->eh_info.action |= action; 5275 link->eh_info.flags |= ehi_flags; 5276 } 5277 5278 ata_port_schedule_eh(ap); 5279 5280 spin_unlock_irqrestore(ap->lock, flags); 5281 5282 /* wait and check result */ 5283 if (wait) { 5284 ata_port_wait_eh(ap); 5285 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5286 } 5287 5288 return rc; 5289 } 5290 5291 #define to_ata_port(d) container_of(d, struct ata_port, tdev) 5292 5293 static int ata_port_suspend_common(struct device *dev, pm_message_t mesg) 5294 { 5295 struct ata_port *ap = to_ata_port(dev); 5296 unsigned int ehi_flags = ATA_EHI_QUIET; 5297 int rc; 5298 5299 /* 5300 * On some hardware, device fails to respond after spun down 5301 * for suspend. As the device won't be used before being 5302 * resumed, we don't need to touch the device. Ask EH to skip 5303 * the usual stuff and proceed directly to suspend. 
5304 * 5305 * http://thread.gmane.org/gmane.linux.ide/46764 5306 */ 5307 if (mesg.event == PM_EVENT_SUSPEND) 5308 ehi_flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_NO_RECOVERY; 5309 5310 rc = ata_port_request_pm(ap, mesg, 0, ehi_flags, 1); 5311 return rc; 5312 } 5313 5314 static int ata_port_suspend(struct device *dev) 5315 { 5316 if (pm_runtime_suspended(dev)) 5317 return 0; 5318 5319 return ata_port_suspend_common(dev, PMSG_SUSPEND); 5320 } 5321 5322 static int ata_port_do_freeze(struct device *dev) 5323 { 5324 if (pm_runtime_suspended(dev)) 5325 pm_runtime_resume(dev); 5326 5327 return ata_port_suspend_common(dev, PMSG_FREEZE); 5328 } 5329 5330 static int ata_port_poweroff(struct device *dev) 5331 { 5332 if (pm_runtime_suspended(dev)) 5333 return 0; 5334 5335 return ata_port_suspend_common(dev, PMSG_HIBERNATE); 5336 } 5337 5338 static int ata_port_resume_common(struct device *dev) 5339 { 5340 struct ata_port *ap = to_ata_port(dev); 5341 int rc; 5342 5343 rc = ata_port_request_pm(ap, PMSG_ON, ATA_EH_RESET, 5344 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 1); 5345 return rc; 5346 } 5347 5348 static int ata_port_resume(struct device *dev) 5349 { 5350 int rc; 5351 5352 rc = ata_port_resume_common(dev); 5353 if (!rc) { 5354 pm_runtime_disable(dev); 5355 pm_runtime_set_active(dev); 5356 pm_runtime_enable(dev); 5357 } 5358 5359 return rc; 5360 } 5361 5362 static int ata_port_runtime_idle(struct device *dev) 5363 { 5364 return pm_runtime_suspend(dev); 5365 } 5366 5367 static const struct dev_pm_ops ata_port_pm_ops = { 5368 .suspend = ata_port_suspend, 5369 .resume = ata_port_resume, 5370 .freeze = ata_port_do_freeze, 5371 .thaw = ata_port_resume, 5372 .poweroff = ata_port_poweroff, 5373 .restore = ata_port_resume, 5374 5375 .runtime_suspend = ata_port_suspend, 5376 .runtime_resume = ata_port_resume_common, 5377 .runtime_idle = ata_port_runtime_idle, 5378 }; 5379 5380 /** 5381 * ata_host_suspend - suspend host 5382 * @host: host to suspend 5383 * @mesg: PM message 5384 * 5385 * Suspend @host. Actual operation is performed by port suspend. 5386 */ 5387 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5388 { 5389 host->dev->power.power_state = mesg; 5390 return 0; 5391 } 5392 5393 /** 5394 * ata_host_resume - resume host 5395 * @host: host to resume 5396 * 5397 * Resume @host. Actual operation is performed by port resume. 5398 */ 5399 void ata_host_resume(struct ata_host *host) 5400 { 5401 host->dev->power.power_state = PMSG_ON; 5402 } 5403 #endif 5404 5405 struct device_type ata_port_type = { 5406 .name = "ata_port", 5407 #ifdef CONFIG_PM 5408 .pm = &ata_port_pm_ops, 5409 #endif 5410 }; 5411 5412 /** 5413 * ata_dev_init - Initialize an ata_device structure 5414 * @dev: Device structure to initialize 5415 * 5416 * Initialize @dev in preparation for probing. 5417 * 5418 * LOCKING: 5419 * Inherited from caller. 5420 */ 5421 void ata_dev_init(struct ata_device *dev) 5422 { 5423 struct ata_link *link = ata_dev_phys_link(dev); 5424 struct ata_port *ap = link->ap; 5425 unsigned long flags; 5426 5427 /* SATA spd limit is bound to the attached device, reset together */ 5428 link->sata_spd_limit = link->hw_sata_spd_limit; 5429 link->sata_spd = 0; 5430 5431 /* High bits of dev->flags are used to record warm plug 5432 * requests which occur asynchronously. Synchronize using 5433 * host lock. 
5434 */ 5435 spin_lock_irqsave(ap->lock, flags); 5436 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5437 dev->horkage = 0; 5438 spin_unlock_irqrestore(ap->lock, flags); 5439 5440 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5441 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5442 dev->pio_mask = UINT_MAX; 5443 dev->mwdma_mask = UINT_MAX; 5444 dev->udma_mask = UINT_MAX; 5445 } 5446 5447 /** 5448 * ata_link_init - Initialize an ata_link structure 5449 * @ap: ATA port link is attached to 5450 * @link: Link structure to initialize 5451 * @pmp: Port multiplier port number 5452 * 5453 * Initialize @link. 5454 * 5455 * LOCKING: 5456 * Kernel thread context (may sleep) 5457 */ 5458 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5459 { 5460 int i; 5461 5462 /* clear everything except for devices */ 5463 memset((void *)link + ATA_LINK_CLEAR_BEGIN, 0, 5464 ATA_LINK_CLEAR_END - ATA_LINK_CLEAR_BEGIN); 5465 5466 link->ap = ap; 5467 link->pmp = pmp; 5468 link->active_tag = ATA_TAG_POISON; 5469 link->hw_sata_spd_limit = UINT_MAX; 5470 5471 /* can't use iterator, ap isn't initialized yet */ 5472 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5473 struct ata_device *dev = &link->device[i]; 5474 5475 dev->link = link; 5476 dev->devno = dev - link->device; 5477 #ifdef CONFIG_ATA_ACPI 5478 dev->gtf_filter = ata_acpi_gtf_filter; 5479 #endif 5480 ata_dev_init(dev); 5481 } 5482 } 5483 5484 /** 5485 * sata_link_init_spd - Initialize link->sata_spd_limit 5486 * @link: Link to configure sata_spd_limit for 5487 * 5488 * Initialize @link->[hw_]sata_spd_limit to the currently 5489 * configured value. 5490 * 5491 * LOCKING: 5492 * Kernel thread context (may sleep). 5493 * 5494 * RETURNS: 5495 * 0 on success, -errno on failure. 5496 */ 5497 int sata_link_init_spd(struct ata_link *link) 5498 { 5499 u8 spd; 5500 int rc; 5501 5502 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5503 if (rc) 5504 return rc; 5505 5506 spd = (link->saved_scontrol >> 4) & 0xf; 5507 if (spd) 5508 link->hw_sata_spd_limit &= (1 << spd) - 1; 5509 5510 ata_force_link_limits(link); 5511 5512 link->sata_spd_limit = link->hw_sata_spd_limit; 5513 5514 return 0; 5515 } 5516 5517 /** 5518 * ata_port_alloc - allocate and initialize basic ATA port resources 5519 * @host: ATA host this allocated port belongs to 5520 * 5521 * Allocate and initialize basic ATA port resources. 5522 * 5523 * RETURNS: 5524 * Allocate ATA port on success, NULL on failure. 5525 * 5526 * LOCKING: 5527 * Inherited from calling layer (may sleep). 
5528 */ 5529 struct ata_port *ata_port_alloc(struct ata_host *host) 5530 { 5531 struct ata_port *ap; 5532 5533 DPRINTK("ENTER\n"); 5534 5535 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5536 if (!ap) 5537 return NULL; 5538 5539 ap->pflags |= ATA_PFLAG_INITIALIZING | ATA_PFLAG_FROZEN; 5540 ap->lock = &host->lock; 5541 ap->print_id = -1; 5542 ap->host = host; 5543 ap->dev = host->dev; 5544 5545 #if defined(ATA_VERBOSE_DEBUG) 5546 /* turn on all debugging levels */ 5547 ap->msg_enable = 0x00FF; 5548 #elif defined(ATA_DEBUG) 5549 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5550 #else 5551 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5552 #endif 5553 5554 mutex_init(&ap->scsi_scan_mutex); 5555 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5556 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5557 INIT_LIST_HEAD(&ap->eh_done_q); 5558 init_waitqueue_head(&ap->eh_wait_q); 5559 init_completion(&ap->park_req_pending); 5560 init_timer_deferrable(&ap->fastdrain_timer); 5561 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5562 ap->fastdrain_timer.data = (unsigned long)ap; 5563 5564 ap->cbl = ATA_CBL_NONE; 5565 5566 ata_link_init(ap, &ap->link, 0); 5567 5568 #ifdef ATA_IRQ_TRAP 5569 ap->stats.unhandled_irq = 1; 5570 ap->stats.idle_irq = 1; 5571 #endif 5572 ata_sff_port_init(ap); 5573 5574 return ap; 5575 } 5576 5577 static void ata_host_release(struct device *gendev, void *res) 5578 { 5579 struct ata_host *host = dev_get_drvdata(gendev); 5580 int i; 5581 5582 for (i = 0; i < host->n_ports; i++) { 5583 struct ata_port *ap = host->ports[i]; 5584 5585 if (!ap) 5586 continue; 5587 5588 if (ap->scsi_host) 5589 scsi_host_put(ap->scsi_host); 5590 5591 kfree(ap->pmp_link); 5592 kfree(ap->slave_link); 5593 kfree(ap); 5594 host->ports[i] = NULL; 5595 } 5596 5597 dev_set_drvdata(gendev, NULL); 5598 } 5599 5600 /** 5601 * ata_host_alloc - allocate and init basic ATA host resources 5602 * @dev: generic device this host is associated with 5603 * @max_ports: maximum number of ATA ports associated with this host 5604 * 5605 * Allocate and initialize basic ATA host resources. LLD calls 5606 * this function to allocate a host, initializes it fully and 5607 * attaches it using ata_host_register(). 5608 * 5609 * @max_ports ports are allocated and host->n_ports is 5610 * initialized to @max_ports. The caller is allowed to decrease 5611 * host->n_ports before calling ata_host_register(). The unused 5612 * ports will be automatically freed on registration. 5613 * 5614 * RETURNS: 5615 * Allocate ATA host on success, NULL on failure. 5616 * 5617 * LOCKING: 5618 * Inherited from calling layer (may sleep). 
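*
* A minimal LLD probe sketch (illustrative only; my_sht, my_ops and
* my_interrupt are placeholders, not libata symbols):
*
*	host = ata_host_alloc(&pdev->dev, n_ports);
*	if (!host)
*		return -ENOMEM;
*	host->ports[0]->ops = &my_ops;
*	rc = ata_host_activate(host, pdev->irq, my_interrupt,
*			       IRQF_SHARED, &my_sht);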
5619 */
5620 struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5621 {
5622 struct ata_host *host;
5623 size_t sz;
5624 int i;
5625
5626 DPRINTK("ENTER\n");
5627
5628 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5629 return NULL;
5630
5631 /* alloc a container for our list of ATA ports (buses) */
5632 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5634 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5635 if (!host)
5636 goto err_out;
5637
5638 devres_add(dev, host);
5639 dev_set_drvdata(dev, host);
5640
5641 spin_lock_init(&host->lock);
5642 mutex_init(&host->eh_mutex);
5643 host->dev = dev;
5644 host->n_ports = max_ports;
5645
5646 /* allocate ports bound to this host */
5647 for (i = 0; i < max_ports; i++) {
5648 struct ata_port *ap;
5649
5650 ap = ata_port_alloc(host);
5651 if (!ap)
5652 goto err_out;
5653
5654 ap->port_no = i;
5655 host->ports[i] = ap;
5656 }
5657
5658 devres_remove_group(dev, NULL);
5659 return host;
5660
5661 err_out:
5662 devres_release_group(dev, NULL);
5663 return NULL;
5664 }
5665
5666 /**
5667 * ata_host_alloc_pinfo - alloc host and init with port_info array
5668 * @dev: generic device this host is associated with
5669 * @ppi: array of ATA port_info to initialize host with
5670 * @n_ports: number of ATA ports attached to this host
5671 *
5672 * Allocate ATA host and initialize with info from @ppi. If NULL
5673 * terminated, @ppi may contain fewer entries than @n_ports. The
5674 * last entry will be used for the remaining ports.
5675 *
5676 * RETURNS:
5677 * Allocated ATA host on success, NULL on failure.
5678 *
5679 * LOCKING:
5680 * Inherited from calling layer (may sleep).
5681 */
5682 struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5683 const struct ata_port_info * const * ppi,
5684 int n_ports)
5685 {
5686 const struct ata_port_info *pi;
5687 struct ata_host *host;
5688 int i, j;
5689
5690 host = ata_host_alloc(dev, n_ports);
5691 if (!host)
5692 return NULL;
5693
5694 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5695 struct ata_port *ap = host->ports[i];
5696
5697 if (ppi[j])
5698 pi = ppi[j++];
5699
5700 ap->pio_mask = pi->pio_mask;
5701 ap->mwdma_mask = pi->mwdma_mask;
5702 ap->udma_mask = pi->udma_mask;
5703 ap->flags |= pi->flags;
5704 ap->link.flags |= pi->link_flags;
5705 ap->ops = pi->port_ops;
5706
5707 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5708 host->ops = pi->port_ops;
5709 }
5710
5711 return host;
5712 }
5713
5714 /**
5715 * ata_slave_link_init - initialize slave link
5716 * @ap: port to initialize slave link for
5717 *
5718 * Create and initialize slave link for @ap. This enables slave
5719 * link handling on the port.
5720 *
5721 * In libata, a port contains links and a link contains devices.
5722 * There is a single host link but if a PMP is attached to it,
5723 * there can be multiple fan-out links. On SATA, there's usually
5724 * a single device connected to a link but PATA and SATA
5725 * controllers emulating a TF based interface can have two - master
5726 * and slave.
5727 *
5728 * However, there are a few controllers which don't fit into this
5729 * abstraction too well - SATA controllers which emulate TF
5730 * interface with both master and slave devices but also have
5731 * separate SCR register sets for each device. These controllers
5732 * need separate links for physical link handling
5733 * (e.g.
onlineness, link speed) but should be treated like a
5734 * traditional M/S controller for everything else (e.g. command
5735 * issue, softreset).
5736 *
5737 * slave_link is libata's way of handling this class of
5738 * controllers without impacting the core layer too much. For
5739 * anything other than physical link handling, the default host
5740 * link is used for both master and slave. For physical link
5741 * handling, separate @ap->slave_link is used. All dirty details
5742 * are implemented inside the libata core layer. From LLD's POV, the
5743 * only difference is that prereset, hardreset and postreset are
5744 * called once more for the slave link, so the reset sequence
5745 * looks like the following.
5746 *
5747 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) ->
5748 * softreset(M) -> postreset(M) -> postreset(S)
5749 *
5750 * Note that softreset is called only for the master. Softreset
5751 * resets both M/S by definition, so SRST on master should handle
5752 * both (the standard method will work just fine).
5753 *
5754 * LOCKING:
5755 * Should be called before host is registered.
5756 *
5757 * RETURNS:
5758 * 0 on success, -errno on failure.
5759 */
5760 int ata_slave_link_init(struct ata_port *ap)
5761 {
5762 struct ata_link *link;
5763
5764 WARN_ON(ap->slave_link);
5765 WARN_ON(ap->flags & ATA_FLAG_PMP);
5766
5767 link = kzalloc(sizeof(*link), GFP_KERNEL);
5768 if (!link)
5769 return -ENOMEM;
5770
5771 ata_link_init(ap, link, 1);
5772 ap->slave_link = link;
5773 return 0;
5774 }
5775
5776 static void ata_host_stop(struct device *gendev, void *res)
5777 {
5778 struct ata_host *host = dev_get_drvdata(gendev);
5779 int i;
5780
5781 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5782
5783 for (i = 0; i < host->n_ports; i++) {
5784 struct ata_port *ap = host->ports[i];
5785
5786 if (ap->ops->port_stop)
5787 ap->ops->port_stop(ap);
5788 }
5789
5790 if (host->ops->host_stop)
5791 host->ops->host_stop(host);
5792 }
5793
5794 /**
5795 * ata_finalize_port_ops - finalize ata_port_operations
5796 * @ops: ata_port_operations to finalize
5797 *
5798 * An ata_port_operations can inherit from another ops and that
5799 * ops can again inherit from another. This can go on as many
5800 * times as necessary as long as there is no loop in the
5801 * inheritance chain.
5802 *
5803 * Ops tables are finalized when the host is started. NULL or
5804 * unspecified entries are inherited from the closest ancestor
5805 * which has the method and the entry is populated with it.
5806 * After finalization, the ops table directly points to all the
5807 * methods and ->inherits is no longer necessary and cleared.
5808 *
5809 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5810 *
5811 * LOCKING:
5812 * None.
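*
* For example (illustrative; the my_* names are placeholders), given:
*
*	static struct ata_port_operations my_ops = {
*		.inherits	= &ata_bmdma_port_ops,
*		.cable_detect	= my_cable_detect,
*	};
*
* finalization copies every method left NULL in my_ops from
* ata_bmdma_port_ops (and its ancestors), keeps my_cable_detect
* as the override, and then clears ->inherits.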
5813 */
5814 static void ata_finalize_port_ops(struct ata_port_operations *ops)
5815 {
5816 static DEFINE_SPINLOCK(lock);
5817 const struct ata_port_operations *cur;
5818 void **begin = (void **)ops;
5819 void **end = (void **)&ops->inherits;
5820 void **pp;
5821
5822 if (!ops || !ops->inherits)
5823 return;
5824
5825 spin_lock(&lock);
5826
5827 for (cur = ops->inherits; cur; cur = cur->inherits) {
5828 void **inherit = (void **)cur;
5829
5830 for (pp = begin; pp < end; pp++, inherit++)
5831 if (!*pp)
5832 *pp = *inherit;
5833 }
5834
5835 for (pp = begin; pp < end; pp++)
5836 if (IS_ERR(*pp))
5837 *pp = NULL;
5838
5839 ops->inherits = NULL;
5840
5841 spin_unlock(&lock);
5842 }
5843
5844 /**
5845 * ata_host_start - start and freeze ports of an ATA host
5846 * @host: ATA host to start ports for
5847 *
5848 * Start and then freeze ports of @host. Started status is
5849 * recorded in host->flags, so this function can be called
5850 * multiple times. Ports are guaranteed to get started only
5851 * once. If host->ops isn't initialized yet, it's set to the
5852 * first non-dummy port ops.
5853 *
5854 * LOCKING:
5855 * Inherited from calling layer (may sleep).
5856 *
5857 * RETURNS:
5858 * 0 if all ports are started successfully, -errno otherwise.
5859 */
5860 int ata_host_start(struct ata_host *host)
5861 {
5862 int have_stop = 0;
5863 void *start_dr = NULL;
5864 int i, rc;
5865
5866 if (host->flags & ATA_HOST_STARTED)
5867 return 0;
5868
5869 ata_finalize_port_ops(host->ops);
5870
5871 for (i = 0; i < host->n_ports; i++) {
5872 struct ata_port *ap = host->ports[i];
5873
5874 ata_finalize_port_ops(ap->ops);
5875
5876 if (!host->ops && !ata_port_is_dummy(ap))
5877 host->ops = ap->ops;
5878
5879 if (ap->ops->port_stop)
5880 have_stop = 1;
5881 }
5882
5883 if (host->ops->host_stop)
5884 have_stop = 1;
5885
5886 if (have_stop) {
5887 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5888 if (!start_dr)
5889 return -ENOMEM;
5890 }
5891
5892 for (i = 0; i < host->n_ports; i++) {
5893 struct ata_port *ap = host->ports[i];
5894
5895 if (ap->ops->port_start) {
5896 rc = ap->ops->port_start(ap);
5897 if (rc) {
5898 if (rc != -ENODEV)
5899 dev_err(host->dev,
5900 "failed to start port %d (errno=%d)\n",
5901 i, rc);
5902 goto err_out;
5903 }
5904 }
5905 ata_eh_freeze_port(ap);
5906 }
5907
5908 if (start_dr)
5909 devres_add(host->dev, start_dr);
5910 host->flags |= ATA_HOST_STARTED;
5911 return 0;
5912
5913 err_out:
5914 while (--i >= 0) {
5915 struct ata_port *ap = host->ports[i];
5916
5917 if (ap->ops->port_stop)
5918 ap->ops->port_stop(ap);
5919 }
5920 devres_free(start_dr);
5921 return rc;
5922 }
5923
5924 /**
5925 * ata_sas_host_init - Initialize a host struct
5926 * @host: host to initialize
5927 * @dev: device host is attached to
5928 * @flags: host flags
5929 * @ops: port_ops
5930 *
5931 * LOCKING:
5932 * PCI/etc. bus probe sem.
5933 * 5934 */ 5935 /* KILLME - the only user left is ipr */ 5936 void ata_host_init(struct ata_host *host, struct device *dev, 5937 unsigned long flags, struct ata_port_operations *ops) 5938 { 5939 spin_lock_init(&host->lock); 5940 mutex_init(&host->eh_mutex); 5941 host->dev = dev; 5942 host->flags = flags; 5943 host->ops = ops; 5944 } 5945 5946 void __ata_port_probe(struct ata_port *ap) 5947 { 5948 struct ata_eh_info *ehi = &ap->link.eh_info; 5949 unsigned long flags; 5950 5951 /* kick EH for boot probing */ 5952 spin_lock_irqsave(ap->lock, flags); 5953 5954 ehi->probe_mask |= ATA_ALL_DEVICES; 5955 ehi->action |= ATA_EH_RESET; 5956 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 5957 5958 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 5959 ap->pflags |= ATA_PFLAG_LOADING; 5960 ata_port_schedule_eh(ap); 5961 5962 spin_unlock_irqrestore(ap->lock, flags); 5963 } 5964 5965 int ata_port_probe(struct ata_port *ap) 5966 { 5967 int rc = 0; 5968 5969 if (ap->ops->error_handler) { 5970 __ata_port_probe(ap); 5971 ata_port_wait_eh(ap); 5972 } else { 5973 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 5974 rc = ata_bus_probe(ap); 5975 DPRINTK("ata%u: bus probe end\n", ap->print_id); 5976 } 5977 return rc; 5978 } 5979 5980 5981 static void async_port_probe(void *data, async_cookie_t cookie) 5982 { 5983 struct ata_port *ap = data; 5984 5985 /* 5986 * If we're not allowed to scan this host in parallel, 5987 * we need to wait until all previous scans have completed 5988 * before going further. 5989 * Jeff Garzik says this is only within a controller, so we 5990 * don't need to wait for port 0, only for later ports. 5991 */ 5992 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 5993 async_synchronize_cookie(cookie); 5994 5995 (void)ata_port_probe(ap); 5996 5997 /* in order to keep device order, we need to synchronize at this point */ 5998 async_synchronize_cookie(cookie); 5999 6000 ata_scsi_scan_host(ap, 1); 6001 } 6002 6003 /** 6004 * ata_host_register - register initialized ATA host 6005 * @host: ATA host to register 6006 * @sht: template for SCSI host 6007 * 6008 * Register initialized ATA host. @host is allocated using 6009 * ata_host_alloc() and fully initialized by LLD. This function 6010 * starts ports, registers @host with ATA and SCSI layers and 6011 * probe registered devices. 6012 * 6013 * LOCKING: 6014 * Inherited from calling layer (may sleep). 6015 * 6016 * RETURNS: 6017 * 0 on success, -errno otherwise. 6018 */ 6019 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6020 { 6021 int i, rc; 6022 6023 /* host must have been started */ 6024 if (!(host->flags & ATA_HOST_STARTED)) { 6025 dev_err(host->dev, "BUG: trying to register unstarted host\n"); 6026 WARN_ON(1); 6027 return -EINVAL; 6028 } 6029 6030 /* Blow away unused ports. This happens when LLD can't 6031 * determine the exact number of ports to allocate at 6032 * allocation time. 
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probes registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_err(host->dev, "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = atomic_inc_return(&ata_print_id);

	/* Create associated sysfs transport objects */
	for (i = 0; i < host->n_ports; i++) {
		rc = ata_tport_add(host->dev, host->ports[i]);
		if (rc)
			goto err_tadd;
	}

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		goto err_tadd;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);
		if (ap->slave_link)
			sata_link_init_spd(ap->slave_link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_info(ap, "%cATA max %s %s\n",
				      (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
				      ata_mode_string(xfer_mask),
				      ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_info(ap, "DUMMY\n");
	}

	/* perform each probe asynchronously */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		async_schedule(async_port_probe, ap);
	}

	return 0;

 err_tadd:
	while (--i >= 0)
		ata_tport_delete(host->ports[i]);
	return rc;
}
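/*
 * Example (illustrative sketch): the usual attach sequence for an LLD
 * that requests its IRQ by hand instead of using ata_host_activate().
 * "ppi", "n_ports" and "foo_sht" are made-up names:
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports);
 *	if (!host)
 *		return -ENOMEM;
 *	rc = ata_host_start(host);
 *	if (rc)
 *		return rc;
 *	(request IRQ, map BARs, etc.)
 *	return ata_host_register(host, &foo_sht);
 */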
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	An invalid IRQ skips the IRQ registration and expects the host to
 *	have set polling mode on the port.  In this case, @irq_handler
 *	should be NULL.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
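/*
 * Example (illustrative sketch): the two ways to invoke
 * ata_host_activate().  foo_interrupt() and foo_sht are made-up names;
 * pdev->irq assumes a PCI LLD:
 *
 *	interrupt driven:
 *	rc = ata_host_activate(host, pdev->irq, foo_interrupt,
 *			       IRQF_SHARED, &foo_sht);
 *
 *	polling only (no IRQ, no handler; the LLD is expected to have
 *	put the ports into polling mode, e.g. ATA_FLAG_PIO_POLLING):
 *	rc = ata_host_activate(host, 0, NULL, 0, &foo_sht);
 */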
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);

	/* wait till EH commits suicide */
	ata_port_wait_eh(ap);

	/* it better be dead now */
	WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED));

	cancel_delayed_work_sync(&ap->hotplug_task);

 skip_eh:
	if (ap->pmp_link) {
		int i;
		for (i = 0; i < SATA_PMP_MAX_PORTS; i++)
			ata_tlink_delete(&ap->pmp_link[i]);
	}
	ata_tport_delete(ap);

	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 *	ata_host_detach - Detach all ports of an ATA host
 *	@host: Host to detach
 *
 *	Detach all ports of @host.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	/* the host is dead now, dissociate ACPI */
	ata_acpi_dissociate(host);
}

#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
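/*
 * Example (illustrative sketch): pci_test_config_bits() is typically
 * used by PATA LLDs to test port-enable bits in PCI config space.
 * The offset/mask/value below are made up; the initializer order is
 * { reg, width, mask, val }:
 *
 *	static const struct pci_bits foo_enable_bits = {
 *		0x41, 1, 0x80, 0x80
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits))
 *		return -ENOENT;	(port disabled by firmware)
 */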
#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev,
			"failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */
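/*
 * Example (illustrative sketch): a PCI LLD can wire the helpers above
 * directly into its struct pci_driver.  The "foo" names and the ID
 * table are made up:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_pci_ids,
 *		.probe		= foo_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */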
static int __init ata_parse_force_one(char **cur,
				      struct ata_force_ent *force_ent,
				      const char **reason)
{
	/* FIXME: Currently, there's no way to tag init const data and
	 * using __initdata causes build failure on some versions of
	 * gcc.  Once __initdataconst is implemented, add const to the
	 * following structure.
	 */
	static struct ata_force_param force_tbl[] __initdata = {
		{ "40c",	.cbl		= ATA_CBL_PATA40 },
		{ "80c",	.cbl		= ATA_CBL_PATA80 },
		{ "short40c",	.cbl		= ATA_CBL_PATA40_SHORT },
		{ "unk",	.cbl		= ATA_CBL_PATA_UNK },
		{ "ign",	.cbl		= ATA_CBL_PATA_IGN },
		{ "sata",	.cbl		= ATA_CBL_SATA },
		{ "1.5Gbps",	.spd_limit	= 1 },
		{ "3.0Gbps",	.spd_limit	= 2 },
		{ "noncq",	.horkage_on	= ATA_HORKAGE_NONCQ },
		{ "ncq",	.horkage_off	= ATA_HORKAGE_NONCQ },
		{ "dump_id",	.horkage_on	= ATA_HORKAGE_DUMP_ID },
		{ "pio0",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 0) },
		{ "pio1",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 1) },
		{ "pio2",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 2) },
		{ "pio3",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 3) },
		{ "pio4",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 4) },
		{ "pio5",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 5) },
		{ "pio6",	.xfer_mask	= 1 << (ATA_SHIFT_PIO + 6) },
		{ "mwdma0",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 0) },
		{ "mwdma1",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 1) },
		{ "mwdma2",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 2) },
		{ "mwdma3",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 3) },
		{ "mwdma4",	.xfer_mask	= 1 << (ATA_SHIFT_MWDMA + 4) },
		{ "udma0",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma/16",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 0) },
		{ "udma1",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma/25",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 1) },
		{ "udma2",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma/33",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 2) },
		{ "udma3",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma/44",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 3) },
		{ "udma4",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma/66",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 4) },
		{ "udma5",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma/100",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 5) },
		{ "udma6",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma/133",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 6) },
		{ "udma7",	.xfer_mask	= 1 << (ATA_SHIFT_UDMA + 7) },
		{ "nohrst",	.lflags		= ATA_LFLAG_NO_HRST },
		{ "nosrst",	.lflags		= ATA_LFLAG_NO_SRST },
		{ "norst",	.lflags		= ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
	};
	char *start = *cur, *p = *cur;
	char *id, *val, *endp;
	const struct ata_force_param *match_fp = NULL;
	int nr_matches = 0, i;

	/* find where this param ends and update *cur */
	while (*p != '\0' && *p != ',')
		p++;

	if (*p == '\0')
		*cur = p;
	else
		*cur = p + 1;

	*p = '\0';

	/* parse */
	p = strchr(start, ':');
	if (!p) {
		val = strstrip(start);
		goto parse_val;
	}
	*p = '\0';

	id = strstrip(start);
	val = strstrip(p + 1);

	/* parse id */
	p = strchr(id, '.');
	if (p) {
		*p++ = '\0';
		force_ent->device = simple_strtoul(p, &endp, 10);
		if (p == endp || *endp != '\0') {
			*reason = "invalid device";
			return -EINVAL;
		}
	}

	force_ent->port = simple_strtoul(id, &endp, 10);
	if (id == endp || *endp != '\0') {
		*reason = "invalid port/link";
		return -EINVAL;
	}

 parse_val:
	/* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
	for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
		const struct ata_force_param *fp = &force_tbl[i];

		if (strncasecmp(val, fp->name, strlen(val)))
			continue;

		nr_matches++;
		match_fp = fp;

		if (strcasecmp(val, fp->name) == 0) {
			nr_matches = 1;
			break;
		}
	}

	if (!nr_matches) {
		*reason = "unknown value";
		return -EINVAL;
	}
	if (nr_matches > 1) {
		*reason = "ambiguous value";
		return -EINVAL;
	}

	force_ent->param = *match_fp;

	return 0;
}

static void __init ata_parse_force_param(void)
{
	int idx = 0, size = 1;
	int last_port = -1, last_device = -1;
	char *p, *cur, *next;

	/* calculate maximum number of params and allocate force_tbl */
	for (p = ata_force_param_buf; *p; p++)
		if (*p == ',')
			size++;

	ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
	if (!ata_force_tbl) {
		printk(KERN_WARNING "ata: failed to extend force table, "
		       "libata.force ignored\n");
		return;
	}

	/* parse and populate the table */
	for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
		const char *reason = "";
		struct ata_force_ent te = { .port = -1, .device = -1 };

		next = cur;
		if (ata_parse_force_one(&next, &te, &reason)) {
			printk(KERN_WARNING "ata: failed to parse force "
			       "parameter \"%s\" (%s)\n",
			       cur, reason);
			continue;
		}

		if (te.port == -1) {
			te.port = last_port;
			te.device = last_device;
		}

		ata_force_tbl[idx++] = te;

		last_port = te.port;
		last_device = te.device;
	}

	ata_force_tbl_size = idx;
}

static int __init ata_init(void)
{
	int rc;

	ata_parse_force_param();

	rc = ata_sff_init();
	if (rc) {
		kfree(ata_force_tbl);
		return rc;
	}

	libata_transport_init();
	ata_scsi_transport_template = ata_attach_transport();
	if (!ata_scsi_transport_template) {
		ata_sff_exit();
		rc = -ENOMEM;
		goto err_out;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;

 err_out:
	return rc;
}

static void __exit ata_exit(void)
{
	ata_release_transport(ata_scsi_transport_template);
	libata_transport_exit();
	ata_sff_exit();
	kfree(ata_force_tbl);
}

subsys_initcall(ata_init);
module_exit(ata_exit);
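/*
 * Example (illustrative): libata.force entries take the form
 * [PORT[.DEVICE]:]VALUE and are comma separated.  On the kernel
 * command line:
 *
 *	libata.force=1.5Gbps		limit all ports to 1.5Gbps
 *	libata.force=2:noncq		turn off NCQ on port 2
 *	libata.force=1.00:udma4,40c	UDMA/66 and a 40-wire cable for
 *					device 0 on port 1; an entry
 *					without an ID reuses the
 *					previous one
 *
 * See Documentation/kernel-parameters.txt for the full list of values.
 */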
static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1);

int ata_ratelimit(void)
{
	return __ratelimit(&ratelimit);
}

/**
 *	ata_msleep - ATA EH owner aware msleep
 *	@ap: ATA port to attribute the sleep to
 *	@msecs: duration to sleep in milliseconds
 *
 *	Sleeps @msecs.  If the current task is owner of @ap's EH, the
 *	ownership is released before going to sleep and reacquired
 *	after the sleep is complete.  IOW, other ports sharing the
 *	@ap->host will be allowed to own the EH while this task is
 *	sleeping.
 *
 *	LOCKING:
 *	Might sleep.
 */
void ata_msleep(struct ata_port *ap, unsigned int msecs)
{
	bool owns_eh = ap && ap->host->eh_owner == current;

	if (owns_eh)
		ata_eh_release(ap);

	msleep(msecs);

	if (owns_eh)
		ata_eh_acquire(ap);
}

/**
 *	ata_wait_register - wait until register value changes
 *	@ap: ATA port to wait register for, can be NULL
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval: polling interval in milliseconds
 *	@timeout: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval until @timeout expires.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	deadline = ata_deadline(jiffies, timeout);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		ata_msleep(ap, interval);
		tmp = ioread32(reg);
	}

	return tmp;
}
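/*
 * Example (illustrative sketch): wait up to a second for a controller
 * to clear its reset bit, polling every 10ms.  "mmio", FOO_CTL and
 * FOO_CTL_RST are made up; the loop above runs while
 * (value & mask) == val:
 *
 *	u32 ctl = ata_wait_register(ap, mmio + FOO_CTL, FOO_CTL_RST,
 *				    FOO_CTL_RST, 10, 1000);
 *	if (ctl & FOO_CTL_RST)
 *		return -EIO;	(timed out, still in reset)
 */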
/*
 * Dummy port_ops
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}

struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.error_handler		= ata_dummy_error_handler,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};

/*
 * Utility print functions
 */
int ata_port_printk(const struct ata_port *ap, const char *level,
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	r = printk("%sata%u: %pV", level, ap->print_id, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(ata_port_printk);

int ata_link_printk(const struct ata_link *link, const char *level,
		    const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	if (sata_pmp_attached(link->ap) || link->ap->slave_link)
		r = printk("%sata%u.%02u: %pV",
			   level, link->ap->print_id, link->pmp, &vaf);
	else
		r = printk("%sata%u: %pV",
			   level, link->ap->print_id, &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(ata_link_printk);

int ata_dev_printk(const struct ata_device *dev, const char *level,
		   const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	r = printk("%sata%u.%02u: %pV",
		   level, dev->link->ap->print_id, dev->link->pmp + dev->devno,
		   &vaf);

	va_end(args);

	return r;
}
EXPORT_SYMBOL(ata_dev_printk);

void ata_print_version(const struct device *dev, const char *version)
{
	dev_printk(KERN_DEBUG, dev, "version %s\n", version);
}
EXPORT_SYMBOL(ata_print_version);
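/*
 * Example (illustrative sketch): ata_dummy_port_info marks ports that
 * exist in the hardware numbering but must not be driven, e.g. the
 * second channel of a half-populated controller ("foo_info" is a
 * made-up name):
 *
 *	const struct ata_port_info *ppi[] =
 *		{ &foo_info, &ata_dummy_port_info };
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *
 * Commands issued to a dummy port fail with AC_ERR_SYSTEM via
 * ata_dummy_qc_issue() above.
 */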
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_base_port_ops);
EXPORT_SYMBOL_GPL(sata_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_link_next);
EXPORT_SYMBOL_GPL(ata_dev_next);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_scsi_unlock_native_capacity);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_slave_link_init);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(atapi_cmd_type);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_pack_xfermask);
EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
EXPORT_SYMBOL_GPL(ata_mode_string);
EXPORT_SYMBOL_GPL(ata_id_xfermask);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_msleep);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(__ata_change_queue_depth);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_find_mode);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);
EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_std_error_handler);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_ignore);
EXPORT_SYMBOL_GPL(ata_cable_sata);