/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 *  Standards documents from:
 *	http://www.t13.org (ATA standards, PCI DMA IDE spec)
 *	http://www.t10.org (SCSI MMC - for ATAPI MMC)
 *	http://www.sata-io.org (SATA)
 *	http://www.compactflash.org (CF)
 *	http://www.qic.org (QIC157 - Tape and DSC)
 *	http://www.ce-ata.org (CE-ATA: not supported)
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <linux/async.h>
#include <linux/log2.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/byteorder.h>
#include <linux/cdrom.h>
#include <linux/ratelimit.h>

#include "libata.h"


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]		= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]		= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]		= { 100, 2000, 5000 };

const struct ata_port_operations ata_base_port_ops = {
	.prereset		= ata_std_prereset,
	.postreset		= ata_std_postreset,
	.error_handler		= ata_std_error_handler,
};

const struct ata_port_operations sata_port_ops = {
	.inherits		= &ata_base_port_ops,

	.qc_defer		= ata_std_qc_defer,
	.hardreset		= sata_std_hardreset,
};

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

unsigned int ata_print_id = 1;

struct workqueue_struct *ata_aux_wq;
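/*
 * Illustration only: a low-level driver typically builds its own
 * ata_port_operations on top of the tables above via .inherits; the
 * "my_sata_ops"/"my_hardreset" names in this sketch are made up:
 *
 *	static struct ata_port_operations my_sata_ops = {
 *		.inherits	= &sata_port_ops,
 *		.hardreset	= my_hardreset,
 *	};
 *
 * Operations left unset fall back to the inherited table.
 */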
struct ata_force_param {
	const char	*name;
	unsigned int	cbl;
	int		spd_limit;
	unsigned long	xfer_mask;
	unsigned int	horkage_on;
	unsigned int	horkage_off;
	unsigned int	lflags;
};

struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

static int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on [default])");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off [default], 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices (0=off, 1=on [default])");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off [default], 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disable the use of ACPI in probe/suspend/resume (0=off [default], 1=on)");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands (0=off [default], 1=on)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


static bool ata_sstatus_online(u32 sstatus)
{
	return (sstatus & 0xf) == 0x3;
}
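/*
 * For illustration, the "force" parameter above takes strings of the form
 * documented in Documentation/kernel-parameters.txt; for example (sketch
 * only, reusing the "1.00:40c,udma4" form quoted in the comments below):
 *
 *	libata.force=1.00:40c,udma4
 *
 * forces a 40-wire cable assumption and UDMA/66 for device 0 on the first
 * fan-out link of port 1.  Parsed entries end up in ata_force_tbl[] and are
 * applied by the ata_force_*() helpers below.
 */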
/**
 *	ata_link_next - link iteration helper
 *	@link: the previous link, NULL to start
 *	@ap: ATA port containing links to iterate
 *	@mode: iteration mode, one of ATA_LITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next link.
 */
struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap,
			       enum ata_link_iter_mode mode)
{
	BUG_ON(mode != ATA_LITER_EDGE &&
	       mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST);

	/* NULL link indicates start of iteration */
	if (!link)
		switch (mode) {
		case ATA_LITER_EDGE:
		case ATA_LITER_PMP_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_HOST_FIRST:
			return &ap->link;
		}

	/* we just iterated over the host link, what's next? */
	if (link == &ap->link)
		switch (mode) {
		case ATA_LITER_HOST_FIRST:
			if (sata_pmp_attached(ap))
				return ap->pmp_link;
			/* fall through */
		case ATA_LITER_PMP_FIRST:
			if (unlikely(ap->slave_link))
				return ap->slave_link;
			/* fall through */
		case ATA_LITER_EDGE:
			return NULL;
		}

	/* slave_link excludes PMP */
	if (unlikely(link == ap->slave_link))
		return NULL;

	/* we were over a PMP link */
	if (++link < ap->pmp_link + ap->nr_pmp_links)
		return link;

	if (mode == ATA_LITER_PMP_FIRST)
		return &ap->link;

	return NULL;
}

/**
 *	ata_dev_next - device iteration helper
 *	@dev: the previous device, NULL to start
 *	@link: ATA link containing devices to iterate
 *	@mode: iteration mode, one of ATA_DITER_*
 *
 *	LOCKING:
 *	Host lock or EH context.
 *
 *	RETURNS:
 *	Pointer to the next device.
 */
struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link,
				enum ata_dev_iter_mode mode)
{
	BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE &&
	       mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE);

	/* NULL dev indicates start of iteration */
	if (!dev)
		switch (mode) {
		case ATA_DITER_ENABLED:
		case ATA_DITER_ALL:
			dev = link->device;
			goto check;
		case ATA_DITER_ENABLED_REVERSE:
		case ATA_DITER_ALL_REVERSE:
			dev = link->device + ata_link_max_devices(link) - 1;
			goto check;
		}

 next:
	/* move to the next one */
	switch (mode) {
	case ATA_DITER_ENABLED:
	case ATA_DITER_ALL:
		if (++dev < link->device + ata_link_max_devices(link))
			goto check;
		return NULL;
	case ATA_DITER_ENABLED_REVERSE:
	case ATA_DITER_ALL_REVERSE:
		if (--dev >= link->device)
			goto check;
		return NULL;
	}

 check:
	if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) &&
	    !ata_dev_enabled(dev))
		goto next;
	return dev;
}

/**
 *	ata_dev_phys_link - find physical link for a device
 *	@dev: ATA device to look up physical link for
 *
 *	Look up physical link which @dev is attached to.  Note that
 *	this is different from @dev->link only when @dev is on slave
 *	link.  For all other cases, it's the same as @dev->link.
 *
 *	LOCKING:
 *	Don't care.
 *
 *	RETURNS:
 *	Pointer to the found physical link.
 */
struct ata_link *ata_dev_phys_link(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	if (!ap->slave_link)
		return dev->link;
	if (!dev->devno)
		return &ap->link;
	return ap->slave_link;
}
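/*
 * Callers normally reach the two iterators above through the
 * ata_for_each_link() and ata_for_each_dev() wrappers from
 * <linux/libata.h>.  A sketch of walking every enabled device on a port:
 *
 *	struct ata_link *link;
 *	struct ata_device *dev;
 *
 *	ata_for_each_link(link, ap, EDGE)
 *		ata_for_each_dev(dev, link, ENABLED)
 *			ata_dev_printk(dev, KERN_DEBUG, "found\n");
 */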
/**
 *	ata_force_cbl - force cable type according to libata.force
 *	@ap: ATA port of interest
 *
 *	Force cable type according to libata.force and whine about it.
 *	The last entry which has matching port number is used, so it
 *	can be specified as part of device force parameters.  For
 *	example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
 *	same effect.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_force_cbl(struct ata_port *ap)
{
	int i;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != ap->print_id)
			continue;

		if (fe->param.cbl == ATA_CBL_NONE)
			continue;

		ap->cbl = fe->param.cbl;
		ata_port_printk(ap, KERN_NOTICE,
				"FORCE: cable set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_link_limits - force link limits according to libata.force
 *	@link: ATA link of interest
 *
 *	Force link flags and SATA spd limit according to libata.force
 *	and whine about it.  When only the port part is specified
 *	(e.g. 1:), the limit applies to all links connected to both
 *	the host link and all fan-out ports connected via PMP.  If the
 *	device part is specified as 0 (e.g. 1.00:), it specifies the
 *	first fan-out link not the host link.  Device number 15 always
 *	points to the host link whether PMP is attached or not.  If the
 *	controller has slave link, device number 16 points to it.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_link_limits(struct ata_link *link)
{
	bool did_spd = false;
	int linkno = link->pmp;
	int i;

	if (ata_is_host_link(link))
		linkno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != linkno)
			continue;

		/* only honor the first spd limit */
		if (!did_spd && fe->param.spd_limit) {
			link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: PHY spd limit set to %s\n",
					fe->param.name);
			did_spd = true;
		}

		/* let lflags stack */
		if (fe->param.lflags) {
			link->flags |= fe->param.lflags;
			ata_link_printk(link, KERN_NOTICE,
					"FORCE: link flag 0x%x forced -> 0x%x\n",
					fe->param.lflags, link->flags);
		}
	}
}
/**
 *	ata_force_xfermask - force xfermask according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force xfer_mask according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_xfermask(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = ata_force_tbl_size - 1; i >= 0; i--) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];
		unsigned long pio_mask, mwdma_mask, udma_mask;

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!fe->param.xfer_mask)
			continue;

		ata_unpack_xfermask(fe->param.xfer_mask,
				    &pio_mask, &mwdma_mask, &udma_mask);
		if (udma_mask)
			dev->udma_mask = udma_mask;
		else if (mwdma_mask) {
			dev->udma_mask = 0;
			dev->mwdma_mask = mwdma_mask;
		} else {
			dev->udma_mask = 0;
			dev->mwdma_mask = 0;
			dev->pio_mask = pio_mask;
		}

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: xfer_mask set to %s\n", fe->param.name);
		return;
	}
}

/**
 *	ata_force_horkage - force horkage according to libata.force
 *	@dev: ATA device of interest
 *
 *	Force horkage according to libata.force and whine about it.
 *	For consistency with link selection, device number 15 selects
 *	the first device connected to the host link.
 *
 *	LOCKING:
 *	EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
	int devno = dev->link->pmp + dev->devno;
	int alt_devno = devno;
	int i;

	/* allow n.15/16 for devices attached to host port */
	if (ata_is_host_link(dev->link))
		alt_devno += 15;

	for (i = 0; i < ata_force_tbl_size; i++) {
		const struct ata_force_ent *fe = &ata_force_tbl[i];

		if (fe->port != -1 && fe->port != dev->link->ap->print_id)
			continue;

		if (fe->device != -1 && fe->device != devno &&
		    fe->device != alt_devno)
			continue;

		if (!(~dev->horkage & fe->param.horkage_on) &&
		    !(dev->horkage & fe->param.horkage_off))
			continue;

		dev->horkage |= fe->param.horkage_on;
		dev->horkage &= ~fe->param.horkage_off;

		ata_dev_printk(dev, KERN_NOTICE,
			"FORCE: horkage modified (%s)\n", fe->param.name);
	}
}

/**
 *	atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *	@opcode: SCSI opcode
 *
 *	Determine ATAPI command type from @opcode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
	switch (opcode) {
	case GPCMD_READ_10:
	case GPCMD_READ_12:
		return ATAPI_READ;

	case GPCMD_WRITE_10:
	case GPCMD_WRITE_12:
	case GPCMD_WRITE_AND_VERIFY_10:
		return ATAPI_WRITE;

	case GPCMD_READ_CD:
	case GPCMD_READ_CD_MSF:
		return ATAPI_READ_CD;

	case ATA_16:
	case ATA_12:
		if (atapi_passthru16)
			return ATAPI_PASS_THRU;
		/* fall thru */
	default:
		return ATAPI_MISC;
	}
}
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will be output
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
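/*
 * As a quick illustration of ata_tf_to_fis() above: a caller with a
 * 20-byte buffer and a prepared taskfile builds a Register - Host to
 * Device FIS roughly like this (sketch only):
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *	// fis[0] == 0x27; bit 7 of fis[1] is set because is_cmd was 1
 *
 * ata_tf_from_fis() performs the reverse mapping for D2H FIS data.
 */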
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= (u64)tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		if (!sect) {
			ata_dev_printk(dev, KERN_WARNING, "device reported "
				       "invalid CHS sector 0\n");
			sect = 1; /* oh well */
		}

		block = (cyl * dev->heads + head) * dev->sectors + sect - 1;
	}

	return block;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}
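/*
 * ata_pack_xfermask() and ata_unpack_xfermask() above are symmetric as
 * long as each mask fits in its field; a sketch of a round trip with
 * illustrative values:
 *
 *	unsigned long xfer_mask, pio, mwdma, udma;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *	// pio == 0x1f, mwdma == 0x07, udma == 0x3f again
 */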
/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
unsigned long ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
				& ~((1 << ent->shift) - 1);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
int ata_xfer_mode2shift(unsigned long xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @modemask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@mode_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
		"6.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because disks are too stupid to know that
	 * if the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL, and instead they
	 * just give up.  So, for medium_power to work at
	 * all, we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}

/**
 *	ata_dev_enable_pm - enable SATA interface power management
 *	@dev:  device to enable power management
 *	@policy: the link power management policy
 *
 *	Enable SATA Interface power management.  This will enable
 *	Device Interface Power Management (DIPM) for min_power
 *	policy, and then call driver specific callbacks for
 *	enabling Host Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: -EINVAL if IPM is not supported, 0 otherwise.
 */
void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
{
	int rc = 0;
	struct ata_port *ap = dev->link->ap;

	/* set HIPM first, then DIPM */
	if (ap->ops->enable_pm)
		rc = ap->ops->enable_pm(ap, policy);
	if (rc)
		goto enable_pm_out;
	rc = ata_dev_set_dipm(dev, policy);

enable_pm_out:
	if (rc)
		ap->pm_policy = MAX_PERFORMANCE;
	else
		ap->pm_policy = policy;
	return /* rc */;	/* hopefully we can use 'rc' eventually */
}

#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, call driver specific callbacks for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */

void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EH_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}

#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures for
	 * ATA/ATAPI devices.  Then, the Serial ATA II: Port Multiplier
	 * specification began to use 0x69/0x96 to identify port
	 * multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 * Unfortunately, WDC WD1600JS-62MHB5 (a hard drive) reports
	 * SEMB signature.  This is worked around in
	 * ata_dev_read_id().
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		DPRINTK("found SEMB device by sig (could be ATA device)\n");
		return ATA_DEV_SEMB;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	BUG_ON(len & 1);

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
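/*
 * A typical consumer of these string helpers pulls a fixed-width text
 * field out of the IDENTIFY data, e.g. the model string via
 * ata_id_c_string() below (sketch only; ATA_ID_PROD and ATA_ID_PROD_LEN
 * come from <linux/ata.h>):
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, model, ATA_ID_PROD, sizeof(model));
 */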
/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2);
		else
			return ata_id_u32(id, ATA_ID_LBA_CAPACITY);
	} else {
		if (ata_id_current_chs_valid(id))
			return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] *
			       id[ATA_ID_CUR_SECTORS];
		else
			return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] *
			       id[ATA_ID_SECTORS];
	}
}

u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

u64 ata_tf_to_lba(const struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return sectors;
}

/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf) + 1;
	else
		*max_sectors = ata_tf_to_lba(&tf) + 1;
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}

/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	bool unlock_hpa = ata_ignore_hpa || dev->flags & ATA_DFLAG_UNLOCK_HPA;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !unlock_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}
	dev->n_native_sectors = native_sectors;

	/* nothing to do? */
	if (native_sectors <= sectors || !unlock_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 *	Process compact flash extended modes
		 */
		int pio = (id[ATA_ID_CFA_MODES] >> 0) & 0x7;
		int dma = (id[ATA_ID_CFA_MODES] >> 3) & 0x7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_sff_flush_pio_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	spin_unlock_irqrestore(ap->lock, flags);

	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen,
			   unsigned long timeout)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
				    timeout);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
}
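/*
 * For example, a caller can issue a register-only command such as a
 * cache flush through the helper above (sketch only; error handling
 * trimmed, ATA_CMD_FLUSH comes from <linux/ata.h>):
 *
 *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_WARNING, "flush failed\n");
 */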
/**
 *	ata_pio_need_iordy	-	check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY. Used
 *	by various controllers for chip configuration.
 */
unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Don't set IORDY if we're preparing for reset.  IORDY may
	 * lead to controller lock up on certain controllers if the
	 * port is not occupied.  See bko#11703 for details.
	 */
	if (adev->link->ap->pflags & ATA_PFLAG_RESETTING)
		return 0;
	/* Controller doesn't support IORDY.  Probably a pointless
	 * check as the caller should know this.
	 */
	if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6.  */
	if (ata_id_is_cfa(adev->id)
	    && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6))
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 *	ata_pio_mask_no_iordy	-	Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy. Return
 *	-1 if no iordy mode is available.
 */
static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 *	ata_do_dev_read_id		-	default ID read method
 *	@dev: device
 *	@tf: proposed taskfile
 *	@id: data buffer
 *
 *	Issue the identify taskfile and hand back the buffer containing
 *	identify data. For some RAID controllers and for pre ATA devices
 *	this function is wrapped or replaced by the driver
 */
unsigned int ata_do_dev_read_id(struct ata_device *dev,
				struct ata_taskfile *tf, u16 *id)
{
	return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
				 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
}

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
2029 */ 2030 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 2031 unsigned int flags, u16 *id) 2032 { 2033 struct ata_port *ap = dev->link->ap; 2034 unsigned int class = *p_class; 2035 struct ata_taskfile tf; 2036 unsigned int err_mask = 0; 2037 const char *reason; 2038 bool is_semb = class == ATA_DEV_SEMB; 2039 int may_fallback = 1, tried_spinup = 0; 2040 int rc; 2041 2042 if (ata_msg_ctl(ap)) 2043 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); 2044 2045 retry: 2046 ata_tf_init(dev, &tf); 2047 2048 switch (class) { 2049 case ATA_DEV_SEMB: 2050 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */ 2051 case ATA_DEV_ATA: 2052 tf.command = ATA_CMD_ID_ATA; 2053 break; 2054 case ATA_DEV_ATAPI: 2055 tf.command = ATA_CMD_ID_ATAPI; 2056 break; 2057 default: 2058 rc = -ENODEV; 2059 reason = "unsupported class"; 2060 goto err_out; 2061 } 2062 2063 tf.protocol = ATA_PROT_PIO; 2064 2065 /* Some devices choke if TF registers contain garbage. Make 2066 * sure those are properly initialized. 2067 */ 2068 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2069 2070 /* Device presence detection is unreliable on some 2071 * controllers. Always poll IDENTIFY if available. 2072 */ 2073 tf.flags |= ATA_TFLAG_POLLING; 2074 2075 if (ap->ops->read_id) 2076 err_mask = ap->ops->read_id(dev, &tf, id); 2077 else 2078 err_mask = ata_do_dev_read_id(dev, &tf, id); 2079 2080 if (err_mask) { 2081 if (err_mask & AC_ERR_NODEV_HINT) { 2082 ata_dev_printk(dev, KERN_DEBUG, 2083 "NODEV after polling detection\n"); 2084 return -ENOENT; 2085 } 2086 2087 if (is_semb) { 2088 ata_dev_printk(dev, KERN_INFO, "IDENTIFY failed on " 2089 "device w/ SEMB sig, disabled\n"); 2090 /* SEMB is not supported yet */ 2091 *p_class = ATA_DEV_SEMB_UNSUP; 2092 return 0; 2093 } 2094 2095 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { 2096 /* Device or controller might have reported 2097 * the wrong device class. Give a shot at the 2098 * other IDENTIFY if the current one is 2099 * aborted by the device. 2100 */ 2101 if (may_fallback) { 2102 may_fallback = 0; 2103 2104 if (class == ATA_DEV_ATA) 2105 class = ATA_DEV_ATAPI; 2106 else 2107 class = ATA_DEV_ATA; 2108 goto retry; 2109 } 2110 2111 /* Control reaches here iff the device aborted 2112 * both flavors of IDENTIFYs which happens 2113 * sometimes with phantom devices. 2114 */ 2115 ata_dev_printk(dev, KERN_DEBUG, 2116 "both IDENTIFYs aborted, assuming NODEV\n"); 2117 return -ENOENT; 2118 } 2119 2120 rc = -EIO; 2121 reason = "I/O error"; 2122 goto err_out; 2123 } 2124 2125 /* Falling back doesn't make sense if ID data was read 2126 * successfully at least once. 2127 */ 2128 may_fallback = 0; 2129 2130 swap_buf_le16(id, ATA_ID_WORDS); 2131 2132 /* sanity check */ 2133 rc = -EINVAL; 2134 reason = "device reports invalid type"; 2135 2136 if (class == ATA_DEV_ATA) { 2137 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) 2138 goto err_out; 2139 } else { 2140 if (ata_id_is_ata(id)) 2141 goto err_out; 2142 } 2143 2144 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) { 2145 tried_spinup = 1; 2146 /* 2147 * Drive powered-up in standby mode, and requires a specific 2148 * SET_FEATURES spin-up subcommand before it will accept 2149 * anything other than the original IDENTIFY command. 
2150 */ 2151 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); 2152 if (err_mask && id[2] != 0x738c) { 2153 rc = -EIO; 2154 reason = "SPINUP failed"; 2155 goto err_out; 2156 } 2157 /* 2158 * If the drive initially returned incomplete IDENTIFY info, 2159 * we now must reissue the IDENTIFY command. 2160 */ 2161 if (id[2] == 0x37c8) 2162 goto retry; 2163 } 2164 2165 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { 2166 /* 2167 * The exact sequence expected by certain pre-ATA4 drives is: 2168 * SRST RESET 2169 * IDENTIFY (optional in early ATA) 2170 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) 2171 * anything else.. 2172 * Some drives were very specific about that exact sequence. 2173 * 2174 * Note that ATA4 says lba is mandatory so the second check 2175 * should never trigger. 2176 */ 2177 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 2178 err_mask = ata_dev_init_params(dev, id[3], id[6]); 2179 if (err_mask) { 2180 rc = -EIO; 2181 reason = "INIT_DEV_PARAMS failed"; 2182 goto err_out; 2183 } 2184 2185 /* current CHS translation info (id[53-58]) might be 2186 * changed. reread the identify device info. 2187 */ 2188 flags &= ~ATA_READID_POSTRESET; 2189 goto retry; 2190 } 2191 } 2192 2193 *p_class = class; 2194 2195 return 0; 2196 2197 err_out: 2198 if (ata_msg_warn(ap)) 2199 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY " 2200 "(%s, err_mask=0x%x)\n", reason, err_mask); 2201 return rc; 2202 } 2203 2204 static int ata_do_link_spd_horkage(struct ata_device *dev) 2205 { 2206 struct ata_link *plink = ata_dev_phys_link(dev); 2207 u32 target, target_limit; 2208 2209 if (!sata_scr_valid(plink)) 2210 return 0; 2211 2212 if (dev->horkage & ATA_HORKAGE_1_5_GBPS) 2213 target = 1; 2214 else 2215 return 0; 2216 2217 target_limit = (1 << target) - 1; 2218 2219 /* if already on stricter limit, no need to push further */ 2220 if (plink->sata_spd_limit <= target_limit) 2221 return 0; 2222 2223 plink->sata_spd_limit = target_limit; 2224 2225 /* Request another EH round by returning -EAGAIN if link is 2226 * going faster than the target speed. Forward progress is 2227 * guaranteed by setting sata_spd_limit to target_limit above. 
2228 */ 2229 if (plink->sata_spd > target) { 2230 ata_dev_printk(dev, KERN_INFO, 2231 "applying link speed limit horkage to %s\n", 2232 sata_spd_string(target)); 2233 return -EAGAIN; 2234 } 2235 return 0; 2236 } 2237 2238 static inline u8 ata_dev_knobble(struct ata_device *dev) 2239 { 2240 struct ata_port *ap = dev->link->ap; 2241 2242 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2243 return 0; 2244 2245 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2246 } 2247 2248 static int ata_dev_config_ncq(struct ata_device *dev, 2249 char *desc, size_t desc_sz) 2250 { 2251 struct ata_port *ap = dev->link->ap; 2252 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2253 unsigned int err_mask; 2254 char *aa_desc = ""; 2255 2256 if (!ata_id_has_ncq(dev->id)) { 2257 desc[0] = '\0'; 2258 return 0; 2259 } 2260 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2261 snprintf(desc, desc_sz, "NCQ (not used)"); 2262 return 0; 2263 } 2264 if (ap->flags & ATA_FLAG_NCQ) { 2265 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2266 dev->flags |= ATA_DFLAG_NCQ; 2267 } 2268 2269 if (!(dev->horkage & ATA_HORKAGE_BROKEN_FPDMA_AA) && 2270 (ap->flags & ATA_FLAG_FPDMA_AA) && 2271 ata_id_has_fpdma_aa(dev->id)) { 2272 err_mask = ata_dev_set_feature(dev, SETFEATURES_SATA_ENABLE, 2273 SATA_FPDMA_AA); 2274 if (err_mask) { 2275 ata_dev_printk(dev, KERN_ERR, "failed to enable AA" 2276 "(error_mask=0x%x)\n", err_mask); 2277 if (err_mask != AC_ERR_DEV) { 2278 dev->horkage |= ATA_HORKAGE_BROKEN_FPDMA_AA; 2279 return -EIO; 2280 } 2281 } else 2282 aa_desc = ", AA"; 2283 } 2284 2285 if (hdepth >= ddepth) 2286 snprintf(desc, desc_sz, "NCQ (depth %d)%s", ddepth, aa_desc); 2287 else 2288 snprintf(desc, desc_sz, "NCQ (depth %d/%d)%s", hdepth, 2289 ddepth, aa_desc); 2290 return 0; 2291 } 2292 2293 /** 2294 * ata_dev_configure - Configure the specified ATA/ATAPI device 2295 * @dev: Target device to configure 2296 * 2297 * Configure @dev according to @dev->id. Generic and low-level 2298 * driver specific fixups are also applied. 2299 * 2300 * LOCKING: 2301 * Kernel thread context (may sleep) 2302 * 2303 * RETURNS: 2304 * 0 on success, -errno otherwise 2305 */ 2306 int ata_dev_configure(struct ata_device *dev) 2307 { 2308 struct ata_port *ap = dev->link->ap; 2309 struct ata_eh_context *ehc = &dev->link->eh_context; 2310 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2311 const u16 *id = dev->id; 2312 unsigned long xfer_mask; 2313 char revbuf[7]; /* XYZ-99\0 */ 2314 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2315 char modelbuf[ATA_ID_PROD_LEN+1]; 2316 int rc; 2317 2318 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2319 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n", 2320 __func__); 2321 return 0; 2322 } 2323 2324 if (ata_msg_probe(ap)) 2325 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); 2326 2327 /* set horkage */ 2328 dev->horkage |= ata_dev_blacklisted(dev); 2329 ata_force_horkage(dev); 2330 2331 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2332 ata_dev_printk(dev, KERN_INFO, 2333 "unsupported device, disabling\n"); 2334 ata_dev_disable(dev); 2335 return 0; 2336 } 2337 2338 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2339 dev->class == ATA_DEV_ATAPI) { 2340 ata_dev_printk(dev, KERN_WARNING, 2341 "WARNING: ATAPI is %s, device ignored.\n", 2342 atapi_enabled ? 
"not supported with this driver" 2343 : "disabled"); 2344 ata_dev_disable(dev); 2345 return 0; 2346 } 2347 2348 rc = ata_do_link_spd_horkage(dev); 2349 if (rc) 2350 return rc; 2351 2352 /* let ACPI work its magic */ 2353 rc = ata_acpi_on_devcfg(dev); 2354 if (rc) 2355 return rc; 2356 2357 /* massage HPA, do it early as it might change IDENTIFY data */ 2358 rc = ata_hpa_resize(dev); 2359 if (rc) 2360 return rc; 2361 2362 /* print device capabilities */ 2363 if (ata_msg_probe(ap)) 2364 ata_dev_printk(dev, KERN_DEBUG, 2365 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2366 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2367 __func__, 2368 id[49], id[82], id[83], id[84], 2369 id[85], id[86], id[87], id[88]); 2370 2371 /* initialize to-be-configured parameters */ 2372 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2373 dev->max_sectors = 0; 2374 dev->cdb_len = 0; 2375 dev->n_sectors = 0; 2376 dev->cylinders = 0; 2377 dev->heads = 0; 2378 dev->sectors = 0; 2379 dev->multi_count = 0; 2380 2381 /* 2382 * common ATA, ATAPI feature tests 2383 */ 2384 2385 /* find max transfer mode; for printk only */ 2386 xfer_mask = ata_id_xfermask(id); 2387 2388 if (ata_msg_probe(ap)) 2389 ata_dump_id(id); 2390 2391 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2392 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2393 sizeof(fwrevbuf)); 2394 2395 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2396 sizeof(modelbuf)); 2397 2398 /* ATA-specific feature tests */ 2399 if (dev->class == ATA_DEV_ATA) { 2400 if (ata_id_is_cfa(id)) { 2401 /* CPRM may make this media unusable */ 2402 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2403 ata_dev_printk(dev, KERN_WARNING, 2404 "supports DRM functions and may " 2405 "not be fully accessable.\n"); 2406 snprintf(revbuf, 7, "CFA"); 2407 } else { 2408 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2409 /* Warn the user if the device has TPM extensions */ 2410 if (ata_id_has_tpm(id)) 2411 ata_dev_printk(dev, KERN_WARNING, 2412 "supports DRM functions and may " 2413 "not be fully accessable.\n"); 2414 } 2415 2416 dev->n_sectors = ata_id_n_sectors(id); 2417 2418 /* get current R/W Multiple count setting */ 2419 if ((dev->id[47] >> 8) == 0x80 && (dev->id[59] & 0x100)) { 2420 unsigned int max = dev->id[47] & 0xff; 2421 unsigned int cnt = dev->id[59] & 0xff; 2422 /* only recognize/allow powers of two here */ 2423 if (is_power_of_2(max) && is_power_of_2(cnt)) 2424 if (cnt <= max) 2425 dev->multi_count = cnt; 2426 } 2427 2428 if (ata_id_has_lba(id)) { 2429 const char *lba_desc; 2430 char ncq_desc[24]; 2431 2432 lba_desc = "LBA"; 2433 dev->flags |= ATA_DFLAG_LBA; 2434 if (ata_id_has_lba48(id)) { 2435 dev->flags |= ATA_DFLAG_LBA48; 2436 lba_desc = "LBA48"; 2437 2438 if (dev->n_sectors >= (1UL << 28) && 2439 ata_id_has_flush_ext(id)) 2440 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2441 } 2442 2443 /* config NCQ */ 2444 rc = ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2445 if (rc) 2446 return rc; 2447 2448 /* print device info to dmesg */ 2449 if (ata_msg_drv(ap) && print_info) { 2450 ata_dev_printk(dev, KERN_INFO, 2451 "%s: %s, %s, max %s\n", 2452 revbuf, modelbuf, fwrevbuf, 2453 ata_mode_string(xfer_mask)); 2454 ata_dev_printk(dev, KERN_INFO, 2455 "%Lu sectors, multi %u: %s %s\n", 2456 (unsigned long long)dev->n_sectors, 2457 dev->multi_count, lba_desc, ncq_desc); 2458 } 2459 } else { 2460 /* CHS */ 2461 2462 /* Default translation */ 2463 dev->cylinders = id[1]; 2464 dev->heads = id[3]; 2465 dev->sectors = id[6]; 2466 2467 if (ata_id_current_chs_valid(id)) { 2468 /* Current CHS translation is 
valid. */ 2469 dev->cylinders = id[54]; 2470 dev->heads = id[55]; 2471 dev->sectors = id[56]; 2472 } 2473 2474 /* print device info to dmesg */ 2475 if (ata_msg_drv(ap) && print_info) { 2476 ata_dev_printk(dev, KERN_INFO, 2477 "%s: %s, %s, max %s\n", 2478 revbuf, modelbuf, fwrevbuf, 2479 ata_mode_string(xfer_mask)); 2480 ata_dev_printk(dev, KERN_INFO, 2481 "%Lu sectors, multi %u, CHS %u/%u/%u\n", 2482 (unsigned long long)dev->n_sectors, 2483 dev->multi_count, dev->cylinders, 2484 dev->heads, dev->sectors); 2485 } 2486 } 2487 2488 dev->cdb_len = 16; 2489 } 2490 2491 /* ATAPI-specific feature tests */ 2492 else if (dev->class == ATA_DEV_ATAPI) { 2493 const char *cdb_intr_string = ""; 2494 const char *atapi_an_string = ""; 2495 const char *dma_dir_string = ""; 2496 u32 sntf; 2497 2498 rc = atapi_cdb_len(id); 2499 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2500 if (ata_msg_warn(ap)) 2501 ata_dev_printk(dev, KERN_WARNING, 2502 "unsupported CDB len\n"); 2503 rc = -EINVAL; 2504 goto err_out_nosup; 2505 } 2506 dev->cdb_len = (unsigned int) rc; 2507 2508 /* Enable ATAPI AN if both the host and device have 2509 * the support. If PMP is attached, SNTF is required 2510 * to enable ATAPI AN to discern between PHY status 2511 * changed notifications and ATAPI ANs. 2512 */ 2513 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2514 (!sata_pmp_attached(ap) || 2515 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2516 unsigned int err_mask; 2517 2518 /* issue SET feature command to turn this on */ 2519 err_mask = ata_dev_set_feature(dev, 2520 SETFEATURES_SATA_ENABLE, SATA_AN); 2521 if (err_mask) 2522 ata_dev_printk(dev, KERN_ERR, 2523 "failed to enable ATAPI AN " 2524 "(err_mask=0x%x)\n", err_mask); 2525 else { 2526 dev->flags |= ATA_DFLAG_AN; 2527 atapi_an_string = ", ATAPI AN"; 2528 } 2529 } 2530 2531 if (ata_id_cdb_intr(dev->id)) { 2532 dev->flags |= ATA_DFLAG_CDB_INTR; 2533 cdb_intr_string = ", CDB intr"; 2534 } 2535 2536 if (atapi_dmadir || atapi_id_dmadir(dev->id)) { 2537 dev->flags |= ATA_DFLAG_DMADIR; 2538 dma_dir_string = ", DMADIR"; 2539 } 2540 2541 /* print device info to dmesg */ 2542 if (ata_msg_drv(ap) && print_info) 2543 ata_dev_printk(dev, KERN_INFO, 2544 "ATAPI: %s, %s, max %s%s%s%s\n", 2545 modelbuf, fwrevbuf, 2546 ata_mode_string(xfer_mask), 2547 cdb_intr_string, atapi_an_string, 2548 dma_dir_string); 2549 } 2550 2551 /* determine max_sectors */ 2552 dev->max_sectors = ATA_MAX_SECTORS; 2553 if (dev->flags & ATA_DFLAG_LBA48) 2554 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2555 2556 if (!(dev->horkage & ATA_HORKAGE_IPM)) { 2557 if (ata_id_has_hipm(dev->id)) 2558 dev->flags |= ATA_DFLAG_HIPM; 2559 if (ata_id_has_dipm(dev->id)) 2560 dev->flags |= ATA_DFLAG_DIPM; 2561 } 2562 2563 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2564 200 sectors */ 2565 if (ata_dev_knobble(dev)) { 2566 if (ata_msg_drv(ap) && print_info) 2567 ata_dev_printk(dev, KERN_INFO, 2568 "applying bridge limits\n"); 2569 dev->udma_mask &= ATA_UDMA5; 2570 dev->max_sectors = ATA_MAX_SECTORS; 2571 } 2572 2573 if ((dev->class == ATA_DEV_ATAPI) && 2574 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2575 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2576 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2577 } 2578 2579 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2580 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2581 dev->max_sectors); 2582 2583 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) { 2584 dev->horkage |= ATA_HORKAGE_IPM; 2585 2586 /* reset link pm_policy for this port to no pm */ 2587 
ap->pm_policy = MAX_PERFORMANCE; 2588 } 2589 2590 if (ap->ops->dev_config) 2591 ap->ops->dev_config(dev); 2592 2593 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2594 /* Let the user know. We don't want to disallow opens for 2595 rescue purposes, or in case the vendor is just a blithering 2596 idiot. Do this after the dev_config call as some controllers 2597 with buggy firmware may want to avoid reporting false device 2598 bugs */ 2599 2600 if (print_info) { 2601 ata_dev_printk(dev, KERN_WARNING, 2602 "Drive reports diagnostics failure. This may indicate a drive\n"); 2603 ata_dev_printk(dev, KERN_WARNING, 2604 "fault or invalid emulation. Contact drive vendor for information.\n"); 2605 } 2606 } 2607 2608 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2609 ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires " 2610 "firmware update to be fully functional.\n"); 2611 ata_dev_printk(dev, KERN_WARNING, " contact the vendor " 2612 "or visit http://ata.wiki.kernel.org.\n"); 2613 } 2614 2615 return 0; 2616 2617 err_out_nosup: 2618 if (ata_msg_probe(ap)) 2619 ata_dev_printk(dev, KERN_DEBUG, 2620 "%s: EXIT, err\n", __func__); 2621 return rc; 2622 } 2623 2624 /** 2625 * ata_cable_40wire - return 40 wire cable type 2626 * @ap: port 2627 * 2628 * Helper method for drivers which want to hardwire 40 wire cable 2629 * detection. 2630 */ 2631 2632 int ata_cable_40wire(struct ata_port *ap) 2633 { 2634 return ATA_CBL_PATA40; 2635 } 2636 2637 /** 2638 * ata_cable_80wire - return 80 wire cable type 2639 * @ap: port 2640 * 2641 * Helper method for drivers which want to hardwire 80 wire cable 2642 * detection. 2643 */ 2644 2645 int ata_cable_80wire(struct ata_port *ap) 2646 { 2647 return ATA_CBL_PATA80; 2648 } 2649 2650 /** 2651 * ata_cable_unknown - return unknown PATA cable. 2652 * @ap: port 2653 * 2654 * Helper method for drivers which have no PATA cable detection. 2655 */ 2656 2657 int ata_cable_unknown(struct ata_port *ap) 2658 { 2659 return ATA_CBL_PATA_UNK; 2660 } 2661 2662 /** 2663 * ata_cable_ignore - return ignored PATA cable. 2664 * @ap: port 2665 * 2666 * Helper method for drivers which don't use cable type to limit 2667 * transfer mode. 2668 */ 2669 int ata_cable_ignore(struct ata_port *ap) 2670 { 2671 return ATA_CBL_PATA_IGN; 2672 } 2673 2674 /** 2675 * ata_cable_sata - return SATA cable type 2676 * @ap: port 2677 * 2678 * Helper method for drivers which have SATA cables 2679 */ 2680 2681 int ata_cable_sata(struct ata_port *ap) 2682 { 2683 return ATA_CBL_SATA; 2684 } 2685 2686 /** 2687 * ata_bus_probe - Reset and probe ATA bus 2688 * @ap: Bus to probe 2689 * 2690 * Master ATA bus probing function. Initiates a hardware-dependent 2691 * bus reset, then attempts to identify any devices found on 2692 * the bus. 2693 * 2694 * LOCKING: 2695 * PCI/etc. bus probe sem. 2696 * 2697 * RETURNS: 2698 * Zero on success, negative errno otherwise. 2699 */ 2700 2701 int ata_bus_probe(struct ata_port *ap) 2702 { 2703 unsigned int classes[ATA_MAX_DEVICES]; 2704 int tries[ATA_MAX_DEVICES]; 2705 int rc; 2706 struct ata_device *dev; 2707 2708 ata_for_each_dev(dev, &ap->link, ALL) 2709 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2710 2711 retry: 2712 ata_for_each_dev(dev, &ap->link, ALL) { 2713 /* If we issue an SRST then an ATA drive (not ATAPI) 2714 * may change configuration and be in PIO0 timing. If 2715 * we do a hard reset (or are coming from power on) 2716 * this is true for ATA or ATAPI. 
Until we've set a 2717 * suitable controller mode we should not touch the 2718 * bus as we may be talking too fast. 2719 */ 2720 dev->pio_mode = XFER_PIO_0; 2721 2722 /* If the controller has a pio mode setup function 2723 * then use it to set the chipset to rights. Don't 2724 * touch the DMA setup as that will be dealt with when 2725 * configuring devices. 2726 */ 2727 if (ap->ops->set_piomode) 2728 ap->ops->set_piomode(ap, dev); 2729 } 2730 2731 /* reset and determine device classes */ 2732 ap->ops->phy_reset(ap); 2733 2734 ata_for_each_dev(dev, &ap->link, ALL) { 2735 if (dev->class != ATA_DEV_UNKNOWN) 2736 classes[dev->devno] = dev->class; 2737 else 2738 classes[dev->devno] = ATA_DEV_NONE; 2739 2740 dev->class = ATA_DEV_UNKNOWN; 2741 } 2742 2743 /* read IDENTIFY page and configure devices. We have to do the identify 2744 specific sequence bass-ackwards so that PDIAG- is released by 2745 the slave device */ 2746 2747 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2748 if (tries[dev->devno]) 2749 dev->class = classes[dev->devno]; 2750 2751 if (!ata_dev_enabled(dev)) 2752 continue; 2753 2754 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2755 dev->id); 2756 if (rc) 2757 goto fail; 2758 } 2759 2760 /* Now ask for the cable type as PDIAG- should have been released */ 2761 if (ap->ops->cable_detect) 2762 ap->cbl = ap->ops->cable_detect(ap); 2763 2764 /* We may have SATA bridge glue hiding here irrespective of 2765 * the reported cable types and sensed types. When SATA 2766 * drives indicate we have a bridge, we don't know which end 2767 * of the link the bridge is which is a problem. 2768 */ 2769 ata_for_each_dev(dev, &ap->link, ENABLED) 2770 if (ata_id_is_sata(dev->id)) 2771 ap->cbl = ATA_CBL_SATA; 2772 2773 /* After the identify sequence we can now set up the devices. We do 2774 this in the normal order so that the user doesn't get confused */ 2775 2776 ata_for_each_dev(dev, &ap->link, ENABLED) { 2777 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2778 rc = ata_dev_configure(dev); 2779 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2780 if (rc) 2781 goto fail; 2782 } 2783 2784 /* configure transfer mode */ 2785 rc = ata_set_mode(&ap->link, &dev); 2786 if (rc) 2787 goto fail; 2788 2789 ata_for_each_dev(dev, &ap->link, ENABLED) 2790 return 0; 2791 2792 return -ENODEV; 2793 2794 fail: 2795 tries[dev->devno]--; 2796 2797 switch (rc) { 2798 case -EINVAL: 2799 /* eeek, something went very wrong, give up */ 2800 tries[dev->devno] = 0; 2801 break; 2802 2803 case -ENODEV: 2804 /* give it just one more chance */ 2805 tries[dev->devno] = min(tries[dev->devno], 1); 2806 case -EIO: 2807 if (tries[dev->devno] == 1) { 2808 /* This is the last chance, better to slow 2809 * down than lose it. 2810 */ 2811 sata_down_spd_limit(&ap->link, 0); 2812 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2813 } 2814 } 2815 2816 if (!tries[dev->devno]) 2817 ata_dev_disable(dev); 2818 2819 goto retry; 2820 } 2821 2822 /** 2823 * sata_print_link_status - Print SATA link status 2824 * @link: SATA link to printk link status about 2825 * 2826 * This function prints link speed and status of a SATA link. 2827 * 2828 * LOCKING: 2829 * None. 
2830 */ 2831 static void sata_print_link_status(struct ata_link *link) 2832 { 2833 u32 sstatus, scontrol, tmp; 2834 2835 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2836 return; 2837 sata_scr_read(link, SCR_CONTROL, &scontrol); 2838 2839 if (ata_phys_link_online(link)) { 2840 tmp = (sstatus >> 4) & 0xf; 2841 ata_link_printk(link, KERN_INFO, 2842 "SATA link up %s (SStatus %X SControl %X)\n", 2843 sata_spd_string(tmp), sstatus, scontrol); 2844 } else { 2845 ata_link_printk(link, KERN_INFO, 2846 "SATA link down (SStatus %X SControl %X)\n", 2847 sstatus, scontrol); 2848 } 2849 } 2850 2851 /** 2852 * ata_dev_pair - return other device on cable 2853 * @adev: device 2854 * 2855 * Obtain the other device on the same cable, or if none is 2856 * present NULL is returned 2857 */ 2858 2859 struct ata_device *ata_dev_pair(struct ata_device *adev) 2860 { 2861 struct ata_link *link = adev->link; 2862 struct ata_device *pair = &link->device[1 - adev->devno]; 2863 if (!ata_dev_enabled(pair)) 2864 return NULL; 2865 return pair; 2866 } 2867 2868 /** 2869 * sata_down_spd_limit - adjust SATA spd limit downward 2870 * @link: Link to adjust SATA spd limit for 2871 * @spd_limit: Additional limit 2872 * 2873 * Adjust SATA spd limit of @link downward. Note that this 2874 * function only adjusts the limit. The change must be applied 2875 * using sata_set_spd(). 2876 * 2877 * If @spd_limit is non-zero, the speed is limited to equal to or 2878 * lower than @spd_limit if such speed is supported. If 2879 * @spd_limit is slower than any supported speed, only the lowest 2880 * supported speed is allowed. 2881 * 2882 * LOCKING: 2883 * Inherited from caller. 2884 * 2885 * RETURNS: 2886 * 0 on success, negative errno on failure 2887 */ 2888 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 2889 { 2890 u32 sstatus, spd, mask; 2891 int rc, bit; 2892 2893 if (!sata_scr_valid(link)) 2894 return -EOPNOTSUPP; 2895 2896 /* If SCR can be read, use it to determine the current SPD. 2897 * If not, use cached value in link->sata_spd. 2898 */ 2899 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2900 if (rc == 0 && ata_sstatus_online(sstatus)) 2901 spd = (sstatus >> 4) & 0xf; 2902 else 2903 spd = link->sata_spd; 2904 2905 mask = link->sata_spd_limit; 2906 if (mask <= 1) 2907 return -EINVAL; 2908 2909 /* unconditionally mask off the highest bit */ 2910 bit = fls(mask) - 1; 2911 mask &= ~(1 << bit); 2912 2913 /* Mask off all speeds higher than or equal to the current 2914 * one. Force 1.5Gbps if current SPD is not available. 2915 */ 2916 if (spd > 1) 2917 mask &= (1 << (spd - 1)) - 1; 2918 else 2919 mask &= 1; 2920 2921 /* were we already at the bottom? */ 2922 if (!mask) 2923 return -EINVAL; 2924 2925 if (spd_limit) { 2926 if (mask & ((1 << spd_limit) - 1)) 2927 mask &= (1 << spd_limit) - 1; 2928 else { 2929 bit = ffs(mask) - 1; 2930 mask = 1 << bit; 2931 } 2932 } 2933 2934 link->sata_spd_limit = mask; 2935 2936 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n", 2937 sata_spd_string(fls(mask))); 2938 2939 return 0; 2940 } 2941 2942 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2943 { 2944 struct ata_link *host_link = &link->ap->link; 2945 u32 limit, target, spd; 2946 2947 limit = link->sata_spd_limit; 2948 2949 /* Don't configure downstream link faster than upstream link. 2950 * It doesn't speed up anything and some PMPs choke on such 2951 * configuration. 
2952 */ 2953 if (!ata_is_host_link(link) && host_link->sata_spd) 2954 limit &= (1 << host_link->sata_spd) - 1; 2955 2956 if (limit == UINT_MAX) 2957 target = 0; 2958 else 2959 target = fls(limit); 2960 2961 spd = (*scontrol >> 4) & 0xf; 2962 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 2963 2964 return spd != target; 2965 } 2966 2967 /** 2968 * sata_set_spd_needed - is SATA spd configuration needed 2969 * @link: Link in question 2970 * 2971 * Test whether the spd limit in SControl matches 2972 * @link->sata_spd_limit. This function is used to determine 2973 * whether hardreset is necessary to apply SATA spd 2974 * configuration. 2975 * 2976 * LOCKING: 2977 * Inherited from caller. 2978 * 2979 * RETURNS: 2980 * 1 if SATA spd configuration is needed, 0 otherwise. 2981 */ 2982 static int sata_set_spd_needed(struct ata_link *link) 2983 { 2984 u32 scontrol; 2985 2986 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 2987 return 1; 2988 2989 return __sata_set_spd_needed(link, &scontrol); 2990 } 2991 2992 /** 2993 * sata_set_spd - set SATA spd according to spd limit 2994 * @link: Link to set SATA spd for 2995 * 2996 * Set SATA spd of @link according to sata_spd_limit. 2997 * 2998 * LOCKING: 2999 * Inherited from caller. 3000 * 3001 * RETURNS: 3002 * 0 if spd doesn't need to be changed, 1 if spd has been 3003 * changed. Negative errno if SCR registers are inaccessible. 3004 */ 3005 int sata_set_spd(struct ata_link *link) 3006 { 3007 u32 scontrol; 3008 int rc; 3009 3010 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3011 return rc; 3012 3013 if (!__sata_set_spd_needed(link, &scontrol)) 3014 return 0; 3015 3016 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3017 return rc; 3018 3019 return 1; 3020 } 3021 3022 /* 3023 * This mode timing computation functionality is ported over from 3024 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 3025 */ 3026 /* 3027 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 3028 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 3029 * for UDMA6, which is currently supported only by Maxtor drives. 3030 * 3031 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
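 *
 * Column key (annotation added for readability; the field order below
 * matches the assignments in ata_timing_quantize() further down):
 *
 *	{ mode, setup, act8b, rec8b, cyc8b, active, recover,
 *	  dmack_hold, cycle, udma }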
3032 */ 3033 3034 static const struct ata_timing ata_timing[] = { 3035 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 3036 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 3037 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 3038 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 3039 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 3040 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 3041 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 3042 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 3043 3044 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 3045 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 3046 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 3047 3048 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 3049 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 3050 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 3051 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 3052 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 3053 3054 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 3055 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 3056 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 3057 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 3058 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 3059 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 3060 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 3061 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 3062 3063 { 0xFF } 3064 }; 3065 3066 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 3067 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 3068 3069 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 3070 { 3071 q->setup = EZ(t->setup * 1000, T); 3072 q->act8b = EZ(t->act8b * 1000, T); 3073 q->rec8b = EZ(t->rec8b * 1000, T); 3074 q->cyc8b = EZ(t->cyc8b * 1000, T); 3075 q->active = EZ(t->active * 1000, T); 3076 q->recover = EZ(t->recover * 1000, T); 3077 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 3078 q->cycle = EZ(t->cycle * 1000, T); 3079 q->udma = EZ(t->udma * 1000, UT); 3080 } 3081 3082 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 3083 struct ata_timing *m, unsigned int what) 3084 { 3085 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 3086 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 3087 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 3088 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 3089 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 3090 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 3091 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 3092 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 3093 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 3094 } 3095 3096 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 3097 { 3098 const struct ata_timing *t = ata_timing; 3099 3100 while (xfer_mode > t->mode) 3101 t++; 3102 3103 if (xfer_mode == t->mode) 3104 return t; 3105 return NULL; 3106 } 3107 3108 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 3109 struct ata_timing *t, int T, int UT) 3110 { 3111 const u16 *id = adev->id; 3112 const struct ata_timing *s; 3113 struct ata_timing p; 3114 3115 /* 3116 * Find the mode. 
3117 */ 3118 3119 if (!(s = ata_timing_find_mode(speed))) 3120 return -EINVAL; 3121 3122 memcpy(t, s, sizeof(*s)); 3123 3124 /* 3125 * If the drive is an EIDE drive, it can tell us it needs extended 3126 * PIO/MW_DMA cycle timing. 3127 */ 3128 3129 if (id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 3130 memset(&p, 0, sizeof(p)); 3131 3132 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 3133 if (speed <= XFER_PIO_2) 3134 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO]; 3135 else if ((speed <= XFER_PIO_4) || 3136 (speed == XFER_PIO_5 && !ata_id_is_cfa(id))) 3137 p.cycle = p.cyc8b = id[ATA_ID_EIDE_PIO_IORDY]; 3138 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) 3139 p.cycle = id[ATA_ID_EIDE_DMA_MIN]; 3140 3141 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3142 } 3143 3144 /* 3145 * Convert the timing to bus clock counts. 3146 */ 3147 3148 ata_timing_quantize(t, t, T, UT); 3149 3150 /* 3151 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3152 * S.M.A.R.T. and some other commands. We have to ensure that the 3153 * DMA cycle timing is slower than or equal to the fastest PIO timing. 3154 */ 3155 3156 if (speed > XFER_PIO_6) { 3157 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3158 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3159 } 3160 3161 /* 3162 * Lengthen active & recovery time so that cycle time is correct. 3163 */ 3164 3165 if (t->act8b + t->rec8b < t->cyc8b) { 3166 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3167 t->rec8b = t->cyc8b - t->act8b; 3168 } 3169 3170 if (t->active + t->recover < t->cycle) { 3171 t->active += (t->cycle - (t->active + t->recover)) / 2; 3172 t->recover = t->cycle - t->active; 3173 } 3174 3175 /* In a few cases quantisation may produce enough errors to 3176 leave t->cycle too low for the sum of active and recovery 3177 if so we must correct this */ 3178 if (t->active + t->recover > t->cycle) 3179 t->cycle = t->active + t->recover; 3180 3181 return 0; 3182 } 3183 3184 /** 3185 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3186 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3187 * @cycle: cycle duration in ns 3188 * 3189 * Return matching xfer mode for @cycle. The returned mode is of 3190 * the transfer type specified by @xfer_shift. If @cycle is too 3191 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3192 * than the fastest known mode, the fastest mode is returned. 3193 * 3194 * LOCKING: 3195 * None. 3196 * 3197 * RETURNS: 3198 * Matching xfer_mode, 0xff if no match found.
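 *
 * Worked example (illustrative, using the ata_timing[] table above):
 *
 *	mode = ata_timing_cycle2mode(ATA_SHIFT_UDMA, 120);
 *
 * walks the UDMA entries, keeps XFER_UDMA_0 (120ns) and stops before
 * XFER_UDMA_1 (80ns), so XFER_UDMA_0 is returned. A request faster
 * than 15ns would return XFER_UDMA_6, the fastest known UDMA mode.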
3199 */ 3200 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3201 { 3202 u8 base_mode = 0xff, last_mode = 0xff; 3203 const struct ata_xfer_ent *ent; 3204 const struct ata_timing *t; 3205 3206 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3207 if (ent->shift == xfer_shift) 3208 base_mode = ent->base; 3209 3210 for (t = ata_timing_find_mode(base_mode); 3211 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3212 unsigned short this_cycle; 3213 3214 switch (xfer_shift) { 3215 case ATA_SHIFT_PIO: 3216 case ATA_SHIFT_MWDMA: 3217 this_cycle = t->cycle; 3218 break; 3219 case ATA_SHIFT_UDMA: 3220 this_cycle = t->udma; 3221 break; 3222 default: 3223 return 0xff; 3224 } 3225 3226 if (cycle > this_cycle) 3227 break; 3228 3229 last_mode = t->mode; 3230 } 3231 3232 return last_mode; 3233 } 3234 3235 /** 3236 * ata_down_xfermask_limit - adjust dev xfer masks downward 3237 * @dev: Device to adjust xfer masks 3238 * @sel: ATA_DNXFER_* selector 3239 * 3240 * Adjust xfer masks of @dev downward. Note that this function 3241 * does not apply the change. Invoking ata_set_mode() afterwards 3242 * will apply the limit. 3243 * 3244 * LOCKING: 3245 * Inherited from caller. 3246 * 3247 * RETURNS: 3248 * 0 on success, negative errno on failure 3249 */ 3250 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3251 { 3252 char buf[32]; 3253 unsigned long orig_mask, xfer_mask; 3254 unsigned long pio_mask, mwdma_mask, udma_mask; 3255 int quiet, highbit; 3256 3257 quiet = !!(sel & ATA_DNXFER_QUIET); 3258 sel &= ~ATA_DNXFER_QUIET; 3259 3260 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3261 dev->mwdma_mask, 3262 dev->udma_mask); 3263 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3264 3265 switch (sel) { 3266 case ATA_DNXFER_PIO: 3267 highbit = fls(pio_mask) - 1; 3268 pio_mask &= ~(1 << highbit); 3269 break; 3270 3271 case ATA_DNXFER_DMA: 3272 if (udma_mask) { 3273 highbit = fls(udma_mask) - 1; 3274 udma_mask &= ~(1 << highbit); 3275 if (!udma_mask) 3276 return -ENOENT; 3277 } else if (mwdma_mask) { 3278 highbit = fls(mwdma_mask) - 1; 3279 mwdma_mask &= ~(1 << highbit); 3280 if (!mwdma_mask) 3281 return -ENOENT; 3282 } 3283 break; 3284 3285 case ATA_DNXFER_40C: 3286 udma_mask &= ATA_UDMA_MASK_40C; 3287 break; 3288 3289 case ATA_DNXFER_FORCE_PIO0: 3290 pio_mask &= 1; 3291 case ATA_DNXFER_FORCE_PIO: 3292 mwdma_mask = 0; 3293 udma_mask = 0; 3294 break; 3295 3296 default: 3297 BUG(); 3298 } 3299 3300 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3301 3302 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3303 return -ENOENT; 3304 3305 if (!quiet) { 3306 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3307 snprintf(buf, sizeof(buf), "%s:%s", 3308 ata_mode_string(xfer_mask), 3309 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3310 else 3311 snprintf(buf, sizeof(buf), "%s", 3312 ata_mode_string(xfer_mask)); 3313 3314 ata_dev_printk(dev, KERN_WARNING, 3315 "limiting speed to %s\n", buf); 3316 } 3317 3318 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3319 &dev->udma_mask); 3320 3321 return 0; 3322 } 3323 3324 static int ata_dev_set_mode(struct ata_device *dev) 3325 { 3326 struct ata_port *ap = dev->link->ap; 3327 struct ata_eh_context *ehc = &dev->link->eh_context; 3328 const bool nosetxfer = dev->horkage & ATA_HORKAGE_NOSETXFER; 3329 const char *dev_err_whine = ""; 3330 int ign_dev_err = 0; 3331 unsigned int err_mask = 0; 3332 int rc; 3333 3334 dev->flags &= ~ATA_DFLAG_PIO; 3335 if (dev->xfer_shift == ATA_SHIFT_PIO) 
3336 dev->flags |= ATA_DFLAG_PIO; 3337 3338 if (nosetxfer && ap->flags & ATA_FLAG_SATA && ata_id_is_sata(dev->id)) 3339 dev_err_whine = " (SET_XFERMODE skipped)"; 3340 else { 3341 if (nosetxfer) 3342 ata_dev_printk(dev, KERN_WARNING, 3343 "NOSETXFER but PATA detected - can't " 3344 "skip SETXFER, might malfunction\n"); 3345 err_mask = ata_dev_set_xfermode(dev); 3346 } 3347 3348 if (err_mask & ~AC_ERR_DEV) 3349 goto fail; 3350 3351 /* revalidate */ 3352 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3353 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3354 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3355 if (rc) 3356 return rc; 3357 3358 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3359 /* Old CFA may refuse this command, which is just fine */ 3360 if (ata_id_is_cfa(dev->id)) 3361 ign_dev_err = 1; 3362 /* Catch several broken garbage emulations plus some pre 3363 ATA devices */ 3364 if (ata_id_major_version(dev->id) == 0 && 3365 dev->pio_mode <= XFER_PIO_2) 3366 ign_dev_err = 1; 3367 /* Some very old devices and some bad newer ones fail 3368 any kind of SET_XFERMODE request but support PIO0-2 3369 timings and no IORDY */ 3370 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3371 ign_dev_err = 1; 3372 } 3373 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 3374 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3375 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3376 dev->dma_mode == XFER_MW_DMA_0 && 3377 (dev->id[63] >> 8) & 1) 3378 ign_dev_err = 1; 3379 3380 /* if the device is actually configured correctly, ignore dev err */ 3381 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3382 ign_dev_err = 1; 3383 3384 if (err_mask & AC_ERR_DEV) { 3385 if (!ign_dev_err) 3386 goto fail; 3387 else 3388 dev_err_whine = " (device error ignored)"; 3389 } 3390 3391 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3392 dev->xfer_shift, (int)dev->xfer_mode); 3393 3394 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n", 3395 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3396 dev_err_whine); 3397 3398 return 0; 3399 3400 fail: 3401 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 3402 "(err_mask=0x%x)\n", err_mask); 3403 return -EIO; 3404 } 3405 3406 /** 3407 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3408 * @link: link on which timings will be programmed 3409 * @r_failed_dev: out parameter for failed device 3410 * 3411 * Standard implementation of the function used to tune and set 3412 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3413 * ata_dev_set_mode() fails, pointer to the failing device is 3414 * returned in @r_failed_dev. 3415 * 3416 * LOCKING: 3417 * PCI/etc. bus probe sem. 
3418 * 3419 * RETURNS: 3420 * 0 on success, negative errno otherwise 3421 */ 3422 3423 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3424 { 3425 struct ata_port *ap = link->ap; 3426 struct ata_device *dev; 3427 int rc = 0, used_dma = 0, found = 0; 3428 3429 /* step 1: calculate xfer_mask */ 3430 ata_for_each_dev(dev, link, ENABLED) { 3431 unsigned long pio_mask, dma_mask; 3432 unsigned int mode_mask; 3433 3434 mode_mask = ATA_DMA_MASK_ATA; 3435 if (dev->class == ATA_DEV_ATAPI) 3436 mode_mask = ATA_DMA_MASK_ATAPI; 3437 else if (ata_id_is_cfa(dev->id)) 3438 mode_mask = ATA_DMA_MASK_CFA; 3439 3440 ata_dev_xfermask(dev); 3441 ata_force_xfermask(dev); 3442 3443 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3444 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 3445 3446 if (libata_dma_mask & mode_mask) 3447 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 3448 else 3449 dma_mask = 0; 3450 3451 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3452 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3453 3454 found = 1; 3455 if (ata_dma_enabled(dev)) 3456 used_dma = 1; 3457 } 3458 if (!found) 3459 goto out; 3460 3461 /* step 2: always set host PIO timings */ 3462 ata_for_each_dev(dev, link, ENABLED) { 3463 if (dev->pio_mode == 0xff) { 3464 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n"); 3465 rc = -EINVAL; 3466 goto out; 3467 } 3468 3469 dev->xfer_mode = dev->pio_mode; 3470 dev->xfer_shift = ATA_SHIFT_PIO; 3471 if (ap->ops->set_piomode) 3472 ap->ops->set_piomode(ap, dev); 3473 } 3474 3475 /* step 3: set host DMA timings */ 3476 ata_for_each_dev(dev, link, ENABLED) { 3477 if (!ata_dma_enabled(dev)) 3478 continue; 3479 3480 dev->xfer_mode = dev->dma_mode; 3481 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3482 if (ap->ops->set_dmamode) 3483 ap->ops->set_dmamode(ap, dev); 3484 } 3485 3486 /* step 4: update devices' xfer mode */ 3487 ata_for_each_dev(dev, link, ENABLED) { 3488 rc = ata_dev_set_mode(dev); 3489 if (rc) 3490 goto out; 3491 } 3492 3493 /* Record simplex status. If we selected DMA then the other 3494 * host channels are not permitted to do so. 3495 */ 3496 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3497 ap->host->simplex_claimed = ap; 3498 3499 out: 3500 if (rc) 3501 *r_failed_dev = dev; 3502 return rc; 3503 } 3504 3505 /** 3506 * ata_wait_ready - wait for link to become ready 3507 * @link: link to be waited on 3508 * @deadline: deadline jiffies for the operation 3509 * @check_ready: callback to check link readiness 3510 * 3511 * Wait for @link to become ready. @check_ready should return 3512 * positive number if @link is ready, 0 if it isn't, -ENODEV if 3513 * link doesn't seem to be occupied, other errno for other error 3514 * conditions. 3515 * 3516 * Transient -ENODEV conditions are allowed for 3517 * ATA_TMOUT_FF_WAIT. 3518 * 3519 * LOCKING: 3520 * EH context. 3521 * 3522 * RETURNS: 3523 * 0 if @link is ready before @deadline; otherwise, -errno.
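 *
 * Illustrative caller sketch (not from the original source): an LLD
 * reset path typically passes its own readiness test, e.g.
 *
 *	rc = ata_wait_ready(link, deadline, my_check_ready);
 *
 * where my_check_ready() is a hypothetical callback returning a
 * positive value once the port reports ready status, 0 while it is
 * still busy, and -ENODEV when the link looks unoccupied, as
 * described above.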
3524 */ 3525 int ata_wait_ready(struct ata_link *link, unsigned long deadline, 3526 int (*check_ready)(struct ata_link *link)) 3527 { 3528 unsigned long start = jiffies; 3529 unsigned long nodev_deadline; 3530 int warned = 0; 3531 3532 /* choose which 0xff timeout to use, read comment in libata.h */ 3533 if (link->ap->host->flags & ATA_HOST_PARALLEL_SCAN) 3534 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT_LONG); 3535 else 3536 nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3537 3538 /* Slave readiness can't be tested separately from master. On 3539 * M/S emulation configuration, this function should be called 3540 * only on the master and it will handle both master and slave. 3541 */ 3542 WARN_ON(link == link->ap->slave_link); 3543 3544 if (time_after(nodev_deadline, deadline)) 3545 nodev_deadline = deadline; 3546 3547 while (1) { 3548 unsigned long now = jiffies; 3549 int ready, tmp; 3550 3551 ready = tmp = check_ready(link); 3552 if (ready > 0) 3553 return 0; 3554 3555 /* 3556 * -ENODEV could be transient. Ignore -ENODEV if link 3557 * is online. Also, some SATA devices take a long 3558 * time to clear 0xff after reset. Wait for 3559 * ATA_TMOUT_FF_WAIT[_LONG] on -ENODEV if link isn't 3560 * offline. 3561 * 3562 * Note that some PATA controllers (pata_ali) explode 3563 * if status register is read more than once when 3564 * there's no device attached. 3565 */ 3566 if (ready == -ENODEV) { 3567 if (ata_link_online(link)) 3568 ready = 0; 3569 else if ((link->ap->flags & ATA_FLAG_SATA) && 3570 !ata_link_offline(link) && 3571 time_before(now, nodev_deadline)) 3572 ready = 0; 3573 } 3574 3575 if (ready) 3576 return ready; 3577 if (time_after(now, deadline)) 3578 return -EBUSY; 3579 3580 if (!warned && time_after(now, start + 5 * HZ) && 3581 (deadline - now > 3 * HZ)) { 3582 ata_link_printk(link, KERN_WARNING, 3583 "link is slow to respond, please be patient " 3584 "(ready=%d)\n", tmp); 3585 warned = 1; 3586 } 3587 3588 msleep(50); 3589 } 3590 } 3591 3592 /** 3593 * ata_wait_after_reset - wait for link to become ready after reset 3594 * @link: link to be waited on 3595 * @deadline: deadline jiffies for the operation 3596 * @check_ready: callback to check link readiness 3597 * 3598 * Wait for @link to become ready after reset. 3599 * 3600 * LOCKING: 3601 * EH context. 3602 * 3603 * RETURNS: 3604 * 0 if @link is ready before @deadline; otherwise, -errno. 3605 */ 3606 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 3607 int (*check_ready)(struct ata_link *link)) 3608 { 3609 msleep(ATA_WAIT_AFTER_RESET); 3610 3611 return ata_wait_ready(link, deadline, check_ready); 3612 } 3613 3614 /** 3615 * sata_link_debounce - debounce SATA phy status 3616 * @link: ATA link to debounce SATA phy status for 3617 * @params: timing parameters { interval, duration, timeout } in msec 3618 * @deadline: deadline jiffies for the operation 3619 * 3620 * Make sure SStatus of @link reaches stable state, determined by 3621 * holding the same value where DET is not 1 for @duration polled 3622 * every @interval, before @timeout. @timeout constrains the 3623 * beginning of the stable state. Because DET gets stuck at 1 on 3624 * some controllers after hot unplugging, this function waits 3625 * until timeout then returns 0 if DET is stable at 1. 3626 * 3627 * @timeout is further limited by @deadline. The sooner of the 3628 * two is used. 3629 * 3630 * LOCKING: 3631 * Kernel thread context (may sleep) 3632 * 3633 * RETURNS: 3634 * 0 on success, -errno on failure.
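 *
 * Illustrative call sketch (not part of the original source); a caller
 * might pass the per-link EH debounce parameters, as the reset paths
 * elsewhere in this file do:
 *
 *	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
 *
 *	rc = sata_link_debounce(link, timing, deadline);
 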
3635 */ 3636 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3637 unsigned long deadline) 3638 { 3639 unsigned long interval = params[0]; 3640 unsigned long duration = params[1]; 3641 unsigned long last_jiffies, t; 3642 u32 last, cur; 3643 int rc; 3644 3645 t = ata_deadline(jiffies, params[2]); 3646 if (time_before(t, deadline)) 3647 deadline = t; 3648 3649 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3650 return rc; 3651 cur &= 0xf; 3652 3653 last = cur; 3654 last_jiffies = jiffies; 3655 3656 while (1) { 3657 msleep(interval); 3658 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3659 return rc; 3660 cur &= 0xf; 3661 3662 /* DET stable? */ 3663 if (cur == last) { 3664 if (cur == 1 && time_before(jiffies, deadline)) 3665 continue; 3666 if (time_after(jiffies, 3667 ata_deadline(last_jiffies, duration))) 3668 return 0; 3669 continue; 3670 } 3671 3672 /* unstable, start over */ 3673 last = cur; 3674 last_jiffies = jiffies; 3675 3676 /* Check deadline. If debouncing failed, return 3677 * -EPIPE to tell upper layer to lower link speed. 3678 */ 3679 if (time_after(jiffies, deadline)) 3680 return -EPIPE; 3681 } 3682 } 3683 3684 /** 3685 * sata_link_resume - resume SATA link 3686 * @link: ATA link to resume SATA 3687 * @params: timing parameters { interval, duration, timeout } in msec 3688 * @deadline: deadline jiffies for the operation 3689 * 3690 * Resume SATA phy @link and debounce it. 3691 * 3692 * LOCKING: 3693 * Kernel thread context (may sleep) 3694 * 3695 * RETURNS: 3696 * 0 on success, -errno on failure. 3697 */ 3698 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3699 unsigned long deadline) 3700 { 3701 int tries = ATA_LINK_RESUME_TRIES; 3702 u32 scontrol, serror; 3703 int rc; 3704 3705 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3706 return rc; 3707 3708 /* 3709 * Writes to SControl sometimes get ignored under certain 3710 * controllers (ata_piix SIDPR). Make sure DET actually is 3711 * cleared. 3712 */ 3713 do { 3714 scontrol = (scontrol & 0x0f0) | 0x300; 3715 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3716 return rc; 3717 /* 3718 * Some PHYs react badly if SStatus is pounded 3719 * immediately after resuming. Delay 200ms before 3720 * debouncing. 3721 */ 3722 msleep(200); 3723 3724 /* is SControl restored correctly? */ 3725 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3726 return rc; 3727 } while ((scontrol & 0xf0f) != 0x300 && --tries); 3728 3729 if ((scontrol & 0xf0f) != 0x300) { 3730 ata_link_printk(link, KERN_ERR, 3731 "failed to resume link (SControl %X)\n", 3732 scontrol); 3733 return 0; 3734 } 3735 3736 if (tries < ATA_LINK_RESUME_TRIES) 3737 ata_link_printk(link, KERN_WARNING, 3738 "link resume succeeded after %d retries\n", 3739 ATA_LINK_RESUME_TRIES - tries); 3740 3741 if ((rc = sata_link_debounce(link, params, deadline))) 3742 return rc; 3743 3744 /* clear SError, some PHYs require this even for SRST to work */ 3745 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) 3746 rc = sata_scr_write(link, SCR_ERROR, serror); 3747 3748 return rc != -EINVAL ? rc : 0; 3749 } 3750 3751 /** 3752 * ata_std_prereset - prepare for reset 3753 * @link: ATA link to be reset 3754 * @deadline: deadline jiffies for the operation 3755 * 3756 * @link is about to be reset. Initialize it. Failure from 3757 * prereset makes libata abort the whole reset sequence and give up 3758 * that port, so prereset should be best-effort.
It does its 3759 * best to prepare for the reset sequence but if things go wrong, it 3760 * should just whine, not fail. 3761 * 3762 * LOCKING: 3763 * Kernel thread context (may sleep) 3764 * 3765 * RETURNS: 3766 * 0 on success, -errno otherwise. 3767 */ 3768 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 3769 { 3770 struct ata_port *ap = link->ap; 3771 struct ata_eh_context *ehc = &link->eh_context; 3772 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3773 int rc; 3774 3775 /* if we're about to do hardreset, nothing more to do */ 3776 if (ehc->i.action & ATA_EH_HARDRESET) 3777 return 0; 3778 3779 /* if SATA, resume link */ 3780 if (ap->flags & ATA_FLAG_SATA) { 3781 rc = sata_link_resume(link, timing, deadline); 3782 /* whine about phy resume failure but proceed */ 3783 if (rc && rc != -EOPNOTSUPP) 3784 ata_link_printk(link, KERN_WARNING, "failed to resume " 3785 "link for reset (errno=%d)\n", rc); 3786 } 3787 3788 /* no point in trying softreset on offline link */ 3789 if (ata_phys_link_offline(link)) 3790 ehc->i.action &= ~ATA_EH_SOFTRESET; 3791 3792 return 0; 3793 } 3794 3795 /** 3796 * sata_link_hardreset - reset link via SATA phy reset 3797 * @link: link to reset 3798 * @timing: timing parameters { interval, duration, timeout } in msec 3799 * @deadline: deadline jiffies for the operation 3800 * @online: optional out parameter indicating link onlineness 3801 * @check_ready: optional callback to check link readiness 3802 * 3803 * SATA phy-reset @link using DET bits of SControl register. 3804 * After hardreset, link readiness is waited upon using 3805 * ata_wait_ready() if @check_ready is specified. LLDs are 3806 * allowed to not specify @check_ready and wait themselves after this 3807 * function returns. Device classification is LLD's 3808 * responsibility. 3809 * 3810 * *@online is set to one iff reset succeeded and @link is online 3811 * after reset. 3812 * 3813 * LOCKING: 3814 * Kernel thread context (may sleep) 3815 * 3816 * RETURNS: 3817 * 0 on success, -errno otherwise. 3818 */ 3819 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 3820 unsigned long deadline, 3821 bool *online, int (*check_ready)(struct ata_link *)) 3822 { 3823 u32 scontrol; 3824 int rc; 3825 3826 DPRINTK("ENTER\n"); 3827 3828 if (online) 3829 *online = false; 3830 3831 if (sata_set_spd_needed(link)) { 3832 /* SATA spec says nothing about how to reconfigure 3833 * spd. To be on the safe side, turn off phy during 3834 * reconfiguration. This works for at least ICH7 AHCI 3835 * and Sil3124. 3836 */ 3837 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3838 goto out; 3839 3840 scontrol = (scontrol & 0x0f0) | 0x304; 3841 3842 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3843 goto out; 3844 3845 sata_set_spd(link); 3846 } 3847 3848 /* issue phy wake/reset */ 3849 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3850 goto out; 3851 3852 scontrol = (scontrol & 0x0f0) | 0x301; 3853 3854 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 3855 goto out; 3856 3857 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3858 * 10.4.2 says at least 1 ms. 3859 */ 3860 msleep(1); 3861 3862 /* bring link back */ 3863 rc = sata_link_resume(link, timing, deadline); 3864 if (rc) 3865 goto out; 3866 /* if link is offline nothing more to do */ 3867 if (ata_phys_link_offline(link)) 3868 goto out; 3869 3870 /* Link is online. From this point, -ENODEV too is an error.
*/ 3871 if (online) 3872 *online = true; 3873 3874 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { 3875 /* If PMP is supported, we have to do follow-up SRST. 3876 * Some PMPs don't send D2H Reg FIS after hardreset if 3877 * the first port is empty. Wait only for 3878 * ATA_TMOUT_PMP_SRST_WAIT. 3879 */ 3880 if (check_ready) { 3881 unsigned long pmp_deadline; 3882 3883 pmp_deadline = ata_deadline(jiffies, 3884 ATA_TMOUT_PMP_SRST_WAIT); 3885 if (time_after(pmp_deadline, deadline)) 3886 pmp_deadline = deadline; 3887 ata_wait_ready(link, pmp_deadline, check_ready); 3888 } 3889 rc = -EAGAIN; 3890 goto out; 3891 } 3892 3893 rc = 0; 3894 if (check_ready) 3895 rc = ata_wait_ready(link, deadline, check_ready); 3896 out: 3897 if (rc && rc != -EAGAIN) { 3898 /* online is set iff link is online && reset succeeded */ 3899 if (online) 3900 *online = false; 3901 ata_link_printk(link, KERN_ERR, 3902 "COMRESET failed (errno=%d)\n", rc); 3903 } 3904 DPRINTK("EXIT, rc=%d\n", rc); 3905 return rc; 3906 } 3907 3908 /** 3909 * sata_std_hardreset - COMRESET w/o waiting or classification 3910 * @link: link to reset 3911 * @class: resulting class of attached device 3912 * @deadline: deadline jiffies for the operation 3913 * 3914 * Standard SATA COMRESET w/o waiting or classification. 3915 * 3916 * LOCKING: 3917 * Kernel thread context (may sleep) 3918 * 3919 * RETURNS: 3920 * 0 if link offline, -EAGAIN if link online, -errno on errors. 3921 */ 3922 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 3923 unsigned long deadline) 3924 { 3925 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 3926 bool online; 3927 int rc; 3928 3929 /* do hardreset */ 3930 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 3931 return online ? -EAGAIN : rc; 3932 } 3933 3934 /** 3935 * ata_std_postreset - standard postreset callback 3936 * @link: the target ata_link 3937 * @classes: classes of attached devices 3938 * 3939 * This function is invoked after a successful reset. Note that 3940 * the device might have been reset more than once using 3941 * different reset methods before postreset is invoked. 3942 * 3943 * LOCKING: 3944 * Kernel thread context (may sleep) 3945 */ 3946 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 3947 { 3948 u32 serror; 3949 3950 DPRINTK("ENTER\n"); 3951 3952 /* reset complete, clear SError */ 3953 if (!sata_scr_read(link, SCR_ERROR, &serror)) 3954 sata_scr_write(link, SCR_ERROR, serror); 3955 3956 /* print link status */ 3957 sata_print_link_status(link); 3958 3959 DPRINTK("EXIT\n"); 3960 } 3961 3962 /** 3963 * ata_dev_same_device - Determine whether new ID matches configured device 3964 * @dev: device to compare against 3965 * @new_class: class of the new device 3966 * @new_id: IDENTIFY page of the new device 3967 * 3968 * Compare @new_class and @new_id against @dev and determine 3969 * whether @dev is the device indicated by @new_class and 3970 * @new_id. 3971 * 3972 * LOCKING: 3973 * None. 3974 * 3975 * RETURNS: 3976 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 
3977 */ 3978 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 3979 const u16 *new_id) 3980 { 3981 const u16 *old_id = dev->id; 3982 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 3983 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 3984 3985 if (dev->class != new_class) { 3986 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n", 3987 dev->class, new_class); 3988 return 0; 3989 } 3990 3991 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 3992 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 3993 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 3994 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 3995 3996 if (strcmp(model[0], model[1])) { 3997 ata_dev_printk(dev, KERN_INFO, "model number mismatch " 3998 "'%s' != '%s'\n", model[0], model[1]); 3999 return 0; 4000 } 4001 4002 if (strcmp(serial[0], serial[1])) { 4003 ata_dev_printk(dev, KERN_INFO, "serial number mismatch " 4004 "'%s' != '%s'\n", serial[0], serial[1]); 4005 return 0; 4006 } 4007 4008 return 1; 4009 } 4010 4011 /** 4012 * ata_dev_reread_id - Re-read IDENTIFY data 4013 * @dev: target ATA device 4014 * @readid_flags: read ID flags 4015 * 4016 * Re-read IDENTIFY page and make sure @dev is still attached to 4017 * the port. 4018 * 4019 * LOCKING: 4020 * Kernel thread context (may sleep) 4021 * 4022 * RETURNS: 4023 * 0 on success, negative errno otherwise 4024 */ 4025 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 4026 { 4027 unsigned int class = dev->class; 4028 u16 *id = (void *)dev->link->ap->sector_buf; 4029 int rc; 4030 4031 /* read ID data */ 4032 rc = ata_dev_read_id(dev, &class, readid_flags, id); 4033 if (rc) 4034 return rc; 4035 4036 /* is the device still there? */ 4037 if (!ata_dev_same_device(dev, class, id)) 4038 return -ENODEV; 4039 4040 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 4041 return 0; 4042 } 4043 4044 /** 4045 * ata_dev_revalidate - Revalidate ATA device 4046 * @dev: device to revalidate 4047 * @new_class: new class code 4048 * @readid_flags: read ID flags 4049 * 4050 * Re-read IDENTIFY page, make sure @dev is still attached to the 4051 * port and reconfigure it according to the new IDENTIFY page. 
4052 * 4053 * LOCKING: 4054 * Kernel thread context (may sleep) 4055 * 4056 * RETURNS: 4057 * 0 on success, negative errno otherwise 4058 */ 4059 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 4060 unsigned int readid_flags) 4061 { 4062 u64 n_sectors = dev->n_sectors; 4063 u64 n_native_sectors = dev->n_native_sectors; 4064 int rc; 4065 4066 if (!ata_dev_enabled(dev)) 4067 return -ENODEV; 4068 4069 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 4070 if (ata_class_enabled(new_class) && 4071 new_class != ATA_DEV_ATA && 4072 new_class != ATA_DEV_ATAPI && 4073 new_class != ATA_DEV_SEMB) { 4074 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n", 4075 dev->class, new_class); 4076 rc = -ENODEV; 4077 goto fail; 4078 } 4079 4080 /* re-read ID */ 4081 rc = ata_dev_reread_id(dev, readid_flags); 4082 if (rc) 4083 goto fail; 4084 4085 /* configure device according to the new ID */ 4086 rc = ata_dev_configure(dev); 4087 if (rc) 4088 goto fail; 4089 4090 /* verify n_sectors hasn't changed */ 4091 if (dev->class != ATA_DEV_ATA || !n_sectors || 4092 dev->n_sectors == n_sectors) 4093 return 0; 4094 4095 /* n_sectors has changed */ 4096 ata_dev_printk(dev, KERN_WARNING, "n_sectors mismatch %llu != %llu\n", 4097 (unsigned long long)n_sectors, 4098 (unsigned long long)dev->n_sectors); 4099 4100 /* 4101 * Something could have caused HPA to be unlocked 4102 * involuntarily. If n_native_sectors hasn't changed and the 4103 * new size matches it, keep the device. 4104 */ 4105 if (dev->n_native_sectors == n_native_sectors && 4106 dev->n_sectors > n_sectors && dev->n_sectors == n_native_sectors) { 4107 ata_dev_printk(dev, KERN_WARNING, 4108 "new n_sectors matches native, probably " 4109 "late HPA unlock, continuing\n"); 4110 /* keep using the old n_sectors */ 4111 dev->n_sectors = n_sectors; 4112 return 0; 4113 } 4114 4115 /* 4116 * Some BIOSes boot w/o HPA but resume w/ HPA locked. Try 4117 * unlocking HPA in those cases. 
4118 * 4119 * https://bugzilla.kernel.org/show_bug.cgi?id=15396 4120 */ 4121 if (dev->n_native_sectors == n_native_sectors && 4122 dev->n_sectors < n_sectors && n_sectors == n_native_sectors && 4123 !(dev->horkage & ATA_HORKAGE_BROKEN_HPA)) { 4124 ata_dev_printk(dev, KERN_WARNING, 4125 "old n_sectors matches native, probably " 4126 "late HPA lock, will try to unlock HPA\n"); 4127 /* try unlocking HPA */ 4128 dev->flags |= ATA_DFLAG_UNLOCK_HPA; 4129 rc = -EIO; 4130 } else 4131 rc = -ENODEV; 4132 4133 /* restore original n_[native_]sectors and fail */ 4134 dev->n_native_sectors = n_native_sectors; 4135 dev->n_sectors = n_sectors; 4136 fail: 4137 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); 4138 return rc; 4139 } 4140 4141 struct ata_blacklist_entry { 4142 const char *model_num; 4143 const char *model_rev; 4144 unsigned long horkage; 4145 }; 4146 4147 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4148 /* Devices with DMA related problems under Linux */ 4149 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4150 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4151 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4152 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4153 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4154 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4155 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4156 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4157 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4158 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA }, 4159 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA }, 4160 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4161 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4162 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4163 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4164 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4165 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA }, 4166 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA }, 4167 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4168 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4169 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4170 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4171 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4172 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4173 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4174 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4175 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4176 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4177 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4178 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4179 /* Odd clown on sil3726/4726 PMPs */ 4180 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4181 4182 /* Weird ATAPI devices */ 4183 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4184 { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4185 4186 /* Devices we expect to fail diagnostics */ 4187 4188 /* Devices where NCQ should be avoided */ 4189 /* NCQ is slow */ 4190 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4191 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4192 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4193 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4194 /* NCQ is broken */ 4195 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4196 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4197 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4198 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4199 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4200 4201 /* Seagate NCQ + FLUSH CACHE 
firmware bug */ 4202 { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ | 4203 ATA_HORKAGE_FIRMWARE_WARN }, 4204 { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ | 4205 ATA_HORKAGE_FIRMWARE_WARN }, 4206 { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ | 4207 ATA_HORKAGE_FIRMWARE_WARN }, 4208 { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ | 4209 ATA_HORKAGE_FIRMWARE_WARN }, 4210 { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ | 4211 ATA_HORKAGE_FIRMWARE_WARN }, 4212 4213 { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ | 4214 ATA_HORKAGE_FIRMWARE_WARN }, 4215 { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ | 4216 ATA_HORKAGE_FIRMWARE_WARN }, 4217 { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ | 4218 ATA_HORKAGE_FIRMWARE_WARN }, 4219 { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ | 4220 ATA_HORKAGE_FIRMWARE_WARN }, 4221 { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ | 4222 ATA_HORKAGE_FIRMWARE_WARN }, 4223 4224 { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ | 4225 ATA_HORKAGE_FIRMWARE_WARN }, 4226 { "ST3640623AS", "SD16", ATA_HORKAGE_NONCQ | 4227 ATA_HORKAGE_FIRMWARE_WARN }, 4228 { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ | 4229 ATA_HORKAGE_FIRMWARE_WARN }, 4230 { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ | 4231 ATA_HORKAGE_FIRMWARE_WARN }, 4232 { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ | 4233 ATA_HORKAGE_FIRMWARE_WARN }, 4234 4235 { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ | 4236 ATA_HORKAGE_FIRMWARE_WARN }, 4237 { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ | 4238 ATA_HORKAGE_FIRMWARE_WARN }, 4239 { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ | 4240 ATA_HORKAGE_FIRMWARE_WARN }, 4241 { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ | 4242 ATA_HORKAGE_FIRMWARE_WARN }, 4243 { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ | 4244 ATA_HORKAGE_FIRMWARE_WARN }, 4245 4246 { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ | 4247 ATA_HORKAGE_FIRMWARE_WARN }, 4248 { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ | 4249 ATA_HORKAGE_FIRMWARE_WARN }, 4250 { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ | 4251 ATA_HORKAGE_FIRMWARE_WARN }, 4252 { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ | 4253 ATA_HORKAGE_FIRMWARE_WARN }, 4254 { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ | 4255 ATA_HORKAGE_FIRMWARE_WARN }, 4256 4257 { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ | 4258 ATA_HORKAGE_FIRMWARE_WARN }, 4259 { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ | 4260 ATA_HORKAGE_FIRMWARE_WARN }, 4261 { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ | 4262 ATA_HORKAGE_FIRMWARE_WARN }, 4263 { "ST3320613AS", "SD18", ATA_HORKAGE_NONCQ | 4264 ATA_HORKAGE_FIRMWARE_WARN }, 4265 { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ | 4266 ATA_HORKAGE_FIRMWARE_WARN }, 4267 4268 /* Blacklist entries taken from Silicon Image 3124/3132 4269 Windows driver .inf file - also several Linux problem reports */ 4270 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4271 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4272 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4273 4274 /* https://bugzilla.kernel.org/show_bug.cgi?id=15573 */ 4275 { "C300-CTFDDAC128MAG", "0001", ATA_HORKAGE_NONCQ, }, 4276 4277 /* devices which puke on READ_NATIVE_MAX */ 4278 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4279 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4280 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4281 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4282 4283 /* this one allows HPA unlocking but fails IOs on the area */ 4284 { "OCZ-VERTEX", "1.30", ATA_HORKAGE_BROKEN_HPA }, 4285 4286 /* Devices which report 1 sector over size HPA */ 4287 { "ST340823A", 
NULL, ATA_HORKAGE_HPA_SIZE, }, 4288 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4289 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4290 4291 /* Devices which get the IVB wrong */ 4292 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4293 /* Maybe we should just blacklist TSSTcorp... */ 4294 { "TSSTcorp CDDVDW SH-S202H", "SB00", ATA_HORKAGE_IVB, }, 4295 { "TSSTcorp CDDVDW SH-S202H", "SB01", ATA_HORKAGE_IVB, }, 4296 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, }, 4297 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, }, 4298 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, }, 4299 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, }, 4300 4301 /* Devices that do not need bridging limits applied */ 4302 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4303 4304 /* Devices which aren't very happy with higher link speeds */ 4305 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4306 4307 /* 4308 * Devices which choke on SETXFER. Applies only if both the 4309 * device and controller are SATA. 4310 */ 4311 { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, 4312 4313 /* End Marker */ 4314 { } 4315 }; 4316 4317 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar) 4318 { 4319 const char *p; 4320 int len; 4321 4322 /* 4323 * check for trailing wildcard: *\0 4324 */ 4325 p = strchr(patt, wildchar); 4326 if (p && ((*(p + 1)) == 0)) 4327 len = p - patt; 4328 else { 4329 len = strlen(name); 4330 if (!len) { 4331 if (!*patt) 4332 return 0; 4333 return -1; 4334 } 4335 } 4336 4337 return strncmp(patt, name, len); 4338 } 4339 4340 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4341 { 4342 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4343 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4344 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4345 4346 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4347 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4348 4349 while (ad->model_num) { 4350 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) { 4351 if (ad->model_rev == NULL) 4352 return ad->horkage; 4353 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*')) 4354 return ad->horkage; 4355 } 4356 ad++; 4357 } 4358 return 0; 4359 } 4360 4361 static int ata_dma_blacklisted(const struct ata_device *dev) 4362 { 4363 /* We don't support polling DMA. 4364 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4365 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4366 */ 4367 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4368 (dev->flags & ATA_DFLAG_CDB_INTR)) 4369 return 1; 4370 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4371 } 4372 4373 /** 4374 * ata_is_40wire - check drive side detection 4375 * @dev: device 4376 * 4377 * Perform drive side detection decoding, allowing for device vendors 4378 * who can't follow the documentation. 4379 */ 4380 4381 static int ata_is_40wire(struct ata_device *dev) 4382 { 4383 if (dev->horkage & ATA_HORKAGE_IVB) 4384 return ata_drive_40wire_relaxed(dev->id); 4385 return ata_drive_40wire(dev->id); 4386 } 4387 4388 /** 4389 * cable_is_40wire - 40/80/SATA decider 4390 * @ap: port to consider 4391 * 4392 * This function encapsulates the policy for speed management 4393 * in one place. At the moment we don't cache the result but 4394 * there is a good case for setting ap->cbl to the result when 4395 * we are called with unknown cables (and figuring out if it 4396 * impacts hotplug at all). 
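 *
 * In short: a controller-reported cable type (40-wire, 80-wire or SATA)
 * is trusted as-is, a known short 40-wire setup is treated as safe for
 * 80-wire modes, and only when the controller doesn't know do we fall
 * back to drive-side detection, calling the port 40-wire only if every
 * enabled device reports a 40-wire cable.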
4397 * 4398 * Return 1 if the cable appears to be 40 wire. 4399 */ 4400 4401 static int cable_is_40wire(struct ata_port *ap) 4402 { 4403 struct ata_link *link; 4404 struct ata_device *dev; 4405 4406 /* If the controller thinks we are 40 wire, we are. */ 4407 if (ap->cbl == ATA_CBL_PATA40) 4408 return 1; 4409 4410 /* If the controller thinks we are 80 wire, we are. */ 4411 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4412 return 0; 4413 4414 /* If the system is known to be 40 wire short cable (eg 4415 * laptop), then we allow 80 wire modes even if the drive 4416 * isn't sure. 4417 */ 4418 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4419 return 0; 4420 4421 /* If the controller doesn't know, we scan. 4422 * 4423 * Note: We look for all 40 wire detects at this point. Any 4424 * 80 wire detect is taken to be 80 wire cable because 4425 * - in many setups only the one drive (slave if present) will 4426 * give a valid detect 4427 * - if you have a non detect capable drive you don't want it 4428 * to colour the choice 4429 */ 4430 ata_for_each_link(link, ap, EDGE) { 4431 ata_for_each_dev(dev, link, ENABLED) { 4432 if (!ata_is_40wire(dev)) 4433 return 0; 4434 } 4435 } 4436 return 1; 4437 } 4438 4439 /** 4440 * ata_dev_xfermask - Compute supported xfermask of the given device 4441 * @dev: Device to compute xfermask for 4442 * 4443 * Compute supported xfermask of @dev and store it in 4444 * dev->*_mask. This function is responsible for applying all 4445 * known limits including host controller limits, device 4446 * blacklist, etc... 4447 * 4448 * LOCKING: 4449 * None. 4450 */ 4451 static void ata_dev_xfermask(struct ata_device *dev) 4452 { 4453 struct ata_link *link = dev->link; 4454 struct ata_port *ap = link->ap; 4455 struct ata_host *host = ap->host; 4456 unsigned long xfer_mask; 4457 4458 /* controller modes available */ 4459 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4460 ap->mwdma_mask, ap->udma_mask); 4461 4462 /* drive modes available */ 4463 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4464 dev->mwdma_mask, dev->udma_mask); 4465 xfer_mask &= ata_id_xfermask(dev->id); 4466 4467 /* 4468 * CFA Advanced TrueIDE timings are not allowed on a shared 4469 * cable 4470 */ 4471 if (ata_dev_pair(dev)) { 4472 /* No PIO5 or PIO6 */ 4473 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4474 /* No MWDMA3 or MWDMA 4 */ 4475 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4476 } 4477 4478 if (ata_dma_blacklisted(dev)) { 4479 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4480 ata_dev_printk(dev, KERN_WARNING, 4481 "device is on DMA blacklist, disabling DMA\n"); 4482 } 4483 4484 if ((host->flags & ATA_HOST_SIMPLEX) && 4485 host->simplex_claimed && host->simplex_claimed != ap) { 4486 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4487 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 4488 "other device, disabling DMA\n"); 4489 } 4490 4491 if (ap->flags & ATA_FLAG_NO_IORDY) 4492 xfer_mask &= ata_pio_mask_no_iordy(dev); 4493 4494 if (ap->ops->mode_filter) 4495 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4496 4497 /* Apply cable rule here. Don't apply it early because when 4498 * we handle hot plug the cable type can itself change. 4499 * Check this last so that we know if the transfer rate was 4500 * solely limited by the cable. 4501 * Unknown or 80 wire cables reported host side are checked 4502 * drive side as well. Cases where we know a 40wire cable 4503 * is used safely for 80 are not checked here. 
4504 */ 4505 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4506 /* UDMA/44 or higher would be available */ 4507 if (cable_is_40wire(ap)) { 4508 ata_dev_printk(dev, KERN_WARNING, 4509 "limited to UDMA/33 due to 40-wire cable\n"); 4510 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4511 } 4512 4513 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4514 &dev->mwdma_mask, &dev->udma_mask); 4515 } 4516 4517 /** 4518 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4519 * @dev: Device to which command will be sent 4520 * 4521 * Issue SET FEATURES - XFER MODE command to device @dev 4522 * on port @ap. 4523 * 4524 * LOCKING: 4525 * PCI/etc. bus probe sem. 4526 * 4527 * RETURNS: 4528 * 0 on success, AC_ERR_* mask otherwise. 4529 */ 4530 4531 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4532 { 4533 struct ata_taskfile tf; 4534 unsigned int err_mask; 4535 4536 /* set up set-features taskfile */ 4537 DPRINTK("set features - xfer mode\n"); 4538 4539 /* Some controllers and ATAPI devices show flaky interrupt 4540 * behavior after setting xfer mode. Use polling instead. 4541 */ 4542 ata_tf_init(dev, &tf); 4543 tf.command = ATA_CMD_SET_FEATURES; 4544 tf.feature = SETFEATURES_XFER; 4545 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4546 tf.protocol = ATA_PROT_NODATA; 4547 /* If we are using IORDY we must send the mode setting command */ 4548 if (ata_pio_need_iordy(dev)) 4549 tf.nsect = dev->xfer_mode; 4550 /* If the device has IORDY and the controller does not - turn it off */ 4551 else if (ata_id_has_iordy(dev->id)) 4552 tf.nsect = 0x01; 4553 else /* In the ancient relic department - skip all of this */ 4554 return 0; 4555 4556 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4557 4558 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4559 return err_mask; 4560 } 4561 /** 4562 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4563 * @dev: Device to which command will be sent 4564 * @enable: Whether to enable or disable the feature 4565 * @feature: The sector count represents the feature to set 4566 * 4567 * Issue SET FEATURES - SATA FEATURES command to device @dev 4568 * on port @ap with sector count 4569 * 4570 * LOCKING: 4571 * PCI/etc. bus probe sem. 4572 * 4573 * RETURNS: 4574 * 0 on success, AC_ERR_* mask otherwise. 4575 */ 4576 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, 4577 u8 feature) 4578 { 4579 struct ata_taskfile tf; 4580 unsigned int err_mask; 4581 4582 /* set up set-features taskfile */ 4583 DPRINTK("set features - SATA features\n"); 4584 4585 ata_tf_init(dev, &tf); 4586 tf.command = ATA_CMD_SET_FEATURES; 4587 tf.feature = enable; 4588 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4589 tf.protocol = ATA_PROT_NODATA; 4590 tf.nsect = feature; 4591 4592 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4593 4594 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4595 return err_mask; 4596 } 4597 4598 /** 4599 * ata_dev_init_params - Issue INIT DEV PARAMS command 4600 * @dev: Device to which command will be sent 4601 * @heads: Number of heads (taskfile parameter) 4602 * @sectors: Number of sectors (taskfile parameter) 4603 * 4604 * LOCKING: 4605 * Kernel thread context (may sleep) 4606 * 4607 * RETURNS: 4608 * 0 on success, AC_ERR_* mask otherwise. 4609 */ 4610 static unsigned int ata_dev_init_params(struct ata_device *dev, 4611 u16 heads, u16 sectors) 4612 { 4613 struct ata_taskfile tf; 4614 unsigned int err_mask; 4615 4616 /* Number of sectors per track 1-255. 
Number of heads 1-16 */ 4617 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4618 return AC_ERR_INVALID; 4619 4620 /* set up init dev params taskfile */ 4621 DPRINTK("init dev params \n"); 4622 4623 ata_tf_init(dev, &tf); 4624 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4625 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4626 tf.protocol = ATA_PROT_NODATA; 4627 tf.nsect = sectors; 4628 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 4629 4630 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4631 /* A clean abort indicates an original or just out of spec drive 4632 and we should continue as we issue the setup based on the 4633 drive reported working geometry */ 4634 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4635 err_mask = 0; 4636 4637 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4638 return err_mask; 4639 } 4640 4641 /** 4642 * ata_sg_clean - Unmap DMA memory associated with command 4643 * @qc: Command containing DMA memory to be released 4644 * 4645 * Unmap all mapped DMA memory associated with this command. 4646 * 4647 * LOCKING: 4648 * spin_lock_irqsave(host lock) 4649 */ 4650 void ata_sg_clean(struct ata_queued_cmd *qc) 4651 { 4652 struct ata_port *ap = qc->ap; 4653 struct scatterlist *sg = qc->sg; 4654 int dir = qc->dma_dir; 4655 4656 WARN_ON_ONCE(sg == NULL); 4657 4658 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4659 4660 if (qc->n_elem) 4661 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4662 4663 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4664 qc->sg = NULL; 4665 } 4666 4667 /** 4668 * atapi_check_dma - Check whether ATAPI DMA can be supported 4669 * @qc: Metadata associated with taskfile to check 4670 * 4671 * Allow low-level driver to filter ATA PACKET commands, returning 4672 * a status indicating whether or not it is OK to use DMA for the 4673 * supplied PACKET command. 4674 * 4675 * LOCKING: 4676 * spin_lock_irqsave(host lock) 4677 * 4678 * RETURNS: 0 when ATAPI DMA can be used 4679 * nonzero otherwise 4680 */ 4681 int atapi_check_dma(struct ata_queued_cmd *qc) 4682 { 4683 struct ata_port *ap = qc->ap; 4684 4685 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4686 * few ATAPI devices choke on such DMA requests. 4687 */ 4688 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4689 unlikely(qc->nbytes & 15)) 4690 return 1; 4691 4692 if (ap->ops->check_atapi_dma) 4693 return ap->ops->check_atapi_dma(qc); 4694 4695 return 0; 4696 } 4697 4698 /** 4699 * ata_std_qc_defer - Check whether a qc needs to be deferred 4700 * @qc: ATA command in question 4701 * 4702 * Non-NCQ commands cannot run with any other command, NCQ or 4703 * not. As upper layer only knows the queue depth, we are 4704 * responsible for maintaining exclusion. This function checks 4705 * whether a new command @qc can be issued. 4706 * 4707 * LOCKING: 4708 * spin_lock_irqsave(host lock) 4709 * 4710 * RETURNS: 4711 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4712 */ 4713 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4714 { 4715 struct ata_link *link = qc->dev->link; 4716 4717 if (qc->tf.protocol == ATA_PROT_NCQ) { 4718 if (!ata_tag_valid(link->active_tag)) 4719 return 0; 4720 } else { 4721 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4722 return 0; 4723 } 4724 4725 return ATA_DEFER_LINK; 4726 } 4727 4728 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4729 4730 /** 4731 * ata_sg_init - Associate command with scatter-gather table. 4732 * @qc: Command to be associated 4733 * @sg: Scatter-gather table. 
4734 * @n_elem: Number of elements in s/g table. 4735 * 4736 * Initialize the data-related elements of queued_cmd @qc 4737 * to point to a scatter-gather table @sg, containing @n_elem 4738 * elements. 4739 * 4740 * LOCKING: 4741 * spin_lock_irqsave(host lock) 4742 */ 4743 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4744 unsigned int n_elem) 4745 { 4746 qc->sg = sg; 4747 qc->n_elem = n_elem; 4748 qc->cursg = qc->sg; 4749 } 4750 4751 /** 4752 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4753 * @qc: Command with scatter-gather table to be mapped. 4754 * 4755 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4756 * 4757 * LOCKING: 4758 * spin_lock_irqsave(host lock) 4759 * 4760 * RETURNS: 4761 * Zero on success, negative on error. 4762 * 4763 */ 4764 static int ata_sg_setup(struct ata_queued_cmd *qc) 4765 { 4766 struct ata_port *ap = qc->ap; 4767 unsigned int n_elem; 4768 4769 VPRINTK("ENTER, ata%u\n", ap->print_id); 4770 4771 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4772 if (n_elem < 1) 4773 return -1; 4774 4775 DPRINTK("%d sg elements mapped\n", n_elem); 4776 qc->orig_n_elem = qc->n_elem; 4777 qc->n_elem = n_elem; 4778 qc->flags |= ATA_QCFLAG_DMAMAP; 4779 4780 return 0; 4781 } 4782 4783 /** 4784 * swap_buf_le16 - swap halves of 16-bit words in place 4785 * @buf: Buffer to swap 4786 * @buf_words: Number of 16-bit words in buffer. 4787 * 4788 * Swap halves of 16-bit words if needed to convert from 4789 * little-endian byte order to native cpu byte order, or 4790 * vice-versa. 4791 * 4792 * LOCKING: 4793 * Inherited from caller. 4794 */ 4795 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4796 { 4797 #ifdef __BIG_ENDIAN 4798 unsigned int i; 4799 4800 for (i = 0; i < buf_words; i++) 4801 buf[i] = le16_to_cpu(buf[i]); 4802 #endif /* __BIG_ENDIAN */ 4803 } 4804 4805 /** 4806 * ata_qc_new - Request an available ATA command, for queueing 4807 * @ap: target port 4808 * 4809 * LOCKING: 4810 * None. 4811 */ 4812 4813 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4814 { 4815 struct ata_queued_cmd *qc = NULL; 4816 unsigned int i; 4817 4818 /* no command while frozen */ 4819 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4820 return NULL; 4821 4822 /* the last tag is reserved for internal command. */ 4823 for (i = 0; i < ATA_MAX_QUEUE - 1; i++) 4824 if (!test_and_set_bit(i, &ap->qc_allocated)) { 4825 qc = __ata_qc_from_tag(ap, i); 4826 break; 4827 } 4828 4829 if (qc) 4830 qc->tag = i; 4831 4832 return qc; 4833 } 4834 4835 /** 4836 * ata_qc_new_init - Request an available ATA command, and initialize it 4837 * @dev: Device from whom we request an available command structure 4838 * 4839 * LOCKING: 4840 * None. 4841 */ 4842 4843 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) 4844 { 4845 struct ata_port *ap = dev->link->ap; 4846 struct ata_queued_cmd *qc; 4847 4848 qc = ata_qc_new(ap); 4849 if (qc) { 4850 qc->scsicmd = NULL; 4851 qc->ap = ap; 4852 qc->dev = dev; 4853 4854 ata_qc_reinit(qc); 4855 } 4856 4857 return qc; 4858 } 4859 4860 /** 4861 * ata_qc_free - free unused ata_queued_cmd 4862 * @qc: Command to complete 4863 * 4864 * Designed to free unused ata_queued_cmd object 4865 * in case something prevents using it. 
4866 * 4867 * LOCKING: 4868 * spin_lock_irqsave(host lock) 4869 */ 4870 void ata_qc_free(struct ata_queued_cmd *qc) 4871 { 4872 struct ata_port *ap; 4873 unsigned int tag; 4874 4875 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4876 ap = qc->ap; 4877 4878 qc->flags = 0; 4879 tag = qc->tag; 4880 if (likely(ata_tag_valid(tag))) { 4881 qc->tag = ATA_TAG_POISON; 4882 clear_bit(tag, &ap->qc_allocated); 4883 } 4884 } 4885 4886 void __ata_qc_complete(struct ata_queued_cmd *qc) 4887 { 4888 struct ata_port *ap; 4889 struct ata_link *link; 4890 4891 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4892 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4893 ap = qc->ap; 4894 link = qc->dev->link; 4895 4896 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4897 ata_sg_clean(qc); 4898 4899 /* command should be marked inactive atomically with qc completion */ 4900 if (qc->tf.protocol == ATA_PROT_NCQ) { 4901 link->sactive &= ~(1 << qc->tag); 4902 if (!link->sactive) 4903 ap->nr_active_links--; 4904 } else { 4905 link->active_tag = ATA_TAG_POISON; 4906 ap->nr_active_links--; 4907 } 4908 4909 /* clear exclusive status */ 4910 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 4911 ap->excl_link == link)) 4912 ap->excl_link = NULL; 4913 4914 /* atapi: mark qc as inactive to prevent the interrupt handler 4915 * from completing the command twice later, before the error handler 4916 * is called. (when rc != 0 and atapi request sense is needed) 4917 */ 4918 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4919 ap->qc_active &= ~(1 << qc->tag); 4920 4921 /* call completion callback */ 4922 qc->complete_fn(qc); 4923 } 4924 4925 static void fill_result_tf(struct ata_queued_cmd *qc) 4926 { 4927 struct ata_port *ap = qc->ap; 4928 4929 qc->result_tf.flags = qc->tf.flags; 4930 ap->ops->qc_fill_rtf(qc); 4931 } 4932 4933 static void ata_verify_xfer(struct ata_queued_cmd *qc) 4934 { 4935 struct ata_device *dev = qc->dev; 4936 4937 if (ata_tag_internal(qc->tag)) 4938 return; 4939 4940 if (ata_is_nodata(qc->tf.protocol)) 4941 return; 4942 4943 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 4944 return; 4945 4946 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 4947 } 4948 4949 /** 4950 * ata_qc_complete - Complete an active ATA command 4951 * @qc: Command to complete 4952 * 4953 * Indicate to the mid and upper layers that an ATA 4954 * command has completed, with either an ok or not-ok status. 4955 * 4956 * LOCKING: 4957 * spin_lock_irqsave(host lock) 4958 */ 4959 void ata_qc_complete(struct ata_queued_cmd *qc) 4960 { 4961 struct ata_port *ap = qc->ap; 4962 4963 /* XXX: New EH and old EH use different mechanisms to 4964 * synchronize EH with regular execution path. 4965 * 4966 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 4967 * Normal execution path is responsible for not accessing a 4968 * failed qc. libata core enforces the rule by returning NULL 4969 * from ata_qc_from_tag() for failed qcs. 4970 * 4971 * Old EH depends on ata_qc_complete() nullifying completion 4972 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 4973 * not synchronize with interrupt handler. Only PIO task is 4974 * taken care of. 
4975 */ 4976 if (ap->ops->error_handler) { 4977 struct ata_device *dev = qc->dev; 4978 struct ata_eh_info *ehi = &dev->link->eh_info; 4979 4980 if (unlikely(qc->err_mask)) 4981 qc->flags |= ATA_QCFLAG_FAILED; 4982 4983 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4984 /* always fill result TF for failed qc */ 4985 fill_result_tf(qc); 4986 4987 if (!ata_tag_internal(qc->tag)) 4988 ata_qc_schedule_eh(qc); 4989 else 4990 __ata_qc_complete(qc); 4991 return; 4992 } 4993 4994 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 4995 4996 /* read result TF if requested */ 4997 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4998 fill_result_tf(qc); 4999 5000 /* Some commands need post-processing after successful 5001 * completion. 5002 */ 5003 switch (qc->tf.command) { 5004 case ATA_CMD_SET_FEATURES: 5005 if (qc->tf.feature != SETFEATURES_WC_ON && 5006 qc->tf.feature != SETFEATURES_WC_OFF) 5007 break; 5008 /* fall through */ 5009 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 5010 case ATA_CMD_SET_MULTI: /* multi_count changed */ 5011 /* revalidate device */ 5012 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 5013 ata_port_schedule_eh(ap); 5014 break; 5015 5016 case ATA_CMD_SLEEP: 5017 dev->flags |= ATA_DFLAG_SLEEPING; 5018 break; 5019 } 5020 5021 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 5022 ata_verify_xfer(qc); 5023 5024 __ata_qc_complete(qc); 5025 } else { 5026 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 5027 return; 5028 5029 /* read result TF if failed or requested */ 5030 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 5031 fill_result_tf(qc); 5032 5033 __ata_qc_complete(qc); 5034 } 5035 } 5036 5037 /** 5038 * ata_qc_complete_multiple - Complete multiple qcs successfully 5039 * @ap: port in question 5040 * @qc_active: new qc_active mask 5041 * 5042 * Complete in-flight commands. This functions is meant to be 5043 * called from low-level driver's interrupt routine to complete 5044 * requests normally. ap->qc_active and @qc_active is compared 5045 * and commands are completed accordingly. 5046 * 5047 * LOCKING: 5048 * spin_lock_irqsave(host lock) 5049 * 5050 * RETURNS: 5051 * Number of completed commands on success, -errno otherwise. 5052 */ 5053 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 5054 { 5055 int nr_done = 0; 5056 u32 done_mask; 5057 5058 done_mask = ap->qc_active ^ qc_active; 5059 5060 if (unlikely(done_mask & qc_active)) { 5061 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition " 5062 "(%08x->%08x)\n", ap->qc_active, qc_active); 5063 return -EINVAL; 5064 } 5065 5066 while (done_mask) { 5067 struct ata_queued_cmd *qc; 5068 unsigned int tag = __ffs(done_mask); 5069 5070 qc = ata_qc_from_tag(ap, tag); 5071 if (qc) { 5072 ata_qc_complete(qc); 5073 nr_done++; 5074 } 5075 done_mask &= ~(1 << tag); 5076 } 5077 5078 return nr_done; 5079 } 5080 5081 /** 5082 * ata_qc_issue - issue taskfile to device 5083 * @qc: command to issue to device 5084 * 5085 * Prepare an ATA command to submission to device. 5086 * This includes mapping the data into a DMA-able 5087 * area, filling in the S/G table, and finally 5088 * writing the taskfile to hardware, starting the command. 5089 * 5090 * LOCKING: 5091 * spin_lock_irqsave(host lock) 5092 */ 5093 void ata_qc_issue(struct ata_queued_cmd *qc) 5094 { 5095 struct ata_port *ap = qc->ap; 5096 struct ata_link *link = qc->dev->link; 5097 u8 prot = qc->tf.protocol; 5098 5099 /* Make sure only one non-NCQ command is outstanding. 
The 5100 * check is skipped for old EH because it reuses active qc to 5101 * request ATAPI sense. 5102 */ 5103 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5104 5105 if (ata_is_ncq(prot)) { 5106 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 5107 5108 if (!link->sactive) 5109 ap->nr_active_links++; 5110 link->sactive |= 1 << qc->tag; 5111 } else { 5112 WARN_ON_ONCE(link->sactive); 5113 5114 ap->nr_active_links++; 5115 link->active_tag = qc->tag; 5116 } 5117 5118 qc->flags |= ATA_QCFLAG_ACTIVE; 5119 ap->qc_active |= 1 << qc->tag; 5120 5121 /* We guarantee to LLDs that they will have at least one 5122 * non-zero sg if the command is a data command. 5123 */ 5124 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); 5125 5126 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5127 (ap->flags & ATA_FLAG_PIO_DMA))) 5128 if (ata_sg_setup(qc)) 5129 goto sg_err; 5130 5131 /* if device is sleeping, schedule reset and abort the link */ 5132 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5133 link->eh_info.action |= ATA_EH_RESET; 5134 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5135 ata_link_abort(link); 5136 return; 5137 } 5138 5139 ap->ops->qc_prep(qc); 5140 5141 qc->err_mask |= ap->ops->qc_issue(qc); 5142 if (unlikely(qc->err_mask)) 5143 goto err; 5144 return; 5145 5146 sg_err: 5147 qc->err_mask |= AC_ERR_SYSTEM; 5148 err: 5149 ata_qc_complete(qc); 5150 } 5151 5152 /** 5153 * sata_scr_valid - test whether SCRs are accessible 5154 * @link: ATA link to test SCR accessibility for 5155 * 5156 * Test whether SCRs are accessible for @link. 5157 * 5158 * LOCKING: 5159 * None. 5160 * 5161 * RETURNS: 5162 * 1 if SCRs are accessible, 0 otherwise. 5163 */ 5164 int sata_scr_valid(struct ata_link *link) 5165 { 5166 struct ata_port *ap = link->ap; 5167 5168 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5169 } 5170 5171 /** 5172 * sata_scr_read - read SCR register of the specified port 5173 * @link: ATA link to read SCR for 5174 * @reg: SCR to read 5175 * @val: Place to store read value 5176 * 5177 * Read SCR register @reg of @link into *@val. This function is 5178 * guaranteed to succeed if @link is ap->link, the cable type of 5179 * the port is SATA and the port implements ->scr_read. 5180 * 5181 * LOCKING: 5182 * None if @link is ap->link. Kernel thread context otherwise. 5183 * 5184 * RETURNS: 5185 * 0 on success, negative errno on failure. 5186 */ 5187 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5188 { 5189 if (ata_is_host_link(link)) { 5190 if (sata_scr_valid(link)) 5191 return link->ap->ops->scr_read(link, reg, val); 5192 return -EOPNOTSUPP; 5193 } 5194 5195 return sata_pmp_scr_read(link, reg, val); 5196 } 5197 5198 /** 5199 * sata_scr_write - write SCR register of the specified port 5200 * @link: ATA link to write SCR for 5201 * @reg: SCR to write 5202 * @val: value to write 5203 * 5204 * Write @val to SCR register @reg of @link. This function is 5205 * guaranteed to succeed if @link is ap->link, the cable type of 5206 * the port is SATA and the port implements ->scr_read. 5207 * 5208 * LOCKING: 5209 * None if @link is ap->link. Kernel thread context otherwise. 5210 * 5211 * RETURNS: 5212 * 0 on success, negative errno on failure. 
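 *
 * Illustrative use (this mirrors ata_std_postreset() earlier in this
 * file, which clears SError after a reset):
 *
 *	if (!sata_scr_read(link, SCR_ERROR, &serror))
 *		sata_scr_write(link, SCR_ERROR, serror);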
5213 */ 5214 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5215 { 5216 if (ata_is_host_link(link)) { 5217 if (sata_scr_valid(link)) 5218 return link->ap->ops->scr_write(link, reg, val); 5219 return -EOPNOTSUPP; 5220 } 5221 5222 return sata_pmp_scr_write(link, reg, val); 5223 } 5224 5225 /** 5226 * sata_scr_write_flush - write SCR register of the specified port and flush 5227 * @link: ATA link to write SCR for 5228 * @reg: SCR to write 5229 * @val: value to write 5230 * 5231 * This function is identical to sata_scr_write() except that this 5232 * function performs flush after writing to the register. 5233 * 5234 * LOCKING: 5235 * None if @link is ap->link. Kernel thread context otherwise. 5236 * 5237 * RETURNS: 5238 * 0 on success, negative errno on failure. 5239 */ 5240 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5241 { 5242 if (ata_is_host_link(link)) { 5243 int rc; 5244 5245 if (sata_scr_valid(link)) { 5246 rc = link->ap->ops->scr_write(link, reg, val); 5247 if (rc == 0) 5248 rc = link->ap->ops->scr_read(link, reg, &val); 5249 return rc; 5250 } 5251 return -EOPNOTSUPP; 5252 } 5253 5254 return sata_pmp_scr_write(link, reg, val); 5255 } 5256 5257 /** 5258 * ata_phys_link_online - test whether the given link is online 5259 * @link: ATA link to test 5260 * 5261 * Test whether @link is online. Note that this function returns 5262 * 0 if online status of @link cannot be obtained, so 5263 * ata_link_online(link) != !ata_link_offline(link). 5264 * 5265 * LOCKING: 5266 * None. 5267 * 5268 * RETURNS: 5269 * True if the port online status is available and online. 5270 */ 5271 bool ata_phys_link_online(struct ata_link *link) 5272 { 5273 u32 sstatus; 5274 5275 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5276 ata_sstatus_online(sstatus)) 5277 return true; 5278 return false; 5279 } 5280 5281 /** 5282 * ata_phys_link_offline - test whether the given link is offline 5283 * @link: ATA link to test 5284 * 5285 * Test whether @link is offline. Note that this function 5286 * returns 0 if offline status of @link cannot be obtained, so 5287 * ata_link_online(link) != !ata_link_offline(link). 5288 * 5289 * LOCKING: 5290 * None. 5291 * 5292 * RETURNS: 5293 * True if the port offline status is available and offline. 5294 */ 5295 bool ata_phys_link_offline(struct ata_link *link) 5296 { 5297 u32 sstatus; 5298 5299 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5300 !ata_sstatus_online(sstatus)) 5301 return true; 5302 return false; 5303 } 5304 5305 /** 5306 * ata_link_online - test whether the given link is online 5307 * @link: ATA link to test 5308 * 5309 * Test whether @link is online. This is identical to 5310 * ata_phys_link_online() when there's no slave link. When 5311 * there's a slave link, this function should only be called on 5312 * the master link and will return true if any of M/S links is 5313 * online. 5314 * 5315 * LOCKING: 5316 * None. 5317 * 5318 * RETURNS: 5319 * True if the port online status is available and online. 5320 */ 5321 bool ata_link_online(struct ata_link *link) 5322 { 5323 struct ata_link *slave = link->ap->slave_link; 5324 5325 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5326 5327 return ata_phys_link_online(link) || 5328 (slave && ata_phys_link_online(slave)); 5329 } 5330 5331 /** 5332 * ata_link_offline - test whether the given link is offline 5333 * @link: ATA link to test 5334 * 5335 * Test whether @link is offline. This is identical to 5336 * ata_phys_link_offline() when there's no slave link. 
When 5337 * there's a slave link, this function should only be called on 5338 * the master link and will return true if both M/S links are 5339 * offline. 5340 * 5341 * LOCKING: 5342 * None. 5343 * 5344 * RETURNS: 5345 * True if the port offline status is available and offline. 5346 */ 5347 bool ata_link_offline(struct ata_link *link) 5348 { 5349 struct ata_link *slave = link->ap->slave_link; 5350 5351 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5352 5353 return ata_phys_link_offline(link) && 5354 (!slave || ata_phys_link_offline(slave)); 5355 } 5356 5357 #ifdef CONFIG_PM 5358 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, 5359 unsigned int action, unsigned int ehi_flags, 5360 int wait) 5361 { 5362 unsigned long flags; 5363 int i, rc; 5364 5365 for (i = 0; i < host->n_ports; i++) { 5366 struct ata_port *ap = host->ports[i]; 5367 struct ata_link *link; 5368 5369 /* Previous resume operation might still be in 5370 * progress. Wait for PM_PENDING to clear. 5371 */ 5372 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5373 ata_port_wait_eh(ap); 5374 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5375 } 5376 5377 /* request PM ops to EH */ 5378 spin_lock_irqsave(ap->lock, flags); 5379 5380 ap->pm_mesg = mesg; 5381 if (wait) { 5382 rc = 0; 5383 ap->pm_result = &rc; 5384 } 5385 5386 ap->pflags |= ATA_PFLAG_PM_PENDING; 5387 ata_for_each_link(link, ap, HOST_FIRST) { 5388 link->eh_info.action |= action; 5389 link->eh_info.flags |= ehi_flags; 5390 } 5391 5392 ata_port_schedule_eh(ap); 5393 5394 spin_unlock_irqrestore(ap->lock, flags); 5395 5396 /* wait and check result */ 5397 if (wait) { 5398 ata_port_wait_eh(ap); 5399 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5400 if (rc) 5401 return rc; 5402 } 5403 } 5404 5405 return 0; 5406 } 5407 5408 /** 5409 * ata_host_suspend - suspend host 5410 * @host: host to suspend 5411 * @mesg: PM message 5412 * 5413 * Suspend @host. Actual operation is performed by EH. This 5414 * function requests EH to perform PM operations and waits for EH 5415 * to finish. 5416 * 5417 * LOCKING: 5418 * Kernel thread context (may sleep). 5419 * 5420 * RETURNS: 5421 * 0 on success, -errno on failure. 5422 */ 5423 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5424 { 5425 int rc; 5426 5427 /* 5428 * disable link pm on all ports before requesting 5429 * any pm activity 5430 */ 5431 ata_lpm_enable(host); 5432 5433 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 5434 if (rc == 0) 5435 host->dev->power.power_state = mesg; 5436 return rc; 5437 } 5438 5439 /** 5440 * ata_host_resume - resume host 5441 * @host: host to resume 5442 * 5443 * Resume @host. Actual operation is performed by EH. This 5444 * function requests EH to perform PM operations and returns. 5445 * Note that all resume operations are performed in parallel. 5446 * 5447 * LOCKING: 5448 * Kernel thread context (may sleep). 5449 */ 5450 void ata_host_resume(struct ata_host *host) 5451 { 5452 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, 5453 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 5454 host->dev->power.power_state = PMSG_ON; 5455 5456 /* reenable link pm */ 5457 ata_lpm_disable(host); 5458 } 5459 #endif 5460 5461 /** 5462 * ata_dev_init - Initialize an ata_device structure 5463 * @dev: Device structure to initialize 5464 * 5465 * Initialize @dev in preparation for probing. 5466 * 5467 * LOCKING: 5468 * Inherited from caller.
5469 */ 5470 void ata_dev_init(struct ata_device *dev) 5471 { 5472 struct ata_link *link = ata_dev_phys_link(dev); 5473 struct ata_port *ap = link->ap; 5474 unsigned long flags; 5475 5476 /* SATA spd limit is bound to the attached device, reset together */ 5477 link->sata_spd_limit = link->hw_sata_spd_limit; 5478 link->sata_spd = 0; 5479 5480 /* High bits of dev->flags are used to record warm plug 5481 * requests which occur asynchronously. Synchronize using 5482 * host lock. 5483 */ 5484 spin_lock_irqsave(ap->lock, flags); 5485 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5486 dev->horkage = 0; 5487 spin_unlock_irqrestore(ap->lock, flags); 5488 5489 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5490 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5491 dev->pio_mask = UINT_MAX; 5492 dev->mwdma_mask = UINT_MAX; 5493 dev->udma_mask = UINT_MAX; 5494 } 5495 5496 /** 5497 * ata_link_init - Initialize an ata_link structure 5498 * @ap: ATA port link is attached to 5499 * @link: Link structure to initialize 5500 * @pmp: Port multiplier port number 5501 * 5502 * Initialize @link. 5503 * 5504 * LOCKING: 5505 * Kernel thread context (may sleep) 5506 */ 5507 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5508 { 5509 int i; 5510 5511 /* clear everything except for devices */ 5512 memset(link, 0, offsetof(struct ata_link, device[0])); 5513 5514 link->ap = ap; 5515 link->pmp = pmp; 5516 link->active_tag = ATA_TAG_POISON; 5517 link->hw_sata_spd_limit = UINT_MAX; 5518 5519 /* can't use iterator, ap isn't initialized yet */ 5520 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5521 struct ata_device *dev = &link->device[i]; 5522 5523 dev->link = link; 5524 dev->devno = dev - link->device; 5525 #ifdef CONFIG_ATA_ACPI 5526 dev->gtf_filter = ata_acpi_gtf_filter; 5527 #endif 5528 ata_dev_init(dev); 5529 } 5530 } 5531 5532 /** 5533 * sata_link_init_spd - Initialize link->sata_spd_limit 5534 * @link: Link to configure sata_spd_limit for 5535 * 5536 * Initialize @link->[hw_]sata_spd_limit to the currently 5537 * configured value. 5538 * 5539 * LOCKING: 5540 * Kernel thread context (may sleep). 5541 * 5542 * RETURNS: 5543 * 0 on success, -errno on failure. 5544 */ 5545 int sata_link_init_spd(struct ata_link *link) 5546 { 5547 u8 spd; 5548 int rc; 5549 5550 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5551 if (rc) 5552 return rc; 5553 5554 spd = (link->saved_scontrol >> 4) & 0xf; 5555 if (spd) 5556 link->hw_sata_spd_limit &= (1 << spd) - 1; 5557 5558 ata_force_link_limits(link); 5559 5560 link->sata_spd_limit = link->hw_sata_spd_limit; 5561 5562 return 0; 5563 } 5564 5565 /** 5566 * ata_port_alloc - allocate and initialize basic ATA port resources 5567 * @host: ATA host this allocated port belongs to 5568 * 5569 * Allocate and initialize basic ATA port resources. 5570 * 5571 * RETURNS: 5572 * Allocate ATA port on success, NULL on failure. 5573 * 5574 * LOCKING: 5575 * Inherited from calling layer (may sleep). 
5576 */ 5577 struct ata_port *ata_port_alloc(struct ata_host *host) 5578 { 5579 struct ata_port *ap; 5580 5581 DPRINTK("ENTER\n"); 5582 5583 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5584 if (!ap) 5585 return NULL; 5586 5587 ap->pflags |= ATA_PFLAG_INITIALIZING; 5588 ap->lock = &host->lock; 5589 ap->print_id = -1; 5590 ap->host = host; 5591 ap->dev = host->dev; 5592 5593 #if defined(ATA_VERBOSE_DEBUG) 5594 /* turn on all debugging levels */ 5595 ap->msg_enable = 0x00FF; 5596 #elif defined(ATA_DEBUG) 5597 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5598 #else 5599 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5600 #endif 5601 5602 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5603 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5604 INIT_LIST_HEAD(&ap->eh_done_q); 5605 init_waitqueue_head(&ap->eh_wait_q); 5606 init_completion(&ap->park_req_pending); 5607 init_timer_deferrable(&ap->fastdrain_timer); 5608 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5609 ap->fastdrain_timer.data = (unsigned long)ap; 5610 5611 ap->cbl = ATA_CBL_NONE; 5612 5613 ata_link_init(ap, &ap->link, 0); 5614 5615 #ifdef ATA_IRQ_TRAP 5616 ap->stats.unhandled_irq = 1; 5617 ap->stats.idle_irq = 1; 5618 #endif 5619 ata_sff_port_init(ap); 5620 5621 return ap; 5622 } 5623 5624 static void ata_host_release(struct device *gendev, void *res) 5625 { 5626 struct ata_host *host = dev_get_drvdata(gendev); 5627 int i; 5628 5629 for (i = 0; i < host->n_ports; i++) { 5630 struct ata_port *ap = host->ports[i]; 5631 5632 if (!ap) 5633 continue; 5634 5635 if (ap->scsi_host) 5636 scsi_host_put(ap->scsi_host); 5637 5638 kfree(ap->pmp_link); 5639 kfree(ap->slave_link); 5640 kfree(ap); 5641 host->ports[i] = NULL; 5642 } 5643 5644 dev_set_drvdata(gendev, NULL); 5645 } 5646 5647 /** 5648 * ata_host_alloc - allocate and init basic ATA host resources 5649 * @dev: generic device this host is associated with 5650 * @max_ports: maximum number of ATA ports associated with this host 5651 * 5652 * Allocate and initialize basic ATA host resources. LLD calls 5653 * this function to allocate a host, initializes it fully and 5654 * attaches it using ata_host_register(). 5655 * 5656 * @max_ports ports are allocated and host->n_ports is 5657 * initialized to @max_ports. The caller is allowed to decrease 5658 * host->n_ports before calling ata_host_register(). The unused 5659 * ports will be automatically freed on registration. 5660 * 5661 * RETURNS: 5662 * Allocate ATA host on success, NULL on failure. 5663 * 5664 * LOCKING: 5665 * Inherited from calling layer (may sleep). 
5666 */ 5667 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 5668 { 5669 struct ata_host *host; 5670 size_t sz; 5671 int i; 5672 5673 DPRINTK("ENTER\n"); 5674 5675 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 5676 return NULL; 5677 5678 /* alloc a container for our list of ATA ports (buses) */ 5679 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 5680 /* alloc a container for our list of ATA ports (buses) */ 5681 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 5682 if (!host) 5683 goto err_out; 5684 5685 devres_add(dev, host); 5686 dev_set_drvdata(dev, host); 5687 5688 spin_lock_init(&host->lock); 5689 host->dev = dev; 5690 host->n_ports = max_ports; 5691 5692 /* allocate ports bound to this host */ 5693 for (i = 0; i < max_ports; i++) { 5694 struct ata_port *ap; 5695 5696 ap = ata_port_alloc(host); 5697 if (!ap) 5698 goto err_out; 5699 5700 ap->port_no = i; 5701 host->ports[i] = ap; 5702 } 5703 5704 devres_remove_group(dev, NULL); 5705 return host; 5706 5707 err_out: 5708 devres_release_group(dev, NULL); 5709 return NULL; 5710 } 5711 5712 /** 5713 * ata_host_alloc_pinfo - alloc host and init with port_info array 5714 * @dev: generic device this host is associated with 5715 * @ppi: array of ATA port_info to initialize host with 5716 * @n_ports: number of ATA ports attached to this host 5717 * 5718 * Allocate ATA host and initialize with info from @ppi. If NULL 5719 * terminated, @ppi may contain fewer entries than @n_ports. The 5720 * last entry will be used for the remaining ports. 5721 * 5722 * RETURNS: 5723 * Allocate ATA host on success, NULL on failure. 5724 * 5725 * LOCKING: 5726 * Inherited from calling layer (may sleep). 5727 */ 5728 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 5729 const struct ata_port_info * const * ppi, 5730 int n_ports) 5731 { 5732 const struct ata_port_info *pi; 5733 struct ata_host *host; 5734 int i, j; 5735 5736 host = ata_host_alloc(dev, n_ports); 5737 if (!host) 5738 return NULL; 5739 5740 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 5741 struct ata_port *ap = host->ports[i]; 5742 5743 if (ppi[j]) 5744 pi = ppi[j++]; 5745 5746 ap->pio_mask = pi->pio_mask; 5747 ap->mwdma_mask = pi->mwdma_mask; 5748 ap->udma_mask = pi->udma_mask; 5749 ap->flags |= pi->flags; 5750 ap->link.flags |= pi->link_flags; 5751 ap->ops = pi->port_ops; 5752 5753 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 5754 host->ops = pi->port_ops; 5755 } 5756 5757 return host; 5758 } 5759 5760 /** 5761 * ata_slave_link_init - initialize slave link 5762 * @ap: port to initialize slave link for 5763 * 5764 * Create and initialize slave link for @ap. This enables slave 5765 * link handling on the port. 5766 * 5767 * In libata, a port contains links and a link contains devices. 5768 * There is single host link but if a PMP is attached to it, 5769 * there can be multiple fan-out links. On SATA, there's usually 5770 * a single device connected to a link but PATA and SATA 5771 * controllers emulating TF based interface can have two - master 5772 * and slave. 5773 * 5774 * However, there are a few controllers which don't fit into this 5775 * abstraction too well - SATA controllers which emulate TF 5776 * interface with both master and slave devices but also have 5777 * separate SCR register sets for each device. These controllers 5778 * need separate links for physical link handling 5779 * (e.g. onlineness, link speed) but should be treated like a 5780 * traditional M/S controller for everything else (e.g. 
command 5781 * issue, softreset). 5782 * 5783 * slave_link is libata's way of handling this class of 5784 * controllers without impacting core layer too much. For 5785 * anything other than physical link handling, the default host 5786 * link is used for both master and slave. For physical link 5787 * handling, separate @ap->slave_link is used. All dirty details 5788 * are implemented inside libata core layer. From LLD's POV, the 5789 * only difference is that prereset, hardreset and postreset are 5790 * called once more for the slave link, so the reset sequence 5791 * looks like the following. 5792 * 5793 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> 5794 * softreset(M) -> postreset(M) -> postreset(S) 5795 * 5796 * Note that softreset is called only for the master. Softreset 5797 * resets both M/S by definition, so SRST on master should handle 5798 * both (the standard method will work just fine). 5799 * 5800 * LOCKING: 5801 * Should be called before host is registered. 5802 * 5803 * RETURNS: 5804 * 0 on success, -errno on failure. 5805 */ 5806 int ata_slave_link_init(struct ata_port *ap) 5807 { 5808 struct ata_link *link; 5809 5810 WARN_ON(ap->slave_link); 5811 WARN_ON(ap->flags & ATA_FLAG_PMP); 5812 5813 link = kzalloc(sizeof(*link), GFP_KERNEL); 5814 if (!link) 5815 return -ENOMEM; 5816 5817 ata_link_init(ap, link, 1); 5818 ap->slave_link = link; 5819 return 0; 5820 } 5821 5822 static void ata_host_stop(struct device *gendev, void *res) 5823 { 5824 struct ata_host *host = dev_get_drvdata(gendev); 5825 int i; 5826 5827 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 5828 5829 for (i = 0; i < host->n_ports; i++) { 5830 struct ata_port *ap = host->ports[i]; 5831 5832 if (ap->ops->port_stop) 5833 ap->ops->port_stop(ap); 5834 } 5835 5836 if (host->ops->host_stop) 5837 host->ops->host_stop(host); 5838 } 5839 5840 /** 5841 * ata_finalize_port_ops - finalize ata_port_operations 5842 * @ops: ata_port_operations to finalize 5843 * 5844 * An ata_port_operations can inherit from another ops and that 5845 * ops can again inherit from another. This can go on as many 5846 * times as necessary as long as there is no loop in the 5847 * inheritance chain. 5848 * 5849 * Ops tables are finalized when the host is started. NULL or 5850 * unspecified entries are inherited from the closest ancestor 5851 * which has the method and the entry is populated with it. 5852 * After finalization, the ops table directly points to all the 5853 * methods and ->inherits is no longer necessary and cleared. 5854 * 5855 * Using ATA_OP_NULL, inheriting ops can force a method to NULL. 5856 * 5857 * LOCKING: 5858 * None. 5859 */ 5860 static void ata_finalize_port_ops(struct ata_port_operations *ops) 5861 { 5862 static DEFINE_SPINLOCK(lock); 5863 const struct ata_port_operations *cur; 5864 void **begin = (void **)ops; 5865 void **end = (void **)&ops->inherits; 5866 void **pp; 5867 5868 if (!ops || !ops->inherits) 5869 return; 5870 5871 spin_lock(&lock); 5872 5873 for (cur = ops->inherits; cur; cur = cur->inherits) { 5874 void **inherit = (void **)cur; 5875 5876 for (pp = begin; pp < end; pp++, inherit++) 5877 if (!*pp) 5878 *pp = *inherit; 5879 } 5880 5881 for (pp = begin; pp < end; pp++) 5882 if (IS_ERR(*pp)) 5883 *pp = NULL; 5884 5885 ops->inherits = NULL; 5886 5887 spin_unlock(&lock); 5888 } 5889 5890 /** 5891 * ata_host_start - start and freeze ports of an ATA host 5892 * @host: ATA host to start ports for 5893 * 5894 * Start and then freeze ports of @host.
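 * For reference, the usual LLD sequence around this call looks roughly
 * like the following (a sketch only; "n_ports" and "my_sht" are
 * illustrative names, not symbols from this file):
 *
 *	host = ata_host_alloc(dev, n_ports);	(or ata_host_alloc_pinfo())
 *	if (!host)
 *		return -ENOMEM;
 *	... set up host->ports[i] ...
 *	rc = ata_host_start(host);
 *	if (rc == 0)
 *		rc = ata_host_register(host, &my_sht);
 *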
Started status is 5895 * recorded in host->flags, so this function can be called 5896 * multiple times. Ports are guaranteed to get started only 5897 * once. If host->ops isn't initialized yet, it's set to the 5898 * first non-dummy port ops. 5899 * 5900 * LOCKING: 5901 * Inherited from calling layer (may sleep). 5902 * 5903 * RETURNS: 5904 * 0 if all ports are started successfully, -errno otherwise. 5905 */ 5906 int ata_host_start(struct ata_host *host) 5907 { 5908 int have_stop = 0; 5909 void *start_dr = NULL; 5910 int i, rc; 5911 5912 if (host->flags & ATA_HOST_STARTED) 5913 return 0; 5914 5915 ata_finalize_port_ops(host->ops); 5916 5917 for (i = 0; i < host->n_ports; i++) { 5918 struct ata_port *ap = host->ports[i]; 5919 5920 ata_finalize_port_ops(ap->ops); 5921 5922 if (!host->ops && !ata_port_is_dummy(ap)) 5923 host->ops = ap->ops; 5924 5925 if (ap->ops->port_stop) 5926 have_stop = 1; 5927 } 5928 5929 if (host->ops->host_stop) 5930 have_stop = 1; 5931 5932 if (have_stop) { 5933 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 5934 if (!start_dr) 5935 return -ENOMEM; 5936 } 5937 5938 for (i = 0; i < host->n_ports; i++) { 5939 struct ata_port *ap = host->ports[i]; 5940 5941 if (ap->ops->port_start) { 5942 rc = ap->ops->port_start(ap); 5943 if (rc) { 5944 if (rc != -ENODEV) 5945 dev_printk(KERN_ERR, host->dev, 5946 "failed to start port %d " 5947 "(errno=%d)\n", i, rc); 5948 goto err_out; 5949 } 5950 } 5951 ata_eh_freeze_port(ap); 5952 } 5953 5954 if (start_dr) 5955 devres_add(host->dev, start_dr); 5956 host->flags |= ATA_HOST_STARTED; 5957 return 0; 5958 5959 err_out: 5960 while (--i >= 0) { 5961 struct ata_port *ap = host->ports[i]; 5962 5963 if (ap->ops->port_stop) 5964 ap->ops->port_stop(ap); 5965 } 5966 devres_free(start_dr); 5967 return rc; 5968 } 5969 5970 /** 5971 * ata_host_init - Initialize a host struct 5972 * @host: host to initialize 5973 * @dev: device host is attached to 5974 * @flags: host flags 5975 * @ops: port_ops 5976 * 5977 * LOCKING: 5978 * PCI/etc. bus probe sem. 5979 * 5980 */ 5981 /* KILLME - the only user left is ipr */ 5982 void ata_host_init(struct ata_host *host, struct device *dev, 5983 unsigned long flags, struct ata_port_operations *ops) 5984 { 5985 spin_lock_init(&host->lock); 5986 host->dev = dev; 5987 host->flags = flags; 5988 host->ops = ops; 5989 } 5990 5991 5992 static void async_port_probe(void *data, async_cookie_t cookie) 5993 { 5994 int rc; 5995 struct ata_port *ap = data; 5996 5997 /* 5998 * If we're not allowed to scan this host in parallel, 5999 * we need to wait until all previous scans have completed 6000 * before going further. 6001 * Jeff Garzik says this is only within a controller, so we 6002 * don't need to wait for port 0, only for later ports.
6003 */ 6004 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 6005 async_synchronize_cookie(cookie); 6006 6007 /* probe */ 6008 if (ap->ops->error_handler) { 6009 struct ata_eh_info *ehi = &ap->link.eh_info; 6010 unsigned long flags; 6011 6012 /* kick EH for boot probing */ 6013 spin_lock_irqsave(ap->lock, flags); 6014 6015 ehi->probe_mask |= ATA_ALL_DEVICES; 6016 ehi->action |= ATA_EH_RESET | ATA_EH_LPM; 6017 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 6018 6019 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6020 ap->pflags |= ATA_PFLAG_LOADING; 6021 ata_port_schedule_eh(ap); 6022 6023 spin_unlock_irqrestore(ap->lock, flags); 6024 6025 /* wait for EH to finish */ 6026 ata_port_wait_eh(ap); 6027 } else { 6028 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6029 rc = ata_bus_probe(ap); 6030 DPRINTK("ata%u: bus probe end\n", ap->print_id); 6031 6032 if (rc) { 6033 /* FIXME: do something useful here? 6034 * Current libata behavior will 6035 * tear down everything when 6036 * the module is removed 6037 * or the h/w is unplugged. 6038 */ 6039 } 6040 } 6041 6042 /* in order to keep device order, we need to synchronize at this point */ 6043 async_synchronize_cookie(cookie); 6044 6045 ata_scsi_scan_host(ap, 1); 6046 6047 } 6048 /** 6049 * ata_host_register - register initialized ATA host 6050 * @host: ATA host to register 6051 * @sht: template for SCSI host 6052 * 6053 * Register initialized ATA host. @host is allocated using 6054 * ata_host_alloc() and fully initialized by LLD. This function 6055 * starts ports, registers @host with ATA and SCSI layers and 6056 * probe registered devices. 6057 * 6058 * LOCKING: 6059 * Inherited from calling layer (may sleep). 6060 * 6061 * RETURNS: 6062 * 0 on success, -errno otherwise. 6063 */ 6064 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6065 { 6066 int i, rc; 6067 6068 /* host must have been started */ 6069 if (!(host->flags & ATA_HOST_STARTED)) { 6070 dev_printk(KERN_ERR, host->dev, 6071 "BUG: trying to register unstarted host\n"); 6072 WARN_ON(1); 6073 return -EINVAL; 6074 } 6075 6076 /* Blow away unused ports. This happens when LLD can't 6077 * determine the exact number of ports to allocate at 6078 * allocation time. 6079 */ 6080 for (i = host->n_ports; host->ports[i]; i++) 6081 kfree(host->ports[i]); 6082 6083 /* give ports names and add SCSI hosts */ 6084 for (i = 0; i < host->n_ports; i++) 6085 host->ports[i]->print_id = ata_print_id++; 6086 6087 rc = ata_scsi_add_hosts(host, sht); 6088 if (rc) 6089 return rc; 6090 6091 /* associate with ACPI nodes */ 6092 ata_acpi_associate(host); 6093 6094 /* set cable, sata_spd_limit and report */ 6095 for (i = 0; i < host->n_ports; i++) { 6096 struct ata_port *ap = host->ports[i]; 6097 unsigned long xfer_mask; 6098 6099 /* set SATA cable type if still unset */ 6100 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6101 ap->cbl = ATA_CBL_SATA; 6102 6103 /* init sata_spd_limit to the current value */ 6104 sata_link_init_spd(&ap->link); 6105 if (ap->slave_link) 6106 sata_link_init_spd(ap->slave_link); 6107 6108 /* print per-port info to dmesg */ 6109 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6110 ap->udma_mask); 6111 6112 if (!ata_port_is_dummy(ap)) { 6113 ata_port_printk(ap, KERN_INFO, 6114 "%cATA max %s %s\n", 6115 (ap->flags & ATA_FLAG_SATA) ? 
'S' : 'P', 6116 ata_mode_string(xfer_mask), 6117 ap->link.eh_info.desc); 6118 ata_ehi_clear_desc(&ap->link.eh_info); 6119 } else 6120 ata_port_printk(ap, KERN_INFO, "DUMMY\n"); 6121 } 6122 6123 /* perform each probe asynchronously */ 6124 for (i = 0; i < host->n_ports; i++) { 6125 struct ata_port *ap = host->ports[i]; 6126 async_schedule(async_port_probe, ap); 6127 } 6128 6129 return 0; 6130 } 6131 6132 /** 6133 * ata_host_activate - start host, request IRQ and register it 6134 * @host: target ATA host 6135 * @irq: IRQ to request 6136 * @irq_handler: irq_handler used when requesting IRQ 6137 * @irq_flags: irq_flags used when requesting IRQ 6138 * @sht: scsi_host_template to use when registering the host 6139 * 6140 * After allocating an ATA host and initializing it, most libata 6141 * LLDs perform three steps to activate the host - start host, 6142 * request IRQ and register it. This helper takes necessary 6143 * arguments and performs the three steps in one go. 6144 * 6145 * A zero @irq skips the IRQ registration and expects the host to 6146 * have set polling mode on the ports. In this case, @irq_handler 6147 * should be NULL. 6148 * 6149 * LOCKING: 6150 * Inherited from calling layer (may sleep). 6151 * 6152 * RETURNS: 6153 * 0 on success, -errno otherwise. 6154 */ 6155 int ata_host_activate(struct ata_host *host, int irq, 6156 irq_handler_t irq_handler, unsigned long irq_flags, 6157 struct scsi_host_template *sht) 6158 { 6159 int i, rc; 6160 6161 rc = ata_host_start(host); 6162 if (rc) 6163 return rc; 6164 6165 /* Special case for polling mode */ 6166 if (!irq) { 6167 WARN_ON(irq_handler); 6168 return ata_host_register(host, sht); 6169 } 6170 6171 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6172 dev_driver_string(host->dev), host); 6173 if (rc) 6174 return rc; 6175 6176 for (i = 0; i < host->n_ports; i++) 6177 ata_port_desc(host->ports[i], "irq %d", irq); 6178 6179 rc = ata_host_register(host, sht); 6180 /* if failed, just free the IRQ and leave ports alone */ 6181 if (rc) 6182 devm_free_irq(host->dev, irq, host); 6183 6184 return rc; 6185 } 6186 6187 /** 6188 * ata_port_detach - Detach ATA port in preparation of device removal 6189 * @ap: ATA port to be detached 6190 * 6191 * Detach all ATA devices and the associated SCSI devices of @ap; 6192 * then, remove the associated SCSI host. @ap is guaranteed to 6193 * be quiescent on return from this function. 6194 * 6195 * LOCKING: 6196 * Kernel thread context (may sleep). 6197 */ 6198 static void ata_port_detach(struct ata_port *ap) 6199 { 6200 unsigned long flags; 6201 6202 if (!ap->ops->error_handler) 6203 goto skip_eh; 6204 6205 /* tell EH we're leaving & flush EH */ 6206 spin_lock_irqsave(ap->lock, flags); 6207 ap->pflags |= ATA_PFLAG_UNLOADING; 6208 ata_port_schedule_eh(ap); 6209 spin_unlock_irqrestore(ap->lock, flags); 6210 6211 /* wait till EH commits suicide */ 6212 ata_port_wait_eh(ap); 6213 6214 /* it better be dead now */ 6215 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6216 6217 cancel_rearming_delayed_work(&ap->hotplug_task); 6218 6219 skip_eh: 6220 /* remove the associated SCSI host */ 6221 scsi_remove_host(ap->scsi_host); 6222 } 6223 6224 /** 6225 * ata_host_detach - Detach all ports of an ATA host 6226 * @host: Host to detach 6227 * 6228 * Detach all ports of @host. 6229 * 6230 * LOCKING: 6231 * Kernel thread context (may sleep).
6232 */ 6233 void ata_host_detach(struct ata_host *host) 6234 { 6235 int i; 6236 6237 for (i = 0; i < host->n_ports; i++) 6238 ata_port_detach(host->ports[i]); 6239 6240 /* the host is dead now, dissociate ACPI */ 6241 ata_acpi_dissociate(host); 6242 } 6243 6244 #ifdef CONFIG_PCI 6245 6246 /** 6247 * ata_pci_remove_one - PCI layer callback for device removal 6248 * @pdev: PCI device that was removed 6249 * 6250 * PCI layer indicates to libata via this hook that hot-unplug or 6251 * module unload event has occurred. Detach all ports. Resource 6252 * release is handled via devres. 6253 * 6254 * LOCKING: 6255 * Inherited from PCI layer (may sleep). 6256 */ 6257 void ata_pci_remove_one(struct pci_dev *pdev) 6258 { 6259 struct device *dev = &pdev->dev; 6260 struct ata_host *host = dev_get_drvdata(dev); 6261 6262 ata_host_detach(host); 6263 } 6264 6265 /* move to PCI subsystem */ 6266 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6267 { 6268 unsigned long tmp = 0; 6269 6270 switch (bits->width) { 6271 case 1: { 6272 u8 tmp8 = 0; 6273 pci_read_config_byte(pdev, bits->reg, &tmp8); 6274 tmp = tmp8; 6275 break; 6276 } 6277 case 2: { 6278 u16 tmp16 = 0; 6279 pci_read_config_word(pdev, bits->reg, &tmp16); 6280 tmp = tmp16; 6281 break; 6282 } 6283 case 4: { 6284 u32 tmp32 = 0; 6285 pci_read_config_dword(pdev, bits->reg, &tmp32); 6286 tmp = tmp32; 6287 break; 6288 } 6289 6290 default: 6291 return -EINVAL; 6292 } 6293 6294 tmp &= bits->mask; 6295 6296 return (tmp == bits->val) ? 1 : 0; 6297 } 6298 6299 #ifdef CONFIG_PM 6300 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6301 { 6302 pci_save_state(pdev); 6303 pci_disable_device(pdev); 6304 6305 if (mesg.event & PM_EVENT_SLEEP) 6306 pci_set_power_state(pdev, PCI_D3hot); 6307 } 6308 6309 int ata_pci_device_do_resume(struct pci_dev *pdev) 6310 { 6311 int rc; 6312 6313 pci_set_power_state(pdev, PCI_D0); 6314 pci_restore_state(pdev); 6315 6316 rc = pcim_enable_device(pdev); 6317 if (rc) { 6318 dev_printk(KERN_ERR, &pdev->dev, 6319 "failed to enable device after resume (%d)\n", rc); 6320 return rc; 6321 } 6322 6323 pci_set_master(pdev); 6324 return 0; 6325 } 6326 6327 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6328 { 6329 struct ata_host *host = dev_get_drvdata(&pdev->dev); 6330 int rc = 0; 6331 6332 rc = ata_host_suspend(host, mesg); 6333 if (rc) 6334 return rc; 6335 6336 ata_pci_device_do_suspend(pdev, mesg); 6337 6338 return 0; 6339 } 6340 6341 int ata_pci_device_resume(struct pci_dev *pdev) 6342 { 6343 struct ata_host *host = dev_get_drvdata(&pdev->dev); 6344 int rc; 6345 6346 rc = ata_pci_device_do_resume(pdev); 6347 if (rc == 0) 6348 ata_host_resume(host); 6349 return rc; 6350 } 6351 #endif /* CONFIG_PM */ 6352 6353 #endif /* CONFIG_PCI */ 6354 6355 static int __init ata_parse_force_one(char **cur, 6356 struct ata_force_ent *force_ent, 6357 const char **reason) 6358 { 6359 /* FIXME: Currently, there's no way to tag init const data and 6360 * using __initdata causes build failure on some versions of 6361 * gcc. Once __initdataconst is implemented, add const to the 6362 * following structure. 
6363 */ 6364 static struct ata_force_param force_tbl[] __initdata = { 6365 { "40c", .cbl = ATA_CBL_PATA40 }, 6366 { "80c", .cbl = ATA_CBL_PATA80 }, 6367 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 6368 { "unk", .cbl = ATA_CBL_PATA_UNK }, 6369 { "ign", .cbl = ATA_CBL_PATA_IGN }, 6370 { "sata", .cbl = ATA_CBL_SATA }, 6371 { "1.5Gbps", .spd_limit = 1 }, 6372 { "3.0Gbps", .spd_limit = 2 }, 6373 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6374 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6375 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6376 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6377 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6378 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 6379 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 6380 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 6381 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 6382 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 6383 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 6384 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 6385 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 6386 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 6387 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6388 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6389 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6390 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6391 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6392 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6393 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6394 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6395 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6396 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6397 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6398 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6399 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6400 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6401 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6402 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6403 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6404 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6405 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6406 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6407 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6408 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 6409 { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 6410 { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 6411 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6412 }; 6413 char *start = *cur, *p = *cur; 6414 char *id, *val, *endp; 6415 const struct ata_force_param *match_fp = NULL; 6416 int nr_matches = 0, i; 6417 6418 /* find where this param ends and update *cur */ 6419 while (*p != '\0' && *p != ',') 6420 p++; 6421 6422 if (*p == '\0') 6423 *cur = p; 6424 else 6425 *cur = p + 1; 6426 6427 *p = '\0'; 6428 6429 /* parse */ 6430 p = strchr(start, ':'); 6431 if (!p) { 6432 val = strstrip(start); 6433 goto parse_val; 6434 } 6435 *p = '\0'; 6436 6437 id = strstrip(start); 6438 val = strstrip(p + 1); 6439 6440 /* parse id */ 6441 p = strchr(id, '.'); 6442 if (p) { 6443 *p++ = '\0'; 6444 force_ent->device = simple_strtoul(p, &endp, 10); 6445 if (p == endp || *endp != '\0') { 6446 *reason = "invalid device"; 6447 return -EINVAL; 6448 } 6449 } 6450 6451 force_ent->port = simple_strtoul(id, &endp, 10); 6452 if (p == endp || *endp != '\0') { 6453 *reason = 
"invalid port/link"; 6454 return -EINVAL; 6455 } 6456 6457 parse_val: 6458 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 6459 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 6460 const struct ata_force_param *fp = &force_tbl[i]; 6461 6462 if (strncasecmp(val, fp->name, strlen(val))) 6463 continue; 6464 6465 nr_matches++; 6466 match_fp = fp; 6467 6468 if (strcasecmp(val, fp->name) == 0) { 6469 nr_matches = 1; 6470 break; 6471 } 6472 } 6473 6474 if (!nr_matches) { 6475 *reason = "unknown value"; 6476 return -EINVAL; 6477 } 6478 if (nr_matches > 1) { 6479 *reason = "ambigious value"; 6480 return -EINVAL; 6481 } 6482 6483 force_ent->param = *match_fp; 6484 6485 return 0; 6486 } 6487 6488 static void __init ata_parse_force_param(void) 6489 { 6490 int idx = 0, size = 1; 6491 int last_port = -1, last_device = -1; 6492 char *p, *cur, *next; 6493 6494 /* calculate maximum number of params and allocate force_tbl */ 6495 for (p = ata_force_param_buf; *p; p++) 6496 if (*p == ',') 6497 size++; 6498 6499 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); 6500 if (!ata_force_tbl) { 6501 printk(KERN_WARNING "ata: failed to extend force table, " 6502 "libata.force ignored\n"); 6503 return; 6504 } 6505 6506 /* parse and populate the table */ 6507 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 6508 const char *reason = ""; 6509 struct ata_force_ent te = { .port = -1, .device = -1 }; 6510 6511 next = cur; 6512 if (ata_parse_force_one(&next, &te, &reason)) { 6513 printk(KERN_WARNING "ata: failed to parse force " 6514 "parameter \"%s\" (%s)\n", 6515 cur, reason); 6516 continue; 6517 } 6518 6519 if (te.port == -1) { 6520 te.port = last_port; 6521 te.device = last_device; 6522 } 6523 6524 ata_force_tbl[idx++] = te; 6525 6526 last_port = te.port; 6527 last_device = te.device; 6528 } 6529 6530 ata_force_tbl_size = idx; 6531 } 6532 6533 static int __init ata_init(void) 6534 { 6535 int rc = -ENOMEM; 6536 6537 ata_parse_force_param(); 6538 6539 ata_aux_wq = create_singlethread_workqueue("ata_aux"); 6540 if (!ata_aux_wq) 6541 goto fail; 6542 6543 rc = ata_sff_init(); 6544 if (rc) 6545 goto fail; 6546 6547 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6548 return 0; 6549 6550 fail: 6551 kfree(ata_force_tbl); 6552 if (ata_aux_wq) 6553 destroy_workqueue(ata_aux_wq); 6554 return rc; 6555 } 6556 6557 static void __exit ata_exit(void) 6558 { 6559 ata_sff_exit(); 6560 kfree(ata_force_tbl); 6561 destroy_workqueue(ata_aux_wq); 6562 } 6563 6564 subsys_initcall(ata_init); 6565 module_exit(ata_exit); 6566 6567 static DEFINE_RATELIMIT_STATE(ratelimit, HZ / 5, 1); 6568 6569 int ata_ratelimit(void) 6570 { 6571 return __ratelimit(&ratelimit); 6572 } 6573 6574 /** 6575 * ata_wait_register - wait until register value changes 6576 * @reg: IO-mapped register 6577 * @mask: Mask to apply to read register value 6578 * @val: Wait condition 6579 * @interval: polling interval in milliseconds 6580 * @timeout: timeout in milliseconds 6581 * 6582 * Waiting for some bits of register to change is a common 6583 * operation for ATA controllers. This function reads 32bit LE 6584 * IO-mapped register @reg and tests for the following condition. 6585 * 6586 * (*@reg & mask) != val 6587 * 6588 * If the condition is met, it returns; otherwise, the process is 6589 * repeated after @interval_msec until timeout. 6590 * 6591 * LOCKING: 6592 * Kernel thread context (may sleep) 6593 * 6594 * RETURNS: 6595 * The final register value. 
6596 */ 6597 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 6598 unsigned long interval, unsigned long timeout) 6599 { 6600 unsigned long deadline; 6601 u32 tmp; 6602 6603 tmp = ioread32(reg); 6604 6605 /* Calculate timeout _after_ the first read to make sure 6606 * preceding writes reach the controller before starting to 6607 * eat away the timeout. 6608 */ 6609 deadline = ata_deadline(jiffies, timeout); 6610 6611 while ((tmp & mask) == val && time_before(jiffies, deadline)) { 6612 msleep(interval); 6613 tmp = ioread32(reg); 6614 } 6615 6616 return tmp; 6617 } 6618 6619 /* 6620 * Dummy port_ops 6621 */ 6622 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 6623 { 6624 return AC_ERR_SYSTEM; 6625 } 6626 6627 static void ata_dummy_error_handler(struct ata_port *ap) 6628 { 6629 /* truly dummy */ 6630 } 6631 6632 struct ata_port_operations ata_dummy_port_ops = { 6633 .qc_prep = ata_noop_qc_prep, 6634 .qc_issue = ata_dummy_qc_issue, 6635 .error_handler = ata_dummy_error_handler, 6636 }; 6637 6638 const struct ata_port_info ata_dummy_port_info = { 6639 .port_ops = &ata_dummy_port_ops, 6640 }; 6641 6642 /* 6643 * libata is essentially a library of internal helper functions for 6644 * low-level ATA host controller drivers. As such, the API/ABI is 6645 * likely to change as new drivers are added and updated. 6646 * Do not depend on ABI/API stability. 6647 */ 6648 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 6649 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 6650 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 6651 EXPORT_SYMBOL_GPL(ata_base_port_ops); 6652 EXPORT_SYMBOL_GPL(sata_port_ops); 6653 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 6654 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 6655 EXPORT_SYMBOL_GPL(ata_link_next); 6656 EXPORT_SYMBOL_GPL(ata_dev_next); 6657 EXPORT_SYMBOL_GPL(ata_std_bios_param); 6658 EXPORT_SYMBOL_GPL(ata_host_init); 6659 EXPORT_SYMBOL_GPL(ata_host_alloc); 6660 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 6661 EXPORT_SYMBOL_GPL(ata_slave_link_init); 6662 EXPORT_SYMBOL_GPL(ata_host_start); 6663 EXPORT_SYMBOL_GPL(ata_host_register); 6664 EXPORT_SYMBOL_GPL(ata_host_activate); 6665 EXPORT_SYMBOL_GPL(ata_host_detach); 6666 EXPORT_SYMBOL_GPL(ata_sg_init); 6667 EXPORT_SYMBOL_GPL(ata_qc_complete); 6668 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 6669 EXPORT_SYMBOL_GPL(atapi_cmd_type); 6670 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 6671 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 6672 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 6673 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 6674 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 6675 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 6676 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 6677 EXPORT_SYMBOL_GPL(ata_mode_string); 6678 EXPORT_SYMBOL_GPL(ata_id_xfermask); 6679 EXPORT_SYMBOL_GPL(ata_do_set_mode); 6680 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 6681 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 6682 EXPORT_SYMBOL_GPL(ata_dev_disable); 6683 EXPORT_SYMBOL_GPL(sata_set_spd); 6684 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 6685 EXPORT_SYMBOL_GPL(sata_link_debounce); 6686 EXPORT_SYMBOL_GPL(sata_link_resume); 6687 EXPORT_SYMBOL_GPL(ata_std_prereset); 6688 EXPORT_SYMBOL_GPL(sata_link_hardreset); 6689 EXPORT_SYMBOL_GPL(sata_std_hardreset); 6690 EXPORT_SYMBOL_GPL(ata_std_postreset); 6691 EXPORT_SYMBOL_GPL(ata_dev_classify); 6692 EXPORT_SYMBOL_GPL(ata_dev_pair); 6693 EXPORT_SYMBOL_GPL(ata_ratelimit); 6694 EXPORT_SYMBOL_GPL(ata_wait_register); 6695 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6696 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 6697 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 6698 
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 6699 EXPORT_SYMBOL_GPL(sata_scr_valid); 6700 EXPORT_SYMBOL_GPL(sata_scr_read); 6701 EXPORT_SYMBOL_GPL(sata_scr_write); 6702 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 6703 EXPORT_SYMBOL_GPL(ata_link_online); 6704 EXPORT_SYMBOL_GPL(ata_link_offline); 6705 #ifdef CONFIG_PM 6706 EXPORT_SYMBOL_GPL(ata_host_suspend); 6707 EXPORT_SYMBOL_GPL(ata_host_resume); 6708 #endif /* CONFIG_PM */ 6709 EXPORT_SYMBOL_GPL(ata_id_string); 6710 EXPORT_SYMBOL_GPL(ata_id_c_string); 6711 EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 6712 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6713 6714 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6715 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 6716 EXPORT_SYMBOL_GPL(ata_timing_compute); 6717 EXPORT_SYMBOL_GPL(ata_timing_merge); 6718 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 6719 6720 #ifdef CONFIG_PCI 6721 EXPORT_SYMBOL_GPL(pci_test_config_bits); 6722 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 6723 #ifdef CONFIG_PM 6724 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 6725 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 6726 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 6727 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 6728 #endif /* CONFIG_PM */ 6729 #endif /* CONFIG_PCI */ 6730 6731 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 6732 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 6733 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 6734 EXPORT_SYMBOL_GPL(ata_port_desc); 6735 #ifdef CONFIG_PCI 6736 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 6737 #endif /* CONFIG_PCI */ 6738 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 6739 EXPORT_SYMBOL_GPL(ata_link_abort); 6740 EXPORT_SYMBOL_GPL(ata_port_abort); 6741 EXPORT_SYMBOL_GPL(ata_port_freeze); 6742 EXPORT_SYMBOL_GPL(sata_async_notification); 6743 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 6744 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 6745 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 6746 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 6747 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); 6748 EXPORT_SYMBOL_GPL(ata_do_eh); 6749 EXPORT_SYMBOL_GPL(ata_std_error_handler); 6750 6751 EXPORT_SYMBOL_GPL(ata_cable_40wire); 6752 EXPORT_SYMBOL_GPL(ata_cable_80wire); 6753 EXPORT_SYMBOL_GPL(ata_cable_unknown); 6754 EXPORT_SYMBOL_GPL(ata_cable_ignore); 6755 EXPORT_SYMBOL_GPL(ata_cable_sata); 6756
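
/*
 * Example usage (illustrative only, kept out of the build with #if 0):
 * a minimal sketch of how a PCI LLD might combine the helpers above -
 * allocate a host from a NULL-terminated port_info array, then start
 * it, request the IRQ and register it in one go via ata_host_activate().
 * The driver name "sample_lld", the port_info/sht contents and the
 * BAR/ioaddr handling are assumptions for illustration only; a real
 * driver supplies its own port_operations, scsi_host_template and
 * interrupt handler.
 */
#if 0
static struct scsi_host_template sample_sht = {
	ATA_BASE_SHT("sample_lld"),
};

static const struct ata_port_info sample_port_info = {
	.flags		= ATA_FLAG_SLAVE_POSS,
	.pio_mask	= ATA_PIO4,
	.port_ops	= &ata_sff_port_ops,	/* stand-in for the LLD's own ops */
};

static int sample_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	/* NULL terminated; the last entry covers any remaining ports */
	const struct ata_port_info *ppi[] = { &sample_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* one port in this sketch; masks, flags and ops come from @ppi */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
	if (!host)
		return -ENOMEM;

	/* a real driver would iomap its BARs here and fill in
	 * host->ports[i]->ioaddr and any private port data */

	pci_set_master(pdev);

	/* start ports, request the IRQ and register with the SCSI layer */
	return ata_host_activate(host, pdev->irq, ata_sff_interrupt,
				 IRQF_SHARED, &sample_sht);
}
#endif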