1 /* 2 * libata-core.c - helper library for ATA 3 * 4 * Maintained by: Jeff Garzik <jgarzik@pobox.com> 5 * Please ALWAYS copy linux-ide@vger.kernel.org 6 * on emails. 7 * 8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved. 9 * Copyright 2003-2004 Jeff Garzik 10 * 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of the GNU General Public License as published by 14 * the Free Software Foundation; either version 2, or (at your option) 15 * any later version. 16 * 17 * This program is distributed in the hope that it will be useful, 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * GNU General Public License for more details. 21 * 22 * You should have received a copy of the GNU General Public License 23 * along with this program; see the file COPYING. If not, write to 24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 25 * 26 * 27 * libata documentation is available via 'make {ps|pdf}docs', 28 * as Documentation/DocBook/libata.* 29 * 30 * Hardware documentation available from http://www.t13.org/ and 31 * http://www.sata-io.org/ 32 * 33 * Standards documents from: 34 * http://www.t13.org (ATA standards, PCI DMA IDE spec) 35 * http://www.t10.org (SCSI MMC - for ATAPI MMC) 36 * http://www.sata-io.org (SATA) 37 * http://www.compactflash.org (CF) 38 * http://www.qic.org (QIC157 - Tape and DSC) 39 * http://www.ce-ata.org (CE-ATA: not supported) 40 * 41 */ 42 43 #include <linux/kernel.h> 44 #include <linux/module.h> 45 #include <linux/pci.h> 46 #include <linux/init.h> 47 #include <linux/list.h> 48 #include <linux/mm.h> 49 #include <linux/spinlock.h> 50 #include <linux/blkdev.h> 51 #include <linux/delay.h> 52 #include <linux/timer.h> 53 #include <linux/interrupt.h> 54 #include <linux/completion.h> 55 #include <linux/suspend.h> 56 #include <linux/workqueue.h> 57 #include <linux/scatterlist.h> 58 #include <linux/io.h> 59 #include <linux/async.h> 60 #include <scsi/scsi.h> 61 #include <scsi/scsi_cmnd.h> 62 #include <scsi/scsi_host.h> 63 #include <linux/libata.h> 64 #include <asm/byteorder.h> 65 #include <linux/cdrom.h> 66 67 #include "libata.h" 68 69 70 /* debounce timing parameters in msecs { interval, duration, timeout } */ 71 const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 }; 72 const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 }; 73 const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 }; 74 75 const struct ata_port_operations ata_base_port_ops = { 76 .prereset = ata_std_prereset, 77 .postreset = ata_std_postreset, 78 .error_handler = ata_std_error_handler, 79 }; 80 81 const struct ata_port_operations sata_port_ops = { 82 .inherits = &ata_base_port_ops, 83 84 .qc_defer = ata_std_qc_defer, 85 .hardreset = sata_std_hardreset, 86 }; 87 88 static unsigned int ata_dev_init_params(struct ata_device *dev, 89 u16 heads, u16 sectors); 90 static unsigned int ata_dev_set_xfermode(struct ata_device *dev); 91 static unsigned int ata_dev_set_feature(struct ata_device *dev, 92 u8 enable, u8 feature); 93 static void ata_dev_xfermask(struct ata_device *dev); 94 static unsigned long ata_dev_blacklisted(const struct ata_device *dev); 95 96 unsigned int ata_print_id = 1; 97 static struct workqueue_struct *ata_wq; 98 99 struct workqueue_struct *ata_aux_wq; 100 101 struct ata_force_param { 102 const char *name; 103 unsigned int cbl; 104 int spd_limit; 105 unsigned long xfer_mask; 106 unsigned int horkage_on; 107 
unsigned int horkage_off; 108 unsigned int lflags; 109 }; 110 111 struct ata_force_ent { 112 int port; 113 int device; 114 struct ata_force_param param; 115 }; 116 117 static struct ata_force_ent *ata_force_tbl; 118 static int ata_force_tbl_size; 119 120 static char ata_force_param_buf[PAGE_SIZE] __initdata; 121 /* param_buf is thrown away after initialization, disallow read */ 122 module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0); 123 MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)"); 124 125 static int atapi_enabled = 1; 126 module_param(atapi_enabled, int, 0444); 127 MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); 128 129 static int atapi_dmadir = 0; 130 module_param(atapi_dmadir, int, 0444); 131 MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); 132 133 int atapi_passthru16 = 1; 134 module_param(atapi_passthru16, int, 0444); 135 MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)"); 136 137 int libata_fua = 0; 138 module_param_named(fua, libata_fua, int, 0444); 139 MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); 140 141 static int ata_ignore_hpa; 142 module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644); 143 MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)"); 144 145 static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA; 146 module_param_named(dma, libata_dma_mask, int, 0444); 147 MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)"); 148 149 static int ata_probe_timeout; 150 module_param(ata_probe_timeout, int, 0444); 151 MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)"); 152 153 int libata_noacpi = 0; 154 module_param_named(noacpi, libata_noacpi, int, 0444); 155 MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set"); 156 157 int libata_allow_tpm = 0; 158 module_param_named(allow_tpm, libata_allow_tpm, int, 0444); 159 MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands"); 160 161 MODULE_AUTHOR("Jeff Garzik"); 162 MODULE_DESCRIPTION("Library module for ATA devices"); 163 MODULE_LICENSE("GPL"); 164 MODULE_VERSION(DRV_VERSION); 165 166 167 static bool ata_sstatus_online(u32 sstatus) 168 { 169 return (sstatus & 0xf) == 0x3; 170 } 171 172 /** 173 * ata_link_next - link iteration helper 174 * @link: the previous link, NULL to start 175 * @ap: ATA port containing links to iterate 176 * @mode: iteration mode, one of ATA_LITER_* 177 * 178 * LOCKING: 179 * Host lock or EH context. 180 * 181 * RETURNS: 182 * Pointer to the next link. 183 */ 184 struct ata_link *ata_link_next(struct ata_link *link, struct ata_port *ap, 185 enum ata_link_iter_mode mode) 186 { 187 BUG_ON(mode != ATA_LITER_EDGE && 188 mode != ATA_LITER_PMP_FIRST && mode != ATA_LITER_HOST_FIRST); 189 190 /* NULL link indicates start of iteration */ 191 if (!link) 192 switch (mode) { 193 case ATA_LITER_EDGE: 194 case ATA_LITER_PMP_FIRST: 195 if (sata_pmp_attached(ap)) 196 return ap->pmp_link; 197 /* fall through */ 198 case ATA_LITER_HOST_FIRST: 199 return &ap->link; 200 } 201 202 /* we just iterated over the host link, what's next? 
*/ 203 if (link == &ap->link) 204 switch (mode) { 205 case ATA_LITER_HOST_FIRST: 206 if (sata_pmp_attached(ap)) 207 return ap->pmp_link; 208 /* fall through */ 209 case ATA_LITER_PMP_FIRST: 210 if (unlikely(ap->slave_link)) 211 return ap->slave_link; 212 /* fall through */ 213 case ATA_LITER_EDGE: 214 return NULL; 215 } 216 217 /* slave_link excludes PMP */ 218 if (unlikely(link == ap->slave_link)) 219 return NULL; 220 221 /* we were over a PMP link */ 222 if (++link < ap->pmp_link + ap->nr_pmp_links) 223 return link; 224 225 if (mode == ATA_LITER_PMP_FIRST) 226 return &ap->link; 227 228 return NULL; 229 } 230 231 /** 232 * ata_dev_next - device iteration helper 233 * @dev: the previous device, NULL to start 234 * @link: ATA link containing devices to iterate 235 * @mode: iteration mode, one of ATA_DITER_* 236 * 237 * LOCKING: 238 * Host lock or EH context. 239 * 240 * RETURNS: 241 * Pointer to the next device. 242 */ 243 struct ata_device *ata_dev_next(struct ata_device *dev, struct ata_link *link, 244 enum ata_dev_iter_mode mode) 245 { 246 BUG_ON(mode != ATA_DITER_ENABLED && mode != ATA_DITER_ENABLED_REVERSE && 247 mode != ATA_DITER_ALL && mode != ATA_DITER_ALL_REVERSE); 248 249 /* NULL dev indicates start of iteration */ 250 if (!dev) 251 switch (mode) { 252 case ATA_DITER_ENABLED: 253 case ATA_DITER_ALL: 254 dev = link->device; 255 goto check; 256 case ATA_DITER_ENABLED_REVERSE: 257 case ATA_DITER_ALL_REVERSE: 258 dev = link->device + ata_link_max_devices(link) - 1; 259 goto check; 260 } 261 262 next: 263 /* move to the next one */ 264 switch (mode) { 265 case ATA_DITER_ENABLED: 266 case ATA_DITER_ALL: 267 if (++dev < link->device + ata_link_max_devices(link)) 268 goto check; 269 return NULL; 270 case ATA_DITER_ENABLED_REVERSE: 271 case ATA_DITER_ALL_REVERSE: 272 if (--dev >= link->device) 273 goto check; 274 return NULL; 275 } 276 277 check: 278 if ((mode == ATA_DITER_ENABLED || mode == ATA_DITER_ENABLED_REVERSE) && 279 !ata_dev_enabled(dev)) 280 goto next; 281 return dev; 282 } 283 284 /** 285 * ata_dev_phys_link - find physical link for a device 286 * @dev: ATA device to look up physical link for 287 * 288 * Look up physical link which @dev is attached to. Note that 289 * this is different from @dev->link only when @dev is on slave 290 * link. For all other cases, it's the same as @dev->link. 291 * 292 * LOCKING: 293 * Don't care. 294 * 295 * RETURNS: 296 * Pointer to the found physical link. 297 */ 298 struct ata_link *ata_dev_phys_link(struct ata_device *dev) 299 { 300 struct ata_port *ap = dev->link->ap; 301 302 if (!ap->slave_link) 303 return dev->link; 304 if (!dev->devno) 305 return &ap->link; 306 return ap->slave_link; 307 } 308 309 /** 310 * ata_force_cbl - force cable type according to libata.force 311 * @ap: ATA port of interest 312 * 313 * Force cable type according to libata.force and whine about it. 314 * The last entry which has matching port number is used, so it 315 * can be specified as part of device force parameters. For 316 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the 317 * same effect. 318 * 319 * LOCKING: 320 * EH context. 
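 *
 * Illustrative boot-time usage (the values are examples only):
 *
 *	libata.force=1.00:40c,udma4
 *
 * forces a 40-wire cable type on port 1; the transfer mode part of
 * the same entry is applied elsewhere (see ata_force_xfermask()).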
321 */ 322 void ata_force_cbl(struct ata_port *ap) 323 { 324 int i; 325 326 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 327 const struct ata_force_ent *fe = &ata_force_tbl[i]; 328 329 if (fe->port != -1 && fe->port != ap->print_id) 330 continue; 331 332 if (fe->param.cbl == ATA_CBL_NONE) 333 continue; 334 335 ap->cbl = fe->param.cbl; 336 ata_port_printk(ap, KERN_NOTICE, 337 "FORCE: cable set to %s\n", fe->param.name); 338 return; 339 } 340 } 341 342 /** 343 * ata_force_link_limits - force link limits according to libata.force 344 * @link: ATA link of interest 345 * 346 * Force link flags and SATA spd limit according to libata.force 347 * and whine about it. When only the port part is specified 348 * (e.g. 1:), the limit applies to all links connected to both 349 * the host link and all fan-out ports connected via PMP. If the 350 * device part is specified as 0 (e.g. 1.00:), it specifies the 351 * first fan-out link not the host link. Device number 15 always 352 * points to the host link whether PMP is attached or not. If the 353 * controller has slave link, device number 16 points to it. 354 * 355 * LOCKING: 356 * EH context. 357 */ 358 static void ata_force_link_limits(struct ata_link *link) 359 { 360 bool did_spd = false; 361 int linkno = link->pmp; 362 int i; 363 364 if (ata_is_host_link(link)) 365 linkno += 15; 366 367 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 368 const struct ata_force_ent *fe = &ata_force_tbl[i]; 369 370 if (fe->port != -1 && fe->port != link->ap->print_id) 371 continue; 372 373 if (fe->device != -1 && fe->device != linkno) 374 continue; 375 376 /* only honor the first spd limit */ 377 if (!did_spd && fe->param.spd_limit) { 378 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1; 379 ata_link_printk(link, KERN_NOTICE, 380 "FORCE: PHY spd limit set to %s\n", 381 fe->param.name); 382 did_spd = true; 383 } 384 385 /* let lflags stack */ 386 if (fe->param.lflags) { 387 link->flags |= fe->param.lflags; 388 ata_link_printk(link, KERN_NOTICE, 389 "FORCE: link flag 0x%x forced -> 0x%x\n", 390 fe->param.lflags, link->flags); 391 } 392 } 393 } 394 395 /** 396 * ata_force_xfermask - force xfermask according to libata.force 397 * @dev: ATA device of interest 398 * 399 * Force xfer_mask according to libata.force and whine about it. 400 * For consistency with link selection, device number 15 selects 401 * the first device connected to the host link. 402 * 403 * LOCKING: 404 * EH context. 
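 *
 * As an illustrative example (not a recommendation),
 * "libata.force=1.15:udma4" names the first device on the host link
 * of port 1 explicitly, which is useful when a PMP is attached,
 * while "1.00:udma4" reaches the same device on a PMP-less port.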
405 */ 406 static void ata_force_xfermask(struct ata_device *dev) 407 { 408 int devno = dev->link->pmp + dev->devno; 409 int alt_devno = devno; 410 int i; 411 412 /* allow n.15/16 for devices attached to host port */ 413 if (ata_is_host_link(dev->link)) 414 alt_devno += 15; 415 416 for (i = ata_force_tbl_size - 1; i >= 0; i--) { 417 const struct ata_force_ent *fe = &ata_force_tbl[i]; 418 unsigned long pio_mask, mwdma_mask, udma_mask; 419 420 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 421 continue; 422 423 if (fe->device != -1 && fe->device != devno && 424 fe->device != alt_devno) 425 continue; 426 427 if (!fe->param.xfer_mask) 428 continue; 429 430 ata_unpack_xfermask(fe->param.xfer_mask, 431 &pio_mask, &mwdma_mask, &udma_mask); 432 if (udma_mask) 433 dev->udma_mask = udma_mask; 434 else if (mwdma_mask) { 435 dev->udma_mask = 0; 436 dev->mwdma_mask = mwdma_mask; 437 } else { 438 dev->udma_mask = 0; 439 dev->mwdma_mask = 0; 440 dev->pio_mask = pio_mask; 441 } 442 443 ata_dev_printk(dev, KERN_NOTICE, 444 "FORCE: xfer_mask set to %s\n", fe->param.name); 445 return; 446 } 447 } 448 449 /** 450 * ata_force_horkage - force horkage according to libata.force 451 * @dev: ATA device of interest 452 * 453 * Force horkage according to libata.force and whine about it. 454 * For consistency with link selection, device number 15 selects 455 * the first device connected to the host link. 456 * 457 * LOCKING: 458 * EH context. 459 */ 460 static void ata_force_horkage(struct ata_device *dev) 461 { 462 int devno = dev->link->pmp + dev->devno; 463 int alt_devno = devno; 464 int i; 465 466 /* allow n.15/16 for devices attached to host port */ 467 if (ata_is_host_link(dev->link)) 468 alt_devno += 15; 469 470 for (i = 0; i < ata_force_tbl_size; i++) { 471 const struct ata_force_ent *fe = &ata_force_tbl[i]; 472 473 if (fe->port != -1 && fe->port != dev->link->ap->print_id) 474 continue; 475 476 if (fe->device != -1 && fe->device != devno && 477 fe->device != alt_devno) 478 continue; 479 480 if (!(~dev->horkage & fe->param.horkage_on) && 481 !(dev->horkage & fe->param.horkage_off)) 482 continue; 483 484 dev->horkage |= fe->param.horkage_on; 485 dev->horkage &= ~fe->param.horkage_off; 486 487 ata_dev_printk(dev, KERN_NOTICE, 488 "FORCE: horkage modified (%s)\n", fe->param.name); 489 } 490 } 491 492 /** 493 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode 494 * @opcode: SCSI opcode 495 * 496 * Determine ATAPI command type from @opcode. 497 * 498 * LOCKING: 499 * None. 500 * 501 * RETURNS: 502 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC} 503 */ 504 int atapi_cmd_type(u8 opcode) 505 { 506 switch (opcode) { 507 case GPCMD_READ_10: 508 case GPCMD_READ_12: 509 return ATAPI_READ; 510 511 case GPCMD_WRITE_10: 512 case GPCMD_WRITE_12: 513 case GPCMD_WRITE_AND_VERIFY_10: 514 return ATAPI_WRITE; 515 516 case GPCMD_READ_CD: 517 case GPCMD_READ_CD_MSF: 518 return ATAPI_READ_CD; 519 520 case ATA_16: 521 case ATA_12: 522 if (atapi_passthru16) 523 return ATAPI_PASS_THRU; 524 /* fall thru */ 525 default: 526 return ATAPI_MISC; 527 } 528 } 529 530 /** 531 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure 532 * @tf: Taskfile to convert 533 * @pmp: Port multiplier port 534 * @is_cmd: This FIS is for command 535 * @fis: Buffer into which data will output 536 * 537 * Converts a standard ATA taskfile to a Serial ATA 538 * FIS structure (Register - Host to Device). 539 * 540 * LOCKING: 541 * Inherited from caller. 
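 *
 * Minimal usage sketch (illustrative; the 20-byte buffer matches
 * the Register - Host to Device FIS this helper fills in):
 *
 *	u8 fis[20];
 *
 *	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, fis);
 *
 * which is the typical way a SATA LLD hands a taskfile to its HBA,
 * with @is_cmd set so that bit 7 of byte 1 marks a command FIS.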
542 */ 543 void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis) 544 { 545 fis[0] = 0x27; /* Register - Host to Device FIS */ 546 fis[1] = pmp & 0xf; /* Port multiplier number*/ 547 if (is_cmd) 548 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */ 549 550 fis[2] = tf->command; 551 fis[3] = tf->feature; 552 553 fis[4] = tf->lbal; 554 fis[5] = tf->lbam; 555 fis[6] = tf->lbah; 556 fis[7] = tf->device; 557 558 fis[8] = tf->hob_lbal; 559 fis[9] = tf->hob_lbam; 560 fis[10] = tf->hob_lbah; 561 fis[11] = tf->hob_feature; 562 563 fis[12] = tf->nsect; 564 fis[13] = tf->hob_nsect; 565 fis[14] = 0; 566 fis[15] = tf->ctl; 567 568 fis[16] = 0; 569 fis[17] = 0; 570 fis[18] = 0; 571 fis[19] = 0; 572 } 573 574 /** 575 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile 576 * @fis: Buffer from which data will be input 577 * @tf: Taskfile to output 578 * 579 * Converts a serial ATA FIS structure to a standard ATA taskfile. 580 * 581 * LOCKING: 582 * Inherited from caller. 583 */ 584 585 void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf) 586 { 587 tf->command = fis[2]; /* status */ 588 tf->feature = fis[3]; /* error */ 589 590 tf->lbal = fis[4]; 591 tf->lbam = fis[5]; 592 tf->lbah = fis[6]; 593 tf->device = fis[7]; 594 595 tf->hob_lbal = fis[8]; 596 tf->hob_lbam = fis[9]; 597 tf->hob_lbah = fis[10]; 598 599 tf->nsect = fis[12]; 600 tf->hob_nsect = fis[13]; 601 } 602 603 static const u8 ata_rw_cmds[] = { 604 /* pio multi */ 605 ATA_CMD_READ_MULTI, 606 ATA_CMD_WRITE_MULTI, 607 ATA_CMD_READ_MULTI_EXT, 608 ATA_CMD_WRITE_MULTI_EXT, 609 0, 610 0, 611 0, 612 ATA_CMD_WRITE_MULTI_FUA_EXT, 613 /* pio */ 614 ATA_CMD_PIO_READ, 615 ATA_CMD_PIO_WRITE, 616 ATA_CMD_PIO_READ_EXT, 617 ATA_CMD_PIO_WRITE_EXT, 618 0, 619 0, 620 0, 621 0, 622 /* dma */ 623 ATA_CMD_READ, 624 ATA_CMD_WRITE, 625 ATA_CMD_READ_EXT, 626 ATA_CMD_WRITE_EXT, 627 0, 628 0, 629 0, 630 ATA_CMD_WRITE_FUA_EXT 631 }; 632 633 /** 634 * ata_rwcmd_protocol - set taskfile r/w commands and protocol 635 * @tf: command to examine and configure 636 * @dev: device tf belongs to 637 * 638 * Examine the device configuration and tf->flags to calculate 639 * the proper read/write commands and protocol to use. 640 * 641 * LOCKING: 642 * caller. 643 */ 644 static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev) 645 { 646 u8 cmd; 647 648 int index, fua, lba48, write; 649 650 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0; 651 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0; 652 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0; 653 654 if (dev->flags & ATA_DFLAG_PIO) { 655 tf->protocol = ATA_PROT_PIO; 656 index = dev->multi_count ? 0 : 8; 657 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) { 658 /* Unable to use DMA due to host limitation */ 659 tf->protocol = ATA_PROT_PIO; 660 index = dev->multi_count ? 0 : 8; 661 } else { 662 tf->protocol = ATA_PROT_DMA; 663 index = 16; 664 } 665 666 cmd = ata_rw_cmds[index + fua + lba48 + write]; 667 if (cmd) { 668 tf->command = cmd; 669 return 0; 670 } 671 return -1; 672 } 673 674 /** 675 * ata_tf_read_block - Read block address from ATA taskfile 676 * @tf: ATA taskfile of interest 677 * @dev: ATA device @tf belongs to 678 * 679 * LOCKING: 680 * None. 681 * 682 * Read block address from @tf. This function can handle all 683 * three address formats - LBA, LBA48 and CHS. tf->protocol and 684 * flags select the address format to use. 685 * 686 * RETURNS: 687 * Block address read from @tf. 
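 *
 * Worked CHS example (numbers are illustrative): with cyl 2, head 3,
 * sect 4 on a translation of 16 heads and 63 sectors per track, the
 * computation below yields
 *
 *	block = (2 * 16 + 3) * 63 + 4 = 2209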
688 */ 689 u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev) 690 { 691 u64 block = 0; 692 693 if (tf->flags & ATA_TFLAG_LBA) { 694 if (tf->flags & ATA_TFLAG_LBA48) { 695 block |= (u64)tf->hob_lbah << 40; 696 block |= (u64)tf->hob_lbam << 32; 697 block |= (u64)tf->hob_lbal << 24; 698 } else 699 block |= (tf->device & 0xf) << 24; 700 701 block |= tf->lbah << 16; 702 block |= tf->lbam << 8; 703 block |= tf->lbal; 704 } else { 705 u32 cyl, head, sect; 706 707 cyl = tf->lbam | (tf->lbah << 8); 708 head = tf->device & 0xf; 709 sect = tf->lbal; 710 711 block = (cyl * dev->heads + head) * dev->sectors + sect; 712 } 713 714 return block; 715 } 716 717 /** 718 * ata_build_rw_tf - Build ATA taskfile for given read/write request 719 * @tf: Target ATA taskfile 720 * @dev: ATA device @tf belongs to 721 * @block: Block address 722 * @n_block: Number of blocks 723 * @tf_flags: RW/FUA etc... 724 * @tag: tag 725 * 726 * LOCKING: 727 * None. 728 * 729 * Build ATA taskfile @tf for read/write request described by 730 * @block, @n_block, @tf_flags and @tag on @dev. 731 * 732 * RETURNS: 733 * 734 * 0 on success, -ERANGE if the request is too large for @dev, 735 * -EINVAL if the request is invalid. 736 */ 737 int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev, 738 u64 block, u32 n_block, unsigned int tf_flags, 739 unsigned int tag) 740 { 741 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 742 tf->flags |= tf_flags; 743 744 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) { 745 /* yay, NCQ */ 746 if (!lba_48_ok(block, n_block)) 747 return -ERANGE; 748 749 tf->protocol = ATA_PROT_NCQ; 750 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48; 751 752 if (tf->flags & ATA_TFLAG_WRITE) 753 tf->command = ATA_CMD_FPDMA_WRITE; 754 else 755 tf->command = ATA_CMD_FPDMA_READ; 756 757 tf->nsect = tag << 3; 758 tf->hob_feature = (n_block >> 8) & 0xff; 759 tf->feature = n_block & 0xff; 760 761 tf->hob_lbah = (block >> 40) & 0xff; 762 tf->hob_lbam = (block >> 32) & 0xff; 763 tf->hob_lbal = (block >> 24) & 0xff; 764 tf->lbah = (block >> 16) & 0xff; 765 tf->lbam = (block >> 8) & 0xff; 766 tf->lbal = block & 0xff; 767 768 tf->device = 1 << 6; 769 if (tf->flags & ATA_TFLAG_FUA) 770 tf->device |= 1 << 7; 771 } else if (dev->flags & ATA_DFLAG_LBA) { 772 tf->flags |= ATA_TFLAG_LBA; 773 774 if (lba_28_ok(block, n_block)) { 775 /* use LBA28 */ 776 tf->device |= (block >> 24) & 0xf; 777 } else if (lba_48_ok(block, n_block)) { 778 if (!(dev->flags & ATA_DFLAG_LBA48)) 779 return -ERANGE; 780 781 /* use LBA48 */ 782 tf->flags |= ATA_TFLAG_LBA48; 783 784 tf->hob_nsect = (n_block >> 8) & 0xff; 785 786 tf->hob_lbah = (block >> 40) & 0xff; 787 tf->hob_lbam = (block >> 32) & 0xff; 788 tf->hob_lbal = (block >> 24) & 0xff; 789 } else 790 /* request too large even for LBA48 */ 791 return -ERANGE; 792 793 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0)) 794 return -EINVAL; 795 796 tf->nsect = n_block & 0xff; 797 798 tf->lbah = (block >> 16) & 0xff; 799 tf->lbam = (block >> 8) & 0xff; 800 tf->lbal = block & 0xff; 801 802 tf->device |= ATA_LBA; 803 } else { 804 /* CHS */ 805 u32 sect, head, cyl, track; 806 807 /* The request -may- be too large for CHS addressing. 
		 */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned long xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
unsigned long ata_pack_xfermask(unsigned long pio_mask,
				unsigned long mwdma_mask,
				unsigned long udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
			 unsigned long *mwdma_mask, unsigned long *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0xff if no match found.
 */
u8 ata_xfer_mask2mode(unsigned long xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0xff;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
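 *
 * Illustrative sketch of how the mask helpers compose (the chosen
 * masks are examples only):
 *
 *	unsigned long mask = ata_pack_xfermask(ATA_PIO4, 0, ATA_UDMA5);
 *	u8 mode = ata_xfer_mask2mode(mask);	(yields XFER_UDMA_5)
 *	unsigned long udma = ata_xfer_mode2mask(mode);	(UDMA0-5 bits)
 *
 * ata_xfer_mask2mode() keys off the highest bit set in the packed
 * mask, and ata_xfer_mode2mask() expands a mode back into every mode
 * of the same class up to and including it.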
930 */ 931 unsigned long ata_xfer_mode2mask(u8 xfer_mode) 932 { 933 const struct ata_xfer_ent *ent; 934 935 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 936 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 937 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1) 938 & ~((1 << ent->shift) - 1); 939 return 0; 940 } 941 942 /** 943 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_* 944 * @xfer_mode: XFER_* of interest 945 * 946 * Return matching xfer_shift for @xfer_mode. 947 * 948 * LOCKING: 949 * None. 950 * 951 * RETURNS: 952 * Matching xfer_shift, -1 if no match found. 953 */ 954 int ata_xfer_mode2shift(unsigned long xfer_mode) 955 { 956 const struct ata_xfer_ent *ent; 957 958 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 959 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits) 960 return ent->shift; 961 return -1; 962 } 963 964 /** 965 * ata_mode_string - convert xfer_mask to string 966 * @xfer_mask: mask of bits supported; only highest bit counts. 967 * 968 * Determine string which represents the highest speed 969 * (highest bit in @modemask). 970 * 971 * LOCKING: 972 * None. 973 * 974 * RETURNS: 975 * Constant C string representing highest speed listed in 976 * @mode_mask, or the constant C string "<n/a>". 977 */ 978 const char *ata_mode_string(unsigned long xfer_mask) 979 { 980 static const char * const xfer_mode_str[] = { 981 "PIO0", 982 "PIO1", 983 "PIO2", 984 "PIO3", 985 "PIO4", 986 "PIO5", 987 "PIO6", 988 "MWDMA0", 989 "MWDMA1", 990 "MWDMA2", 991 "MWDMA3", 992 "MWDMA4", 993 "UDMA/16", 994 "UDMA/25", 995 "UDMA/33", 996 "UDMA/44", 997 "UDMA/66", 998 "UDMA/100", 999 "UDMA/133", 1000 "UDMA7", 1001 }; 1002 int highbit; 1003 1004 highbit = fls(xfer_mask) - 1; 1005 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str)) 1006 return xfer_mode_str[highbit]; 1007 return "<n/a>"; 1008 } 1009 1010 static const char *sata_spd_string(unsigned int spd) 1011 { 1012 static const char * const spd_str[] = { 1013 "1.5 Gbps", 1014 "3.0 Gbps", 1015 "6.0 Gbps", 1016 }; 1017 1018 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) 1019 return "<unknown>"; 1020 return spd_str[spd - 1]; 1021 } 1022 1023 static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy) 1024 { 1025 struct ata_link *link = dev->link; 1026 struct ata_port *ap = link->ap; 1027 u32 scontrol; 1028 unsigned int err_mask; 1029 int rc; 1030 1031 /* 1032 * disallow DIPM for drivers which haven't set 1033 * ATA_FLAG_IPM. This is because when DIPM is enabled, 1034 * phy ready will be set in the interrupt status on 1035 * state changes, which will cause some drivers to 1036 * think there are errors - additionally drivers will 1037 * need to disable hot plug. 1038 */ 1039 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) { 1040 ap->pm_policy = NOT_AVAILABLE; 1041 return -EINVAL; 1042 } 1043 1044 /* 1045 * For DIPM, we will only enable it for the 1046 * min_power setting. 1047 * 1048 * Why? Because Disks are too stupid to know that 1049 * If the host rejects a request to go to SLUMBER 1050 * they should retry at PARTIAL, and instead it 1051 * just would give up. So, for medium_power to 1052 * work at all, we need to only allow HIPM. 
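	 *
	 * For reference, the SControl IPM field (bits 11:8) that the
	 * code below pokes encodes which transitions the host allows
	 * (informal summary; the SATA spec is authoritative):
	 *
	 *	0h - no restrictions, PARTIAL and SLUMBER both allowed
	 *	1h - transitions to PARTIAL disabled
	 *	2h - transitions to SLUMBER disabled
	 *	3h - transitions to both PARTIAL and SLUMBER disabled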
1053 */ 1054 rc = sata_scr_read(link, SCR_CONTROL, &scontrol); 1055 if (rc) 1056 return rc; 1057 1058 switch (policy) { 1059 case MIN_POWER: 1060 /* no restrictions on IPM transitions */ 1061 scontrol &= ~(0x3 << 8); 1062 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 1063 if (rc) 1064 return rc; 1065 1066 /* enable DIPM */ 1067 if (dev->flags & ATA_DFLAG_DIPM) 1068 err_mask = ata_dev_set_feature(dev, 1069 SETFEATURES_SATA_ENABLE, SATA_DIPM); 1070 break; 1071 case MEDIUM_POWER: 1072 /* allow IPM to PARTIAL */ 1073 scontrol &= ~(0x1 << 8); 1074 scontrol |= (0x2 << 8); 1075 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 1076 if (rc) 1077 return rc; 1078 1079 /* 1080 * we don't have to disable DIPM since IPM flags 1081 * disallow transitions to SLUMBER, which effectively 1082 * disable DIPM if it does not support PARTIAL 1083 */ 1084 break; 1085 case NOT_AVAILABLE: 1086 case MAX_PERFORMANCE: 1087 /* disable all IPM transitions */ 1088 scontrol |= (0x3 << 8); 1089 rc = sata_scr_write(link, SCR_CONTROL, scontrol); 1090 if (rc) 1091 return rc; 1092 1093 /* 1094 * we don't have to disable DIPM since IPM flags 1095 * disallow all transitions which effectively 1096 * disable DIPM anyway. 1097 */ 1098 break; 1099 } 1100 1101 /* FIXME: handle SET FEATURES failure */ 1102 (void) err_mask; 1103 1104 return 0; 1105 } 1106 1107 /** 1108 * ata_dev_enable_pm - enable SATA interface power management 1109 * @dev: device to enable power management 1110 * @policy: the link power management policy 1111 * 1112 * Enable SATA Interface power management. This will enable 1113 * Device Interface Power Management (DIPM) for min_power 1114 * policy, and then call driver specific callbacks for 1115 * enabling Host Initiated Power management. 1116 * 1117 * Locking: Caller. 1118 * Returns: -EINVAL if IPM is not supported, 0 otherwise. 1119 */ 1120 void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy) 1121 { 1122 int rc = 0; 1123 struct ata_port *ap = dev->link->ap; 1124 1125 /* set HIPM first, then DIPM */ 1126 if (ap->ops->enable_pm) 1127 rc = ap->ops->enable_pm(ap, policy); 1128 if (rc) 1129 goto enable_pm_out; 1130 rc = ata_dev_set_dipm(dev, policy); 1131 1132 enable_pm_out: 1133 if (rc) 1134 ap->pm_policy = MAX_PERFORMANCE; 1135 else 1136 ap->pm_policy = policy; 1137 return /* rc */; /* hopefully we can use 'rc' eventually */ 1138 } 1139 1140 #ifdef CONFIG_PM 1141 /** 1142 * ata_dev_disable_pm - disable SATA interface power management 1143 * @dev: device to disable power management 1144 * 1145 * Disable SATA Interface power management. This will disable 1146 * Device Interface Power Management (DIPM) without changing 1147 * policy, call driver specific callbacks for disabling Host 1148 * Initiated Power management. 1149 * 1150 * Locking: Caller. 
 * Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */

void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EH_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}

#ifdef CONFIG_PM
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_for_each_link(link, ap, EDGE) {
			ata_for_each_dev(dev, link, ALL)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 * %ATA_DEV_UNKNOWN in the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, the SerialATA
	 * spec has never mentioned using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multipliers and 0x3c/0xc3 to identify SEMB devices.
	 * ATA/ATAPI-7 dropped the descriptions of 0x3c/0xc3 and
	 * 0x69/0x96 shortly afterwards and described them as reserved
	 * for SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
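	 *
	 * A minimal sketch of the usual call-site shape (illustrative
	 * only, the names are not from a specific driver):
	 *
	 *	struct ata_taskfile tf;
	 *
	 *	ata_tf_from_fis(d2h_fis, &tf);
	 *	class = ata_dev_classify(&tf);
	 *
	 * i.e. the signature is taken from the initial Register -
	 * Device to Host FIS (or the shadow registers) after reset.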
1233 */ 1234 if ((tf->lbam == 0) && (tf->lbah == 0)) { 1235 DPRINTK("found ATA device by sig\n"); 1236 return ATA_DEV_ATA; 1237 } 1238 1239 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) { 1240 DPRINTK("found ATAPI device by sig\n"); 1241 return ATA_DEV_ATAPI; 1242 } 1243 1244 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) { 1245 DPRINTK("found PMP device by sig\n"); 1246 return ATA_DEV_PMP; 1247 } 1248 1249 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) { 1250 printk(KERN_INFO "ata: SEMB device ignored\n"); 1251 return ATA_DEV_SEMB_UNSUP; /* not yet */ 1252 } 1253 1254 DPRINTK("unknown device\n"); 1255 return ATA_DEV_UNKNOWN; 1256 } 1257 1258 /** 1259 * ata_id_string - Convert IDENTIFY DEVICE page into string 1260 * @id: IDENTIFY DEVICE results we will examine 1261 * @s: string into which data is output 1262 * @ofs: offset into identify device page 1263 * @len: length of string to return. must be an even number. 1264 * 1265 * The strings in the IDENTIFY DEVICE page are broken up into 1266 * 16-bit chunks. Run through the string, and output each 1267 * 8-bit chunk linearly, regardless of platform. 1268 * 1269 * LOCKING: 1270 * caller. 1271 */ 1272 1273 void ata_id_string(const u16 *id, unsigned char *s, 1274 unsigned int ofs, unsigned int len) 1275 { 1276 unsigned int c; 1277 1278 BUG_ON(len & 1); 1279 1280 while (len > 0) { 1281 c = id[ofs] >> 8; 1282 *s = c; 1283 s++; 1284 1285 c = id[ofs] & 0xff; 1286 *s = c; 1287 s++; 1288 1289 ofs++; 1290 len -= 2; 1291 } 1292 } 1293 1294 /** 1295 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string 1296 * @id: IDENTIFY DEVICE results we will examine 1297 * @s: string into which data is output 1298 * @ofs: offset into identify device page 1299 * @len: length of string to return. must be an odd number. 1300 * 1301 * This function is identical to ata_id_string except that it 1302 * trims trailing spaces and terminates the resulting string with 1303 * null. @len must be actual maximum length (even number) + 1. 1304 * 1305 * LOCKING: 1306 * caller. 
1307 */ 1308 void ata_id_c_string(const u16 *id, unsigned char *s, 1309 unsigned int ofs, unsigned int len) 1310 { 1311 unsigned char *p; 1312 1313 ata_id_string(id, s, ofs, len - 1); 1314 1315 p = s + strnlen(s, len - 1); 1316 while (p > s && p[-1] == ' ') 1317 p--; 1318 *p = '\0'; 1319 } 1320 1321 static u64 ata_id_n_sectors(const u16 *id) 1322 { 1323 if (ata_id_has_lba(id)) { 1324 if (ata_id_has_lba48(id)) 1325 return ata_id_u64(id, ATA_ID_LBA_CAPACITY_2); 1326 else 1327 return ata_id_u32(id, ATA_ID_LBA_CAPACITY); 1328 } else { 1329 if (ata_id_current_chs_valid(id)) 1330 return id[ATA_ID_CUR_CYLS] * id[ATA_ID_CUR_HEADS] * 1331 id[ATA_ID_CUR_SECTORS]; 1332 else 1333 return id[ATA_ID_CYLS] * id[ATA_ID_HEADS] * 1334 id[ATA_ID_SECTORS]; 1335 } 1336 } 1337 1338 u64 ata_tf_to_lba48(const struct ata_taskfile *tf) 1339 { 1340 u64 sectors = 0; 1341 1342 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40; 1343 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32; 1344 sectors |= ((u64)(tf->hob_lbal & 0xff)) << 24; 1345 sectors |= (tf->lbah & 0xff) << 16; 1346 sectors |= (tf->lbam & 0xff) << 8; 1347 sectors |= (tf->lbal & 0xff); 1348 1349 return sectors; 1350 } 1351 1352 u64 ata_tf_to_lba(const struct ata_taskfile *tf) 1353 { 1354 u64 sectors = 0; 1355 1356 sectors |= (tf->device & 0x0f) << 24; 1357 sectors |= (tf->lbah & 0xff) << 16; 1358 sectors |= (tf->lbam & 0xff) << 8; 1359 sectors |= (tf->lbal & 0xff); 1360 1361 return sectors; 1362 } 1363 1364 /** 1365 * ata_read_native_max_address - Read native max address 1366 * @dev: target device 1367 * @max_sectors: out parameter for the result native max address 1368 * 1369 * Perform an LBA48 or LBA28 native size query upon the device in 1370 * question. 1371 * 1372 * RETURNS: 1373 * 0 on success, -EACCES if command is aborted by the drive. 1374 * -EIO on other errors. 1375 */ 1376 static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors) 1377 { 1378 unsigned int err_mask; 1379 struct ata_taskfile tf; 1380 int lba48 = ata_id_has_lba48(dev->id); 1381 1382 ata_tf_init(dev, &tf); 1383 1384 /* always clear all address registers */ 1385 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 1386 1387 if (lba48) { 1388 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT; 1389 tf.flags |= ATA_TFLAG_LBA48; 1390 } else 1391 tf.command = ATA_CMD_READ_NATIVE_MAX; 1392 1393 tf.protocol |= ATA_PROT_NODATA; 1394 tf.device |= ATA_LBA; 1395 1396 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1397 if (err_mask) { 1398 ata_dev_printk(dev, KERN_WARNING, "failed to read native " 1399 "max address (err_mask=0x%x)\n", err_mask); 1400 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 1401 return -EACCES; 1402 return -EIO; 1403 } 1404 1405 if (lba48) 1406 *max_sectors = ata_tf_to_lba48(&tf) + 1; 1407 else 1408 *max_sectors = ata_tf_to_lba(&tf) + 1; 1409 if (dev->horkage & ATA_HORKAGE_HPA_SIZE) 1410 (*max_sectors)--; 1411 return 0; 1412 } 1413 1414 /** 1415 * ata_set_max_sectors - Set max sectors 1416 * @dev: target device 1417 * @new_sectors: new max sectors value to set for the device 1418 * 1419 * Set max sectors of @dev to @new_sectors. 1420 * 1421 * RETURNS: 1422 * 0 on success, -EACCES if command is aborted or denied (due to 1423 * previous non-volatile SET_MAX) by the drive. -EIO on other 1424 * errors. 
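 *
 * Worked example (illustrative numbers): for media that is natively
 * 1000 sectors, READ NATIVE MAX ADDRESS reports last LBA 999, which
 * ata_read_native_max_address() returns as 999 + 1 = 1000 sectors;
 * unlocking the HPA then calls this function with 1000, which the
 * new_sectors-- below turns back into last LBA 999, since
 * SET MAX ADDRESS also speaks in terms of the last addressable LBA
 * rather than a sector count.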
1425 */ 1426 static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors) 1427 { 1428 unsigned int err_mask; 1429 struct ata_taskfile tf; 1430 int lba48 = ata_id_has_lba48(dev->id); 1431 1432 new_sectors--; 1433 1434 ata_tf_init(dev, &tf); 1435 1436 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR; 1437 1438 if (lba48) { 1439 tf.command = ATA_CMD_SET_MAX_EXT; 1440 tf.flags |= ATA_TFLAG_LBA48; 1441 1442 tf.hob_lbal = (new_sectors >> 24) & 0xff; 1443 tf.hob_lbam = (new_sectors >> 32) & 0xff; 1444 tf.hob_lbah = (new_sectors >> 40) & 0xff; 1445 } else { 1446 tf.command = ATA_CMD_SET_MAX; 1447 1448 tf.device |= (new_sectors >> 24) & 0xf; 1449 } 1450 1451 tf.protocol |= ATA_PROT_NODATA; 1452 tf.device |= ATA_LBA; 1453 1454 tf.lbal = (new_sectors >> 0) & 0xff; 1455 tf.lbam = (new_sectors >> 8) & 0xff; 1456 tf.lbah = (new_sectors >> 16) & 0xff; 1457 1458 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1459 if (err_mask) { 1460 ata_dev_printk(dev, KERN_WARNING, "failed to set " 1461 "max address (err_mask=0x%x)\n", err_mask); 1462 if (err_mask == AC_ERR_DEV && 1463 (tf.feature & (ATA_ABORTED | ATA_IDNF))) 1464 return -EACCES; 1465 return -EIO; 1466 } 1467 1468 return 0; 1469 } 1470 1471 /** 1472 * ata_hpa_resize - Resize a device with an HPA set 1473 * @dev: Device to resize 1474 * 1475 * Read the size of an LBA28 or LBA48 disk with HPA features and resize 1476 * it if required to the full size of the media. The caller must check 1477 * the drive has the HPA feature set enabled. 1478 * 1479 * RETURNS: 1480 * 0 on success, -errno on failure. 1481 */ 1482 static int ata_hpa_resize(struct ata_device *dev) 1483 { 1484 struct ata_eh_context *ehc = &dev->link->eh_context; 1485 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 1486 u64 sectors = ata_id_n_sectors(dev->id); 1487 u64 native_sectors; 1488 int rc; 1489 1490 /* do we need to do it? */ 1491 if (dev->class != ATA_DEV_ATA || 1492 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || 1493 (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) 1494 return 0; 1495 1496 /* read native max address */ 1497 rc = ata_read_native_max_address(dev, &native_sectors); 1498 if (rc) { 1499 /* If device aborted the command or HPA isn't going to 1500 * be unlocked, skip HPA resizing. 1501 */ 1502 if (rc == -EACCES || !ata_ignore_hpa) { 1503 ata_dev_printk(dev, KERN_WARNING, "HPA support seems " 1504 "broken, skipping HPA handling\n"); 1505 dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 1506 1507 /* we can continue if device aborted the command */ 1508 if (rc == -EACCES) 1509 rc = 0; 1510 } 1511 1512 return rc; 1513 } 1514 1515 /* nothing to do? 
*/ 1516 if (native_sectors <= sectors || !ata_ignore_hpa) { 1517 if (!print_info || native_sectors == sectors) 1518 return 0; 1519 1520 if (native_sectors > sectors) 1521 ata_dev_printk(dev, KERN_INFO, 1522 "HPA detected: current %llu, native %llu\n", 1523 (unsigned long long)sectors, 1524 (unsigned long long)native_sectors); 1525 else if (native_sectors < sectors) 1526 ata_dev_printk(dev, KERN_WARNING, 1527 "native sectors (%llu) is smaller than " 1528 "sectors (%llu)\n", 1529 (unsigned long long)native_sectors, 1530 (unsigned long long)sectors); 1531 return 0; 1532 } 1533 1534 /* let's unlock HPA */ 1535 rc = ata_set_max_sectors(dev, native_sectors); 1536 if (rc == -EACCES) { 1537 /* if device aborted the command, skip HPA resizing */ 1538 ata_dev_printk(dev, KERN_WARNING, "device aborted resize " 1539 "(%llu -> %llu), skipping HPA handling\n", 1540 (unsigned long long)sectors, 1541 (unsigned long long)native_sectors); 1542 dev->horkage |= ATA_HORKAGE_BROKEN_HPA; 1543 return 0; 1544 } else if (rc) 1545 return rc; 1546 1547 /* re-read IDENTIFY data */ 1548 rc = ata_dev_reread_id(dev, 0); 1549 if (rc) { 1550 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY " 1551 "data after HPA resizing\n"); 1552 return rc; 1553 } 1554 1555 if (print_info) { 1556 u64 new_sectors = ata_id_n_sectors(dev->id); 1557 ata_dev_printk(dev, KERN_INFO, 1558 "HPA unlocked: %llu -> %llu, native %llu\n", 1559 (unsigned long long)sectors, 1560 (unsigned long long)new_sectors, 1561 (unsigned long long)native_sectors); 1562 } 1563 1564 return 0; 1565 } 1566 1567 /** 1568 * ata_dump_id - IDENTIFY DEVICE info debugging output 1569 * @id: IDENTIFY DEVICE page to dump 1570 * 1571 * Dump selected 16-bit words from the given IDENTIFY DEVICE 1572 * page. 1573 * 1574 * LOCKING: 1575 * caller. 1576 */ 1577 1578 static inline void ata_dump_id(const u16 *id) 1579 { 1580 DPRINTK("49==0x%04x " 1581 "53==0x%04x " 1582 "63==0x%04x " 1583 "64==0x%04x " 1584 "75==0x%04x \n", 1585 id[49], 1586 id[53], 1587 id[63], 1588 id[64], 1589 id[75]); 1590 DPRINTK("80==0x%04x " 1591 "81==0x%04x " 1592 "82==0x%04x " 1593 "83==0x%04x " 1594 "84==0x%04x \n", 1595 id[80], 1596 id[81], 1597 id[82], 1598 id[83], 1599 id[84]); 1600 DPRINTK("88==0x%04x " 1601 "93==0x%04x\n", 1602 id[88], 1603 id[93]); 1604 } 1605 1606 /** 1607 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data 1608 * @id: IDENTIFY data to compute xfer mask from 1609 * 1610 * Compute the xfermask for this device. This is not as trivial 1611 * as it seems if we must consider early devices correctly. 1612 * 1613 * FIXME: pre IDE drive timing (do we care ?). 1614 * 1615 * LOCKING: 1616 * None. 1617 * 1618 * RETURNS: 1619 * Computed xfermask 1620 */ 1621 unsigned long ata_id_xfermask(const u16 *id) 1622 { 1623 unsigned long pio_mask, mwdma_mask, udma_mask; 1624 1625 /* Usual case. Word 53 indicates word 64 is valid */ 1626 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) { 1627 pio_mask = id[ATA_ID_PIO_MODES] & 0x03; 1628 pio_mask <<= 3; 1629 pio_mask |= 0x7; 1630 } else { 1631 /* If word 64 isn't valid then Word 51 high byte holds 1632 * the PIO timing number for the maximum. Turn it into 1633 * a mask. 1634 */ 1635 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF; 1636 if (mode < 5) /* Valid PIO range */ 1637 pio_mask = (2 << mode) - 1; 1638 else 1639 pio_mask = 1; 1640 1641 /* But wait.. there's more. Design your standards by 1642 * committee and you too can get a free iordy field to 1643 * process. However its the speeds not the modes that 1644 * are supported... 
		 * Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
 * ata_pio_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @data: data for the port_task function to use
 * @delay: delay time in msecs for workqueue function
 *
 * Schedule the port_task function to be run with @data after
 * @delay msecs.  There is one port_task per port and it's the
 * user's (low level driver's) responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_pio_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sgl: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 * @timeout: Timeout in msecs (0 for default)
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's the caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
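 *
 * Minimal usage sketch (assumes a caller-provided "buf"/"buflen";
 * the names and the chosen command are illustrative):
 *
 *	struct scatterlist sg;
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	sg_init_one(&sg, buf, buflen);
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	err_mask = ata_exec_internal_sg(dev, &tf, NULL, DMA_FROM_DEVICE,
 *					&sg, 1, 0);
 *
 * which is essentially what the ata_exec_internal() wrapper below
 * does with a flat buffer on the caller's behalf.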
1746 * 1747 * RETURNS: 1748 * Zero on success, AC_ERR_* mask on failure 1749 */ 1750 unsigned ata_exec_internal_sg(struct ata_device *dev, 1751 struct ata_taskfile *tf, const u8 *cdb, 1752 int dma_dir, struct scatterlist *sgl, 1753 unsigned int n_elem, unsigned long timeout) 1754 { 1755 struct ata_link *link = dev->link; 1756 struct ata_port *ap = link->ap; 1757 u8 command = tf->command; 1758 int auto_timeout = 0; 1759 struct ata_queued_cmd *qc; 1760 unsigned int tag, preempted_tag; 1761 u32 preempted_sactive, preempted_qc_active; 1762 int preempted_nr_active_links; 1763 DECLARE_COMPLETION_ONSTACK(wait); 1764 unsigned long flags; 1765 unsigned int err_mask; 1766 int rc; 1767 1768 spin_lock_irqsave(ap->lock, flags); 1769 1770 /* no internal command while frozen */ 1771 if (ap->pflags & ATA_PFLAG_FROZEN) { 1772 spin_unlock_irqrestore(ap->lock, flags); 1773 return AC_ERR_SYSTEM; 1774 } 1775 1776 /* initialize internal qc */ 1777 1778 /* XXX: Tag 0 is used for drivers with legacy EH as some 1779 * drivers choke if any other tag is given. This breaks 1780 * ata_tag_internal() test for those drivers. Don't use new 1781 * EH stuff without converting to it. 1782 */ 1783 if (ap->ops->error_handler) 1784 tag = ATA_TAG_INTERNAL; 1785 else 1786 tag = 0; 1787 1788 if (test_and_set_bit(tag, &ap->qc_allocated)) 1789 BUG(); 1790 qc = __ata_qc_from_tag(ap, tag); 1791 1792 qc->tag = tag; 1793 qc->scsicmd = NULL; 1794 qc->ap = ap; 1795 qc->dev = dev; 1796 ata_qc_reinit(qc); 1797 1798 preempted_tag = link->active_tag; 1799 preempted_sactive = link->sactive; 1800 preempted_qc_active = ap->qc_active; 1801 preempted_nr_active_links = ap->nr_active_links; 1802 link->active_tag = ATA_TAG_POISON; 1803 link->sactive = 0; 1804 ap->qc_active = 0; 1805 ap->nr_active_links = 0; 1806 1807 /* prepare & issue qc */ 1808 qc->tf = *tf; 1809 if (cdb) 1810 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); 1811 qc->flags |= ATA_QCFLAG_RESULT_TF; 1812 qc->dma_dir = dma_dir; 1813 if (dma_dir != DMA_NONE) { 1814 unsigned int i, buflen = 0; 1815 struct scatterlist *sg; 1816 1817 for_each_sg(sgl, sg, n_elem, i) 1818 buflen += sg->length; 1819 1820 ata_sg_init(qc, sgl, n_elem); 1821 qc->nbytes = buflen; 1822 } 1823 1824 qc->private_data = &wait; 1825 qc->complete_fn = ata_qc_complete_internal; 1826 1827 ata_qc_issue(qc); 1828 1829 spin_unlock_irqrestore(ap->lock, flags); 1830 1831 if (!timeout) { 1832 if (ata_probe_timeout) 1833 timeout = ata_probe_timeout * 1000; 1834 else { 1835 timeout = ata_internal_cmd_timeout(dev, command); 1836 auto_timeout = 1; 1837 } 1838 } 1839 1840 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout)); 1841 1842 ata_port_flush_task(ap); 1843 1844 if (!rc) { 1845 spin_lock_irqsave(ap->lock, flags); 1846 1847 /* We're racing with irq here. If we lose, the 1848 * following test prevents us from completing the qc 1849 * twice. If we win, the port is frozen and will be 1850 * cleaned up by ->post_internal_cmd(). 
1851 */ 1852 if (qc->flags & ATA_QCFLAG_ACTIVE) { 1853 qc->err_mask |= AC_ERR_TIMEOUT; 1854 1855 if (ap->ops->error_handler) 1856 ata_port_freeze(ap); 1857 else 1858 ata_qc_complete(qc); 1859 1860 if (ata_msg_warn(ap)) 1861 ata_dev_printk(dev, KERN_WARNING, 1862 "qc timeout (cmd 0x%x)\n", command); 1863 } 1864 1865 spin_unlock_irqrestore(ap->lock, flags); 1866 } 1867 1868 /* do post_internal_cmd */ 1869 if (ap->ops->post_internal_cmd) 1870 ap->ops->post_internal_cmd(qc); 1871 1872 /* perform minimal error analysis */ 1873 if (qc->flags & ATA_QCFLAG_FAILED) { 1874 if (qc->result_tf.command & (ATA_ERR | ATA_DF)) 1875 qc->err_mask |= AC_ERR_DEV; 1876 1877 if (!qc->err_mask) 1878 qc->err_mask |= AC_ERR_OTHER; 1879 1880 if (qc->err_mask & ~AC_ERR_OTHER) 1881 qc->err_mask &= ~AC_ERR_OTHER; 1882 } 1883 1884 /* finish up */ 1885 spin_lock_irqsave(ap->lock, flags); 1886 1887 *tf = qc->result_tf; 1888 err_mask = qc->err_mask; 1889 1890 ata_qc_free(qc); 1891 link->active_tag = preempted_tag; 1892 link->sactive = preempted_sactive; 1893 ap->qc_active = preempted_qc_active; 1894 ap->nr_active_links = preempted_nr_active_links; 1895 1896 /* XXX - Some LLDDs (sata_mv) disable port on command failure. 1897 * Until those drivers are fixed, we detect the condition 1898 * here, fail the command with AC_ERR_SYSTEM and reenable the 1899 * port. 1900 * 1901 * Note that this doesn't change any behavior as internal 1902 * command failure results in disabling the device in the 1903 * higher layer for LLDDs without new reset/EH callbacks. 1904 * 1905 * Kill the following code as soon as those drivers are fixed. 1906 */ 1907 if (ap->flags & ATA_FLAG_DISABLED) { 1908 err_mask |= AC_ERR_SYSTEM; 1909 ata_port_probe(ap); 1910 } 1911 1912 spin_unlock_irqrestore(ap->lock, flags); 1913 1914 if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout) 1915 ata_internal_cmd_timed_out(dev, command); 1916 1917 return err_mask; 1918 } 1919 1920 /** 1921 * ata_exec_internal - execute libata internal command 1922 * @dev: Device to which the command is sent 1923 * @tf: Taskfile registers for the command and the result 1924 * @cdb: CDB for packet command 1925 * @dma_dir: Data tranfer direction of the command 1926 * @buf: Data buffer of the command 1927 * @buflen: Length of data buffer 1928 * @timeout: Timeout in msecs (0 for default) 1929 * 1930 * Wrapper around ata_exec_internal_sg() which takes simple 1931 * buffer instead of sg list. 1932 * 1933 * LOCKING: 1934 * None. Should be called with kernel context, might sleep. 1935 * 1936 * RETURNS: 1937 * Zero on success, AC_ERR_* mask on failure 1938 */ 1939 unsigned ata_exec_internal(struct ata_device *dev, 1940 struct ata_taskfile *tf, const u8 *cdb, 1941 int dma_dir, void *buf, unsigned int buflen, 1942 unsigned long timeout) 1943 { 1944 struct scatterlist *psg = NULL, sg; 1945 unsigned int n_elem = 0; 1946 1947 if (dma_dir != DMA_NONE) { 1948 WARN_ON(!buf); 1949 sg_init_one(&sg, buf, buflen); 1950 psg = &sg; 1951 n_elem++; 1952 } 1953 1954 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem, 1955 timeout); 1956 } 1957 1958 /** 1959 * ata_do_simple_cmd - execute simple internal command 1960 * @dev: Device to which the command is sent 1961 * @cmd: Opcode to execute 1962 * 1963 * Execute a 'simple' command, that only consists of the opcode 1964 * 'cmd' itself, without filling any other registers 1965 * 1966 * LOCKING: 1967 * Kernel thread context (may sleep). 
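 *
 * Usage sketch (the command choice is illustrative):
 *
 *	err_mask = ata_do_simple_cmd(dev, ATA_CMD_STANDBYNOW1);
 *
 * i.e. suited to register-only commands such as the power management
 * opcodes issued from the suspend/resume paths.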
1968 * 1969 * RETURNS: 1970 * Zero on success, AC_ERR_* mask on failure 1971 */ 1972 unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd) 1973 { 1974 struct ata_taskfile tf; 1975 1976 ata_tf_init(dev, &tf); 1977 1978 tf.command = cmd; 1979 tf.flags |= ATA_TFLAG_DEVICE; 1980 tf.protocol = ATA_PROT_NODATA; 1981 1982 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 1983 } 1984 1985 /** 1986 * ata_pio_need_iordy - check if iordy needed 1987 * @adev: ATA device 1988 * 1989 * Check if the current speed of the device requires IORDY. Used 1990 * by various controllers for chip configuration. 1991 */ 1992 1993 unsigned int ata_pio_need_iordy(const struct ata_device *adev) 1994 { 1995 /* Controller doesn't support IORDY. Probably a pointless check 1996 as the caller should know this */ 1997 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY) 1998 return 0; 1999 /* CF spec. r4.1 Table 22 says no iordy on PIO5 and PIO6. */ 2000 if (ata_id_is_cfa(adev->id) 2001 && (adev->pio_mode == XFER_PIO_5 || adev->pio_mode == XFER_PIO_6)) 2002 return 0; 2003 /* PIO3 and higher it is mandatory */ 2004 if (adev->pio_mode > XFER_PIO_2) 2005 return 1; 2006 /* We turn it on when possible */ 2007 if (ata_id_has_iordy(adev->id)) 2008 return 1; 2009 return 0; 2010 } 2011 2012 /** 2013 * ata_pio_mask_no_iordy - Return the non IORDY mask 2014 * @adev: ATA device 2015 * 2016 * Compute the highest mode possible if we are not using iordy. Return 2017 * -1 if no iordy mode is available. 2018 */ 2019 2020 static u32 ata_pio_mask_no_iordy(const struct ata_device *adev) 2021 { 2022 /* If we have no drive specific rule, then PIO 2 is non IORDY */ 2023 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */ 2024 u16 pio = adev->id[ATA_ID_EIDE_PIO]; 2025 /* Is the speed faster than the drive allows non IORDY ? */ 2026 if (pio) { 2027 /* This is cycle times not frequency - watch the logic! */ 2028 if (pio > 240) /* PIO2 is 240nS per cycle */ 2029 return 3 << ATA_SHIFT_PIO; 2030 return 7 << ATA_SHIFT_PIO; 2031 } 2032 } 2033 return 3 << ATA_SHIFT_PIO; 2034 } 2035 2036 /** 2037 * ata_do_dev_read_id - default ID read method 2038 * @dev: device 2039 * @tf: proposed taskfile 2040 * @id: data buffer 2041 * 2042 * Issue the identify taskfile and hand back the buffer containing 2043 * identify data. For some RAID controllers and for pre ATA devices 2044 * this function is wrapped or replaced by the driver 2045 */ 2046 unsigned int ata_do_dev_read_id(struct ata_device *dev, 2047 struct ata_taskfile *tf, u16 *id) 2048 { 2049 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE, 2050 id, sizeof(id[0]) * ATA_ID_WORDS, 0); 2051 } 2052 2053 /** 2054 * ata_dev_read_id - Read ID data from the specified device 2055 * @dev: target device 2056 * @p_class: pointer to class of the target device (may be changed) 2057 * @flags: ATA_READID_* flags 2058 * @id: buffer to read IDENTIFY data into 2059 * 2060 * Read ID data from the specified device. ATA_CMD_ID_ATA is 2061 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI 2062 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS 2063 * for pre-ATA4 drives. 2064 * 2065 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right 2066 * now we abort if we hit that case. 2067 * 2068 * LOCKING: 2069 * Kernel thread context (may sleep) 2070 * 2071 * RETURNS: 2072 * 0 on success, -errno otherwise. 
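 *
 * Typical call shape from the EH probe/revalidate paths (sketch only;
 * the flags vary by caller):
 *
 *	unsigned int class = dev->class;
 *	int rc = ata_dev_read_id(dev, &class, ATA_READID_POSTRESET, dev->id);
 *
 * with @p_class both choosing which IDENTIFY flavour is tried first
 * and reporting back what the device turned out to be.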
2073 */ 2074 int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class, 2075 unsigned int flags, u16 *id) 2076 { 2077 struct ata_port *ap = dev->link->ap; 2078 unsigned int class = *p_class; 2079 struct ata_taskfile tf; 2080 unsigned int err_mask = 0; 2081 const char *reason; 2082 int may_fallback = 1, tried_spinup = 0; 2083 int rc; 2084 2085 if (ata_msg_ctl(ap)) 2086 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); 2087 2088 retry: 2089 ata_tf_init(dev, &tf); 2090 2091 switch (class) { 2092 case ATA_DEV_ATA: 2093 tf.command = ATA_CMD_ID_ATA; 2094 break; 2095 case ATA_DEV_ATAPI: 2096 tf.command = ATA_CMD_ID_ATAPI; 2097 break; 2098 default: 2099 rc = -ENODEV; 2100 reason = "unsupported class"; 2101 goto err_out; 2102 } 2103 2104 tf.protocol = ATA_PROT_PIO; 2105 2106 /* Some devices choke if TF registers contain garbage. Make 2107 * sure those are properly initialized. 2108 */ 2109 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 2110 2111 /* Device presence detection is unreliable on some 2112 * controllers. Always poll IDENTIFY if available. 2113 */ 2114 tf.flags |= ATA_TFLAG_POLLING; 2115 2116 if (ap->ops->read_id) 2117 err_mask = ap->ops->read_id(dev, &tf, id); 2118 else 2119 err_mask = ata_do_dev_read_id(dev, &tf, id); 2120 2121 if (err_mask) { 2122 if (err_mask & AC_ERR_NODEV_HINT) { 2123 ata_dev_printk(dev, KERN_DEBUG, 2124 "NODEV after polling detection\n"); 2125 return -ENOENT; 2126 } 2127 2128 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) { 2129 /* Device or controller might have reported 2130 * the wrong device class. Give a shot at the 2131 * other IDENTIFY if the current one is 2132 * aborted by the device. 2133 */ 2134 if (may_fallback) { 2135 may_fallback = 0; 2136 2137 if (class == ATA_DEV_ATA) 2138 class = ATA_DEV_ATAPI; 2139 else 2140 class = ATA_DEV_ATA; 2141 goto retry; 2142 } 2143 2144 /* Control reaches here iff the device aborted 2145 * both flavors of IDENTIFYs which happens 2146 * sometimes with phantom devices. 2147 */ 2148 ata_dev_printk(dev, KERN_DEBUG, 2149 "both IDENTIFYs aborted, assuming NODEV\n"); 2150 return -ENOENT; 2151 } 2152 2153 rc = -EIO; 2154 reason = "I/O error"; 2155 goto err_out; 2156 } 2157 2158 /* Falling back doesn't make sense if ID data was read 2159 * successfully at least once. 2160 */ 2161 may_fallback = 0; 2162 2163 swap_buf_le16(id, ATA_ID_WORDS); 2164 2165 /* sanity check */ 2166 rc = -EINVAL; 2167 reason = "device reports invalid type"; 2168 2169 if (class == ATA_DEV_ATA) { 2170 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) 2171 goto err_out; 2172 } else { 2173 if (ata_id_is_ata(id)) 2174 goto err_out; 2175 } 2176 2177 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) { 2178 tried_spinup = 1; 2179 /* 2180 * Drive powered-up in standby mode, and requires a specific 2181 * SET_FEATURES spin-up subcommand before it will accept 2182 * anything other than the original IDENTIFY command. 2183 */ 2184 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0); 2185 if (err_mask && id[2] != 0x738c) { 2186 rc = -EIO; 2187 reason = "SPINUP failed"; 2188 goto err_out; 2189 } 2190 /* 2191 * If the drive initially returned incomplete IDENTIFY info, 2192 * we now must reissue the IDENTIFY command. 
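 *
 * (The magic numbers are the IDENTIFY "specific configuration" word:
 * 0x37c8 means spin-up is required and the IDENTIFY data is incomplete,
 * while 0x738c means spin-up is required but the data is already
 * complete, which is why only the 0x37c8 case needs the retry below.)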
2193 */ 2194 if (id[2] == 0x37c8) 2195 goto retry; 2196 } 2197 2198 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { 2199 /* 2200 * The exact sequence expected by certain pre-ATA4 drives is: 2201 * SRST RESET 2202 * IDENTIFY (optional in early ATA) 2203 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA) 2204 * anything else.. 2205 * Some drives were very specific about that exact sequence. 2206 * 2207 * Note that ATA4 says lba is mandatory so the second check 2208 * should never trigger. 2209 */ 2210 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) { 2211 err_mask = ata_dev_init_params(dev, id[3], id[6]); 2212 if (err_mask) { 2213 rc = -EIO; 2214 reason = "INIT_DEV_PARAMS failed"; 2215 goto err_out; 2216 } 2217 2218 /* current CHS translation info (id[53-58]) might be 2219 * changed. Reread the identify device info. 2220 */ 2221 flags &= ~ATA_READID_POSTRESET; 2222 goto retry; 2223 } 2224 } 2225 2226 *p_class = class; 2227 2228 return 0; 2229 2230 err_out: 2231 if (ata_msg_warn(ap)) 2232 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY " 2233 "(%s, err_mask=0x%x)\n", reason, err_mask); 2234 return rc; 2235 } 2236 2237 static int ata_do_link_spd_horkage(struct ata_device *dev) 2238 { 2239 struct ata_link *plink = ata_dev_phys_link(dev); 2240 u32 target, target_limit; 2241 2242 if (!sata_scr_valid(plink)) 2243 return 0; 2244 2245 if (dev->horkage & ATA_HORKAGE_1_5_GBPS) 2246 target = 1; 2247 else 2248 return 0; 2249 2250 target_limit = (1 << target) - 1; 2251 2252 /* if already on stricter limit, no need to push further */ 2253 if (plink->sata_spd_limit <= target_limit) 2254 return 0; 2255 2256 plink->sata_spd_limit = target_limit; 2257 2258 /* Request another EH round by returning -EAGAIN if link is 2259 * going faster than the target speed. Forward progress is 2260 * guaranteed by setting sata_spd_limit to target_limit above. 2261 */ 2262 if (plink->sata_spd > target) { 2263 ata_dev_printk(dev, KERN_INFO, 2264 "applying link speed limit horkage to %s\n", 2265 sata_spd_string(target)); 2266 return -EAGAIN; 2267 } 2268 return 0; 2269 } 2270 2271 static inline u8 ata_dev_knobble(struct ata_device *dev) 2272 { 2273 struct ata_port *ap = dev->link->ap; 2274 2275 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_BRIDGE_OK) 2276 return 0; 2277 2278 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id))); 2279 } 2280 2281 static void ata_dev_config_ncq(struct ata_device *dev, 2282 char *desc, size_t desc_sz) 2283 { 2284 struct ata_port *ap = dev->link->ap; 2285 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id); 2286 2287 if (!ata_id_has_ncq(dev->id)) { 2288 desc[0] = '\0'; 2289 return; 2290 } 2291 if (dev->horkage & ATA_HORKAGE_NONCQ) { 2292 snprintf(desc, desc_sz, "NCQ (not used)"); 2293 return; 2294 } 2295 if (ap->flags & ATA_FLAG_NCQ) { 2296 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1); 2297 dev->flags |= ATA_DFLAG_NCQ; 2298 } 2299 2300 if (hdepth >= ddepth) 2301 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth); 2302 else 2303 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth); 2304 } 2305 2306 /** 2307 * ata_dev_configure - Configure the specified ATA/ATAPI device 2308 * @dev: Target device to configure 2309 * 2310 * Configure @dev according to @dev->id. Generic and low-level 2311 * driver specific fixups are also applied.
2312 * 2313 * LOCKING: 2314 * Kernel thread context (may sleep) 2315 * 2316 * RETURNS: 2317 * 0 on success, -errno otherwise 2318 */ 2319 int ata_dev_configure(struct ata_device *dev) 2320 { 2321 struct ata_port *ap = dev->link->ap; 2322 struct ata_eh_context *ehc = &dev->link->eh_context; 2323 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO; 2324 const u16 *id = dev->id; 2325 unsigned long xfer_mask; 2326 char revbuf[7]; /* XYZ-99\0 */ 2327 char fwrevbuf[ATA_ID_FW_REV_LEN+1]; 2328 char modelbuf[ATA_ID_PROD_LEN+1]; 2329 int rc; 2330 2331 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) { 2332 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n", 2333 __func__); 2334 return 0; 2335 } 2336 2337 if (ata_msg_probe(ap)) 2338 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__); 2339 2340 /* set horkage */ 2341 dev->horkage |= ata_dev_blacklisted(dev); 2342 ata_force_horkage(dev); 2343 2344 if (dev->horkage & ATA_HORKAGE_DISABLE) { 2345 ata_dev_printk(dev, KERN_INFO, 2346 "unsupported device, disabling\n"); 2347 ata_dev_disable(dev); 2348 return 0; 2349 } 2350 2351 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) && 2352 dev->class == ATA_DEV_ATAPI) { 2353 ata_dev_printk(dev, KERN_WARNING, 2354 "WARNING: ATAPI is %s, device ignored.\n", 2355 atapi_enabled ? "not supported with this driver" 2356 : "disabled"); 2357 ata_dev_disable(dev); 2358 return 0; 2359 } 2360 2361 rc = ata_do_link_spd_horkage(dev); 2362 if (rc) 2363 return rc; 2364 2365 /* let ACPI work its magic */ 2366 rc = ata_acpi_on_devcfg(dev); 2367 if (rc) 2368 return rc; 2369 2370 /* massage HPA, do it early as it might change IDENTIFY data */ 2371 rc = ata_hpa_resize(dev); 2372 if (rc) 2373 return rc; 2374 2375 /* print device capabilities */ 2376 if (ata_msg_probe(ap)) 2377 ata_dev_printk(dev, KERN_DEBUG, 2378 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x " 2379 "85:%04x 86:%04x 87:%04x 88:%04x\n", 2380 __func__, 2381 id[49], id[82], id[83], id[84], 2382 id[85], id[86], id[87], id[88]); 2383 2384 /* initialize to-be-configured parameters */ 2385 dev->flags &= ~ATA_DFLAG_CFG_MASK; 2386 dev->max_sectors = 0; 2387 dev->cdb_len = 0; 2388 dev->n_sectors = 0; 2389 dev->cylinders = 0; 2390 dev->heads = 0; 2391 dev->sectors = 0; 2392 2393 /* 2394 * common ATA, ATAPI feature tests 2395 */ 2396 2397 /* find max transfer mode; for printk only */ 2398 xfer_mask = ata_id_xfermask(id); 2399 2400 if (ata_msg_probe(ap)) 2401 ata_dump_id(id); 2402 2403 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */ 2404 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV, 2405 sizeof(fwrevbuf)); 2406 2407 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, 2408 sizeof(modelbuf)); 2409 2410 /* ATA-specific feature tests */ 2411 if (dev->class == ATA_DEV_ATA) { 2412 if (ata_id_is_cfa(id)) { 2413 if (id[162] & 1) /* CPRM may make this media unusable */ 2414 ata_dev_printk(dev, KERN_WARNING, 2415 "supports DRM functions and may " 2416 "not be fully accessible.\n"); 2417 snprintf(revbuf, 7, "CFA"); 2418 } else { 2419 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id)); 2420 /* Warn the user if the device has TPM extensions */ 2421 if (ata_id_has_tpm(id)) 2422 ata_dev_printk(dev, KERN_WARNING, 2423 "supports DRM functions and may " 2424 "not be fully accessible.\n"); 2425 } 2426 2427 dev->n_sectors = ata_id_n_sectors(id); 2428 2429 if (dev->id[59] & 0x100) 2430 dev->multi_count = dev->id[59] & 0xff; 2431 2432 if (ata_id_has_lba(id)) { 2433 const char *lba_desc; 2434 char ncq_desc[20]; 2435 2436 lba_desc = "LBA"; 2437 dev->flags |=
ATA_DFLAG_LBA; 2438 if (ata_id_has_lba48(id)) { 2439 dev->flags |= ATA_DFLAG_LBA48; 2440 lba_desc = "LBA48"; 2441 2442 if (dev->n_sectors >= (1UL << 28) && 2443 ata_id_has_flush_ext(id)) 2444 dev->flags |= ATA_DFLAG_FLUSH_EXT; 2445 } 2446 2447 /* config NCQ */ 2448 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc)); 2449 2450 /* print device info to dmesg */ 2451 if (ata_msg_drv(ap) && print_info) { 2452 ata_dev_printk(dev, KERN_INFO, 2453 "%s: %s, %s, max %s\n", 2454 revbuf, modelbuf, fwrevbuf, 2455 ata_mode_string(xfer_mask)); 2456 ata_dev_printk(dev, KERN_INFO, 2457 "%Lu sectors, multi %u: %s %s\n", 2458 (unsigned long long)dev->n_sectors, 2459 dev->multi_count, lba_desc, ncq_desc); 2460 } 2461 } else { 2462 /* CHS */ 2463 2464 /* Default translation */ 2465 dev->cylinders = id[1]; 2466 dev->heads = id[3]; 2467 dev->sectors = id[6]; 2468 2469 if (ata_id_current_chs_valid(id)) { 2470 /* Current CHS translation is valid. */ 2471 dev->cylinders = id[54]; 2472 dev->heads = id[55]; 2473 dev->sectors = id[56]; 2474 } 2475 2476 /* print device info to dmesg */ 2477 if (ata_msg_drv(ap) && print_info) { 2478 ata_dev_printk(dev, KERN_INFO, 2479 "%s: %s, %s, max %s\n", 2480 revbuf, modelbuf, fwrevbuf, 2481 ata_mode_string(xfer_mask)); 2482 ata_dev_printk(dev, KERN_INFO, 2483 "%Lu sectors, multi %u, CHS %u/%u/%u\n", 2484 (unsigned long long)dev->n_sectors, 2485 dev->multi_count, dev->cylinders, 2486 dev->heads, dev->sectors); 2487 } 2488 } 2489 2490 dev->cdb_len = 16; 2491 } 2492 2493 /* ATAPI-specific feature tests */ 2494 else if (dev->class == ATA_DEV_ATAPI) { 2495 const char *cdb_intr_string = ""; 2496 const char *atapi_an_string = ""; 2497 const char *dma_dir_string = ""; 2498 u32 sntf; 2499 2500 rc = atapi_cdb_len(id); 2501 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) { 2502 if (ata_msg_warn(ap)) 2503 ata_dev_printk(dev, KERN_WARNING, 2504 "unsupported CDB len\n"); 2505 rc = -EINVAL; 2506 goto err_out_nosup; 2507 } 2508 dev->cdb_len = (unsigned int) rc; 2509 2510 /* Enable ATAPI AN if both the host and device have 2511 * the support. If PMP is attached, SNTF is required 2512 * to enable ATAPI AN to discern between PHY status 2513 * changed notifications and ATAPI ANs. 
2514 */ 2515 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) && 2516 (!sata_pmp_attached(ap) || 2517 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) { 2518 unsigned int err_mask; 2519 2520 /* issue SET feature command to turn this on */ 2521 err_mask = ata_dev_set_feature(dev, 2522 SETFEATURES_SATA_ENABLE, SATA_AN); 2523 if (err_mask) 2524 ata_dev_printk(dev, KERN_ERR, 2525 "failed to enable ATAPI AN " 2526 "(err_mask=0x%x)\n", err_mask); 2527 else { 2528 dev->flags |= ATA_DFLAG_AN; 2529 atapi_an_string = ", ATAPI AN"; 2530 } 2531 } 2532 2533 if (ata_id_cdb_intr(dev->id)) { 2534 dev->flags |= ATA_DFLAG_CDB_INTR; 2535 cdb_intr_string = ", CDB intr"; 2536 } 2537 2538 if (atapi_dmadir || atapi_id_dmadir(dev->id)) { 2539 dev->flags |= ATA_DFLAG_DMADIR; 2540 dma_dir_string = ", DMADIR"; 2541 } 2542 2543 /* print device info to dmesg */ 2544 if (ata_msg_drv(ap) && print_info) 2545 ata_dev_printk(dev, KERN_INFO, 2546 "ATAPI: %s, %s, max %s%s%s%s\n", 2547 modelbuf, fwrevbuf, 2548 ata_mode_string(xfer_mask), 2549 cdb_intr_string, atapi_an_string, 2550 dma_dir_string); 2551 } 2552 2553 /* determine max_sectors */ 2554 dev->max_sectors = ATA_MAX_SECTORS; 2555 if (dev->flags & ATA_DFLAG_LBA48) 2556 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2557 2558 if (!(dev->horkage & ATA_HORKAGE_IPM)) { 2559 if (ata_id_has_hipm(dev->id)) 2560 dev->flags |= ATA_DFLAG_HIPM; 2561 if (ata_id_has_dipm(dev->id)) 2562 dev->flags |= ATA_DFLAG_DIPM; 2563 } 2564 2565 /* Limit PATA drive on SATA cable bridge transfers to udma5, 2566 200 sectors */ 2567 if (ata_dev_knobble(dev)) { 2568 if (ata_msg_drv(ap) && print_info) 2569 ata_dev_printk(dev, KERN_INFO, 2570 "applying bridge limits\n"); 2571 dev->udma_mask &= ATA_UDMA5; 2572 dev->max_sectors = ATA_MAX_SECTORS; 2573 } 2574 2575 if ((dev->class == ATA_DEV_ATAPI) && 2576 (atapi_command_packet_set(id) == TYPE_TAPE)) { 2577 dev->max_sectors = ATA_MAX_SECTORS_TAPE; 2578 dev->horkage |= ATA_HORKAGE_STUCK_ERR; 2579 } 2580 2581 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128) 2582 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2583 dev->max_sectors); 2584 2585 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) { 2586 dev->horkage |= ATA_HORKAGE_IPM; 2587 2588 /* reset link pm_policy for this port to no pm */ 2589 ap->pm_policy = MAX_PERFORMANCE; 2590 } 2591 2592 if (ap->ops->dev_config) 2593 ap->ops->dev_config(dev); 2594 2595 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) { 2596 /* Let the user know. We don't want to disallow opens for 2597 rescue purposes, or in case the vendor is just a blithering 2598 idiot. Do this after the dev_config call as some controllers 2599 with buggy firmware may want to avoid reporting false device 2600 bugs */ 2601 2602 if (print_info) { 2603 ata_dev_printk(dev, KERN_WARNING, 2604 "Drive reports diagnostics failure. This may indicate a drive\n"); 2605 ata_dev_printk(dev, KERN_WARNING, 2606 "fault or invalid emulation. 
Contact drive vendor for information.\n"); 2607 } 2608 } 2609 2610 if ((dev->horkage & ATA_HORKAGE_FIRMWARE_WARN) && print_info) { 2611 ata_dev_printk(dev, KERN_WARNING, "WARNING: device requires " 2612 "firmware update to be fully functional.\n"); 2613 ata_dev_printk(dev, KERN_WARNING, " contact the vendor " 2614 "or visit http://ata.wiki.kernel.org.\n"); 2615 } 2616 2617 return 0; 2618 2619 err_out_nosup: 2620 if (ata_msg_probe(ap)) 2621 ata_dev_printk(dev, KERN_DEBUG, 2622 "%s: EXIT, err\n", __func__); 2623 return rc; 2624 } 2625 2626 /** 2627 * ata_cable_40wire - return 40 wire cable type 2628 * @ap: port 2629 * 2630 * Helper method for drivers which want to hardwire 40 wire cable 2631 * detection. 2632 */ 2633 2634 int ata_cable_40wire(struct ata_port *ap) 2635 { 2636 return ATA_CBL_PATA40; 2637 } 2638 2639 /** 2640 * ata_cable_80wire - return 80 wire cable type 2641 * @ap: port 2642 * 2643 * Helper method for drivers which want to hardwire 80 wire cable 2644 * detection. 2645 */ 2646 2647 int ata_cable_80wire(struct ata_port *ap) 2648 { 2649 return ATA_CBL_PATA80; 2650 } 2651 2652 /** 2653 * ata_cable_unknown - return unknown PATA cable. 2654 * @ap: port 2655 * 2656 * Helper method for drivers which have no PATA cable detection. 2657 */ 2658 2659 int ata_cable_unknown(struct ata_port *ap) 2660 { 2661 return ATA_CBL_PATA_UNK; 2662 } 2663 2664 /** 2665 * ata_cable_ignore - return ignored PATA cable. 2666 * @ap: port 2667 * 2668 * Helper method for drivers which don't use cable type to limit 2669 * transfer mode. 2670 */ 2671 int ata_cable_ignore(struct ata_port *ap) 2672 { 2673 return ATA_CBL_PATA_IGN; 2674 } 2675 2676 /** 2677 * ata_cable_sata - return SATA cable type 2678 * @ap: port 2679 * 2680 * Helper method for drivers which have SATA cables 2681 */ 2682 2683 int ata_cable_sata(struct ata_port *ap) 2684 { 2685 return ATA_CBL_SATA; 2686 } 2687 2688 /** 2689 * ata_bus_probe - Reset and probe ATA bus 2690 * @ap: Bus to probe 2691 * 2692 * Master ATA bus probing function. Initiates a hardware-dependent 2693 * bus reset, then attempts to identify any devices found on 2694 * the bus. 2695 * 2696 * LOCKING: 2697 * PCI/etc. bus probe sem. 2698 * 2699 * RETURNS: 2700 * Zero on success, negative errno otherwise. 2701 */ 2702 2703 int ata_bus_probe(struct ata_port *ap) 2704 { 2705 unsigned int classes[ATA_MAX_DEVICES]; 2706 int tries[ATA_MAX_DEVICES]; 2707 int rc; 2708 struct ata_device *dev; 2709 2710 ata_port_probe(ap); 2711 2712 ata_for_each_dev(dev, &ap->link, ALL) 2713 tries[dev->devno] = ATA_PROBE_MAX_TRIES; 2714 2715 retry: 2716 ata_for_each_dev(dev, &ap->link, ALL) { 2717 /* If we issue an SRST then an ATA drive (not ATAPI) 2718 * may change configuration and be in PIO0 timing. If 2719 * we do a hard reset (or are coming from power on) 2720 * this is true for ATA or ATAPI. Until we've set a 2721 * suitable controller mode we should not touch the 2722 * bus as we may be talking too fast. 2723 */ 2724 dev->pio_mode = XFER_PIO_0; 2725 2726 /* If the controller has a pio mode setup function 2727 * then use it to set the chipset to rights. Don't 2728 * touch the DMA setup as that will be dealt with when 2729 * configuring devices. 
2730 */ 2731 if (ap->ops->set_piomode) 2732 ap->ops->set_piomode(ap, dev); 2733 } 2734 2735 /* reset and determine device classes */ 2736 ap->ops->phy_reset(ap); 2737 2738 ata_for_each_dev(dev, &ap->link, ALL) { 2739 if (!(ap->flags & ATA_FLAG_DISABLED) && 2740 dev->class != ATA_DEV_UNKNOWN) 2741 classes[dev->devno] = dev->class; 2742 else 2743 classes[dev->devno] = ATA_DEV_NONE; 2744 2745 dev->class = ATA_DEV_UNKNOWN; 2746 } 2747 2748 ata_port_probe(ap); 2749 2750 /* read IDENTIFY page and configure devices. We have to do the identify 2751 specific sequence bass-ackwards so that PDIAG- is released by 2752 the slave device */ 2753 2754 ata_for_each_dev(dev, &ap->link, ALL_REVERSE) { 2755 if (tries[dev->devno]) 2756 dev->class = classes[dev->devno]; 2757 2758 if (!ata_dev_enabled(dev)) 2759 continue; 2760 2761 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET, 2762 dev->id); 2763 if (rc) 2764 goto fail; 2765 } 2766 2767 /* Now ask for the cable type as PDIAG- should have been released */ 2768 if (ap->ops->cable_detect) 2769 ap->cbl = ap->ops->cable_detect(ap); 2770 2771 /* We may have SATA bridge glue hiding here irrespective of 2772 * the reported cable types and sensed types. When SATA 2773 * drives indicate we have a bridge, we don't know which end 2774 * of the link the bridge is which is a problem. 2775 */ 2776 ata_for_each_dev(dev, &ap->link, ENABLED) 2777 if (ata_id_is_sata(dev->id)) 2778 ap->cbl = ATA_CBL_SATA; 2779 2780 /* After the identify sequence we can now set up the devices. We do 2781 this in the normal order so that the user doesn't get confused */ 2782 2783 ata_for_each_dev(dev, &ap->link, ENABLED) { 2784 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO; 2785 rc = ata_dev_configure(dev); 2786 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO; 2787 if (rc) 2788 goto fail; 2789 } 2790 2791 /* configure transfer mode */ 2792 rc = ata_set_mode(&ap->link, &dev); 2793 if (rc) 2794 goto fail; 2795 2796 ata_for_each_dev(dev, &ap->link, ENABLED) 2797 return 0; 2798 2799 /* no device present, disable port */ 2800 ata_port_disable(ap); 2801 return -ENODEV; 2802 2803 fail: 2804 tries[dev->devno]--; 2805 2806 switch (rc) { 2807 case -EINVAL: 2808 /* eeek, something went very wrong, give up */ 2809 tries[dev->devno] = 0; 2810 break; 2811 2812 case -ENODEV: 2813 /* give it just one more chance */ 2814 tries[dev->devno] = min(tries[dev->devno], 1); 2815 case -EIO: 2816 if (tries[dev->devno] == 1) { 2817 /* This is the last chance, better to slow 2818 * down than lose it. 2819 */ 2820 sata_down_spd_limit(&ap->link, 0); 2821 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO); 2822 } 2823 } 2824 2825 if (!tries[dev->devno]) 2826 ata_dev_disable(dev); 2827 2828 goto retry; 2829 } 2830 2831 /** 2832 * ata_port_probe - Mark port as enabled 2833 * @ap: Port for which we indicate enablement 2834 * 2835 * Modify @ap data structure such that the system 2836 * thinks that the entire port is enabled. 2837 * 2838 * LOCKING: host lock, or some other form of 2839 * serialization. 2840 */ 2841 2842 void ata_port_probe(struct ata_port *ap) 2843 { 2844 ap->flags &= ~ATA_FLAG_DISABLED; 2845 } 2846 2847 /** 2848 * sata_print_link_status - Print SATA link status 2849 * @link: SATA link to printk link status about 2850 * 2851 * This function prints link speed and status of a SATA link. 2852 * 2853 * LOCKING: 2854 * None. 
2855 */ 2856 static void sata_print_link_status(struct ata_link *link) 2857 { 2858 u32 sstatus, scontrol, tmp; 2859 2860 if (sata_scr_read(link, SCR_STATUS, &sstatus)) 2861 return; 2862 sata_scr_read(link, SCR_CONTROL, &scontrol); 2863 2864 if (ata_phys_link_online(link)) { 2865 tmp = (sstatus >> 4) & 0xf; 2866 ata_link_printk(link, KERN_INFO, 2867 "SATA link up %s (SStatus %X SControl %X)\n", 2868 sata_spd_string(tmp), sstatus, scontrol); 2869 } else { 2870 ata_link_printk(link, KERN_INFO, 2871 "SATA link down (SStatus %X SControl %X)\n", 2872 sstatus, scontrol); 2873 } 2874 } 2875 2876 /** 2877 * ata_dev_pair - return other device on cable 2878 * @adev: device 2879 * 2880 * Obtain the other device on the same cable, or if none is 2881 * present NULL is returned 2882 */ 2883 2884 struct ata_device *ata_dev_pair(struct ata_device *adev) 2885 { 2886 struct ata_link *link = adev->link; 2887 struct ata_device *pair = &link->device[1 - adev->devno]; 2888 if (!ata_dev_enabled(pair)) 2889 return NULL; 2890 return pair; 2891 } 2892 2893 /** 2894 * ata_port_disable - Disable port. 2895 * @ap: Port to be disabled. 2896 * 2897 * Modify @ap data structure such that the system 2898 * thinks that the entire port is disabled, and should 2899 * never attempt to probe or communicate with devices 2900 * on this port. 2901 * 2902 * LOCKING: host lock, or some other form of 2903 * serialization. 2904 */ 2905 2906 void ata_port_disable(struct ata_port *ap) 2907 { 2908 ap->link.device[0].class = ATA_DEV_NONE; 2909 ap->link.device[1].class = ATA_DEV_NONE; 2910 ap->flags |= ATA_FLAG_DISABLED; 2911 } 2912 2913 /** 2914 * sata_down_spd_limit - adjust SATA spd limit downward 2915 * @link: Link to adjust SATA spd limit for 2916 * @spd_limit: Additional limit 2917 * 2918 * Adjust SATA spd limit of @link downward. Note that this 2919 * function only adjusts the limit. The change must be applied 2920 * using sata_set_spd(). 2921 * 2922 * If @spd_limit is non-zero, the speed is limited to equal to or 2923 * lower than @spd_limit if such speed is supported. If 2924 * @spd_limit is slower than any supported speed, only the lowest 2925 * supported speed is allowed. 2926 * 2927 * LOCKING: 2928 * Inherited from caller. 2929 * 2930 * RETURNS: 2931 * 0 on success, negative errno on failure 2932 */ 2933 int sata_down_spd_limit(struct ata_link *link, u32 spd_limit) 2934 { 2935 u32 sstatus, spd, mask; 2936 int rc, bit; 2937 2938 if (!sata_scr_valid(link)) 2939 return -EOPNOTSUPP; 2940 2941 /* If SCR can be read, use it to determine the current SPD. 2942 * If not, use cached value in link->sata_spd. 2943 */ 2944 rc = sata_scr_read(link, SCR_STATUS, &sstatus); 2945 if (rc == 0 && ata_sstatus_online(sstatus)) 2946 spd = (sstatus >> 4) & 0xf; 2947 else 2948 spd = link->sata_spd; 2949 2950 mask = link->sata_spd_limit; 2951 if (mask <= 1) 2952 return -EINVAL; 2953 2954 /* unconditionally mask off the highest bit */ 2955 bit = fls(mask) - 1; 2956 mask &= ~(1 << bit); 2957 2958 /* Mask off all speeds higher than or equal to the current 2959 * one. Force 1.5Gbps if current SPD is not available. 2960 */ 2961 if (spd > 1) 2962 mask &= (1 << (spd - 1)) - 1; 2963 else 2964 mask &= 1; 2965 2966 /* were we already at the bottom? 
*/ 2967 if (!mask) 2968 return -EINVAL; 2969 2970 if (spd_limit) { 2971 if (mask & ((1 << spd_limit) - 1)) 2972 mask &= (1 << spd_limit) - 1; 2973 else { 2974 bit = ffs(mask) - 1; 2975 mask = 1 << bit; 2976 } 2977 } 2978 2979 link->sata_spd_limit = mask; 2980 2981 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n", 2982 sata_spd_string(fls(mask))); 2983 2984 return 0; 2985 } 2986 2987 static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol) 2988 { 2989 struct ata_link *host_link = &link->ap->link; 2990 u32 limit, target, spd; 2991 2992 limit = link->sata_spd_limit; 2993 2994 /* Don't configure downstream link faster than upstream link. 2995 * It doesn't speed up anything and some PMPs choke on such 2996 * configuration. 2997 */ 2998 if (!ata_is_host_link(link) && host_link->sata_spd) 2999 limit &= (1 << host_link->sata_spd) - 1; 3000 3001 if (limit == UINT_MAX) 3002 target = 0; 3003 else 3004 target = fls(limit); 3005 3006 spd = (*scontrol >> 4) & 0xf; 3007 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4); 3008 3009 return spd != target; 3010 } 3011 3012 /** 3013 * sata_set_spd_needed - is SATA spd configuration needed 3014 * @link: Link in question 3015 * 3016 * Test whether the spd limit in SControl matches 3017 * @link->sata_spd_limit. This function is used to determine 3018 * whether hardreset is necessary to apply SATA spd 3019 * configuration. 3020 * 3021 * LOCKING: 3022 * Inherited from caller. 3023 * 3024 * RETURNS: 3025 * 1 if SATA spd configuration is needed, 0 otherwise. 3026 */ 3027 static int sata_set_spd_needed(struct ata_link *link) 3028 { 3029 u32 scontrol; 3030 3031 if (sata_scr_read(link, SCR_CONTROL, &scontrol)) 3032 return 1; 3033 3034 return __sata_set_spd_needed(link, &scontrol); 3035 } 3036 3037 /** 3038 * sata_set_spd - set SATA spd according to spd limit 3039 * @link: Link to set SATA spd for 3040 * 3041 * Set SATA spd of @link according to sata_spd_limit. 3042 * 3043 * LOCKING: 3044 * Inherited from caller. 3045 * 3046 * RETURNS: 3047 * 0 if spd doesn't need to be changed, 1 if spd has been 3048 * changed. Negative errno if SCR registers are inaccessible. 3049 */ 3050 int sata_set_spd(struct ata_link *link) 3051 { 3052 u32 scontrol; 3053 int rc; 3054 3055 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3056 return rc; 3057 3058 if (!__sata_set_spd_needed(link, &scontrol)) 3059 return 0; 3060 3061 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3062 return rc; 3063 3064 return 1; 3065 } 3066 3067 /* 3068 * This mode timing computation functionality is ported over from 3069 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik 3070 */ 3071 /* 3072 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds). 3073 * These were taken from ATA/ATAPI-6 standard, rev 0a, except 3074 * for UDMA6, which is currently supported only by Maxtor drives. 3075 * 3076 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0. 
3077 */ 3078 3079 static const struct ata_timing ata_timing[] = { 3080 /* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 0, 960, 0 }, */ 3081 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 0, 600, 0 }, 3082 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 0, 383, 0 }, 3083 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 0, 240, 0 }, 3084 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 0, 180, 0 }, 3085 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 0, 120, 0 }, 3086 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 0, 100, 0 }, 3087 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 0, 80, 0 }, 3088 3089 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 50, 960, 0 }, 3090 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 30, 480, 0 }, 3091 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 20, 240, 0 }, 3092 3093 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 20, 480, 0 }, 3094 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 5, 150, 0 }, 3095 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 5, 120, 0 }, 3096 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 5, 100, 0 }, 3097 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 5, 80, 0 }, 3098 3099 /* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 0, 150 }, */ 3100 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 0, 120 }, 3101 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 0, 80 }, 3102 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 0, 60 }, 3103 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 0, 45 }, 3104 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 0, 30 }, 3105 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, 3106 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, 3107 3108 { 0xFF } 3109 }; 3110 3111 #define ENOUGH(v, unit) (((v)-1)/(unit)+1) 3112 #define EZ(v, unit) ((v)?ENOUGH(v, unit):0) 3113 3114 static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT) 3115 { 3116 q->setup = EZ(t->setup * 1000, T); 3117 q->act8b = EZ(t->act8b * 1000, T); 3118 q->rec8b = EZ(t->rec8b * 1000, T); 3119 q->cyc8b = EZ(t->cyc8b * 1000, T); 3120 q->active = EZ(t->active * 1000, T); 3121 q->recover = EZ(t->recover * 1000, T); 3122 q->dmack_hold = EZ(t->dmack_hold * 1000, T); 3123 q->cycle = EZ(t->cycle * 1000, T); 3124 q->udma = EZ(t->udma * 1000, UT); 3125 } 3126 3127 void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b, 3128 struct ata_timing *m, unsigned int what) 3129 { 3130 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup); 3131 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b); 3132 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b); 3133 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b); 3134 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active); 3135 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover); 3136 if (what & ATA_TIMING_DMACK_HOLD) m->dmack_hold = max(a->dmack_hold, b->dmack_hold); 3137 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle); 3138 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma); 3139 } 3140 3141 const struct ata_timing *ata_timing_find_mode(u8 xfer_mode) 3142 { 3143 const struct ata_timing *t = ata_timing; 3144 3145 while (xfer_mode > t->mode) 3146 t++; 3147 3148 if (xfer_mode == t->mode) 3149 return t; 3150 return NULL; 3151 } 3152 3153 int ata_timing_compute(struct ata_device *adev, unsigned short speed, 3154 struct ata_timing *t, int T, int UT) 3155 { 3156 const struct ata_timing *s; 3157 struct ata_timing p; 3158 3159 /* 3160 * Find the mode. 
3161 */ 3162 3163 if (!(s = ata_timing_find_mode(speed))) 3164 return -EINVAL; 3165 3166 memcpy(t, s, sizeof(*s)); 3167 3168 /* 3169 * If the drive is an EIDE drive, it can tell us it needs extended 3170 * PIO/MW_DMA cycle timing. 3171 */ 3172 3173 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */ 3174 memset(&p, 0, sizeof(p)); 3175 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) { 3176 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO]; 3177 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY]; 3178 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) { 3179 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN]; 3180 } 3181 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B); 3182 } 3183 3184 /* 3185 * Convert the timing to bus clock counts. 3186 */ 3187 3188 ata_timing_quantize(t, t, T, UT); 3189 3190 /* 3191 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY, 3192 * S.M.A.R.T. and some other commands. We have to ensure that the 3193 * DMA cycle timing is slower than or equal to the fastest PIO timing. 3194 */ 3195 3196 if (speed > XFER_PIO_6) { 3197 ata_timing_compute(adev, adev->pio_mode, &p, T, UT); 3198 ata_timing_merge(&p, t, t, ATA_TIMING_ALL); 3199 } 3200 3201 /* 3202 * Lengthen active & recovery time so that cycle time is correct. 3203 */ 3204 3205 if (t->act8b + t->rec8b < t->cyc8b) { 3206 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2; 3207 t->rec8b = t->cyc8b - t->act8b; 3208 } 3209 3210 if (t->active + t->recover < t->cycle) { 3211 t->active += (t->cycle - (t->active + t->recover)) / 2; 3212 t->recover = t->cycle - t->active; 3213 } 3214 3215 /* In a few cases quantisation may produce enough errors to 3216 leave t->cycle too low for the sum of active and recovery; 3217 if so we must correct this */ 3218 if (t->active + t->recover > t->cycle) 3219 t->cycle = t->active + t->recover; 3220 3221 return 0; 3222 } 3223 3224 /** 3225 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration 3226 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine. 3227 * @cycle: cycle duration in ns 3228 * 3229 * Return matching xfer mode for @cycle. The returned mode is of 3230 * the transfer type specified by @xfer_shift. If @cycle is too 3231 * slow for @xfer_shift, 0xff is returned. If @cycle is faster 3232 * than the fastest known mode, the fastest mode is returned. 3233 * 3234 * LOCKING: 3235 * None. 3236 * 3237 * RETURNS: 3238 * Matching xfer_mode, 0xff if no match found. 3239 */ 3240 u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle) 3241 { 3242 u8 base_mode = 0xff, last_mode = 0xff; 3243 const struct ata_xfer_ent *ent; 3244 const struct ata_timing *t; 3245 3246 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++) 3247 if (ent->shift == xfer_shift) 3248 base_mode = ent->base; 3249 3250 for (t = ata_timing_find_mode(base_mode); 3251 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) { 3252 unsigned short this_cycle; 3253 3254 switch (xfer_shift) { 3255 case ATA_SHIFT_PIO: 3256 case ATA_SHIFT_MWDMA: 3257 this_cycle = t->cycle; 3258 break; 3259 case ATA_SHIFT_UDMA: 3260 this_cycle = t->udma; 3261 break; 3262 default: 3263 return 0xff; 3264 } 3265 3266 if (cycle > this_cycle) 3267 break; 3268 3269 last_mode = t->mode; 3270 } 3271 3272 return last_mode; 3273 } 3274 3275 /** 3276 * ata_down_xfermask_limit - adjust dev xfer masks downward 3277 * @dev: Device to adjust xfer masks 3278 * @sel: ATA_DNXFER_* selector 3279 * 3280 * Adjust xfer masks of @dev downward.
Note that this function 3281 * does not apply the change. Invoking ata_set_mode() afterwards 3282 * will apply the limit. 3283 * 3284 * LOCKING: 3285 * Inherited from caller. 3286 * 3287 * RETURNS: 3288 * 0 on success, negative errno on failure 3289 */ 3290 int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel) 3291 { 3292 char buf[32]; 3293 unsigned long orig_mask, xfer_mask; 3294 unsigned long pio_mask, mwdma_mask, udma_mask; 3295 int quiet, highbit; 3296 3297 quiet = !!(sel & ATA_DNXFER_QUIET); 3298 sel &= ~ATA_DNXFER_QUIET; 3299 3300 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask, 3301 dev->mwdma_mask, 3302 dev->udma_mask); 3303 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask); 3304 3305 switch (sel) { 3306 case ATA_DNXFER_PIO: 3307 highbit = fls(pio_mask) - 1; 3308 pio_mask &= ~(1 << highbit); 3309 break; 3310 3311 case ATA_DNXFER_DMA: 3312 if (udma_mask) { 3313 highbit = fls(udma_mask) - 1; 3314 udma_mask &= ~(1 << highbit); 3315 if (!udma_mask) 3316 return -ENOENT; 3317 } else if (mwdma_mask) { 3318 highbit = fls(mwdma_mask) - 1; 3319 mwdma_mask &= ~(1 << highbit); 3320 if (!mwdma_mask) 3321 return -ENOENT; 3322 } 3323 break; 3324 3325 case ATA_DNXFER_40C: 3326 udma_mask &= ATA_UDMA_MASK_40C; 3327 break; 3328 3329 case ATA_DNXFER_FORCE_PIO0: 3330 pio_mask &= 1; 3331 case ATA_DNXFER_FORCE_PIO: 3332 mwdma_mask = 0; 3333 udma_mask = 0; 3334 break; 3335 3336 default: 3337 BUG(); 3338 } 3339 3340 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask); 3341 3342 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask) 3343 return -ENOENT; 3344 3345 if (!quiet) { 3346 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA)) 3347 snprintf(buf, sizeof(buf), "%s:%s", 3348 ata_mode_string(xfer_mask), 3349 ata_mode_string(xfer_mask & ATA_MASK_PIO)); 3350 else 3351 snprintf(buf, sizeof(buf), "%s", 3352 ata_mode_string(xfer_mask)); 3353 3354 ata_dev_printk(dev, KERN_WARNING, 3355 "limiting speed to %s\n", buf); 3356 } 3357 3358 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, 3359 &dev->udma_mask); 3360 3361 return 0; 3362 } 3363 3364 static int ata_dev_set_mode(struct ata_device *dev) 3365 { 3366 struct ata_eh_context *ehc = &dev->link->eh_context; 3367 const char *dev_err_whine = ""; 3368 int ign_dev_err = 0; 3369 unsigned int err_mask; 3370 int rc; 3371 3372 dev->flags &= ~ATA_DFLAG_PIO; 3373 if (dev->xfer_shift == ATA_SHIFT_PIO) 3374 dev->flags |= ATA_DFLAG_PIO; 3375 3376 err_mask = ata_dev_set_xfermode(dev); 3377 3378 if (err_mask & ~AC_ERR_DEV) 3379 goto fail; 3380 3381 /* revalidate */ 3382 ehc->i.flags |= ATA_EHI_POST_SETMODE; 3383 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0); 3384 ehc->i.flags &= ~ATA_EHI_POST_SETMODE; 3385 if (rc) 3386 return rc; 3387 3388 if (dev->xfer_shift == ATA_SHIFT_PIO) { 3389 /* Old CFA may refuse this command, which is just fine */ 3390 if (ata_id_is_cfa(dev->id)) 3391 ign_dev_err = 1; 3392 /* Catch several broken garbage emulations plus some pre 3393 ATA devices */ 3394 if (ata_id_major_version(dev->id) == 0 && 3395 dev->pio_mode <= XFER_PIO_2) 3396 ign_dev_err = 1; 3397 /* Some very old devices and some bad newer ones fail 3398 any kind of SET_XFERMODE request but support PIO0-2 3399 timings and no IORDY */ 3400 if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2) 3401 ign_dev_err = 1; 3402 } 3403 /* Early MWDMA devices do DMA but don't allow DMA mode setting. 
3404 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */ 3405 if (dev->xfer_shift == ATA_SHIFT_MWDMA && 3406 dev->dma_mode == XFER_MW_DMA_0 && 3407 (dev->id[63] >> 8) & 1) 3408 ign_dev_err = 1; 3409 3410 /* if the device is actually configured correctly, ignore dev err */ 3411 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id))) 3412 ign_dev_err = 1; 3413 3414 if (err_mask & AC_ERR_DEV) { 3415 if (!ign_dev_err) 3416 goto fail; 3417 else 3418 dev_err_whine = " (device error ignored)"; 3419 } 3420 3421 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", 3422 dev->xfer_shift, (int)dev->xfer_mode); 3423 3424 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n", 3425 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)), 3426 dev_err_whine); 3427 3428 return 0; 3429 3430 fail: 3431 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode " 3432 "(err_mask=0x%x)\n", err_mask); 3433 return -EIO; 3434 } 3435 3436 /** 3437 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER 3438 * @link: link on which timings will be programmed 3439 * @r_failed_dev: out parameter for failed device 3440 * 3441 * Standard implementation of the function used to tune and set 3442 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If 3443 * ata_dev_set_mode() fails, pointer to the failing device is 3444 * returned in @r_failed_dev. 3445 * 3446 * LOCKING: 3447 * PCI/etc. bus probe sem. 3448 * 3449 * RETURNS: 3450 * 0 on success, negative errno otherwise 3451 */ 3452 3453 int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev) 3454 { 3455 struct ata_port *ap = link->ap; 3456 struct ata_device *dev; 3457 int rc = 0, used_dma = 0, found = 0; 3458 3459 /* step 1: calculate xfer_mask */ 3460 ata_for_each_dev(dev, link, ENABLED) { 3461 unsigned long pio_mask, dma_mask; 3462 unsigned int mode_mask; 3463 3464 mode_mask = ATA_DMA_MASK_ATA; 3465 if (dev->class == ATA_DEV_ATAPI) 3466 mode_mask = ATA_DMA_MASK_ATAPI; 3467 else if (ata_id_is_cfa(dev->id)) 3468 mode_mask = ATA_DMA_MASK_CFA; 3469 3470 ata_dev_xfermask(dev); 3471 ata_force_xfermask(dev); 3472 3473 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); 3474 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 3475 3476 if (libata_dma_mask & mode_mask) 3477 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); 3478 else 3479 dma_mask = 0; 3480 3481 dev->pio_mode = ata_xfer_mask2mode(pio_mask); 3482 dev->dma_mode = ata_xfer_mask2mode(dma_mask); 3483 3484 found = 1; 3485 if (ata_dma_enabled(dev)) 3486 used_dma = 1; 3487 } 3488 if (!found) 3489 goto out; 3490 3491 /* step 2: always set host PIO timings */ 3492 ata_for_each_dev(dev, link, ENABLED) { 3493 if (dev->pio_mode == 0xff) { 3494 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n"); 3495 rc = -EINVAL; 3496 goto out; 3497 } 3498 3499 dev->xfer_mode = dev->pio_mode; 3500 dev->xfer_shift = ATA_SHIFT_PIO; 3501 if (ap->ops->set_piomode) 3502 ap->ops->set_piomode(ap, dev); 3503 } 3504 3505 /* step 3: set host DMA timings */ 3506 ata_for_each_dev(dev, link, ENABLED) { 3507 if (!ata_dma_enabled(dev)) 3508 continue; 3509 3510 dev->xfer_mode = dev->dma_mode; 3511 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); 3512 if (ap->ops->set_dmamode) 3513 ap->ops->set_dmamode(ap, dev); 3514 } 3515 3516 /* step 4: update devices' xfer mode */ 3517 ata_for_each_dev(dev, link, ENABLED) { 3518 rc = ata_dev_set_mode(dev); 3519 if (rc) 3520 goto out; 3521 } 3522 3523 /* Record simplex status. 
If we selected DMA then the other 3524 * host channels are not permitted to do so. 3525 */ 3526 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX)) 3527 ap->host->simplex_claimed = ap; 3528 3529 out: 3530 if (rc) 3531 *r_failed_dev = dev; 3532 return rc; 3533 } 3534 3535 /** 3536 * ata_wait_ready - wait for link to become ready 3537 * @link: link to be waited on 3538 * @deadline: deadline jiffies for the operation 3539 * @check_ready: callback to check link readiness 3540 * 3541 * Wait for @link to become ready. @check_ready should return a 3542 * positive number if @link is ready, 0 if it isn't, -ENODEV if 3543 * link doesn't seem to be occupied, other errno for other error 3544 * conditions. 3545 * 3546 * Transient -ENODEV conditions are allowed for 3547 * ATA_TMOUT_FF_WAIT. 3548 * 3549 * LOCKING: 3550 * EH context. 3551 * 3552 * RETURNS: 3553 * 0 if @link is ready before @deadline; otherwise, -errno. 3554 */ 3555 int ata_wait_ready(struct ata_link *link, unsigned long deadline, 3556 int (*check_ready)(struct ata_link *link)) 3557 { 3558 unsigned long start = jiffies; 3559 unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT); 3560 int warned = 0; 3561 3562 /* Slave readiness can't be tested separately from master. On 3563 * M/S emulation configuration, this function should be called 3564 * only on the master and it will handle both master and slave. 3565 */ 3566 WARN_ON(link == link->ap->slave_link); 3567 3568 if (time_after(nodev_deadline, deadline)) 3569 nodev_deadline = deadline; 3570 3571 while (1) { 3572 unsigned long now = jiffies; 3573 int ready, tmp; 3574 3575 ready = tmp = check_ready(link); 3576 if (ready > 0) 3577 return 0; 3578 3579 /* -ENODEV could be transient. Ignore -ENODEV if link 3580 * is online. Also, some SATA devices take a long 3581 * time to clear 0xff after reset. For example, 3582 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum 3583 * GoVault needs even more than that. Wait for 3584 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline. 3585 * 3586 * Note that some PATA controllers (pata_ali) explode 3587 * if status register is read more than once when 3588 * there's no device attached. 3589 */ 3590 if (ready == -ENODEV) { 3591 if (ata_link_online(link)) 3592 ready = 0; 3593 else if ((link->ap->flags & ATA_FLAG_SATA) && 3594 !ata_link_offline(link) && 3595 time_before(now, nodev_deadline)) 3596 ready = 0; 3597 } 3598 3599 if (ready) 3600 return ready; 3601 if (time_after(now, deadline)) 3602 return -EBUSY; 3603 3604 if (!warned && time_after(now, start + 5 * HZ) && 3605 (deadline - now > 3 * HZ)) { 3606 ata_link_printk(link, KERN_WARNING, 3607 "link is slow to respond, please be patient " 3608 "(ready=%d)\n", tmp); 3609 warned = 1; 3610 } 3611 3612 msleep(50); 3613 } 3614 } 3615 3616 /** 3617 * ata_wait_after_reset - wait for link to become ready after reset 3618 * @link: link to be waited on 3619 * @deadline: deadline jiffies for the operation 3620 * @check_ready: callback to check link readiness 3621 * 3622 * Wait for @link to become ready after reset. 3623 * 3624 * LOCKING: 3625 * EH context. 3626 * 3627 * RETURNS: 3628 * 0 if @link is ready before @deadline; otherwise, -errno.
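 *
 * (Typically @check_ready is a thin wrapper that samples the Status
 * register and feeds it to ata_check_ready(); the SFF code, for
 * example, uses ata_sff_check_ready() for this.)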
3629 */ 3630 int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, 3631 int (*check_ready)(struct ata_link *link)) 3632 { 3633 msleep(ATA_WAIT_AFTER_RESET); 3634 3635 return ata_wait_ready(link, deadline, check_ready); 3636 } 3637 3638 /** 3639 * sata_link_debounce - debounce SATA phy status 3640 * @link: ATA link to debounce SATA phy status for 3641 * @params: timing parameters { interval, duration, timeout } in msec 3642 * @deadline: deadline jiffies for the operation 3643 * 3644 * Make sure SStatus of @link reaches stable state, determined by 3645 * holding the same value where DET is not 1 for @duration polled 3646 * every @interval, before @timeout. Timeout constrains the 3647 * beginning of the stable state. Because DET gets stuck at 1 on 3648 * some controllers after hot unplugging, this function waits 3649 * until timeout then returns 0 if DET is stable at 1. 3650 * 3651 * @timeout is further limited by @deadline. The sooner of the 3652 * two is used. 3653 * 3654 * LOCKING: 3655 * Kernel thread context (may sleep) 3656 * 3657 * RETURNS: 3658 * 0 on success, -errno on failure. 3659 */ 3660 int sata_link_debounce(struct ata_link *link, const unsigned long *params, 3661 unsigned long deadline) 3662 { 3663 unsigned long interval = params[0]; 3664 unsigned long duration = params[1]; 3665 unsigned long last_jiffies, t; 3666 u32 last, cur; 3667 int rc; 3668 3669 t = ata_deadline(jiffies, params[2]); 3670 if (time_before(t, deadline)) 3671 deadline = t; 3672 3673 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3674 return rc; 3675 cur &= 0xf; 3676 3677 last = cur; 3678 last_jiffies = jiffies; 3679 3680 while (1) { 3681 msleep(interval); 3682 if ((rc = sata_scr_read(link, SCR_STATUS, &cur))) 3683 return rc; 3684 cur &= 0xf; 3685 3686 /* DET stable? */ 3687 if (cur == last) { 3688 if (cur == 1 && time_before(jiffies, deadline)) 3689 continue; 3690 if (time_after(jiffies, 3691 ata_deadline(last_jiffies, duration))) 3692 return 0; 3693 continue; 3694 } 3695 3696 /* unstable, start over */ 3697 last = cur; 3698 last_jiffies = jiffies; 3699 3700 /* Check deadline. If debouncing failed, return 3701 * -EPIPE to tell upper layer to lower link speed. 3702 */ 3703 if (time_after(jiffies, deadline)) 3704 return -EPIPE; 3705 } 3706 } 3707 3708 /** 3709 * sata_link_resume - resume SATA link 3710 * @link: ATA link to resume SATA 3711 * @params: timing parameters { interval, duration, timeout } in msec 3712 * @deadline: deadline jiffies for the operation 3713 * 3714 * Resume SATA phy @link and debounce it. 3715 * 3716 * LOCKING: 3717 * Kernel thread context (may sleep) 3718 * 3719 * RETURNS: 3720 * 0 on success, -errno on failure. 3721 */ 3722 int sata_link_resume(struct ata_link *link, const unsigned long *params, 3723 unsigned long deadline) 3724 { 3725 u32 scontrol, serror; 3726 int rc; 3727 3728 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3729 return rc; 3730 3731 scontrol = (scontrol & 0x0f0) | 0x300; 3732 3733 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3734 return rc; 3735 3736 /* Some PHYs react badly if SStatus is pounded immediately 3737 * after resuming. Delay 200ms before debouncing. 3738 */ 3739 msleep(200); 3740 3741 if ((rc = sata_link_debounce(link, params, deadline))) 3742 return rc; 3743 3744 /* clear SError, some PHYs require this even for SRST to work */ 3745 if (!(rc = sata_scr_read(link, SCR_ERROR, &serror))) 3746 rc = sata_scr_write(link, SCR_ERROR, serror); 3747 3748 return rc != -EINVAL ?
rc : 0; 3749 } 3750 3751 /** 3752 * ata_std_prereset - prepare for reset 3753 * @link: ATA link to be reset 3754 * @deadline: deadline jiffies for the operation 3755 * 3756 * @link is about to be reset. Initialize it. Failure from 3757 * prereset makes libata abort the whole reset sequence and give up 3758 * that port, so prereset should be best-effort. It does its 3759 * best to prepare for the reset sequence but if things go wrong, it 3760 * should just whine, not fail. 3761 * 3762 * LOCKING: 3763 * Kernel thread context (may sleep) 3764 * 3765 * RETURNS: 3766 * 0 on success, -errno otherwise. 3767 */ 3768 int ata_std_prereset(struct ata_link *link, unsigned long deadline) 3769 { 3770 struct ata_port *ap = link->ap; 3771 struct ata_eh_context *ehc = &link->eh_context; 3772 const unsigned long *timing = sata_ehc_deb_timing(ehc); 3773 int rc; 3774 3775 /* if we're about to do hardreset, nothing more to do */ 3776 if (ehc->i.action & ATA_EH_HARDRESET) 3777 return 0; 3778 3779 /* if SATA, resume link */ 3780 if (ap->flags & ATA_FLAG_SATA) { 3781 rc = sata_link_resume(link, timing, deadline); 3782 /* whine about phy resume failure but proceed */ 3783 if (rc && rc != -EOPNOTSUPP) 3784 ata_link_printk(link, KERN_WARNING, "failed to resume " 3785 "link for reset (errno=%d)\n", rc); 3786 } 3787 3788 /* no point in trying softreset on offline link */ 3789 if (ata_phys_link_offline(link)) 3790 ehc->i.action &= ~ATA_EH_SOFTRESET; 3791 3792 return 0; 3793 } 3794 3795 /** 3796 * sata_link_hardreset - reset link via SATA phy reset 3797 * @link: link to reset 3798 * @timing: timing parameters { interval, duration, timeout } in msec 3799 * @deadline: deadline jiffies for the operation 3800 * @online: optional out parameter indicating link onlineness 3801 * @check_ready: optional callback to check link readiness 3802 * 3803 * SATA phy-reset @link using DET bits of SControl register. 3804 * After hardreset, link readiness is waited upon using 3805 * ata_wait_ready() if @check_ready is specified. LLDs are 3806 * allowed to not specify @check_ready and wait themselves after this 3807 * function returns. Device classification is LLD's 3808 * responsibility. 3809 * 3810 * *@online is set to one iff reset succeeded and @link is online 3811 * after reset. 3812 * 3813 * LOCKING: 3814 * Kernel thread context (may sleep) 3815 * 3816 * RETURNS: 3817 * 0 on success, -errno otherwise. 3818 */ 3819 int sata_link_hardreset(struct ata_link *link, const unsigned long *timing, 3820 unsigned long deadline, 3821 bool *online, int (*check_ready)(struct ata_link *)) 3822 { 3823 u32 scontrol; 3824 int rc; 3825 3826 DPRINTK("ENTER\n"); 3827 3828 if (online) 3829 *online = false; 3830 3831 if (sata_set_spd_needed(link)) { 3832 /* SATA spec says nothing about how to reconfigure 3833 * spd. To be on the safe side, turn off phy during 3834 * reconfiguration. This works for at least ICH7 AHCI 3835 * and Sil3124. 3836 */ 3837 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3838 goto out; 3839 3840 scontrol = (scontrol & 0x0f0) | 0x304; 3841 3842 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol))) 3843 goto out; 3844 3845 sata_set_spd(link); 3846 } 3847 3848 /* issue phy wake/reset */ 3849 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol))) 3850 goto out; 3851 3852 scontrol = (scontrol & 0x0f0) | 0x301; 3853 3854 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol))) 3855 goto out; 3856 3857 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 3858 * 10.4.2 says at least 1 ms.
3859 */ 3860 msleep(1); 3861 3862 /* bring link back */ 3863 rc = sata_link_resume(link, timing, deadline); 3864 if (rc) 3865 goto out; 3866 /* if link is offline nothing more to do */ 3867 if (ata_phys_link_offline(link)) 3868 goto out; 3869 3870 /* Link is online. From this point, -ENODEV too is an error. */ 3871 if (online) 3872 *online = true; 3873 3874 if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) { 3875 /* If PMP is supported, we have to do follow-up SRST. 3876 * Some PMPs don't send D2H Reg FIS after hardreset if 3877 * the first port is empty. Wait only for 3878 * ATA_TMOUT_PMP_SRST_WAIT. 3879 */ 3880 if (check_ready) { 3881 unsigned long pmp_deadline; 3882 3883 pmp_deadline = ata_deadline(jiffies, 3884 ATA_TMOUT_PMP_SRST_WAIT); 3885 if (time_after(pmp_deadline, deadline)) 3886 pmp_deadline = deadline; 3887 ata_wait_ready(link, pmp_deadline, check_ready); 3888 } 3889 rc = -EAGAIN; 3890 goto out; 3891 } 3892 3893 rc = 0; 3894 if (check_ready) 3895 rc = ata_wait_ready(link, deadline, check_ready); 3896 out: 3897 if (rc && rc != -EAGAIN) { 3898 /* online is set iff link is online && reset succeeded */ 3899 if (online) 3900 *online = false; 3901 ata_link_printk(link, KERN_ERR, 3902 "COMRESET failed (errno=%d)\n", rc); 3903 } 3904 DPRINTK("EXIT, rc=%d\n", rc); 3905 return rc; 3906 } 3907 3908 /** 3909 * sata_std_hardreset - COMRESET w/o waiting or classification 3910 * @link: link to reset 3911 * @class: resulting class of attached device 3912 * @deadline: deadline jiffies for the operation 3913 * 3914 * Standard SATA COMRESET w/o waiting or classification. 3915 * 3916 * LOCKING: 3917 * Kernel thread context (may sleep) 3918 * 3919 * RETURNS: 3920 * 0 if link offline, -EAGAIN if link online, -errno on errors. 3921 */ 3922 int sata_std_hardreset(struct ata_link *link, unsigned int *class, 3923 unsigned long deadline) 3924 { 3925 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context); 3926 bool online; 3927 int rc; 3928 3929 /* do hardreset */ 3930 rc = sata_link_hardreset(link, timing, deadline, &online, NULL); 3931 return online ? -EAGAIN : rc; 3932 } 3933 3934 /** 3935 * ata_std_postreset - standard postreset callback 3936 * @link: the target ata_link 3937 * @classes: classes of attached devices 3938 * 3939 * This function is invoked after a successful reset. Note that 3940 * the device might have been reset more than once using 3941 * different reset methods before postreset is invoked. 3942 * 3943 * LOCKING: 3944 * Kernel thread context (may sleep) 3945 */ 3946 void ata_std_postreset(struct ata_link *link, unsigned int *classes) 3947 { 3948 u32 serror; 3949 3950 DPRINTK("ENTER\n"); 3951 3952 /* reset complete, clear SError */ 3953 if (!sata_scr_read(link, SCR_ERROR, &serror)) 3954 sata_scr_write(link, SCR_ERROR, serror); 3955 3956 /* print link status */ 3957 sata_print_link_status(link); 3958 3959 DPRINTK("EXIT\n"); 3960 } 3961 3962 /** 3963 * ata_dev_same_device - Determine whether new ID matches configured device 3964 * @dev: device to compare against 3965 * @new_class: class of the new device 3966 * @new_id: IDENTIFY page of the new device 3967 * 3968 * Compare @new_class and @new_id against @dev and determine 3969 * whether @dev is the device indicated by @new_class and 3970 * @new_id. 3971 * 3972 * LOCKING: 3973 * None. 3974 * 3975 * RETURNS: 3976 * 1 if @dev matches @new_class and @new_id, 0 otherwise. 
3977 */ 3978 static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class, 3979 const u16 *new_id) 3980 { 3981 const u16 *old_id = dev->id; 3982 unsigned char model[2][ATA_ID_PROD_LEN + 1]; 3983 unsigned char serial[2][ATA_ID_SERNO_LEN + 1]; 3984 3985 if (dev->class != new_class) { 3986 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n", 3987 dev->class, new_class); 3988 return 0; 3989 } 3990 3991 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0])); 3992 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1])); 3993 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0])); 3994 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1])); 3995 3996 if (strcmp(model[0], model[1])) { 3997 ata_dev_printk(dev, KERN_INFO, "model number mismatch " 3998 "'%s' != '%s'\n", model[0], model[1]); 3999 return 0; 4000 } 4001 4002 if (strcmp(serial[0], serial[1])) { 4003 ata_dev_printk(dev, KERN_INFO, "serial number mismatch " 4004 "'%s' != '%s'\n", serial[0], serial[1]); 4005 return 0; 4006 } 4007 4008 return 1; 4009 } 4010 4011 /** 4012 * ata_dev_reread_id - Re-read IDENTIFY data 4013 * @dev: target ATA device 4014 * @readid_flags: read ID flags 4015 * 4016 * Re-read IDENTIFY page and make sure @dev is still attached to 4017 * the port. 4018 * 4019 * LOCKING: 4020 * Kernel thread context (may sleep) 4021 * 4022 * RETURNS: 4023 * 0 on success, negative errno otherwise 4024 */ 4025 int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags) 4026 { 4027 unsigned int class = dev->class; 4028 u16 *id = (void *)dev->link->ap->sector_buf; 4029 int rc; 4030 4031 /* read ID data */ 4032 rc = ata_dev_read_id(dev, &class, readid_flags, id); 4033 if (rc) 4034 return rc; 4035 4036 /* is the device still there? */ 4037 if (!ata_dev_same_device(dev, class, id)) 4038 return -ENODEV; 4039 4040 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS); 4041 return 0; 4042 } 4043 4044 /** 4045 * ata_dev_revalidate - Revalidate ATA device 4046 * @dev: device to revalidate 4047 * @new_class: new class code 4048 * @readid_flags: read ID flags 4049 * 4050 * Re-read IDENTIFY page, make sure @dev is still attached to the 4051 * port and reconfigure it according to the new IDENTIFY page. 
4052 * 4053 * LOCKING: 4054 * Kernel thread context (may sleep) 4055 * 4056 * RETURNS: 4057 * 0 on success, negative errno otherwise 4058 */ 4059 int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class, 4060 unsigned int readid_flags) 4061 { 4062 u64 n_sectors = dev->n_sectors; 4063 int rc; 4064 4065 if (!ata_dev_enabled(dev)) 4066 return -ENODEV; 4067 4068 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */ 4069 if (ata_class_enabled(new_class) && 4070 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) { 4071 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n", 4072 dev->class, new_class); 4073 rc = -ENODEV; 4074 goto fail; 4075 } 4076 4077 /* re-read ID */ 4078 rc = ata_dev_reread_id(dev, readid_flags); 4079 if (rc) 4080 goto fail; 4081 4082 /* configure device according to the new ID */ 4083 rc = ata_dev_configure(dev); 4084 if (rc) 4085 goto fail; 4086 4087 /* verify n_sectors hasn't changed */ 4088 if (dev->class == ATA_DEV_ATA && n_sectors && 4089 dev->n_sectors != n_sectors) { 4090 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch " 4091 "%llu != %llu\n", 4092 (unsigned long long)n_sectors, 4093 (unsigned long long)dev->n_sectors); 4094 4095 /* restore original n_sectors */ 4096 dev->n_sectors = n_sectors; 4097 4098 rc = -ENODEV; 4099 goto fail; 4100 } 4101 4102 return 0; 4103 4104 fail: 4105 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc); 4106 return rc; 4107 } 4108 4109 struct ata_blacklist_entry { 4110 const char *model_num; 4111 const char *model_rev; 4112 unsigned long horkage; 4113 }; 4114 4115 static const struct ata_blacklist_entry ata_device_blacklist [] = { 4116 /* Devices with DMA related problems under Linux */ 4117 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA }, 4118 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA }, 4119 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA }, 4120 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA }, 4121 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA }, 4122 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA }, 4123 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA }, 4124 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA }, 4125 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA }, 4126 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA }, 4127 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA }, 4128 { "CRD-84", NULL, ATA_HORKAGE_NODMA }, 4129 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA }, 4130 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA }, 4131 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA }, 4132 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA }, 4133 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA }, 4134 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA }, 4135 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA }, 4136 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA }, 4137 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA }, 4138 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA }, 4139 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA }, 4140 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA }, 4141 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA }, 4142 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA }, 4143 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA }, 4144 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA }, 4145 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA }, 4146 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA }, 4147 /* Odd clown on sil3726/4726 PMPs */ 4148 { "Config Disk", NULL, ATA_HORKAGE_DISABLE }, 4149 4150 /* Weird ATAPI devices */ 4151 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, 4152 { "QUANTUM DAT DAT72-000", 
NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, 4153 4154 /* Devices we expect to fail diagnostics */ 4155 4156 /* Devices where NCQ should be avoided */ 4157 /* NCQ is slow */ 4158 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ }, 4159 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, }, 4160 /* http://thread.gmane.org/gmane.linux.ide/14907 */ 4161 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ }, 4162 /* NCQ is broken */ 4163 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ }, 4164 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ }, 4165 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ }, 4166 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ }, 4167 { "OCZ CORE_SSD", "02.10104", ATA_HORKAGE_NONCQ }, 4168 4169 /* Seagate NCQ + FLUSH CACHE firmware bug */ 4170 { "ST31500341AS", "SD15", ATA_HORKAGE_NONCQ | 4171 ATA_HORKAGE_FIRMWARE_WARN }, 4172 { "ST31500341AS", "SD16", ATA_HORKAGE_NONCQ | 4173 ATA_HORKAGE_FIRMWARE_WARN }, 4174 { "ST31500341AS", "SD17", ATA_HORKAGE_NONCQ | 4175 ATA_HORKAGE_FIRMWARE_WARN }, 4176 { "ST31500341AS", "SD18", ATA_HORKAGE_NONCQ | 4177 ATA_HORKAGE_FIRMWARE_WARN }, 4178 { "ST31500341AS", "SD19", ATA_HORKAGE_NONCQ | 4179 ATA_HORKAGE_FIRMWARE_WARN }, 4180 4181 { "ST31000333AS", "SD15", ATA_HORKAGE_NONCQ | 4182 ATA_HORKAGE_FIRMWARE_WARN }, 4183 { "ST31000333AS", "SD16", ATA_HORKAGE_NONCQ | 4184 ATA_HORKAGE_FIRMWARE_WARN }, 4185 { "ST31000333AS", "SD17", ATA_HORKAGE_NONCQ | 4186 ATA_HORKAGE_FIRMWARE_WARN }, 4187 { "ST31000333AS", "SD18", ATA_HORKAGE_NONCQ | 4188 ATA_HORKAGE_FIRMWARE_WARN }, 4189 { "ST31000333AS", "SD19", ATA_HORKAGE_NONCQ | 4190 ATA_HORKAGE_FIRMWARE_WARN }, 4191 4192 { "ST3640623AS", "SD15", ATA_HORKAGE_NONCQ | 4193 ATA_HORKAGE_FIRMWARE_WARN }, 4194 { "ST3640623AS", "SD16", ATA_HORKAGE_NONCQ | 4195 ATA_HORKAGE_FIRMWARE_WARN }, 4196 { "ST3640623AS", "SD17", ATA_HORKAGE_NONCQ | 4197 ATA_HORKAGE_FIRMWARE_WARN }, 4198 { "ST3640623AS", "SD18", ATA_HORKAGE_NONCQ | 4199 ATA_HORKAGE_FIRMWARE_WARN }, 4200 { "ST3640623AS", "SD19", ATA_HORKAGE_NONCQ | 4201 ATA_HORKAGE_FIRMWARE_WARN }, 4202 4203 { "ST3640323AS", "SD15", ATA_HORKAGE_NONCQ | 4204 ATA_HORKAGE_FIRMWARE_WARN }, 4205 { "ST3640323AS", "SD16", ATA_HORKAGE_NONCQ | 4206 ATA_HORKAGE_FIRMWARE_WARN }, 4207 { "ST3640323AS", "SD17", ATA_HORKAGE_NONCQ | 4208 ATA_HORKAGE_FIRMWARE_WARN }, 4209 { "ST3640323AS", "SD18", ATA_HORKAGE_NONCQ | 4210 ATA_HORKAGE_FIRMWARE_WARN }, 4211 { "ST3640323AS", "SD19", ATA_HORKAGE_NONCQ | 4212 ATA_HORKAGE_FIRMWARE_WARN }, 4213 4214 { "ST3320813AS", "SD15", ATA_HORKAGE_NONCQ | 4215 ATA_HORKAGE_FIRMWARE_WARN }, 4216 { "ST3320813AS", "SD16", ATA_HORKAGE_NONCQ | 4217 ATA_HORKAGE_FIRMWARE_WARN }, 4218 { "ST3320813AS", "SD17", ATA_HORKAGE_NONCQ | 4219 ATA_HORKAGE_FIRMWARE_WARN }, 4220 { "ST3320813AS", "SD18", ATA_HORKAGE_NONCQ | 4221 ATA_HORKAGE_FIRMWARE_WARN }, 4222 { "ST3320813AS", "SD19", ATA_HORKAGE_NONCQ | 4223 ATA_HORKAGE_FIRMWARE_WARN }, 4224 4225 { "ST3320613AS", "SD15", ATA_HORKAGE_NONCQ | 4226 ATA_HORKAGE_FIRMWARE_WARN }, 4227 { "ST3320613AS", "SD16", ATA_HORKAGE_NONCQ | 4228 ATA_HORKAGE_FIRMWARE_WARN }, 4229 { "ST3320613AS", "SD17", ATA_HORKAGE_NONCQ | 4230 ATA_HORKAGE_FIRMWARE_WARN }, 4231 { "ST3320613AS", "SD18", ATA_HORKAGE_NONCQ | 4232 ATA_HORKAGE_FIRMWARE_WARN }, 4233 { "ST3320613AS", "SD19", ATA_HORKAGE_NONCQ | 4234 ATA_HORKAGE_FIRMWARE_WARN }, 4235 4236 /* Blacklist entries taken from Silicon Image 3124/3132 4237 Windows driver .inf file - also several Linux problem reports */ 4238 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, 4239 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, }, 4240 { 
"HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, }, 4241 4242 /* devices which puke on READ_NATIVE_MAX */ 4243 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, }, 4244 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA }, 4245 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA }, 4246 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA }, 4247 4248 /* Devices which report 1 sector over size HPA */ 4249 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4250 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4251 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, }, 4252 4253 /* Devices which get the IVB wrong */ 4254 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, }, 4255 /* Maybe we should just blacklist TSSTcorp... */ 4256 { "TSSTcorp CDDVDW SH-S202H", "SB00", ATA_HORKAGE_IVB, }, 4257 { "TSSTcorp CDDVDW SH-S202H", "SB01", ATA_HORKAGE_IVB, }, 4258 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, }, 4259 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, }, 4260 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, }, 4261 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, }, 4262 4263 /* Devices that do not need bridging limits applied */ 4264 { "MTRON MSP-SATA*", NULL, ATA_HORKAGE_BRIDGE_OK, }, 4265 4266 /* Devices which aren't very happy with higher link speeds */ 4267 { "WD My Book", NULL, ATA_HORKAGE_1_5_GBPS, }, 4268 4269 /* End Marker */ 4270 { } 4271 }; 4272 4273 static int strn_pattern_cmp(const char *patt, const char *name, int wildchar) 4274 { 4275 const char *p; 4276 int len; 4277 4278 /* 4279 * check for trailing wildcard: *\0 4280 */ 4281 p = strchr(patt, wildchar); 4282 if (p && ((*(p + 1)) == 0)) 4283 len = p - patt; 4284 else { 4285 len = strlen(name); 4286 if (!len) { 4287 if (!*patt) 4288 return 0; 4289 return -1; 4290 } 4291 } 4292 4293 return strncmp(patt, name, len); 4294 } 4295 4296 static unsigned long ata_dev_blacklisted(const struct ata_device *dev) 4297 { 4298 unsigned char model_num[ATA_ID_PROD_LEN + 1]; 4299 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1]; 4300 const struct ata_blacklist_entry *ad = ata_device_blacklist; 4301 4302 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num)); 4303 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev)); 4304 4305 while (ad->model_num) { 4306 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) { 4307 if (ad->model_rev == NULL) 4308 return ad->horkage; 4309 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*')) 4310 return ad->horkage; 4311 } 4312 ad++; 4313 } 4314 return 0; 4315 } 4316 4317 static int ata_dma_blacklisted(const struct ata_device *dev) 4318 { 4319 /* We don't support polling DMA. 4320 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO) 4321 * if the LLDD handles only interrupts in the HSM_ST_LAST state. 4322 */ 4323 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) && 4324 (dev->flags & ATA_DFLAG_CDB_INTR)) 4325 return 1; 4326 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0; 4327 } 4328 4329 /** 4330 * ata_is_40wire - check drive side detection 4331 * @dev: device 4332 * 4333 * Perform drive side detection decoding, allowing for device vendors 4334 * who can't follow the documentation. 
4335 */ 4336 4337 static int ata_is_40wire(struct ata_device *dev) 4338 { 4339 if (dev->horkage & ATA_HORKAGE_IVB) 4340 return ata_drive_40wire_relaxed(dev->id); 4341 return ata_drive_40wire(dev->id); 4342 } 4343 4344 /** 4345 * cable_is_40wire - 40/80/SATA decider 4346 * @ap: port to consider 4347 * 4348 * This function encapsulates the policy for speed management 4349 * in one place. At the moment we don't cache the result but 4350 * there is a good case for setting ap->cbl to the result when 4351 * we are called with unknown cables (and figuring out if it 4352 * impacts hotplug at all). 4353 * 4354 * Return 1 if the cable appears to be 40 wire. 4355 */ 4356 4357 static int cable_is_40wire(struct ata_port *ap) 4358 { 4359 struct ata_link *link; 4360 struct ata_device *dev; 4361 4362 /* If the controller thinks we are 40 wire, we are. */ 4363 if (ap->cbl == ATA_CBL_PATA40) 4364 return 1; 4365 4366 /* If the controller thinks we are 80 wire, we are. */ 4367 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA) 4368 return 0; 4369 4370 /* If the system is known to be 40 wire short cable (eg 4371 * laptop), then we allow 80 wire modes even if the drive 4372 * isn't sure. 4373 */ 4374 if (ap->cbl == ATA_CBL_PATA40_SHORT) 4375 return 0; 4376 4377 /* If the controller doesn't know, we scan. 4378 * 4379 * Note: We look for all 40 wire detects at this point. Any 4380 * 80 wire detect is taken to be 80 wire cable because 4381 * - in many setups only the one drive (slave if present) will 4382 * give a valid detect 4383 * - if you have a non detect capable drive you don't want it 4384 * to colour the choice 4385 */ 4386 ata_for_each_link(link, ap, EDGE) { 4387 ata_for_each_dev(dev, link, ENABLED) { 4388 if (!ata_is_40wire(dev)) 4389 return 0; 4390 } 4391 } 4392 return 1; 4393 } 4394 4395 /** 4396 * ata_dev_xfermask - Compute supported xfermask of the given device 4397 * @dev: Device to compute xfermask for 4398 * 4399 * Compute supported xfermask of @dev and store it in 4400 * dev->*_mask. This function is responsible for applying all 4401 * known limits including host controller limits, device 4402 * blacklist, etc... 4403 * 4404 * LOCKING: 4405 * None. 
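 *
 * Purely illustrative sketch (not one of this file's call sites): the
 * per-type masks are combined and split with the packing helpers this
 * function uses, e.g.
 *
 *	unsigned long pio, mwdma, udma;
 *	unsigned long mask;
 *
 *	mask = ata_pack_xfermask(ATA_PIO4, ATA_MWDMA2, ATA_UDMA6);
 *	ata_unpack_xfermask(mask, &pio, &mwdma, &udma);
 *
 * The mode mask constants come from <linux/ata.h>.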
4406 */ 4407 static void ata_dev_xfermask(struct ata_device *dev) 4408 { 4409 struct ata_link *link = dev->link; 4410 struct ata_port *ap = link->ap; 4411 struct ata_host *host = ap->host; 4412 unsigned long xfer_mask; 4413 4414 /* controller modes available */ 4415 xfer_mask = ata_pack_xfermask(ap->pio_mask, 4416 ap->mwdma_mask, ap->udma_mask); 4417 4418 /* drive modes available */ 4419 xfer_mask &= ata_pack_xfermask(dev->pio_mask, 4420 dev->mwdma_mask, dev->udma_mask); 4421 xfer_mask &= ata_id_xfermask(dev->id); 4422 4423 /* 4424 * CFA Advanced TrueIDE timings are not allowed on a shared 4425 * cable 4426 */ 4427 if (ata_dev_pair(dev)) { 4428 /* No PIO5 or PIO6 */ 4429 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5)); 4430 /* No MWDMA3 or MWDMA 4 */ 4431 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3)); 4432 } 4433 4434 if (ata_dma_blacklisted(dev)) { 4435 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4436 ata_dev_printk(dev, KERN_WARNING, 4437 "device is on DMA blacklist, disabling DMA\n"); 4438 } 4439 4440 if ((host->flags & ATA_HOST_SIMPLEX) && 4441 host->simplex_claimed && host->simplex_claimed != ap) { 4442 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); 4443 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by " 4444 "other device, disabling DMA\n"); 4445 } 4446 4447 if (ap->flags & ATA_FLAG_NO_IORDY) 4448 xfer_mask &= ata_pio_mask_no_iordy(dev); 4449 4450 if (ap->ops->mode_filter) 4451 xfer_mask = ap->ops->mode_filter(dev, xfer_mask); 4452 4453 /* Apply cable rule here. Don't apply it early because when 4454 * we handle hot plug the cable type can itself change. 4455 * Check this last so that we know if the transfer rate was 4456 * solely limited by the cable. 4457 * Unknown or 80 wire cables reported host side are checked 4458 * drive side as well. Cases where we know a 40wire cable 4459 * is used safely for 80 are not checked here. 4460 */ 4461 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA)) 4462 /* UDMA/44 or higher would be available */ 4463 if (cable_is_40wire(ap)) { 4464 ata_dev_printk(dev, KERN_WARNING, 4465 "limited to UDMA/33 due to 40-wire cable\n"); 4466 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); 4467 } 4468 4469 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, 4470 &dev->mwdma_mask, &dev->udma_mask); 4471 } 4472 4473 /** 4474 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command 4475 * @dev: Device to which command will be sent 4476 * 4477 * Issue SET FEATURES - XFER MODE command to device @dev 4478 * on port @ap. 4479 * 4480 * LOCKING: 4481 * PCI/etc. bus probe sem. 4482 * 4483 * RETURNS: 4484 * 0 on success, AC_ERR_* mask otherwise. 4485 */ 4486 4487 static unsigned int ata_dev_set_xfermode(struct ata_device *dev) 4488 { 4489 struct ata_taskfile tf; 4490 unsigned int err_mask; 4491 4492 /* set up set-features taskfile */ 4493 DPRINTK("set features - xfer mode\n"); 4494 4495 /* Some controllers and ATAPI devices show flaky interrupt 4496 * behavior after setting xfer mode. Use polling instead. 
4497 */ 4498 ata_tf_init(dev, &tf); 4499 tf.command = ATA_CMD_SET_FEATURES; 4500 tf.feature = SETFEATURES_XFER; 4501 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING; 4502 tf.protocol = ATA_PROT_NODATA; 4503 /* If we are using IORDY we must send the mode setting command */ 4504 if (ata_pio_need_iordy(dev)) 4505 tf.nsect = dev->xfer_mode; 4506 /* If the device has IORDY and the controller does not - turn it off */ 4507 else if (ata_id_has_iordy(dev->id)) 4508 tf.nsect = 0x01; 4509 else /* In the ancient relic department - skip all of this */ 4510 return 0; 4511 4512 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4513 4514 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4515 return err_mask; 4516 } 4517 /** 4518 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES 4519 * @dev: Device to which command will be sent 4520 * @enable: Whether to enable or disable the feature 4521 * @feature: The sector count represents the feature to set 4522 * 4523 * Issue SET FEATURES - SATA FEATURES command to device @dev 4524 * on port @ap with sector count 4525 * 4526 * LOCKING: 4527 * PCI/etc. bus probe sem. 4528 * 4529 * RETURNS: 4530 * 0 on success, AC_ERR_* mask otherwise. 4531 */ 4532 static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable, 4533 u8 feature) 4534 { 4535 struct ata_taskfile tf; 4536 unsigned int err_mask; 4537 4538 /* set up set-features taskfile */ 4539 DPRINTK("set features - SATA features\n"); 4540 4541 ata_tf_init(dev, &tf); 4542 tf.command = ATA_CMD_SET_FEATURES; 4543 tf.feature = enable; 4544 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4545 tf.protocol = ATA_PROT_NODATA; 4546 tf.nsect = feature; 4547 4548 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4549 4550 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4551 return err_mask; 4552 } 4553 4554 /** 4555 * ata_dev_init_params - Issue INIT DEV PARAMS command 4556 * @dev: Device to which command will be sent 4557 * @heads: Number of heads (taskfile parameter) 4558 * @sectors: Number of sectors (taskfile parameter) 4559 * 4560 * LOCKING: 4561 * Kernel thread context (may sleep) 4562 * 4563 * RETURNS: 4564 * 0 on success, AC_ERR_* mask otherwise. 4565 */ 4566 static unsigned int ata_dev_init_params(struct ata_device *dev, 4567 u16 heads, u16 sectors) 4568 { 4569 struct ata_taskfile tf; 4570 unsigned int err_mask; 4571 4572 /* Number of sectors per track 1-255. Number of heads 1-16 */ 4573 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16) 4574 return AC_ERR_INVALID; 4575 4576 /* set up init dev params taskfile */ 4577 DPRINTK("init dev params \n"); 4578 4579 ata_tf_init(dev, &tf); 4580 tf.command = ATA_CMD_INIT_DEV_PARAMS; 4581 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE; 4582 tf.protocol = ATA_PROT_NODATA; 4583 tf.nsect = sectors; 4584 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ 4585 4586 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4587 /* A clean abort indicates an original or just out of spec drive 4588 and we should continue as we issue the setup based on the 4589 drive reported working geometry */ 4590 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED)) 4591 err_mask = 0; 4592 4593 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4594 return err_mask; 4595 } 4596 4597 /** 4598 * ata_sg_clean - Unmap DMA memory associated with command 4599 * @qc: Command containing DMA memory to be released 4600 * 4601 * Unmap all mapped DMA memory associated with this command. 
4602 * 4603 * LOCKING: 4604 * spin_lock_irqsave(host lock) 4605 */ 4606 void ata_sg_clean(struct ata_queued_cmd *qc) 4607 { 4608 struct ata_port *ap = qc->ap; 4609 struct scatterlist *sg = qc->sg; 4610 int dir = qc->dma_dir; 4611 4612 WARN_ON_ONCE(sg == NULL); 4613 4614 VPRINTK("unmapping %u sg elements\n", qc->n_elem); 4615 4616 if (qc->n_elem) 4617 dma_unmap_sg(ap->dev, sg, qc->orig_n_elem, dir); 4618 4619 qc->flags &= ~ATA_QCFLAG_DMAMAP; 4620 qc->sg = NULL; 4621 } 4622 4623 /** 4624 * atapi_check_dma - Check whether ATAPI DMA can be supported 4625 * @qc: Metadata associated with taskfile to check 4626 * 4627 * Allow low-level driver to filter ATA PACKET commands, returning 4628 * a status indicating whether or not it is OK to use DMA for the 4629 * supplied PACKET command. 4630 * 4631 * LOCKING: 4632 * spin_lock_irqsave(host lock) 4633 * 4634 * RETURNS: 0 when ATAPI DMA can be used 4635 * nonzero otherwise 4636 */ 4637 int atapi_check_dma(struct ata_queued_cmd *qc) 4638 { 4639 struct ata_port *ap = qc->ap; 4640 4641 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a 4642 * few ATAPI devices choke on such DMA requests. 4643 */ 4644 if (!(qc->dev->horkage & ATA_HORKAGE_ATAPI_MOD16_DMA) && 4645 unlikely(qc->nbytes & 15)) 4646 return 1; 4647 4648 if (ap->ops->check_atapi_dma) 4649 return ap->ops->check_atapi_dma(qc); 4650 4651 return 0; 4652 } 4653 4654 /** 4655 * ata_std_qc_defer - Check whether a qc needs to be deferred 4656 * @qc: ATA command in question 4657 * 4658 * Non-NCQ commands cannot run with any other command, NCQ or 4659 * not. As upper layer only knows the queue depth, we are 4660 * responsible for maintaining exclusion. This function checks 4661 * whether a new command @qc can be issued. 4662 * 4663 * LOCKING: 4664 * spin_lock_irqsave(host lock) 4665 * 4666 * RETURNS: 4667 * ATA_DEFER_* if deferring is needed, 0 otherwise. 4668 */ 4669 int ata_std_qc_defer(struct ata_queued_cmd *qc) 4670 { 4671 struct ata_link *link = qc->dev->link; 4672 4673 if (qc->tf.protocol == ATA_PROT_NCQ) { 4674 if (!ata_tag_valid(link->active_tag)) 4675 return 0; 4676 } else { 4677 if (!ata_tag_valid(link->active_tag) && !link->sactive) 4678 return 0; 4679 } 4680 4681 return ATA_DEFER_LINK; 4682 } 4683 4684 void ata_noop_qc_prep(struct ata_queued_cmd *qc) { } 4685 4686 /** 4687 * ata_sg_init - Associate command with scatter-gather table. 4688 * @qc: Command to be associated 4689 * @sg: Scatter-gather table. 4690 * @n_elem: Number of elements in s/g table. 4691 * 4692 * Initialize the data-related elements of queued_cmd @qc 4693 * to point to a scatter-gather table @sg, containing @n_elem 4694 * elements. 4695 * 4696 * LOCKING: 4697 * spin_lock_irqsave(host lock) 4698 */ 4699 void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, 4700 unsigned int n_elem) 4701 { 4702 qc->sg = sg; 4703 qc->n_elem = n_elem; 4704 qc->cursg = qc->sg; 4705 } 4706 4707 /** 4708 * ata_sg_setup - DMA-map the scatter-gather table associated with a command. 4709 * @qc: Command with scatter-gather table to be mapped. 4710 * 4711 * DMA-map the scatter-gather table associated with queued_cmd @qc. 4712 * 4713 * LOCKING: 4714 * spin_lock_irqsave(host lock) 4715 * 4716 * RETURNS: 4717 * Zero on success, negative on error. 
4718 * 4719 */ 4720 static int ata_sg_setup(struct ata_queued_cmd *qc) 4721 { 4722 struct ata_port *ap = qc->ap; 4723 unsigned int n_elem; 4724 4725 VPRINTK("ENTER, ata%u\n", ap->print_id); 4726 4727 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir); 4728 if (n_elem < 1) 4729 return -1; 4730 4731 DPRINTK("%d sg elements mapped\n", n_elem); 4732 qc->orig_n_elem = qc->n_elem; 4733 qc->n_elem = n_elem; 4734 qc->flags |= ATA_QCFLAG_DMAMAP; 4735 4736 return 0; 4737 } 4738 4739 /** 4740 * swap_buf_le16 - swap halves of 16-bit words in place 4741 * @buf: Buffer to swap 4742 * @buf_words: Number of 16-bit words in buffer. 4743 * 4744 * Swap halves of 16-bit words if needed to convert from 4745 * little-endian byte order to native cpu byte order, or 4746 * vice-versa. 4747 * 4748 * LOCKING: 4749 * Inherited from caller. 4750 */ 4751 void swap_buf_le16(u16 *buf, unsigned int buf_words) 4752 { 4753 #ifdef __BIG_ENDIAN 4754 unsigned int i; 4755 4756 for (i = 0; i < buf_words; i++) 4757 buf[i] = le16_to_cpu(buf[i]); 4758 #endif /* __BIG_ENDIAN */ 4759 } 4760 4761 /** 4762 * ata_qc_new - Request an available ATA command, for queueing 4763 * @ap: target port 4764 * 4765 * LOCKING: 4766 * None. 4767 */ 4768 4769 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) 4770 { 4771 struct ata_queued_cmd *qc = NULL; 4772 unsigned int i; 4773 4774 /* no command while frozen */ 4775 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN)) 4776 return NULL; 4777 4778 /* the last tag is reserved for internal command. */ 4779 for (i = 0; i < ATA_MAX_QUEUE - 1; i++) 4780 if (!test_and_set_bit(i, &ap->qc_allocated)) { 4781 qc = __ata_qc_from_tag(ap, i); 4782 break; 4783 } 4784 4785 if (qc) 4786 qc->tag = i; 4787 4788 return qc; 4789 } 4790 4791 /** 4792 * ata_qc_new_init - Request an available ATA command, and initialize it 4793 * @dev: Device from whom we request an available command structure 4794 * 4795 * LOCKING: 4796 * None. 4797 */ 4798 4799 struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev) 4800 { 4801 struct ata_port *ap = dev->link->ap; 4802 struct ata_queued_cmd *qc; 4803 4804 qc = ata_qc_new(ap); 4805 if (qc) { 4806 qc->scsicmd = NULL; 4807 qc->ap = ap; 4808 qc->dev = dev; 4809 4810 ata_qc_reinit(qc); 4811 } 4812 4813 return qc; 4814 } 4815 4816 /** 4817 * ata_qc_free - free unused ata_queued_cmd 4818 * @qc: Command to complete 4819 * 4820 * Designed to free unused ata_queued_cmd object 4821 * in case something prevents using it. 
4822 * 4823 * LOCKING: 4824 * spin_lock_irqsave(host lock) 4825 */ 4826 void ata_qc_free(struct ata_queued_cmd *qc) 4827 { 4828 struct ata_port *ap = qc->ap; 4829 unsigned int tag; 4830 4831 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4832 4833 qc->flags = 0; 4834 tag = qc->tag; 4835 if (likely(ata_tag_valid(tag))) { 4836 qc->tag = ATA_TAG_POISON; 4837 clear_bit(tag, &ap->qc_allocated); 4838 } 4839 } 4840 4841 void __ata_qc_complete(struct ata_queued_cmd *qc) 4842 { 4843 struct ata_port *ap = qc->ap; 4844 struct ata_link *link = qc->dev->link; 4845 4846 WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */ 4847 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE)); 4848 4849 if (likely(qc->flags & ATA_QCFLAG_DMAMAP)) 4850 ata_sg_clean(qc); 4851 4852 /* command should be marked inactive atomically with qc completion */ 4853 if (qc->tf.protocol == ATA_PROT_NCQ) { 4854 link->sactive &= ~(1 << qc->tag); 4855 if (!link->sactive) 4856 ap->nr_active_links--; 4857 } else { 4858 link->active_tag = ATA_TAG_POISON; 4859 ap->nr_active_links--; 4860 } 4861 4862 /* clear exclusive status */ 4863 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL && 4864 ap->excl_link == link)) 4865 ap->excl_link = NULL; 4866 4867 /* atapi: mark qc as inactive to prevent the interrupt handler 4868 * from completing the command twice later, before the error handler 4869 * is called. (when rc != 0 and atapi request sense is needed) 4870 */ 4871 qc->flags &= ~ATA_QCFLAG_ACTIVE; 4872 ap->qc_active &= ~(1 << qc->tag); 4873 4874 /* call completion callback */ 4875 qc->complete_fn(qc); 4876 } 4877 4878 static void fill_result_tf(struct ata_queued_cmd *qc) 4879 { 4880 struct ata_port *ap = qc->ap; 4881 4882 qc->result_tf.flags = qc->tf.flags; 4883 ap->ops->qc_fill_rtf(qc); 4884 } 4885 4886 static void ata_verify_xfer(struct ata_queued_cmd *qc) 4887 { 4888 struct ata_device *dev = qc->dev; 4889 4890 if (ata_tag_internal(qc->tag)) 4891 return; 4892 4893 if (ata_is_nodata(qc->tf.protocol)) 4894 return; 4895 4896 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol)) 4897 return; 4898 4899 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER; 4900 } 4901 4902 /** 4903 * ata_qc_complete - Complete an active ATA command 4904 * @qc: Command to complete 4905 * 4906 * Indicate to the mid and upper layers that an ATA 4907 * command has completed, with either an ok or not-ok status. 4908 * 4909 * LOCKING: 4910 * spin_lock_irqsave(host lock) 4911 */ 4912 void ata_qc_complete(struct ata_queued_cmd *qc) 4913 { 4914 struct ata_port *ap = qc->ap; 4915 4916 /* XXX: New EH and old EH use different mechanisms to 4917 * synchronize EH with regular execution path. 4918 * 4919 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED. 4920 * Normal execution path is responsible for not accessing a 4921 * failed qc. libata core enforces the rule by returning NULL 4922 * from ata_qc_from_tag() for failed qcs. 4923 * 4924 * Old EH depends on ata_qc_complete() nullifying completion 4925 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does 4926 * not synchronize with interrupt handler. Only PIO task is 4927 * taken care of. 
4928 */ 4929 if (ap->ops->error_handler) { 4930 struct ata_device *dev = qc->dev; 4931 struct ata_eh_info *ehi = &dev->link->eh_info; 4932 4933 WARN_ON_ONCE(ap->pflags & ATA_PFLAG_FROZEN); 4934 4935 if (unlikely(qc->err_mask)) 4936 qc->flags |= ATA_QCFLAG_FAILED; 4937 4938 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) { 4939 if (!ata_tag_internal(qc->tag)) { 4940 /* always fill result TF for failed qc */ 4941 fill_result_tf(qc); 4942 ata_qc_schedule_eh(qc); 4943 return; 4944 } 4945 } 4946 4947 /* read result TF if requested */ 4948 if (qc->flags & ATA_QCFLAG_RESULT_TF) 4949 fill_result_tf(qc); 4950 4951 /* Some commands need post-processing after successful 4952 * completion. 4953 */ 4954 switch (qc->tf.command) { 4955 case ATA_CMD_SET_FEATURES: 4956 if (qc->tf.feature != SETFEATURES_WC_ON && 4957 qc->tf.feature != SETFEATURES_WC_OFF) 4958 break; 4959 /* fall through */ 4960 case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */ 4961 case ATA_CMD_SET_MULTI: /* multi_count changed */ 4962 /* revalidate device */ 4963 ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE; 4964 ata_port_schedule_eh(ap); 4965 break; 4966 4967 case ATA_CMD_SLEEP: 4968 dev->flags |= ATA_DFLAG_SLEEPING; 4969 break; 4970 } 4971 4972 if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) 4973 ata_verify_xfer(qc); 4974 4975 __ata_qc_complete(qc); 4976 } else { 4977 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED) 4978 return; 4979 4980 /* read result TF if failed or requested */ 4981 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF) 4982 fill_result_tf(qc); 4983 4984 __ata_qc_complete(qc); 4985 } 4986 } 4987 4988 /** 4989 * ata_qc_complete_multiple - Complete multiple qcs successfully 4990 * @ap: port in question 4991 * @qc_active: new qc_active mask 4992 * 4993 * Complete in-flight commands. This functions is meant to be 4994 * called from low-level driver's interrupt routine to complete 4995 * requests normally. ap->qc_active and @qc_active is compared 4996 * and commands are completed accordingly. 4997 * 4998 * LOCKING: 4999 * spin_lock_irqsave(host lock) 5000 * 5001 * RETURNS: 5002 * Number of completed commands on success, -errno otherwise. 5003 */ 5004 int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active) 5005 { 5006 int nr_done = 0; 5007 u32 done_mask; 5008 int i; 5009 5010 done_mask = ap->qc_active ^ qc_active; 5011 5012 if (unlikely(done_mask & qc_active)) { 5013 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition " 5014 "(%08x->%08x)\n", ap->qc_active, qc_active); 5015 return -EINVAL; 5016 } 5017 5018 for (i = 0; i < ATA_MAX_QUEUE; i++) { 5019 struct ata_queued_cmd *qc; 5020 5021 if (!(done_mask & (1 << i))) 5022 continue; 5023 5024 if ((qc = ata_qc_from_tag(ap, i))) { 5025 ata_qc_complete(qc); 5026 nr_done++; 5027 } 5028 } 5029 5030 return nr_done; 5031 } 5032 5033 /** 5034 * ata_qc_issue - issue taskfile to device 5035 * @qc: command to issue to device 5036 * 5037 * Prepare an ATA command to submission to device. 5038 * This includes mapping the data into a DMA-able 5039 * area, filling in the S/G table, and finally 5040 * writing the taskfile to hardware, starting the command. 5041 * 5042 * LOCKING: 5043 * spin_lock_irqsave(host lock) 5044 */ 5045 void ata_qc_issue(struct ata_queued_cmd *qc) 5046 { 5047 struct ata_port *ap = qc->ap; 5048 struct ata_link *link = qc->dev->link; 5049 u8 prot = qc->tf.protocol; 5050 5051 /* Make sure only one non-NCQ command is outstanding. The 5052 * check is skipped for old EH because it reuses active qc to 5053 * request ATAPI sense. 
5054 */ 5055 WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag)); 5056 5057 if (ata_is_ncq(prot)) { 5058 WARN_ON_ONCE(link->sactive & (1 << qc->tag)); 5059 5060 if (!link->sactive) 5061 ap->nr_active_links++; 5062 link->sactive |= 1 << qc->tag; 5063 } else { 5064 WARN_ON_ONCE(link->sactive); 5065 5066 ap->nr_active_links++; 5067 link->active_tag = qc->tag; 5068 } 5069 5070 qc->flags |= ATA_QCFLAG_ACTIVE; 5071 ap->qc_active |= 1 << qc->tag; 5072 5073 /* We guarantee to LLDs that they will have at least one 5074 * non-zero sg if the command is a data command. 5075 */ 5076 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes)); 5077 5078 if (ata_is_dma(prot) || (ata_is_pio(prot) && 5079 (ap->flags & ATA_FLAG_PIO_DMA))) 5080 if (ata_sg_setup(qc)) 5081 goto sg_err; 5082 5083 /* if device is sleeping, schedule reset and abort the link */ 5084 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) { 5085 link->eh_info.action |= ATA_EH_RESET; 5086 ata_ehi_push_desc(&link->eh_info, "waking up from sleep"); 5087 ata_link_abort(link); 5088 return; 5089 } 5090 5091 ap->ops->qc_prep(qc); 5092 5093 qc->err_mask |= ap->ops->qc_issue(qc); 5094 if (unlikely(qc->err_mask)) 5095 goto err; 5096 return; 5097 5098 sg_err: 5099 qc->err_mask |= AC_ERR_SYSTEM; 5100 err: 5101 ata_qc_complete(qc); 5102 } 5103 5104 /** 5105 * sata_scr_valid - test whether SCRs are accessible 5106 * @link: ATA link to test SCR accessibility for 5107 * 5108 * Test whether SCRs are accessible for @link. 5109 * 5110 * LOCKING: 5111 * None. 5112 * 5113 * RETURNS: 5114 * 1 if SCRs are accessible, 0 otherwise. 5115 */ 5116 int sata_scr_valid(struct ata_link *link) 5117 { 5118 struct ata_port *ap = link->ap; 5119 5120 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read; 5121 } 5122 5123 /** 5124 * sata_scr_read - read SCR register of the specified port 5125 * @link: ATA link to read SCR for 5126 * @reg: SCR to read 5127 * @val: Place to store read value 5128 * 5129 * Read SCR register @reg of @link into *@val. This function is 5130 * guaranteed to succeed if @link is ap->link, the cable type of 5131 * the port is SATA and the port implements ->scr_read. 5132 * 5133 * LOCKING: 5134 * None if @link is ap->link. Kernel thread context otherwise. 5135 * 5136 * RETURNS: 5137 * 0 on success, negative errno on failure. 5138 */ 5139 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 5140 { 5141 if (ata_is_host_link(link)) { 5142 if (sata_scr_valid(link)) 5143 return link->ap->ops->scr_read(link, reg, val); 5144 return -EOPNOTSUPP; 5145 } 5146 5147 return sata_pmp_scr_read(link, reg, val); 5148 } 5149 5150 /** 5151 * sata_scr_write - write SCR register of the specified port 5152 * @link: ATA link to write SCR for 5153 * @reg: SCR to write 5154 * @val: value to write 5155 * 5156 * Write @val to SCR register @reg of @link. This function is 5157 * guaranteed to succeed if @link is ap->link, the cable type of 5158 * the port is SATA and the port implements ->scr_read. 5159 * 5160 * LOCKING: 5161 * None if @link is ap->link. Kernel thread context otherwise. 5162 * 5163 * RETURNS: 5164 * 0 on success, negative errno on failure. 
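 *
 * Minimal usage sketch (illustrative only), mirroring the SError clear
 * done by ata_std_postreset() earlier in this file:
 *
 *	u32 serror;
 *
 *	if (!sata_scr_read(link, SCR_ERROR, &serror))
 *		sata_scr_write(link, SCR_ERROR, serror);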
5165 */ 5166 int sata_scr_write(struct ata_link *link, int reg, u32 val) 5167 { 5168 if (ata_is_host_link(link)) { 5169 if (sata_scr_valid(link)) 5170 return link->ap->ops->scr_write(link, reg, val); 5171 return -EOPNOTSUPP; 5172 } 5173 5174 return sata_pmp_scr_write(link, reg, val); 5175 } 5176 5177 /** 5178 * sata_scr_write_flush - write SCR register of the specified port and flush 5179 * @link: ATA link to write SCR for 5180 * @reg: SCR to write 5181 * @val: value to write 5182 * 5183 * This function is identical to sata_scr_write() except that this 5184 * function performs flush after writing to the register. 5185 * 5186 * LOCKING: 5187 * None if @link is ap->link. Kernel thread context otherwise. 5188 * 5189 * RETURNS: 5190 * 0 on success, negative errno on failure. 5191 */ 5192 int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) 5193 { 5194 if (ata_is_host_link(link)) { 5195 int rc; 5196 5197 if (sata_scr_valid(link)) { 5198 rc = link->ap->ops->scr_write(link, reg, val); 5199 if (rc == 0) 5200 rc = link->ap->ops->scr_read(link, reg, &val); 5201 return rc; 5202 } 5203 return -EOPNOTSUPP; 5204 } 5205 5206 return sata_pmp_scr_write(link, reg, val); 5207 } 5208 5209 /** 5210 * ata_phys_link_online - test whether the given link is online 5211 * @link: ATA link to test 5212 * 5213 * Test whether @link is online. Note that this function returns 5214 * 0 if online status of @link cannot be obtained, so 5215 * ata_link_online(link) != !ata_link_offline(link). 5216 * 5217 * LOCKING: 5218 * None. 5219 * 5220 * RETURNS: 5221 * True if the port online status is available and online. 5222 */ 5223 bool ata_phys_link_online(struct ata_link *link) 5224 { 5225 u32 sstatus; 5226 5227 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5228 ata_sstatus_online(sstatus)) 5229 return true; 5230 return false; 5231 } 5232 5233 /** 5234 * ata_phys_link_offline - test whether the given link is offline 5235 * @link: ATA link to test 5236 * 5237 * Test whether @link is offline. Note that this function 5238 * returns 0 if offline status of @link cannot be obtained, so 5239 * ata_link_online(link) != !ata_link_offline(link). 5240 * 5241 * LOCKING: 5242 * None. 5243 * 5244 * RETURNS: 5245 * True if the port offline status is available and offline. 5246 */ 5247 bool ata_phys_link_offline(struct ata_link *link) 5248 { 5249 u32 sstatus; 5250 5251 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 && 5252 !ata_sstatus_online(sstatus)) 5253 return true; 5254 return false; 5255 } 5256 5257 /** 5258 * ata_link_online - test whether the given link is online 5259 * @link: ATA link to test 5260 * 5261 * Test whether @link is online. This is identical to 5262 * ata_phys_link_online() when there's no slave link. When 5263 * there's a slave link, this function should only be called on 5264 * the master link and will return true if any of M/S links is 5265 * online. 5266 * 5267 * LOCKING: 5268 * None. 5269 * 5270 * RETURNS: 5271 * True if the port online status is available and online. 5272 */ 5273 bool ata_link_online(struct ata_link *link) 5274 { 5275 struct ata_link *slave = link->ap->slave_link; 5276 5277 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5278 5279 return ata_phys_link_online(link) || 5280 (slave && ata_phys_link_online(slave)); 5281 } 5282 5283 /** 5284 * ata_link_offline - test whether the given link is offline 5285 * @link: ATA link to test 5286 * 5287 * Test whether @link is offline. This is identical to 5288 * ata_phys_link_offline() when there's no slave link. 
When 5289 * there's a slave link, this function should only be called on 5290 * the master link and will return true if both M/S links are 5291 * offline. 5292 * 5293 * LOCKING: 5294 * None. 5295 * 5296 * RETURNS: 5297 * True if the port offline status is available and offline. 5298 */ 5299 bool ata_link_offline(struct ata_link *link) 5300 { 5301 struct ata_link *slave = link->ap->slave_link; 5302 5303 WARN_ON(link == slave); /* shouldn't be called on slave link */ 5304 5305 return ata_phys_link_offline(link) && 5306 (!slave || ata_phys_link_offline(slave)); 5307 } 5308 5309 #ifdef CONFIG_PM 5310 static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg, 5311 unsigned int action, unsigned int ehi_flags, 5312 int wait) 5313 { 5314 unsigned long flags; 5315 int i, rc; 5316 5317 for (i = 0; i < host->n_ports; i++) { 5318 struct ata_port *ap = host->ports[i]; 5319 struct ata_link *link; 5320 5321 /* Previous resume operation might still be in 5322 * progress. Wait for PM_PENDING to clear. 5323 */ 5324 if (ap->pflags & ATA_PFLAG_PM_PENDING) { 5325 ata_port_wait_eh(ap); 5326 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5327 } 5328 5329 /* request PM ops to EH */ 5330 spin_lock_irqsave(ap->lock, flags); 5331 5332 ap->pm_mesg = mesg; 5333 if (wait) { 5334 rc = 0; 5335 ap->pm_result = &rc; 5336 } 5337 5338 ap->pflags |= ATA_PFLAG_PM_PENDING; 5339 ata_for_each_link(link, ap, HOST_FIRST) { 5340 link->eh_info.action |= action; 5341 link->eh_info.flags |= ehi_flags; 5342 } 5343 5344 ata_port_schedule_eh(ap); 5345 5346 spin_unlock_irqrestore(ap->lock, flags); 5347 5348 /* wait and check result */ 5349 if (wait) { 5350 ata_port_wait_eh(ap); 5351 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING); 5352 if (rc) 5353 return rc; 5354 } 5355 } 5356 5357 return 0; 5358 } 5359 5360 /** 5361 * ata_host_suspend - suspend host 5362 * @host: host to suspend 5363 * @mesg: PM message 5364 * 5365 * Suspend @host. Actual operation is performed by EH. This 5366 * function requests EH to perform PM operations and waits for EH 5367 * to finish. 5368 * 5369 * LOCKING: 5370 * Kernel thread context (may sleep). 5371 * 5372 * RETURNS: 5373 * 0 on success, -errno on failure. 5374 */ 5375 int ata_host_suspend(struct ata_host *host, pm_message_t mesg) 5376 { 5377 int rc; 5378 5379 /* 5380 * disable link pm on all ports before requesting 5381 * any pm activity 5382 */ 5383 ata_lpm_enable(host); 5384 5385 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1); 5386 if (rc == 0) 5387 host->dev->power.power_state = mesg; 5388 return rc; 5389 } 5390 5391 /** 5392 * ata_host_resume - resume host 5393 * @host: host to resume 5394 * 5395 * Resume @host. Actual operation is performed by EH. This 5396 * function requests EH to perform PM operations and returns. 5397 * Note that all resume operations are performed in parallel. 5398 * 5399 * LOCKING: 5400 * Kernel thread context (may sleep). 5401 */ 5402 void ata_host_resume(struct ata_host *host) 5403 { 5404 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET, 5405 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0); 5406 host->dev->power.power_state = PMSG_ON; 5407 5408 /* reenable link pm */ 5409 ata_lpm_disable(host); 5410 } 5411 #endif 5412 5413 /** 5414 * ata_port_start - Set port up for DMA. 5415 * @ap: Port to initialize 5416 * 5417 * Called just after data structures for each port are 5418 * initialized. Allocates space for PRD table. 5419 * 5420 * May be used as the port_start() entry in ata_port_operations. 5421 * 5422 * LOCKING: 5423 * Inherited from caller.
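 *
 * Illustrative use only: an LLD that wants the default PRD allocation
 * can point its port_start hook at this helper, e.g.
 *
 *	.port_start	= ata_port_start,
 *
 * in its ata_port_operations (hypothetical driver, not a requirement).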
5424 */ 5425 int ata_port_start(struct ata_port *ap) 5426 { 5427 struct device *dev = ap->dev; 5428 5429 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, 5430 GFP_KERNEL); 5431 if (!ap->prd) 5432 return -ENOMEM; 5433 5434 return 0; 5435 } 5436 5437 /** 5438 * ata_dev_init - Initialize an ata_device structure 5439 * @dev: Device structure to initialize 5440 * 5441 * Initialize @dev in preparation for probing. 5442 * 5443 * LOCKING: 5444 * Inherited from caller. 5445 */ 5446 void ata_dev_init(struct ata_device *dev) 5447 { 5448 struct ata_link *link = ata_dev_phys_link(dev); 5449 struct ata_port *ap = link->ap; 5450 unsigned long flags; 5451 5452 /* SATA spd limit is bound to the attached device, reset together */ 5453 link->sata_spd_limit = link->hw_sata_spd_limit; 5454 link->sata_spd = 0; 5455 5456 /* High bits of dev->flags are used to record warm plug 5457 * requests which occur asynchronously. Synchronize using 5458 * host lock. 5459 */ 5460 spin_lock_irqsave(ap->lock, flags); 5461 dev->flags &= ~ATA_DFLAG_INIT_MASK; 5462 dev->horkage = 0; 5463 spin_unlock_irqrestore(ap->lock, flags); 5464 5465 memset((void *)dev + ATA_DEVICE_CLEAR_BEGIN, 0, 5466 ATA_DEVICE_CLEAR_END - ATA_DEVICE_CLEAR_BEGIN); 5467 dev->pio_mask = UINT_MAX; 5468 dev->mwdma_mask = UINT_MAX; 5469 dev->udma_mask = UINT_MAX; 5470 } 5471 5472 /** 5473 * ata_link_init - Initialize an ata_link structure 5474 * @ap: ATA port link is attached to 5475 * @link: Link structure to initialize 5476 * @pmp: Port multiplier port number 5477 * 5478 * Initialize @link. 5479 * 5480 * LOCKING: 5481 * Kernel thread context (may sleep) 5482 */ 5483 void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp) 5484 { 5485 int i; 5486 5487 /* clear everything except for devices */ 5488 memset(link, 0, offsetof(struct ata_link, device[0])); 5489 5490 link->ap = ap; 5491 link->pmp = pmp; 5492 link->active_tag = ATA_TAG_POISON; 5493 link->hw_sata_spd_limit = UINT_MAX; 5494 5495 /* can't use iterator, ap isn't initialized yet */ 5496 for (i = 0; i < ATA_MAX_DEVICES; i++) { 5497 struct ata_device *dev = &link->device[i]; 5498 5499 dev->link = link; 5500 dev->devno = dev - link->device; 5501 ata_dev_init(dev); 5502 } 5503 } 5504 5505 /** 5506 * sata_link_init_spd - Initialize link->sata_spd_limit 5507 * @link: Link to configure sata_spd_limit for 5508 * 5509 * Initialize @link->[hw_]sata_spd_limit to the currently 5510 * configured value. 5511 * 5512 * LOCKING: 5513 * Kernel thread context (may sleep). 5514 * 5515 * RETURNS: 5516 * 0 on success, -errno on failure. 5517 */ 5518 int sata_link_init_spd(struct ata_link *link) 5519 { 5520 u8 spd; 5521 int rc; 5522 5523 rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol); 5524 if (rc) 5525 return rc; 5526 5527 spd = (link->saved_scontrol >> 4) & 0xf; 5528 if (spd) 5529 link->hw_sata_spd_limit &= (1 << spd) - 1; 5530 5531 ata_force_link_limits(link); 5532 5533 link->sata_spd_limit = link->hw_sata_spd_limit; 5534 5535 return 0; 5536 } 5537 5538 /** 5539 * ata_port_alloc - allocate and initialize basic ATA port resources 5540 * @host: ATA host this allocated port belongs to 5541 * 5542 * Allocate and initialize basic ATA port resources. 5543 * 5544 * RETURNS: 5545 * Allocate ATA port on success, NULL on failure. 5546 * 5547 * LOCKING: 5548 * Inherited from calling layer (may sleep). 
5549 */ 5550 struct ata_port *ata_port_alloc(struct ata_host *host) 5551 { 5552 struct ata_port *ap; 5553 5554 DPRINTK("ENTER\n"); 5555 5556 ap = kzalloc(sizeof(*ap), GFP_KERNEL); 5557 if (!ap) 5558 return NULL; 5559 5560 ap->pflags |= ATA_PFLAG_INITIALIZING; 5561 ap->lock = &host->lock; 5562 ap->flags = ATA_FLAG_DISABLED; 5563 ap->print_id = -1; 5564 ap->ctl = ATA_DEVCTL_OBS; 5565 ap->host = host; 5566 ap->dev = host->dev; 5567 ap->last_ctl = 0xFF; 5568 5569 #if defined(ATA_VERBOSE_DEBUG) 5570 /* turn on all debugging levels */ 5571 ap->msg_enable = 0x00FF; 5572 #elif defined(ATA_DEBUG) 5573 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR; 5574 #else 5575 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN; 5576 #endif 5577 5578 #ifdef CONFIG_ATA_SFF 5579 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task); 5580 #else 5581 INIT_DELAYED_WORK(&ap->port_task, NULL); 5582 #endif 5583 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug); 5584 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan); 5585 INIT_LIST_HEAD(&ap->eh_done_q); 5586 init_waitqueue_head(&ap->eh_wait_q); 5587 init_completion(&ap->park_req_pending); 5588 init_timer_deferrable(&ap->fastdrain_timer); 5589 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn; 5590 ap->fastdrain_timer.data = (unsigned long)ap; 5591 5592 ap->cbl = ATA_CBL_NONE; 5593 5594 ata_link_init(ap, &ap->link, 0); 5595 5596 #ifdef ATA_IRQ_TRAP 5597 ap->stats.unhandled_irq = 1; 5598 ap->stats.idle_irq = 1; 5599 #endif 5600 return ap; 5601 } 5602 5603 static void ata_host_release(struct device *gendev, void *res) 5604 { 5605 struct ata_host *host = dev_get_drvdata(gendev); 5606 int i; 5607 5608 for (i = 0; i < host->n_ports; i++) { 5609 struct ata_port *ap = host->ports[i]; 5610 5611 if (!ap) 5612 continue; 5613 5614 if (ap->scsi_host) 5615 scsi_host_put(ap->scsi_host); 5616 5617 kfree(ap->pmp_link); 5618 kfree(ap->slave_link); 5619 kfree(ap); 5620 host->ports[i] = NULL; 5621 } 5622 5623 dev_set_drvdata(gendev, NULL); 5624 } 5625 5626 /** 5627 * ata_host_alloc - allocate and init basic ATA host resources 5628 * @dev: generic device this host is associated with 5629 * @max_ports: maximum number of ATA ports associated with this host 5630 * 5631 * Allocate and initialize basic ATA host resources. LLD calls 5632 * this function to allocate a host, initializes it fully and 5633 * attaches it using ata_host_register(). 5634 * 5635 * @max_ports ports are allocated and host->n_ports is 5636 * initialized to @max_ports. The caller is allowed to decrease 5637 * host->n_ports before calling ata_host_register(). The unused 5638 * ports will be automatically freed on registration. 5639 * 5640 * RETURNS: 5641 * Allocate ATA host on success, NULL on failure. 5642 * 5643 * LOCKING: 5644 * Inherited from calling layer (may sleep). 
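 *
 * Rough LLD-side sketch (illustrative; "n_ports" and "my_sht" are
 * hypothetical names, error handling omitted):
 *
 *	host = ata_host_alloc(&pdev->dev, n_ports);
 *	... set up host->ports[i]->ops, transfer masks, iomaps ...
 *	rc = ata_host_start(host);
 *	rc = ata_host_register(host, &my_sht);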
5645 */ 5646 struct ata_host *ata_host_alloc(struct device *dev, int max_ports) 5647 { 5648 struct ata_host *host; 5649 size_t sz; 5650 int i; 5651 5652 DPRINTK("ENTER\n"); 5653 5654 if (!devres_open_group(dev, NULL, GFP_KERNEL)) 5655 return NULL; 5656 5657 /* alloc a container for our list of ATA ports (buses) */ 5658 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *); 5660 host = devres_alloc(ata_host_release, sz, GFP_KERNEL); 5661 if (!host) 5662 goto err_out; 5663 5664 devres_add(dev, host); 5665 dev_set_drvdata(dev, host); 5666 5667 spin_lock_init(&host->lock); 5668 host->dev = dev; 5669 host->n_ports = max_ports; 5670 5671 /* allocate ports bound to this host */ 5672 for (i = 0; i < max_ports; i++) { 5673 struct ata_port *ap; 5674 5675 ap = ata_port_alloc(host); 5676 if (!ap) 5677 goto err_out; 5678 5679 ap->port_no = i; 5680 host->ports[i] = ap; 5681 } 5682 5683 devres_remove_group(dev, NULL); 5684 return host; 5685 5686 err_out: 5687 devres_release_group(dev, NULL); 5688 return NULL; 5689 } 5690 5691 /** 5692 * ata_host_alloc_pinfo - alloc host and init with port_info array 5693 * @dev: generic device this host is associated with 5694 * @ppi: array of ATA port_info to initialize host with 5695 * @n_ports: number of ATA ports attached to this host 5696 * 5697 * Allocate ATA host and initialize with info from @ppi. If NULL 5698 * terminated, @ppi may contain fewer entries than @n_ports. The 5699 * last entry will be used for the remaining ports. 5700 * 5701 * RETURNS: 5702 * Allocate ATA host on success, NULL on failure. 5703 * 5704 * LOCKING: 5705 * Inherited from calling layer (may sleep). 5706 */ 5707 struct ata_host *ata_host_alloc_pinfo(struct device *dev, 5708 const struct ata_port_info * const * ppi, 5709 int n_ports) 5710 { 5711 const struct ata_port_info *pi; 5712 struct ata_host *host; 5713 int i, j; 5714 5715 host = ata_host_alloc(dev, n_ports); 5716 if (!host) 5717 return NULL; 5718 5719 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) { 5720 struct ata_port *ap = host->ports[i]; 5721 5722 if (ppi[j]) 5723 pi = ppi[j++]; 5724 5725 ap->pio_mask = pi->pio_mask; 5726 ap->mwdma_mask = pi->mwdma_mask; 5727 ap->udma_mask = pi->udma_mask; 5728 ap->flags |= pi->flags; 5729 ap->link.flags |= pi->link_flags; 5730 ap->ops = pi->port_ops; 5731 5732 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops)) 5733 host->ops = pi->port_ops; 5734 } 5735 5736 return host; 5737 } 5738 5739 /** 5740 * ata_slave_link_init - initialize slave link 5741 * @ap: port to initialize slave link for 5742 * 5743 * Create and initialize slave link for @ap. This enables slave 5744 * link handling on the port. 5745 * 5746 * In libata, a port contains links and a link contains devices. 5747 * There is a single host link but if a PMP is attached to it, 5748 * there can be multiple fan-out links. On SATA, there's usually 5749 * a single device connected to a link but PATA and SATA 5750 * controllers emulating a TF-based interface can have two - master 5751 * and slave. 5752 * 5753 * However, there are a few controllers which don't fit into this 5754 * abstraction too well - SATA controllers which emulate TF 5755 * interface with both master and slave devices but also have 5756 * separate SCR register sets for each device. These controllers 5757 * need separate links for physical link handling 5758 * (e.g. onlineness, link speed) but should be treated like a 5759 * traditional M/S controller for everything else (e.g.
command 5760 * issue, softreset). 5761 * 5762 * slave_link is libata's way of handling this class of 5763 * controllers without impacting the core layer too much. For 5764 * anything other than physical link handling, the default host 5765 * link is used for both master and slave. For physical link 5766 * handling, separate @ap->slave_link is used. All dirty details 5767 * are implemented inside libata core layer. From LLD's POV, the 5768 * only difference is that prereset, hardreset and postreset are 5769 * called once more for the slave link, so the reset sequence 5770 * looks like the following. 5771 * 5772 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> 5773 * softreset(M) -> postreset(M) -> postreset(S) 5774 * 5775 * Note that softreset is called only for the master. Softreset 5776 * resets both M/S by definition, so SRST on master should handle 5777 * both (the standard method will work just fine). 5778 * 5779 * LOCKING: 5780 * Should be called before host is registered. 5781 * 5782 * RETURNS: 5783 * 0 on success, -errno on failure. 5784 */ 5785 int ata_slave_link_init(struct ata_port *ap) 5786 { 5787 struct ata_link *link; 5788 5789 WARN_ON(ap->slave_link); 5790 WARN_ON(ap->flags & ATA_FLAG_PMP); 5791 5792 link = kzalloc(sizeof(*link), GFP_KERNEL); 5793 if (!link) 5794 return -ENOMEM; 5795 5796 ata_link_init(ap, link, 1); 5797 ap->slave_link = link; 5798 return 0; 5799 } 5800 5801 static void ata_host_stop(struct device *gendev, void *res) 5802 { 5803 struct ata_host *host = dev_get_drvdata(gendev); 5804 int i; 5805 5806 WARN_ON(!(host->flags & ATA_HOST_STARTED)); 5807 5808 for (i = 0; i < host->n_ports; i++) { 5809 struct ata_port *ap = host->ports[i]; 5810 5811 if (ap->ops->port_stop) 5812 ap->ops->port_stop(ap); 5813 } 5814 5815 if (host->ops->host_stop) 5816 host->ops->host_stop(host); 5817 } 5818 5819 /** 5820 * ata_finalize_port_ops - finalize ata_port_operations 5821 * @ops: ata_port_operations to finalize 5822 * 5823 * An ata_port_operations can inherit from another ops and that 5824 * ops can again inherit from another. This can go on as many 5825 * times as necessary as long as there is no loop in the 5826 * inheritance chain. 5827 * 5828 * Ops tables are finalized when the host is started. NULL or 5829 * unspecified entries are inherited from the closest ancestor 5830 * which has the method and the entry is populated with it. 5831 * After finalization, the ops table directly points to all the 5832 * methods and ->inherits is no longer necessary and cleared. 5833 * 5834 * Using ATA_OP_NULL, inheriting ops can force a method to NULL. 5835 * 5836 * LOCKING: 5837 * None. 5838 */ 5839 static void ata_finalize_port_ops(struct ata_port_operations *ops) 5840 { 5841 static DEFINE_SPINLOCK(lock); 5842 const struct ata_port_operations *cur; 5843 void **begin = (void **)ops; 5844 void **end = (void **)&ops->inherits; 5845 void **pp; 5846 5847 if (!ops || !ops->inherits) 5848 return; 5849 5850 spin_lock(&lock); 5851 5852 for (cur = ops->inherits; cur; cur = cur->inherits) { 5853 void **inherit = (void **)cur; 5854 5855 for (pp = begin; pp < end; pp++, inherit++) 5856 if (!*pp) 5857 *pp = *inherit; 5858 } 5859 5860 for (pp = begin; pp < end; pp++) 5861 if (IS_ERR(*pp)) 5862 *pp = NULL; 5863 5864 ops->inherits = NULL; 5865 5866 spin_unlock(&lock); 5867 } 5868 5869 /** 5870 * ata_host_start - start and freeze ports of an ATA host 5871 * @host: ATA host to start ports for 5872 * 5873 * Start and then freeze ports of @host.
Started status is 5874 * recorded in host->flags, so this function can be called 5875 * multiple times. Ports are guaranteed to get started only 5876 * once. If host->ops isn't initialized yet, it is set to the 5877 * first non-dummy port ops. 5878 * 5879 * LOCKING: 5880 * Inherited from calling layer (may sleep). 5881 * 5882 * RETURNS: 5883 * 0 if all ports are started successfully, -errno otherwise. 5884 */ 5885 int ata_host_start(struct ata_host *host) 5886 { 5887 int have_stop = 0; 5888 void *start_dr = NULL; 5889 int i, rc; 5890 5891 if (host->flags & ATA_HOST_STARTED) 5892 return 0; 5893 5894 ata_finalize_port_ops(host->ops); 5895 5896 for (i = 0; i < host->n_ports; i++) { 5897 struct ata_port *ap = host->ports[i]; 5898 5899 ata_finalize_port_ops(ap->ops); 5900 5901 if (!host->ops && !ata_port_is_dummy(ap)) 5902 host->ops = ap->ops; 5903 5904 if (ap->ops->port_stop) 5905 have_stop = 1; 5906 } 5907 5908 if (host->ops->host_stop) 5909 have_stop = 1; 5910 5911 if (have_stop) { 5912 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL); 5913 if (!start_dr) 5914 return -ENOMEM; 5915 } 5916 5917 for (i = 0; i < host->n_ports; i++) { 5918 struct ata_port *ap = host->ports[i]; 5919 5920 if (ap->ops->port_start) { 5921 rc = ap->ops->port_start(ap); 5922 if (rc) { 5923 if (rc != -ENODEV) 5924 dev_printk(KERN_ERR, host->dev, 5925 "failed to start port %d " 5926 "(errno=%d)\n", i, rc); 5927 goto err_out; 5928 } 5929 } 5930 ata_eh_freeze_port(ap); 5931 } 5932 5933 if (start_dr) 5934 devres_add(host->dev, start_dr); 5935 host->flags |= ATA_HOST_STARTED; 5936 return 0; 5937 5938 err_out: 5939 while (--i >= 0) { 5940 struct ata_port *ap = host->ports[i]; 5941 5942 if (ap->ops->port_stop) 5943 ap->ops->port_stop(ap); 5944 } 5945 devres_free(start_dr); 5946 return rc; 5947 } 5948 5949 /** 5950 * ata_host_init - Initialize a host struct 5951 * @host: host to initialize 5952 * @dev: device host is attached to 5953 * @flags: host flags 5954 * @ops: port_ops 5955 * 5956 * LOCKING: 5957 * PCI/etc. bus probe sem. 5958 * 5959 */ 5960 /* KILLME - the only user left is ipr */ 5961 void ata_host_init(struct ata_host *host, struct device *dev, 5962 unsigned long flags, struct ata_port_operations *ops) 5963 { 5964 spin_lock_init(&host->lock); 5965 host->dev = dev; 5966 host->flags = flags; 5967 host->ops = ops; 5968 } 5969 5970 5971 static void async_port_probe(void *data, async_cookie_t cookie) 5972 { 5973 int rc; 5974 struct ata_port *ap = data; 5975 5976 /* 5977 * If we're not allowed to scan this host in parallel, 5978 * we need to wait until all previous scans have completed 5979 * before going further. 5980 * Jeff Garzik says this is only within a controller, so we 5981 * don't need to wait for port 0, only for later ports.
5982 */ 5983 if (!(ap->host->flags & ATA_HOST_PARALLEL_SCAN) && ap->port_no != 0) 5984 async_synchronize_cookie(cookie); 5985 5986 /* probe */ 5987 if (ap->ops->error_handler) { 5988 struct ata_eh_info *ehi = &ap->link.eh_info; 5989 unsigned long flags; 5990 5991 ata_port_probe(ap); 5992 5993 /* kick EH for boot probing */ 5994 spin_lock_irqsave(ap->lock, flags); 5995 5996 ehi->probe_mask |= ATA_ALL_DEVICES; 5997 ehi->action |= ATA_EH_RESET | ATA_EH_LPM; 5998 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET; 5999 6000 ap->pflags &= ~ATA_PFLAG_INITIALIZING; 6001 ap->pflags |= ATA_PFLAG_LOADING; 6002 ata_port_schedule_eh(ap); 6003 6004 spin_unlock_irqrestore(ap->lock, flags); 6005 6006 /* wait for EH to finish */ 6007 ata_port_wait_eh(ap); 6008 } else { 6009 DPRINTK("ata%u: bus probe begin\n", ap->print_id); 6010 rc = ata_bus_probe(ap); 6011 DPRINTK("ata%u: bus probe end\n", ap->print_id); 6012 6013 if (rc) { 6014 /* FIXME: do something useful here? 6015 * Current libata behavior will 6016 * tear down everything when 6017 * the module is removed 6018 * or the h/w is unplugged. 6019 */ 6020 } 6021 } 6022 6023 /* in order to keep device order, we need to synchronize at this point */ 6024 async_synchronize_cookie(cookie); 6025 6026 ata_scsi_scan_host(ap, 1); 6027 6028 } 6029 /** 6030 * ata_host_register - register initialized ATA host 6031 * @host: ATA host to register 6032 * @sht: template for SCSI host 6033 * 6034 * Register initialized ATA host. @host is allocated using 6035 * ata_host_alloc() and fully initialized by LLD. This function 6036 * starts ports, registers @host with ATA and SCSI layers and 6037 * probes registered devices. 6038 * 6039 * LOCKING: 6040 * Inherited from calling layer (may sleep). 6041 * 6042 * RETURNS: 6043 * 0 on success, -errno otherwise. 6044 */ 6045 int ata_host_register(struct ata_host *host, struct scsi_host_template *sht) 6046 { 6047 int i, rc; 6048 6049 /* host must have been started */ 6050 if (!(host->flags & ATA_HOST_STARTED)) { 6051 dev_printk(KERN_ERR, host->dev, 6052 "BUG: trying to register unstarted host\n"); 6053 WARN_ON(1); 6054 return -EINVAL; 6055 } 6056 6057 /* Blow away unused ports. This happens when LLD can't 6058 * determine the exact number of ports to allocate at 6059 * allocation time. 6060 */ 6061 for (i = host->n_ports; host->ports[i]; i++) 6062 kfree(host->ports[i]); 6063 6064 /* give ports names and add SCSI hosts */ 6065 for (i = 0; i < host->n_ports; i++) 6066 host->ports[i]->print_id = ata_print_id++; 6067 6068 rc = ata_scsi_add_hosts(host, sht); 6069 if (rc) 6070 return rc; 6071 6072 /* associate with ACPI nodes */ 6073 ata_acpi_associate(host); 6074 6075 /* set cable, sata_spd_limit and report */ 6076 for (i = 0; i < host->n_ports; i++) { 6077 struct ata_port *ap = host->ports[i]; 6078 unsigned long xfer_mask; 6079 6080 /* set SATA cable type if still unset */ 6081 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA)) 6082 ap->cbl = ATA_CBL_SATA; 6083 6084 /* init sata_spd_limit to the current value */ 6085 sata_link_init_spd(&ap->link); 6086 if (ap->slave_link) 6087 sata_link_init_spd(ap->slave_link); 6088 6089 /* print per-port info to dmesg */ 6090 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, 6091 ap->udma_mask); 6092 6093 if (!ata_port_is_dummy(ap)) { 6094 ata_port_printk(ap, KERN_INFO, 6095 "%cATA max %s %s\n", 6096 (ap->flags & ATA_FLAG_SATA) ?
'S' : 'P', 6097 ata_mode_string(xfer_mask), 6098 ap->link.eh_info.desc); 6099 ata_ehi_clear_desc(&ap->link.eh_info); 6100 } else 6101 ata_port_printk(ap, KERN_INFO, "DUMMY\n"); 6102 } 6103 6104 /* perform each probe asynchronously */ 6105 DPRINTK("probe begin\n"); 6106 for (i = 0; i < host->n_ports; i++) { 6107 struct ata_port *ap = host->ports[i]; 6108 async_schedule(async_port_probe, ap); 6109 } 6110 DPRINTK("probe end\n"); 6111 6112 return 0; 6113 } 6114 6115 /** 6116 * ata_host_activate - start host, request IRQ and register it 6117 * @host: target ATA host 6118 * @irq: IRQ to request 6119 * @irq_handler: irq_handler used when requesting IRQ 6120 * @irq_flags: irq_flags used when requesting IRQ 6121 * @sht: scsi_host_template to use when registering the host 6122 * 6123 * After allocating an ATA host and initializing it, most libata 6124 * LLDs perform three steps to activate the host - start host, 6125 * request IRQ and register it. This helper takes necessary 6126 * arguments and performs the three steps in one go. (An illustrative, non-normative usage sketch appears at the end of this file.) 6127 * 6128 * An invalid IRQ skips the IRQ registration and expects the host to 6129 * have set polling mode on the port. In this case, @irq_handler 6130 * should be NULL. 6131 * 6132 * LOCKING: 6133 * Inherited from calling layer (may sleep). 6134 * 6135 * RETURNS: 6136 * 0 on success, -errno otherwise. 6137 */ 6138 int ata_host_activate(struct ata_host *host, int irq, 6139 irq_handler_t irq_handler, unsigned long irq_flags, 6140 struct scsi_host_template *sht) 6141 { 6142 int i, rc; 6143 6144 rc = ata_host_start(host); 6145 if (rc) 6146 return rc; 6147 6148 /* Special case for polling mode */ 6149 if (!irq) { 6150 WARN_ON(irq_handler); 6151 return ata_host_register(host, sht); 6152 } 6153 6154 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags, 6155 dev_driver_string(host->dev), host); 6156 if (rc) 6157 return rc; 6158 6159 for (i = 0; i < host->n_ports; i++) 6160 ata_port_desc(host->ports[i], "irq %d", irq); 6161 6162 rc = ata_host_register(host, sht); 6163 /* if failed, just free the IRQ and leave ports alone */ 6164 if (rc) 6165 devm_free_irq(host->dev, irq, host); 6166 6167 return rc; 6168 } 6169 6170 /** 6171 * ata_port_detach - Detach ATA port in preparation for device removal 6172 * @ap: ATA port to be detached 6173 * 6174 * Detach all ATA devices and the associated SCSI devices of @ap; 6175 * then, remove the associated SCSI host. @ap is guaranteed to 6176 * be quiescent on return from this function. 6177 * 6178 * LOCKING: 6179 * Kernel thread context (may sleep). 6180 */ 6181 static void ata_port_detach(struct ata_port *ap) 6182 { 6183 unsigned long flags; 6184 6185 if (!ap->ops->error_handler) 6186 goto skip_eh; 6187 6188 /* tell EH we're leaving & flush EH */ 6189 spin_lock_irqsave(ap->lock, flags); 6190 ap->pflags |= ATA_PFLAG_UNLOADING; 6191 ata_port_schedule_eh(ap); 6192 spin_unlock_irqrestore(ap->lock, flags); 6193 6194 /* wait till EH commits suicide */ 6195 ata_port_wait_eh(ap); 6196 6197 /* it better be dead now */ 6198 WARN_ON(!(ap->pflags & ATA_PFLAG_UNLOADED)); 6199 6200 cancel_rearming_delayed_work(&ap->hotplug_task); 6201 6202 skip_eh: 6203 /* remove the associated SCSI host */ 6204 scsi_remove_host(ap->scsi_host); 6205 } 6206 6207 /** 6208 * ata_host_detach - Detach all ports of an ATA host 6209 * @host: Host to detach 6210 * 6211 * Detach all ports of @host. 6212 * 6213 * LOCKING: 6214 * Kernel thread context (may sleep).
6215 */ 6216 void ata_host_detach(struct ata_host *host) 6217 { 6218 int i; 6219 6220 for (i = 0; i < host->n_ports; i++) 6221 ata_port_detach(host->ports[i]); 6222 6223 /* the host is dead now, dissociate ACPI */ 6224 ata_acpi_dissociate(host); 6225 } 6226 6227 #ifdef CONFIG_PCI 6228 6229 /** 6230 * ata_pci_remove_one - PCI layer callback for device removal 6231 * @pdev: PCI device that was removed 6232 * 6233 * PCI layer indicates to libata via this hook that hot-unplug or 6234 * module unload event has occurred. Detach all ports. Resource 6235 * release is handled via devres. 6236 * 6237 * LOCKING: 6238 * Inherited from PCI layer (may sleep). 6239 */ 6240 void ata_pci_remove_one(struct pci_dev *pdev) 6241 { 6242 struct device *dev = &pdev->dev; 6243 struct ata_host *host = dev_get_drvdata(dev); 6244 6245 ata_host_detach(host); 6246 } 6247 6248 /* move to PCI subsystem */ 6249 int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits) 6250 { 6251 unsigned long tmp = 0; 6252 6253 switch (bits->width) { 6254 case 1: { 6255 u8 tmp8 = 0; 6256 pci_read_config_byte(pdev, bits->reg, &tmp8); 6257 tmp = tmp8; 6258 break; 6259 } 6260 case 2: { 6261 u16 tmp16 = 0; 6262 pci_read_config_word(pdev, bits->reg, &tmp16); 6263 tmp = tmp16; 6264 break; 6265 } 6266 case 4: { 6267 u32 tmp32 = 0; 6268 pci_read_config_dword(pdev, bits->reg, &tmp32); 6269 tmp = tmp32; 6270 break; 6271 } 6272 6273 default: 6274 return -EINVAL; 6275 } 6276 6277 tmp &= bits->mask; 6278 6279 return (tmp == bits->val) ? 1 : 0; 6280 } 6281 6282 #ifdef CONFIG_PM 6283 void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg) 6284 { 6285 pci_save_state(pdev); 6286 pci_disable_device(pdev); 6287 6288 if (mesg.event & PM_EVENT_SLEEP) 6289 pci_set_power_state(pdev, PCI_D3hot); 6290 } 6291 6292 int ata_pci_device_do_resume(struct pci_dev *pdev) 6293 { 6294 int rc; 6295 6296 pci_set_power_state(pdev, PCI_D0); 6297 pci_restore_state(pdev); 6298 6299 rc = pcim_enable_device(pdev); 6300 if (rc) { 6301 dev_printk(KERN_ERR, &pdev->dev, 6302 "failed to enable device after resume (%d)\n", rc); 6303 return rc; 6304 } 6305 6306 pci_set_master(pdev); 6307 return 0; 6308 } 6309 6310 int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg) 6311 { 6312 struct ata_host *host = dev_get_drvdata(&pdev->dev); 6313 int rc = 0; 6314 6315 rc = ata_host_suspend(host, mesg); 6316 if (rc) 6317 return rc; 6318 6319 ata_pci_device_do_suspend(pdev, mesg); 6320 6321 return 0; 6322 } 6323 6324 int ata_pci_device_resume(struct pci_dev *pdev) 6325 { 6326 struct ata_host *host = dev_get_drvdata(&pdev->dev); 6327 int rc; 6328 6329 rc = ata_pci_device_do_resume(pdev); 6330 if (rc == 0) 6331 ata_host_resume(host); 6332 return rc; 6333 } 6334 #endif /* CONFIG_PM */ 6335 6336 #endif /* CONFIG_PCI */ 6337 6338 static int __init ata_parse_force_one(char **cur, 6339 struct ata_force_ent *force_ent, 6340 const char **reason) 6341 { 6342 /* FIXME: Currently, there's no way to tag init const data and 6343 * using __initdata causes build failure on some versions of 6344 * gcc. Once __initdataconst is implemented, add const to the 6345 * following structure. 
6346 */ 6347 static struct ata_force_param force_tbl[] __initdata = { 6348 { "40c", .cbl = ATA_CBL_PATA40 }, 6349 { "80c", .cbl = ATA_CBL_PATA80 }, 6350 { "short40c", .cbl = ATA_CBL_PATA40_SHORT }, 6351 { "unk", .cbl = ATA_CBL_PATA_UNK }, 6352 { "ign", .cbl = ATA_CBL_PATA_IGN }, 6353 { "sata", .cbl = ATA_CBL_SATA }, 6354 { "1.5Gbps", .spd_limit = 1 }, 6355 { "3.0Gbps", .spd_limit = 2 }, 6356 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ }, 6357 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ }, 6358 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) }, 6359 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) }, 6360 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) }, 6361 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) }, 6362 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) }, 6363 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) }, 6364 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) }, 6365 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) }, 6366 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) }, 6367 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) }, 6368 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) }, 6369 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) }, 6370 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6371 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6372 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) }, 6373 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6374 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6375 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) }, 6376 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6377 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6378 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) }, 6379 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6380 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6381 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) }, 6382 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6383 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6384 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) }, 6385 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6386 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6387 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) }, 6388 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6389 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6390 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) }, 6391 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) }, 6392 { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, 6393 { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, 6394 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, 6395 }; 6396 char *start = *cur, *p = *cur; 6397 char *id, *val, *endp; 6398 const struct ata_force_param *match_fp = NULL; 6399 int nr_matches = 0, i; 6400 6401 /* find where this param ends and update *cur */ 6402 while (*p != '\0' && *p != ',') 6403 p++; 6404 6405 if (*p == '\0') 6406 *cur = p; 6407 else 6408 *cur = p + 1; 6409 6410 *p = '\0'; 6411 6412 /* parse */ 6413 p = strchr(start, ':'); 6414 if (!p) { 6415 val = strstrip(start); 6416 goto parse_val; 6417 } 6418 *p = '\0'; 6419 6420 id = strstrip(start); 6421 val = strstrip(p + 1); 6422 6423 /* parse id */ 6424 p = strchr(id, '.'); 6425 if (p) { 6426 *p++ = '\0'; 6427 force_ent->device = simple_strtoul(p, &endp, 10); 6428 if (p == endp || *endp != '\0') { 6429 *reason = "invalid device"; 6430 return -EINVAL; 6431 } 6432 } 6433 6434 force_ent->port = simple_strtoul(id, &endp, 10); 6435 if (id == endp || *endp != '\0') { 6436 *reason =
"invalid port/link"; 6437 return -EINVAL; 6438 } 6439 6440 parse_val: 6441 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */ 6442 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) { 6443 const struct ata_force_param *fp = &force_tbl[i]; 6444 6445 if (strncasecmp(val, fp->name, strlen(val))) 6446 continue; 6447 6448 nr_matches++; 6449 match_fp = fp; 6450 6451 if (strcasecmp(val, fp->name) == 0) { 6452 nr_matches = 1; 6453 break; 6454 } 6455 } 6456 6457 if (!nr_matches) { 6458 *reason = "unknown value"; 6459 return -EINVAL; 6460 } 6461 if (nr_matches > 1) { 6462 *reason = "ambiguous value"; 6463 return -EINVAL; 6464 } 6465 6466 force_ent->param = *match_fp; 6467 6468 return 0; 6469 } 6470 6471 static void __init ata_parse_force_param(void) 6472 { 6473 int idx = 0, size = 1; 6474 int last_port = -1, last_device = -1; 6475 char *p, *cur, *next; 6476 6477 /* calculate maximum number of params and allocate force_tbl */ 6478 for (p = ata_force_param_buf; *p; p++) 6479 if (*p == ',') 6480 size++; 6481 6482 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL); 6483 if (!ata_force_tbl) { 6484 printk(KERN_WARNING "ata: failed to extend force table, " 6485 "libata.force ignored\n"); 6486 return; 6487 } 6488 6489 /* parse and populate the table */ 6490 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) { 6491 const char *reason = ""; 6492 struct ata_force_ent te = { .port = -1, .device = -1 }; 6493 6494 next = cur; 6495 if (ata_parse_force_one(&next, &te, &reason)) { 6496 printk(KERN_WARNING "ata: failed to parse force " 6497 "parameter \"%s\" (%s)\n", 6498 cur, reason); 6499 continue; 6500 } 6501 6502 if (te.port == -1) { 6503 te.port = last_port; 6504 te.device = last_device; 6505 } 6506 6507 ata_force_tbl[idx++] = te; 6508 6509 last_port = te.port; 6510 last_device = te.device; 6511 } 6512 6513 ata_force_tbl_size = idx; 6514 } 6515 6516 static int __init ata_init(void) 6517 { 6518 ata_parse_force_param(); 6519 6520 ata_wq = create_workqueue("ata"); 6521 if (!ata_wq) 6522 goto free_force_tbl; 6523 6524 ata_aux_wq = create_singlethread_workqueue("ata_aux"); 6525 if (!ata_aux_wq) 6526 goto free_wq; 6527 6528 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n"); 6529 return 0; 6530 6531 free_wq: 6532 destroy_workqueue(ata_wq); 6533 free_force_tbl: 6534 kfree(ata_force_tbl); 6535 return -ENOMEM; 6536 } 6537 6538 static void __exit ata_exit(void) 6539 { 6540 kfree(ata_force_tbl); 6541 destroy_workqueue(ata_wq); 6542 destroy_workqueue(ata_aux_wq); 6543 } 6544 6545 subsys_initcall(ata_init); 6546 module_exit(ata_exit); 6547 6548 static unsigned long ratelimit_time; 6549 static DEFINE_SPINLOCK(ata_ratelimit_lock); 6550 6551 int ata_ratelimit(void) 6552 { 6553 int rc; 6554 unsigned long flags; 6555 6556 spin_lock_irqsave(&ata_ratelimit_lock, flags); 6557 6558 if (time_after(jiffies, ratelimit_time)) { 6559 rc = 1; 6560 ratelimit_time = jiffies + (HZ/5); 6561 } else 6562 rc = 0; 6563 6564 spin_unlock_irqrestore(&ata_ratelimit_lock, flags); 6565 6566 return rc; 6567 } 6568 6569 /** 6570 * ata_wait_register - wait until register value changes 6571 * @reg: IO-mapped register 6572 * @mask: Mask to apply to read register value 6573 * @val: Wait condition 6574 * @interval: polling interval in milliseconds 6575 * @timeout: timeout in milliseconds 6576 * 6577 * Waiting for some bits of register to change is a common 6578 * operation for ATA controllers. This function reads 32bit LE 6579 * IO-mapped register @reg and tests for the following condition.
6580 * 6581 * (*@reg & mask) != val 6582 * 6583 * If the condition is met, it returns; otherwise, the process is 6584 * repeated after @interval until timeout. 6585 * 6586 * LOCKING: 6587 * Kernel thread context (may sleep) 6588 * 6589 * RETURNS: 6590 * The final register value. 6591 */ 6592 u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, 6593 unsigned long interval, unsigned long timeout) 6594 { 6595 unsigned long deadline; 6596 u32 tmp; 6597 6598 tmp = ioread32(reg); 6599 6600 /* Calculate timeout _after_ the first read to make sure 6601 * preceding writes reach the controller before starting to 6602 * eat away the timeout. 6603 */ 6604 deadline = ata_deadline(jiffies, timeout); 6605 6606 while ((tmp & mask) == val && time_before(jiffies, deadline)) { 6607 msleep(interval); 6608 tmp = ioread32(reg); 6609 } 6610 6611 return tmp; 6612 } 6613 6614 /* 6615 * Dummy port_ops 6616 */ 6617 static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc) 6618 { 6619 return AC_ERR_SYSTEM; 6620 } 6621 6622 static void ata_dummy_error_handler(struct ata_port *ap) 6623 { 6624 /* truly dummy */ 6625 } 6626 6627 struct ata_port_operations ata_dummy_port_ops = { 6628 .qc_prep = ata_noop_qc_prep, 6629 .qc_issue = ata_dummy_qc_issue, 6630 .error_handler = ata_dummy_error_handler, 6631 }; 6632 6633 const struct ata_port_info ata_dummy_port_info = { 6634 .port_ops = &ata_dummy_port_ops, 6635 }; 6636 6637 /* 6638 * libata is essentially a library of internal helper functions for 6639 * low-level ATA host controller drivers. As such, the API/ABI is 6640 * likely to change as new drivers are added and updated. 6641 * Do not depend on ABI/API stability. 6642 */ 6643 EXPORT_SYMBOL_GPL(sata_deb_timing_normal); 6644 EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug); 6645 EXPORT_SYMBOL_GPL(sata_deb_timing_long); 6646 EXPORT_SYMBOL_GPL(ata_base_port_ops); 6647 EXPORT_SYMBOL_GPL(sata_port_ops); 6648 EXPORT_SYMBOL_GPL(ata_dummy_port_ops); 6649 EXPORT_SYMBOL_GPL(ata_dummy_port_info); 6650 EXPORT_SYMBOL_GPL(ata_link_next); 6651 EXPORT_SYMBOL_GPL(ata_dev_next); 6652 EXPORT_SYMBOL_GPL(ata_std_bios_param); 6653 EXPORT_SYMBOL_GPL(ata_host_init); 6654 EXPORT_SYMBOL_GPL(ata_host_alloc); 6655 EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo); 6656 EXPORT_SYMBOL_GPL(ata_slave_link_init); 6657 EXPORT_SYMBOL_GPL(ata_host_start); 6658 EXPORT_SYMBOL_GPL(ata_host_register); 6659 EXPORT_SYMBOL_GPL(ata_host_activate); 6660 EXPORT_SYMBOL_GPL(ata_host_detach); 6661 EXPORT_SYMBOL_GPL(ata_sg_init); 6662 EXPORT_SYMBOL_GPL(ata_qc_complete); 6663 EXPORT_SYMBOL_GPL(ata_qc_complete_multiple); 6664 EXPORT_SYMBOL_GPL(atapi_cmd_type); 6665 EXPORT_SYMBOL_GPL(ata_tf_to_fis); 6666 EXPORT_SYMBOL_GPL(ata_tf_from_fis); 6667 EXPORT_SYMBOL_GPL(ata_pack_xfermask); 6668 EXPORT_SYMBOL_GPL(ata_unpack_xfermask); 6669 EXPORT_SYMBOL_GPL(ata_xfer_mask2mode); 6670 EXPORT_SYMBOL_GPL(ata_xfer_mode2mask); 6671 EXPORT_SYMBOL_GPL(ata_xfer_mode2shift); 6672 EXPORT_SYMBOL_GPL(ata_mode_string); 6673 EXPORT_SYMBOL_GPL(ata_id_xfermask); 6674 EXPORT_SYMBOL_GPL(ata_port_start); 6675 EXPORT_SYMBOL_GPL(ata_do_set_mode); 6676 EXPORT_SYMBOL_GPL(ata_std_qc_defer); 6677 EXPORT_SYMBOL_GPL(ata_noop_qc_prep); 6678 EXPORT_SYMBOL_GPL(ata_port_probe); 6679 EXPORT_SYMBOL_GPL(ata_dev_disable); 6680 EXPORT_SYMBOL_GPL(sata_set_spd); 6681 EXPORT_SYMBOL_GPL(ata_wait_after_reset); 6682 EXPORT_SYMBOL_GPL(sata_link_debounce); 6683 EXPORT_SYMBOL_GPL(sata_link_resume); 6684 EXPORT_SYMBOL_GPL(ata_std_prereset); 6685 EXPORT_SYMBOL_GPL(sata_link_hardreset); 6686
EXPORT_SYMBOL_GPL(sata_std_hardreset); 6687 EXPORT_SYMBOL_GPL(ata_std_postreset); 6688 EXPORT_SYMBOL_GPL(ata_dev_classify); 6689 EXPORT_SYMBOL_GPL(ata_dev_pair); 6690 EXPORT_SYMBOL_GPL(ata_port_disable); 6691 EXPORT_SYMBOL_GPL(ata_ratelimit); 6692 EXPORT_SYMBOL_GPL(ata_wait_register); 6693 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); 6694 EXPORT_SYMBOL_GPL(ata_scsi_slave_config); 6695 EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy); 6696 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 6697 EXPORT_SYMBOL_GPL(sata_scr_valid); 6698 EXPORT_SYMBOL_GPL(sata_scr_read); 6699 EXPORT_SYMBOL_GPL(sata_scr_write); 6700 EXPORT_SYMBOL_GPL(sata_scr_write_flush); 6701 EXPORT_SYMBOL_GPL(ata_link_online); 6702 EXPORT_SYMBOL_GPL(ata_link_offline); 6703 #ifdef CONFIG_PM 6704 EXPORT_SYMBOL_GPL(ata_host_suspend); 6705 EXPORT_SYMBOL_GPL(ata_host_resume); 6706 #endif /* CONFIG_PM */ 6707 EXPORT_SYMBOL_GPL(ata_id_string); 6708 EXPORT_SYMBOL_GPL(ata_id_c_string); 6709 EXPORT_SYMBOL_GPL(ata_do_dev_read_id); 6710 EXPORT_SYMBOL_GPL(ata_scsi_simulate); 6711 6712 EXPORT_SYMBOL_GPL(ata_pio_need_iordy); 6713 EXPORT_SYMBOL_GPL(ata_timing_find_mode); 6714 EXPORT_SYMBOL_GPL(ata_timing_compute); 6715 EXPORT_SYMBOL_GPL(ata_timing_merge); 6716 EXPORT_SYMBOL_GPL(ata_timing_cycle2mode); 6717 6718 #ifdef CONFIG_PCI 6719 EXPORT_SYMBOL_GPL(pci_test_config_bits); 6720 EXPORT_SYMBOL_GPL(ata_pci_remove_one); 6721 #ifdef CONFIG_PM 6722 EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend); 6723 EXPORT_SYMBOL_GPL(ata_pci_device_do_resume); 6724 EXPORT_SYMBOL_GPL(ata_pci_device_suspend); 6725 EXPORT_SYMBOL_GPL(ata_pci_device_resume); 6726 #endif /* CONFIG_PM */ 6727 #endif /* CONFIG_PCI */ 6728 6729 EXPORT_SYMBOL_GPL(__ata_ehi_push_desc); 6730 EXPORT_SYMBOL_GPL(ata_ehi_push_desc); 6731 EXPORT_SYMBOL_GPL(ata_ehi_clear_desc); 6732 EXPORT_SYMBOL_GPL(ata_port_desc); 6733 #ifdef CONFIG_PCI 6734 EXPORT_SYMBOL_GPL(ata_port_pbar_desc); 6735 #endif /* CONFIG_PCI */ 6736 EXPORT_SYMBOL_GPL(ata_port_schedule_eh); 6737 EXPORT_SYMBOL_GPL(ata_link_abort); 6738 EXPORT_SYMBOL_GPL(ata_port_abort); 6739 EXPORT_SYMBOL_GPL(ata_port_freeze); 6740 EXPORT_SYMBOL_GPL(sata_async_notification); 6741 EXPORT_SYMBOL_GPL(ata_eh_freeze_port); 6742 EXPORT_SYMBOL_GPL(ata_eh_thaw_port); 6743 EXPORT_SYMBOL_GPL(ata_eh_qc_complete); 6744 EXPORT_SYMBOL_GPL(ata_eh_qc_retry); 6745 EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error); 6746 EXPORT_SYMBOL_GPL(ata_do_eh); 6747 EXPORT_SYMBOL_GPL(ata_std_error_handler); 6748 6749 EXPORT_SYMBOL_GPL(ata_cable_40wire); 6750 EXPORT_SYMBOL_GPL(ata_cable_80wire); 6751 EXPORT_SYMBOL_GPL(ata_cable_unknown); 6752 EXPORT_SYMBOL_GPL(ata_cable_ignore); 6753 EXPORT_SYMBOL_GPL(ata_cable_sata); 6754
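/*
 * Editor's note: the block below is an illustrative, non-normative sketch of
 * the host bring-up and tear-down sequence documented above at
 * ata_host_start(), ata_host_register(), ata_host_activate() and
 * ata_host_detach().  It is not part of libata; every identifier prefixed
 * with "example_" (driver name, port ops, scsi_host_template, interrupt
 * handler and PCI callbacks) is a hypothetical placeholder used only to make
 * the call order concrete.
 */
#if 0	/* illustration only -- never compiled */
static struct scsi_host_template example_sht = {
	ATA_BASE_SHT("example"),
};

/* LLD-specific ops inherit from sata_port_ops; unspecified methods are
 * filled in from the ancestor chain by ata_finalize_port_ops() when
 * ata_host_start() runs.
 */
static struct ata_port_operations example_port_ops = {
	.inherits	= &sata_port_ops,
	/* a real driver would add its hardware-specific methods here */
};

static const struct ata_port_info example_port_info = {
	.flags		= ATA_FLAG_SATA,
	.pio_mask	= ATA_PIO4,
	.udma_mask	= ATA_UDMA6,
	.port_ops	= &example_port_ops,
};

static irqreturn_t example_interrupt(int irq, void *dev_instance)
{
	/* a real LLD would complete queued commands here */
	return IRQ_HANDLED;
}

static int example_init_one(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	const struct ata_port_info *ppi[] = { &example_port_info, NULL };
	struct ata_host *host;
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* allocate a one-port host and copy example_port_info into it */
	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
	if (!host)
		return -ENOMEM;

	/*
	 * Start ports, request the IRQ and register with the SCSI layer in
	 * one go; ata_host_activate() wraps ata_host_start(),
	 * devm_request_irq() and ata_host_register(), so port probing is
	 * scheduled from here on.
	 */
	return ata_host_activate(host, pdev->irq, example_interrupt,
				 IRQF_SHARED, &example_sht);
}

static void example_remove_one(struct pci_dev *pdev)
{
	/* ata_pci_remove_one() detaches all ports via ata_host_detach();
	 * resource release is handled by devres.
	 */
	ata_pci_remove_one(pdev);
}
#endif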