// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SATA specific part of ATA helper library
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 * Copyright 2006 Tejun Heo <htejun@gmail.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_eh.h>
#include <linux/libata.h>
#include <asm/unaligned.h>

#include "libata.h"
#include "libata-transport.h"

/*
 * Debounce timing parameters in msecs { interval, duration, timeout }.
 * Used by sata_link_debounce()/sata_link_resume(): SStatus.DET is polled
 * every @interval msecs and must hold a stable value for @duration msecs
 * before @timeout expires.
 */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
EXPORT_SYMBOL_GPL(sata_deb_timing_long);

/**
 * sata_scr_valid - test whether SCRs are accessible
 * @link: ATA link to test SCR accessibility for
 *
 * Test whether SCRs are accessible for @link.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_link *link)
{
	struct ata_port *ap = link->ap;

	/* SCRs exist only on SATA ports whose LLD provides ->scr_read */
	return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
}
EXPORT_SYMBOL_GPL(sata_scr_valid);

/**
 * sata_scr_read - read SCR register of the specified port
 * @link: ATA link to read SCR for
 * @reg: SCR to read
 * @val: Place to store read value
 *
 * Read SCR register @reg of @link into *@val.  This function is
 * guaranteed to succeed if @link is ap->link, the cable type of
 * the port is SATA and the port implements ->scr_read.
 *
 * LOCKING:
 * None if @link is ap->link.  Kernel thread context otherwise.
 *
 * RETURNS:
 * 0 on success, negative errno on failure.
64 */ 65 int sata_scr_read(struct ata_link *link, int reg, u32 *val) 66 { 67 if (ata_is_host_link(link)) { 68 if (sata_scr_valid(link)) 69 return link->ap->ops->scr_read(link, reg, val); 70 return -EOPNOTSUPP; 71 } 72 73 return sata_pmp_scr_read(link, reg, val); 74 } 75 EXPORT_SYMBOL_GPL(sata_scr_read); 76 77 /** 78 * sata_scr_write - write SCR register of the specified port 79 * @link: ATA link to write SCR for 80 * @reg: SCR to write 81 * @val: value to write 82 * 83 * Write @val to SCR register @reg of @link. This function is 84 * guaranteed to succeed if @link is ap->link, the cable type of 85 * the port is SATA and the port implements ->scr_read. 86 * 87 * LOCKING: 88 * None if @link is ap->link. Kernel thread context otherwise. 89 * 90 * RETURNS: 91 * 0 on success, negative errno on failure. 92 */ 93 int sata_scr_write(struct ata_link *link, int reg, u32 val) 94 { 95 if (ata_is_host_link(link)) { 96 if (sata_scr_valid(link)) 97 return link->ap->ops->scr_write(link, reg, val); 98 return -EOPNOTSUPP; 99 } 100 101 return sata_pmp_scr_write(link, reg, val); 102 } 103 EXPORT_SYMBOL_GPL(sata_scr_write); 104 105 /** 106 * sata_scr_write_flush - write SCR register of the specified port and flush 107 * @link: ATA link to write SCR for 108 * @reg: SCR to write 109 * @val: value to write 110 * 111 * This function is identical to sata_scr_write() except that this 112 * function performs flush after writing to the register. 113 * 114 * LOCKING: 115 * None if @link is ap->link. Kernel thread context otherwise. 116 * 117 * RETURNS: 118 * 0 on success, negative errno on failure. 
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		int rc;

		if (sata_scr_valid(link)) {
			rc = link->ap->ops->scr_write(link, reg, val);
			if (rc == 0)
				/* read back to flush the posted write */
				rc = link->ap->ops->scr_read(link, reg, &val);
			return rc;
		}
		return -EOPNOTSUPP;
	}

	return sata_pmp_scr_write(link, reg, val);
}
EXPORT_SYMBOL_GPL(sata_scr_write_flush);

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @pmp: Port multiplier port
 * @is_cmd: This FIS is for command
 * @fis: Buffer into which data will output
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number*/
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* auxiliary field, little-endian into bytes 16-19 */
	fis[16] = tf->auxiliary & 0xff;
	fis[17] = (tf->auxiliary >> 8) & 0xff;
	fis[18] = (tf->auxiliary >> 16) & 0xff;
	fis[19] = (tf->auxiliary >> 24) & 0xff;
}
EXPORT_SYMBOL_GPL(ata_tf_to_fis);

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	/* D2H Register FIS layout: byte 2 is status, byte 3 is error */
	tf->status = fis[2];
	tf->error = fis[3];

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
EXPORT_SYMBOL_GPL(ata_tf_from_fis);

/**
 * sata_link_debounce - debounce SATA phy status
 * @link: ATA link to debounce SATA phy status for
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Make sure SStatus of @link reaches stable state, determined by
 * holding the same value where DET is not 1 for @duration polled
 * every @interval, before @timeout.  Timeout constraints the
 * beginning of the stable state.  Because DET gets stuck at 1 on
 * some controllers after hot unplugging, this functions waits
 * until timeout then returns 0 if DET is stable at 1.
 *
 * @timeout is further limited by @deadline.  The sooner of the
 * two is used.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* cap the overall timeout by the caller's deadline */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field of SStatus matters here */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		ata_msleep(link->ap, interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable?
		 */
		if (cur == last) {
			/* DET stuck at 1: keep waiting until deadline */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
EXPORT_SYMBOL_GPL(sata_link_debounce);

/**
 * sata_link_resume - resume SATA link
 * @link: ATA link to resume SATA
 * @params: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 *
 * Resume SATA phy @link and debounce it.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	int tries = ATA_LINK_RESUME_TRIES;
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/*
	 * Writes to SControl sometimes get ignored under certain
	 * controllers (ata_piix SIDPR).  Make sure DET actually is
	 * cleared.
	 */
	do {
		/* DET = 0 (no action), IPM = 3 (no LPM transitions) */
		scontrol = (scontrol & 0x0f0) | 0x300;
		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			return rc;
		/*
		 * Some PHYs react badly if SStatus is pounded
		 * immediately after resuming.  Delay 200ms before
		 * debouncing.
		 */
		if (!(link->flags & ATA_LFLAG_NO_DEBOUNCE_DELAY))
			ata_msleep(link->ap, 200);

		/* is SControl restored correctly?
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			return rc;
	} while ((scontrol & 0xf0f) != 0x300 && --tries);

	if ((scontrol & 0xf0f) != 0x300) {
		ata_link_warn(link, "failed to resume link (SControl %X)\n",
			      scontrol);
		/* treated as "link did not come up", not as an error */
		return 0;
	}

	if (tries < ATA_LINK_RESUME_TRIES)
		ata_link_warn(link, "link resume succeeded after %d retries\n",
			      ATA_LINK_RESUME_TRIES - tries);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL from the SError clear is ignored on purpose */
	return rc != -EINVAL ? rc : 0;
}
EXPORT_SYMBOL_GPL(sata_link_resume);

/**
 * sata_link_scr_lpm - manipulate SControl IPM and SPM fields
 * @link: ATA link to manipulate SControl for
 * @policy: LPM policy to configure
 * @spm_wakeup: initiate LPM transition to active state
 *
 * Manipulate the IPM field of the SControl register of @link
 * according to @policy.  If @policy is ATA_LPM_MAX_POWER and
 * @spm_wakeup is %true, the SPM field is manipulated to wake up
 * the link.  This function also clears PHYRDY_CHG before
 * returning.
 *
 * LOCKING:
 * EH context.
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
		      bool spm_wakeup)
{
	struct ata_eh_context *ehc = &link->eh_context;
	bool woken_up = false;
	u32 scontrol;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case ATA_LPM_MAX_POWER:
		/* disable all LPM transitions */
		scontrol |= (0x7 << 8);
		/* initiate transition to active state */
		if (spm_wakeup) {
			scontrol |= (0x4 << 12);
			woken_up = true;
		}
		break;
	case ATA_LPM_MED_POWER:
		/* allow LPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x6 << 8);
		break;
	case ATA_LPM_MED_POWER_WITH_DIPM:
	case ATA_LPM_MIN_POWER_WITH_PARTIAL:
	case ATA_LPM_MIN_POWER:
		if (ata_link_nr_enabled(link) > 0)
			/* no restrictions on LPM transitions */
			scontrol &= ~(0x7 << 8);
		else {
			/* empty port, power off */
			scontrol &= ~0xf;
			scontrol |= (0x1 << 2);
		}
		break;
	default:
		/* unknown policy; fall through and write scontrol as-is */
		WARN_ON(1);
	}

	rc = sata_scr_write(link, SCR_CONTROL, scontrol);
	if (rc)
		return rc;

	/* give the link time to transit out of LPM state */
	if (woken_up)
		msleep(10);

	/* clear PHYRDY_CHG from SError */
	ehc->i.serror &= ~SERR_PHYRDY_CHG;
	return sata_scr_write(link, SCR_ERROR, SERR_PHYRDY_CHG);
}
EXPORT_SYMBOL_GPL(sata_link_scr_lpm);

/*
 * __sata_set_spd_needed - update SControl SPD field from sata_spd_limit
 *
 * Computes the target SPD limit for @link, writes it into the SPD field
 * of *@scontrol, and reports whether the field actually changed.
 * Returns non-zero iff the on-wire SPD limit differs from the target.
 */
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	if (limit == UINT_MAX)
		target = 0;	/* no limit: SPD field of 0 means "no restriction" */
	else
		target = fls(limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return spd != target;
}

/**
 * sata_set_spd_needed - is SATA spd configuration needed
 * @link: Link in question
 *
 * Test whether the spd limit in SControl matches
 * @link->sata_spd_limit.  This function is used to determine
 * whether hardreset is necessary to apply SATA spd
 * configuration.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 1 if SATA spd configuration is needed, 0 otherwise.
 */
static int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	/* if SCR isn't readable, conservatively report "needed" */
	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}

/**
 * sata_set_spd - set SATA spd according to spd limit
 * @link: Link to set SATA spd for
 *
 * Set SATA spd of @link according to sata_spd_limit.
 *
 * LOCKING:
 * Inherited from caller.
 *
 * RETURNS:
 * 0 if spd doesn't need to be changed, 1 if spd has been
 * changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
EXPORT_SYMBOL_GPL(sata_set_spd);

/**
 * sata_link_hardreset - reset link via SATA phy reset
 * @link: link to reset
 * @timing: timing parameters { interval, duration, timeout } in msec
 * @deadline: deadline jiffies for the operation
 * @online: optional out parameter indicating link onlineness
 * @check_ready: optional callback to check link readiness
 *
 * SATA phy-reset @link using DET bits of SControl register.
 * After hardreset, link readiness is waited upon using
 * ata_wait_ready() if @check_ready is specified.  LLDs are
 * allowed to not specify @check_ready and wait itself after this
 * function returns.  Device classification is LLD's
 * responsibility.
 *
 * *@online is set to one iff reset succeeded and @link is online
 * after reset.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4 (phy offline), IPM = 3 (no LPM transitions) */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1 (COMRESET), IPM = 3 (no LPM transitions) */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	ata_msleep(link->ap, 1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_phys_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			/* readiness result deliberately ignored; SRST follows */
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;	/* tell EH to follow up with SRST */
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_err(link, "COMRESET failed (errno=%d)\n", rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(sata_link_hardreset);

/**
 * ata_qc_complete_multiple - Complete multiple qcs successfully
 * @ap: port in question
 * @qc_active: new qc_active mask
 *
 * Complete in-flight commands.  This functions is meant to be
 * called from low-level driver's interrupt routine to complete
 * requests normally.  ap->qc_active and @qc_active is compared
 * and commands are completed accordingly.
 *
 * Always use this function when completing multiple NCQ commands
 * from IRQ handlers instead of calling ata_qc_complete()
 * multiple times to keep IRQ expect status properly in sync.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
{
	u64 done_mask, ap_qc_active = ap->qc_active;
	int nr_done = 0;

	/*
	 * If the internal tag is set on ap->qc_active, then we care about
	 * bit0 on the passed in qc_active mask.  Move that bit up to match
	 * the internal tag.
	 */
	if (ap_qc_active & (1ULL << ATA_TAG_INTERNAL)) {
		qc_active |= (qc_active & 0x01) << ATA_TAG_INTERNAL;
		qc_active ^= qc_active & 0x01;
	}

	/* bits that changed between old and new active masks are done */
	done_mask = ap_qc_active ^ qc_active;

	/* a bit may only go from active to inactive, never the reverse */
	if (unlikely(done_mask & qc_active)) {
		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
			     ap->qc_active, qc_active);
		return -EINVAL;
	}

	if (ap->ops->qc_ncq_fill_rtf)
		ap->ops->qc_ncq_fill_rtf(ap, done_mask);

	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs64(done_mask);

		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
		done_mask &= ~(1ULL << tag);
	}

	return nr_done;
}
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);

/**
 * ata_slave_link_init - initialize slave link
 * @ap: port to initialize slave link for
 *
 * Create and initialize slave link for @ap.  This enables slave
 * link handling on the port.
 *
 * In libata, a port contains links and a link contains devices.
 * There is single host link but if a PMP is attached to it,
 * there can be multiple fan-out links.  On SATA, there's usually
 * a single device connected to a link but PATA and SATA
 * controllers emulating TF based interface can have two - master
 * and slave.
 *
 * However, there are a few controllers which don't fit into this
 * abstraction too well - SATA controllers which emulate TF
 * interface with both master and slave devices but also have
 * separate SCR register sets for each device.  These controllers
 * need separate links for physical link handling
 * (e.g. onlineness, link speed) but should be treated like a
 * traditional M/S controller for everything else (e.g. command
 * issue, softreset).
 *
 * slave_link is libata's way of handling this class of
 * controllers without impacting core layer too much.
For 704 * anything other than physical link handling, the default host 705 * link is used for both master and slave. For physical link 706 * handling, separate @ap->slave_link is used. All dirty details 707 * are implemented inside libata core layer. From LLD's POV, the 708 * only difference is that prereset, hardreset and postreset are 709 * called once more for the slave link, so the reset sequence 710 * looks like the following. 711 * 712 * prereset(M) -> prereset(S) -> hardreset(M) -> hardreset(S) -> 713 * softreset(M) -> postreset(M) -> postreset(S) 714 * 715 * Note that softreset is called only for the master. Softreset 716 * resets both M/S by definition, so SRST on master should handle 717 * both (the standard method will work just fine). 718 * 719 * LOCKING: 720 * Should be called before host is registered. 721 * 722 * RETURNS: 723 * 0 on success, -errno on failure. 724 */ 725 int ata_slave_link_init(struct ata_port *ap) 726 { 727 struct ata_link *link; 728 729 WARN_ON(ap->slave_link); 730 WARN_ON(ap->flags & ATA_FLAG_PMP); 731 732 link = kzalloc(sizeof(*link), GFP_KERNEL); 733 if (!link) 734 return -ENOMEM; 735 736 ata_link_init(ap, link, 1); 737 ap->slave_link = link; 738 return 0; 739 } 740 EXPORT_SYMBOL_GPL(ata_slave_link_init); 741 742 /** 743 * sata_lpm_ignore_phy_events - test if PHY event should be ignored 744 * @link: Link receiving the event 745 * 746 * Test whether the received PHY event has to be ignored or not. 747 * 748 * LOCKING: 749 * None: 750 * 751 * RETURNS: 752 * True if the event has to be ignored. 
753 */ 754 bool sata_lpm_ignore_phy_events(struct ata_link *link) 755 { 756 unsigned long lpm_timeout = link->last_lpm_change + 757 msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY); 758 759 /* if LPM is enabled, PHYRDY doesn't mean anything */ 760 if (link->lpm_policy > ATA_LPM_MAX_POWER) 761 return true; 762 763 /* ignore the first PHY event after the LPM policy changed 764 * as it is might be spurious 765 */ 766 if ((link->flags & ATA_LFLAG_CHANGED) && 767 time_before(jiffies, lpm_timeout)) 768 return true; 769 770 return false; 771 } 772 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events); 773 774 static const char *ata_lpm_policy_names[] = { 775 [ATA_LPM_UNKNOWN] = "max_performance", 776 [ATA_LPM_MAX_POWER] = "max_performance", 777 [ATA_LPM_MED_POWER] = "medium_power", 778 [ATA_LPM_MED_POWER_WITH_DIPM] = "med_power_with_dipm", 779 [ATA_LPM_MIN_POWER_WITH_PARTIAL] = "min_power_with_partial", 780 [ATA_LPM_MIN_POWER] = "min_power", 781 }; 782 783 static ssize_t ata_scsi_lpm_store(struct device *device, 784 struct device_attribute *attr, 785 const char *buf, size_t count) 786 { 787 struct Scsi_Host *shost = class_to_shost(device); 788 struct ata_port *ap = ata_shost_to_port(shost); 789 struct ata_link *link; 790 struct ata_device *dev; 791 enum ata_lpm_policy policy; 792 unsigned long flags; 793 794 /* UNKNOWN is internal state, iterate from MAX_POWER */ 795 for (policy = ATA_LPM_MAX_POWER; 796 policy < ARRAY_SIZE(ata_lpm_policy_names); policy++) { 797 const char *name = ata_lpm_policy_names[policy]; 798 799 if (strncmp(name, buf, strlen(name)) == 0) 800 break; 801 } 802 if (policy == ARRAY_SIZE(ata_lpm_policy_names)) 803 return -EINVAL; 804 805 spin_lock_irqsave(ap->lock, flags); 806 807 ata_for_each_link(link, ap, EDGE) { 808 ata_for_each_dev(dev, &ap->link, ENABLED) { 809 if (dev->horkage & ATA_HORKAGE_NOLPM) { 810 count = -EOPNOTSUPP; 811 goto out_unlock; 812 } 813 } 814 } 815 816 ap->target_lpm_policy = policy; 817 ata_port_schedule_eh(ap); 818 out_unlock: 819 
spin_unlock_irqrestore(ap->lock, flags); 820 return count; 821 } 822 823 static ssize_t ata_scsi_lpm_show(struct device *dev, 824 struct device_attribute *attr, char *buf) 825 { 826 struct Scsi_Host *shost = class_to_shost(dev); 827 struct ata_port *ap = ata_shost_to_port(shost); 828 829 if (ap->target_lpm_policy >= ARRAY_SIZE(ata_lpm_policy_names)) 830 return -EINVAL; 831 832 return sysfs_emit(buf, "%s\n", 833 ata_lpm_policy_names[ap->target_lpm_policy]); 834 } 835 DEVICE_ATTR(link_power_management_policy, S_IRUGO | S_IWUSR, 836 ata_scsi_lpm_show, ata_scsi_lpm_store); 837 EXPORT_SYMBOL_GPL(dev_attr_link_power_management_policy); 838 839 static ssize_t ata_ncq_prio_supported_show(struct device *device, 840 struct device_attribute *attr, 841 char *buf) 842 { 843 struct scsi_device *sdev = to_scsi_device(device); 844 struct ata_port *ap = ata_shost_to_port(sdev->host); 845 struct ata_device *dev; 846 bool ncq_prio_supported; 847 int rc = 0; 848 849 spin_lock_irq(ap->lock); 850 dev = ata_scsi_find_dev(ap, sdev); 851 if (!dev) 852 rc = -ENODEV; 853 else 854 ncq_prio_supported = dev->flags & ATA_DFLAG_NCQ_PRIO; 855 spin_unlock_irq(ap->lock); 856 857 return rc ? rc : sysfs_emit(buf, "%u\n", ncq_prio_supported); 858 } 859 860 DEVICE_ATTR(ncq_prio_supported, S_IRUGO, ata_ncq_prio_supported_show, NULL); 861 EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_supported); 862 863 static ssize_t ata_ncq_prio_enable_show(struct device *device, 864 struct device_attribute *attr, 865 char *buf) 866 { 867 struct scsi_device *sdev = to_scsi_device(device); 868 struct ata_port *ap = ata_shost_to_port(sdev->host); 869 struct ata_device *dev; 870 bool ncq_prio_enable; 871 int rc = 0; 872 873 spin_lock_irq(ap->lock); 874 dev = ata_scsi_find_dev(ap, sdev); 875 if (!dev) 876 rc = -ENODEV; 877 else 878 ncq_prio_enable = dev->flags & ATA_DFLAG_NCQ_PRIO_ENABLED; 879 spin_unlock_irq(ap->lock); 880 881 return rc ? 
rc : sysfs_emit(buf, "%u\n", ncq_prio_enable); 882 } 883 884 static ssize_t ata_ncq_prio_enable_store(struct device *device, 885 struct device_attribute *attr, 886 const char *buf, size_t len) 887 { 888 struct scsi_device *sdev = to_scsi_device(device); 889 struct ata_port *ap; 890 struct ata_device *dev; 891 long int input; 892 int rc = 0; 893 894 rc = kstrtol(buf, 10, &input); 895 if (rc) 896 return rc; 897 if ((input < 0) || (input > 1)) 898 return -EINVAL; 899 900 ap = ata_shost_to_port(sdev->host); 901 dev = ata_scsi_find_dev(ap, sdev); 902 if (unlikely(!dev)) 903 return -ENODEV; 904 905 spin_lock_irq(ap->lock); 906 907 if (!(dev->flags & ATA_DFLAG_NCQ_PRIO)) { 908 rc = -EINVAL; 909 goto unlock; 910 } 911 912 if (input) { 913 if (dev->flags & ATA_DFLAG_CDL_ENABLED) { 914 ata_dev_err(dev, 915 "CDL must be disabled to enable NCQ priority\n"); 916 rc = -EINVAL; 917 goto unlock; 918 } 919 dev->flags |= ATA_DFLAG_NCQ_PRIO_ENABLED; 920 } else { 921 dev->flags &= ~ATA_DFLAG_NCQ_PRIO_ENABLED; 922 } 923 924 unlock: 925 spin_unlock_irq(ap->lock); 926 927 return rc ? 
rc : len; 928 } 929 930 DEVICE_ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR, 931 ata_ncq_prio_enable_show, ata_ncq_prio_enable_store); 932 EXPORT_SYMBOL_GPL(dev_attr_ncq_prio_enable); 933 934 static struct attribute *ata_ncq_sdev_attrs[] = { 935 &dev_attr_unload_heads.attr, 936 &dev_attr_ncq_prio_enable.attr, 937 &dev_attr_ncq_prio_supported.attr, 938 NULL 939 }; 940 941 static const struct attribute_group ata_ncq_sdev_attr_group = { 942 .attrs = ata_ncq_sdev_attrs 943 }; 944 945 const struct attribute_group *ata_ncq_sdev_groups[] = { 946 &ata_ncq_sdev_attr_group, 947 NULL 948 }; 949 EXPORT_SYMBOL_GPL(ata_ncq_sdev_groups); 950 951 static ssize_t 952 ata_scsi_em_message_store(struct device *dev, struct device_attribute *attr, 953 const char *buf, size_t count) 954 { 955 struct Scsi_Host *shost = class_to_shost(dev); 956 struct ata_port *ap = ata_shost_to_port(shost); 957 if (ap->ops->em_store && (ap->flags & ATA_FLAG_EM)) 958 return ap->ops->em_store(ap, buf, count); 959 return -EINVAL; 960 } 961 962 static ssize_t 963 ata_scsi_em_message_show(struct device *dev, struct device_attribute *attr, 964 char *buf) 965 { 966 struct Scsi_Host *shost = class_to_shost(dev); 967 struct ata_port *ap = ata_shost_to_port(shost); 968 969 if (ap->ops->em_show && (ap->flags & ATA_FLAG_EM)) 970 return ap->ops->em_show(ap, buf); 971 return -EINVAL; 972 } 973 DEVICE_ATTR(em_message, S_IRUGO | S_IWUSR, 974 ata_scsi_em_message_show, ata_scsi_em_message_store); 975 EXPORT_SYMBOL_GPL(dev_attr_em_message); 976 977 static ssize_t 978 ata_scsi_em_message_type_show(struct device *dev, struct device_attribute *attr, 979 char *buf) 980 { 981 struct Scsi_Host *shost = class_to_shost(dev); 982 struct ata_port *ap = ata_shost_to_port(shost); 983 984 return sysfs_emit(buf, "%d\n", ap->em_message_type); 985 } 986 DEVICE_ATTR(em_message_type, S_IRUGO, 987 ata_scsi_em_message_type_show, NULL); 988 EXPORT_SYMBOL_GPL(dev_attr_em_message_type); 989 990 static ssize_t 991 ata_scsi_activity_show(struct device 
*dev, struct device_attribute *attr, 992 char *buf) 993 { 994 struct scsi_device *sdev = to_scsi_device(dev); 995 struct ata_port *ap = ata_shost_to_port(sdev->host); 996 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); 997 998 if (atadev && ap->ops->sw_activity_show && 999 (ap->flags & ATA_FLAG_SW_ACTIVITY)) 1000 return ap->ops->sw_activity_show(atadev, buf); 1001 return -EINVAL; 1002 } 1003 1004 static ssize_t 1005 ata_scsi_activity_store(struct device *dev, struct device_attribute *attr, 1006 const char *buf, size_t count) 1007 { 1008 struct scsi_device *sdev = to_scsi_device(dev); 1009 struct ata_port *ap = ata_shost_to_port(sdev->host); 1010 struct ata_device *atadev = ata_scsi_find_dev(ap, sdev); 1011 enum sw_activity val; 1012 int rc; 1013 1014 if (atadev && ap->ops->sw_activity_store && 1015 (ap->flags & ATA_FLAG_SW_ACTIVITY)) { 1016 val = simple_strtoul(buf, NULL, 0); 1017 switch (val) { 1018 case OFF: case BLINK_ON: case BLINK_OFF: 1019 rc = ap->ops->sw_activity_store(atadev, val); 1020 if (!rc) 1021 return count; 1022 else 1023 return rc; 1024 } 1025 } 1026 return -EINVAL; 1027 } 1028 DEVICE_ATTR(sw_activity, S_IWUSR | S_IRUGO, ata_scsi_activity_show, 1029 ata_scsi_activity_store); 1030 EXPORT_SYMBOL_GPL(dev_attr_sw_activity); 1031 1032 /** 1033 * ata_change_queue_depth - Set a device maximum queue depth 1034 * @ap: ATA port of the target device 1035 * @sdev: SCSI device to configure queue depth for 1036 * @queue_depth: new queue depth 1037 * 1038 * Helper to set a device maximum queue depth, usable with both libsas 1039 * and libata. 
 *
 */
int ata_change_queue_depth(struct ata_port *ap, struct scsi_device *sdev,
			   int queue_depth)
{
	struct ata_device *dev;
	unsigned long flags;
	int max_queue_depth;

	spin_lock_irqsave(ap->lock, flags);

	/* unknown device or no-op request: report current depth unchanged */
	dev = ata_scsi_find_dev(ap, sdev);
	if (!dev || queue_depth < 1 || queue_depth == sdev->queue_depth) {
		spin_unlock_irqrestore(ap->lock, flags);
		return sdev->queue_depth;
	}

	/*
	 * Make sure that the queue depth requested does not exceed the device
	 * capabilities.
	 */
	max_queue_depth = min(ATA_MAX_QUEUE, sdev->host->can_queue);
	max_queue_depth = min(max_queue_depth, ata_id_queue_depth(dev->id));
	if (queue_depth > max_queue_depth) {
		spin_unlock_irqrestore(ap->lock, flags);
		return -EINVAL;
	}

	/*
	 * If NCQ is not supported by the device or if the target queue depth
	 * is 1 (to disable drive side command queueing), turn off NCQ.
	 */
	if (queue_depth == 1 || !ata_ncq_supported(dev)) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		queue_depth = 1;
	} else {
		dev->flags &= ~ATA_DFLAG_NCQ_OFF;
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* clamping may have landed on the current depth; nothing to do */
	if (queue_depth == sdev->queue_depth)
		return sdev->queue_depth;

	return scsi_change_queue_depth(sdev, queue_depth);
}
EXPORT_SYMBOL_GPL(ata_change_queue_depth);

/**
 * ata_scsi_change_queue_depth - SCSI callback for queue depth config
 * @sdev: SCSI device to configure queue depth for
 * @queue_depth: new queue depth
 *
 * This is libata standard hostt->change_queue_depth callback.
 * SCSI will call into this callback when user tries to set queue
 * depth via sysfs.
 *
 * LOCKING:
 * SCSI layer (we don't care)
 *
 * RETURNS:
 * Newly configured queue depth.
1102 */ 1103 int ata_scsi_change_queue_depth(struct scsi_device *sdev, int queue_depth) 1104 { 1105 struct ata_port *ap = ata_shost_to_port(sdev->host); 1106 1107 return ata_change_queue_depth(ap, sdev, queue_depth); 1108 } 1109 EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth); 1110 1111 /** 1112 * ata_sas_port_alloc - Allocate port for a SAS attached SATA device 1113 * @host: ATA host container for all SAS ports 1114 * @port_info: Information from low-level host driver 1115 * @shost: SCSI host that the scsi device is attached to 1116 * 1117 * LOCKING: 1118 * PCI/etc. bus probe sem. 1119 * 1120 * RETURNS: 1121 * ata_port pointer on success / NULL on failure. 1122 */ 1123 1124 struct ata_port *ata_sas_port_alloc(struct ata_host *host, 1125 struct ata_port_info *port_info, 1126 struct Scsi_Host *shost) 1127 { 1128 struct ata_port *ap; 1129 1130 ap = ata_port_alloc(host); 1131 if (!ap) 1132 return NULL; 1133 1134 ap->port_no = 0; 1135 ap->lock = &host->lock; 1136 ap->pio_mask = port_info->pio_mask; 1137 ap->mwdma_mask = port_info->mwdma_mask; 1138 ap->udma_mask = port_info->udma_mask; 1139 ap->flags |= port_info->flags; 1140 ap->ops = port_info->port_ops; 1141 ap->cbl = ATA_CBL_SATA; 1142 1143 return ap; 1144 } 1145 EXPORT_SYMBOL_GPL(ata_sas_port_alloc); 1146 1147 /** 1148 * ata_sas_port_start - Set port up for dma. 1149 * @ap: Port to initialize 1150 * 1151 * Called just after data structures for each port are 1152 * initialized. 1153 * 1154 * May be used as the port_start() entry in ata_port_operations. 1155 * 1156 * LOCKING: 1157 * Inherited from caller. 
 */
int ata_sas_port_start(struct ata_port *ap)
{
	/*
	 * the port is marked as frozen at allocation time, but if we don't
	 * have new eh, we won't thaw it
	 */
	if (!ap->ops->error_handler)
		ap->pflags &= ~ATA_PFLAG_FROZEN;
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_start);

/**
 * ata_sas_port_stop - Undo ata_sas_port_start()
 * @ap: Port to shut down
 *
 * May be used as the port_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_sas_port_stop(struct ata_port *ap)
{
	/* intentionally empty: nothing allocated in ata_sas_port_start() */
}
EXPORT_SYMBOL_GPL(ata_sas_port_stop);

/**
 * ata_sas_async_probe - simply schedule probing and return
 * @ap: Port to probe
 *
 * For batch scheduling of probe for sas attached ata devices, assumes
 * the port has already been through ata_sas_port_init()
 */
void ata_sas_async_probe(struct ata_port *ap)
{
	__ata_port_probe(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_async_probe);

/**
 * ata_sas_sync_probe - probe a SAS attached SATA port synchronously
 * @ap: Port to probe
 *
 * Synchronous counterpart of ata_sas_async_probe().
 *
 * RETURNS:
 * Return value of ata_port_probe().
 */
int ata_sas_sync_probe(struct ata_port *ap)
{
	return ata_port_probe(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_sync_probe);


/**
 * ata_sas_port_init - Initialize a SATA device
 * @ap: SATA port to initialize
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */

int ata_sas_port_init(struct ata_port *ap)
{
	int rc = ap->ops->port_start(ap);

	if (rc)
		return rc;
	/* assign a unique id used in port messages */
	ap->print_id = atomic_inc_return(&ata_print_id);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_port_init);

/* Register @ap with the ATA transport class under @parent. */
int ata_sas_tport_add(struct device *parent, struct ata_port *ap)
{
	return ata_tport_add(parent, ap);
}
EXPORT_SYMBOL_GPL(ata_sas_tport_add);

/* Undo ata_sas_tport_add(): unregister @ap from the ATA transport class. */
void ata_sas_tport_delete(struct ata_port *ap)
{
	ata_tport_delete(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_tport_delete);

/**
 * ata_sas_port_destroy - Destroy a SATA port allocated by ata_sas_port_alloc
 * @ap: SATA port to destroy
 *
 */

void ata_sas_port_destroy(struct ata_port *ap)
{
	/* port_stop is optional for LLDs, hence the NULL check */
	if (ap->ops->port_stop)
		ap->ops->port_stop(ap);
	kfree(ap);
}
EXPORT_SYMBOL_GPL(ata_sas_port_destroy);

/**
 * ata_sas_slave_configure - Default slave_config routine for libata devices
 * @sdev: SCSI device to configure
 * @ap: ATA port to which SCSI device is attached
 *
 * RETURNS:
 * Zero.
 */

int ata_sas_slave_configure(struct scsi_device *sdev, struct ata_port *ap)
{
	ata_scsi_sdev_config(sdev);
	/* a SAS-attached port has exactly one device, on ap->link */
	ata_scsi_dev_config(sdev, ap->link.device);
	return 0;
}
EXPORT_SYMBOL_GPL(ata_sas_slave_configure);

/**
 * ata_sas_queuecmd - Issue SCSI cdb to libata-managed device
 * @cmd: SCSI command to be sent
 * @ap: ATA port to which the command is being sent
 *
 * RETURNS:
 * Return value from __ata_scsi_queuecmd() if @cmd can be queued,
 * 0 otherwise.
1279 */ 1280 1281 int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap) 1282 { 1283 int rc = 0; 1284 1285 if (likely(ata_dev_enabled(ap->link.device))) 1286 rc = __ata_scsi_queuecmd(cmd, ap->link.device); 1287 else { 1288 cmd->result = (DID_BAD_TARGET << 16); 1289 scsi_done(cmd); 1290 } 1291 return rc; 1292 } 1293 EXPORT_SYMBOL_GPL(ata_sas_queuecmd); 1294 1295 /** 1296 * sata_async_notification - SATA async notification handler 1297 * @ap: ATA port where async notification is received 1298 * 1299 * Handler to be called when async notification via SDB FIS is 1300 * received. This function schedules EH if necessary. 1301 * 1302 * LOCKING: 1303 * spin_lock_irqsave(host lock) 1304 * 1305 * RETURNS: 1306 * 1 if EH is scheduled, 0 otherwise. 1307 */ 1308 int sata_async_notification(struct ata_port *ap) 1309 { 1310 u32 sntf; 1311 int rc; 1312 1313 if (!(ap->flags & ATA_FLAG_AN)) 1314 return 0; 1315 1316 rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf); 1317 if (rc == 0) 1318 sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf); 1319 1320 if (!sata_pmp_attached(ap) || rc) { 1321 /* PMP is not attached or SNTF is not available */ 1322 if (!sata_pmp_attached(ap)) { 1323 /* PMP is not attached. Check whether ATAPI 1324 * AN is configured. If so, notify media 1325 * change. 1326 */ 1327 struct ata_device *dev = ap->link.device; 1328 1329 if ((dev->class == ATA_DEV_ATAPI) && 1330 (dev->flags & ATA_DFLAG_AN)) 1331 ata_scsi_media_change_notify(dev); 1332 return 0; 1333 } else { 1334 /* PMP is attached but SNTF is not available. 1335 * ATAPI async media change notification is 1336 * not used. The PMP must be reporting PHY 1337 * status change, schedule EH. 
1338 */ 1339 ata_port_schedule_eh(ap); 1340 return 1; 1341 } 1342 } else { 1343 /* PMP is attached and SNTF is available */ 1344 struct ata_link *link; 1345 1346 /* check and notify ATAPI AN */ 1347 ata_for_each_link(link, ap, EDGE) { 1348 if (!(sntf & (1 << link->pmp))) 1349 continue; 1350 1351 if ((link->device->class == ATA_DEV_ATAPI) && 1352 (link->device->flags & ATA_DFLAG_AN)) 1353 ata_scsi_media_change_notify(link->device); 1354 } 1355 1356 /* If PMP is reporting that PHY status of some 1357 * downstream ports has changed, schedule EH. 1358 */ 1359 if (sntf & (1 << SATA_PMP_CTRL_PORT)) { 1360 ata_port_schedule_eh(ap); 1361 return 1; 1362 } 1363 1364 return 0; 1365 } 1366 } 1367 EXPORT_SYMBOL_GPL(sata_async_notification); 1368 1369 /** 1370 * ata_eh_read_log_10h - Read log page 10h for NCQ error details 1371 * @dev: Device to read log page 10h from 1372 * @tag: Resulting tag of the failed command 1373 * @tf: Resulting taskfile registers of the failed command 1374 * 1375 * Read log page 10h to obtain NCQ error details and clear error 1376 * condition. 1377 * 1378 * LOCKING: 1379 * Kernel thread context (may sleep). 1380 * 1381 * RETURNS: 1382 * 0 on success, -errno otherwise. 
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	/* read one sector of the NCQ Command Error log */
	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, 0, buf, 1);
	if (err_mask)
		return -EIO;

	/* the byte sum of the whole page should be zero; warn but proceed */
	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_warn(dev, "invalid checksum 0x%x on log page 10h\n",
			     csum);

	/* bit 7 of byte 0 set means no error taskfile is queued (NQ) */
	if (buf[0] & 0x80)
		return -ENOENT;

	/* low 5 bits of byte 0 carry the tag of the failed command */
	*tag = buf[0] & 0x1f;

	/* taskfile registers at fixed byte offsets within the log page */
	tf->status = buf[2];
	tf->error = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];
	/* with NCQ autosense, bytes 14-16 carry sense key / asc / ascq */
	if (ata_id_has_ncq_autosense(dev->id) && (tf->status & ATA_SENSE))
		tf->auxiliary = buf[14] << 16 | buf[15] << 8 | buf[16];

	return 0;
}

/**
 * ata_eh_read_sense_success_ncq_log - Read the sense data for successful
 *				       NCQ commands log
 * @link: ATA link to get sense data for
 *
 * Read the sense data for successful NCQ commands log page to obtain
 * sense data for all NCQ commands that completed successfully with
 * the sense data available bit set.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
{
	struct ata_device *dev = link->device;
	struct ata_port *ap = dev->link->ap;
	u8 *buf = ap->ncq_sense_buf;
	struct ata_queued_cmd *qc;
	unsigned int err_mask, tag;
	u8 *sense, sk = 0, asc = 0, ascq = 0;
	u64 sense_valid, val;
	int ret = 0;

	err_mask = ata_read_log_page(dev, ATA_LOG_SENSE_NCQ, 0, buf, 2);
	if (err_mask) {
		ata_dev_err(dev,
			"Failed to read Sense Data for Successful NCQ Commands log\n");
		return -EIO;
	}

	/* Check the log header */
	val = get_unaligned_le64(&buf[0]);
	if ((val & 0xffff) != 1 || ((val >> 16) & 0xff) != 0x0f) {
		ata_dev_err(dev,
			"Invalid Sense Data for Successful NCQ Commands log\n");
		return -EIO;
	}

	/* bytes 8-11: little-endian bitmap of tags with valid sense data */
	sense_valid = (u64)buf[8] | ((u64)buf[9] << 8) |
		((u64)buf[10] << 16) | ((u64)buf[11] << 24);

	ata_qc_for_each_raw(ap, qc, tag) {
		/* only successful EH commands on this physical link qualify */
		if (!(qc->flags & ATA_QCFLAG_EH) ||
		    !(qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD) ||
		    qc->err_mask ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/*
		 * If the command does not have any sense data, clear ATA_SENSE.
		 * Keep ATA_QCFLAG_EH_SUCCESS_CMD so that command is finished.
		 */
		if (!(sense_valid & (1ULL << tag))) {
			qc->result_tf.status &= ~ATA_SENSE;
			continue;
		}

		/* 24-byte sense descriptors start at offset 32, indexed by tag */
		sense = &buf[32 + 24 * tag];
		sk = sense[0];
		asc = sense[1];
		ascq = sense[2];

		if (!ata_scsi_sense_is_valid(sk, asc, ascq)) {
			/* remember the failure but keep processing other tags */
			ret = -EIO;
			continue;
		}

		/* Set sense without also setting scsicmd->result */
		scsi_build_sense_buffer(dev->flags & ATA_DFLAG_D_SENSE,
					qc->scsicmd->sense_buffer, sk,
					asc, ascq);
		qc->flags |= ATA_QCFLAG_SENSE_VALID;

		/*
		 * If we have sense data, call scsi_check_sense() in order to
		 * set the correct SCSI ML byte (if any). No point in checking
		 * the return value, since the command has already completed
		 * successfully.
		 */
		scsi_check_sense(qc->scsicmd);
	}

	return ret;
}
EXPORT_SYMBOL_GPL(ata_eh_read_sense_success_ncq_log);

/**
 * ata_eh_analyze_ncq_error - analyze NCQ error
 * @link: ATA link to analyze NCQ error for
 *
 * Read log page 10h, determine the offending qc and acquire
 * error status TF.  For NCQ device errors, all LLDDs have to do
 * is setting AC_ERR_DEV in ehi->err_mask.  This function takes
 * care of the rest.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ata_port_is_frozen(ap))
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_EH))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	memset(&tf, 0, sizeof(tf));
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_err(link, "failed to read log page 10h (errno=%d)\n",
			     rc);
		return;
	}

	/* the reported tag must be one of the currently outstanding ones */
	if (!(link->sactive & (1 << tag))) {
		ata_link_err(link, "log page 10h reported inactive tag %d\n",
			     tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;

	/*
	 * If the device supports NCQ autosense, ata_eh_read_log_10h() will have
	 * stored the sense data in qc->result_tf.auxiliary.
	 */
	if (qc->result_tf.auxiliary) {
		char sense_key, asc, ascq;

		sense_key = (qc->result_tf.auxiliary >> 16) & 0xff;
		asc = (qc->result_tf.auxiliary >> 8) & 0xff;
		ascq = qc->result_tf.auxiliary & 0xff;
		if (ata_scsi_sense_is_valid(sense_key, asc, ascq)) {
			ata_scsi_set_sense(dev, qc->scsicmd, sense_key, asc,
					   ascq);
			ata_scsi_set_sense_information(dev, qc->scsicmd,
						       &qc->result_tf);
			qc->flags |= ATA_QCFLAG_SENSE_VALID;
		}
	}

	/* fix up the result taskfile of the other failed commands on this link */
	ata_qc_for_each_raw(ap, qc, tag) {
		if (!(qc->flags & ATA_QCFLAG_EH) ||
		    qc->flags & ATA_QCFLAG_EH_SUCCESS_CMD ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* Skip the single QC which caused the NCQ error. */
		if (qc->err_mask)
			continue;

		/*
		 * For SATA, the STATUS and ERROR fields are shared for all NCQ
		 * commands that were completed with the same SDB FIS.
		 * Therefore, we have to clear the ATA_ERR bit for all QCs
		 * except the one that caused the NCQ error.
		 */
		qc->result_tf.status &= ~ATA_ERR;
		qc->result_tf.error = 0;

		/*
		 * If we get a NCQ error, that means that a single command was
		 * aborted. All other failed commands for our link should be
		 * retried and has no business of going though further scrutiny
		 * by ata_eh_link_autopsy().
		 */
		qc->flags |= ATA_QCFLAG_RETRY;
	}

	ehc->i.err_mask &= ~AC_ERR_DEV;
}
EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);