/*-
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/endian.h>
#include <sys/ata.h>
#include <sys/bio.h>
#include <sys/conf.h>
#include <sys/ctype.h>
#include <sys/bus.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/ata/ata-all.h>
#include <dev/ata/ata-pci.h>
#include <ata_if.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>

/* prototypes */
static int ata_generic_status(device_t dev);
static int ata_wait(struct ata_channel *ch, int unit, u_int8_t);
static void ata_pio_read(struct ata_request *, int);
static void ata_pio_write(struct ata_request *, int);
static void ata_tf_read(struct ata_request *);
static void ata_tf_write(struct ata_request *);

/*
 * low level ATA functions
 */
void
ata_generic_hw(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    ch->hw.begin_transaction = ata_begin_transaction;
    ch->hw.end_transaction = ata_end_transaction;
    ch->hw.status = ata_generic_status;
    ch->hw.softreset = NULL;
    ch->hw.command = ata_generic_command;
    ch->hw.tf_read = ata_tf_read;
    ch->hw.tf_write = ata_tf_write;
    ch->hw.pm_read = NULL;
    ch->hw.pm_write = NULL;
}

/* must be called with ATA channel locked and state_mtx held */
int
ata_begin_transaction(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    int dummy, error;

    ATA_DEBUG_RQ(request, "begin transaction");

    /* disable ATAPI DMA writes if HW doesn't support it */
    if ((ch->flags & ATA_NO_ATAPI_DMA) &&
        (request->flags & ATA_R_ATAPI) == ATA_R_ATAPI)
        request->flags &= ~ATA_R_DMA;
    if ((ch->flags & ATA_ATAPI_DMA_RO) &&
        ((request->flags & (ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE)) ==
         (ATA_R_ATAPI | ATA_R_DMA | ATA_R_WRITE)))
        request->flags &= ~ATA_R_DMA;

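    /*
     * The (ATAPI, DMA) flag combination picks one of four submission
     * paths below: plain ATA PIO/control commands (the default case),
     * ATA DMA, ATAPI PIO and ATAPI DMA.  Each path either finishes the
     * request on the spot or arms the timeout callout and returns to
     * wait for an interrupt.
     */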
    switch (request->flags & (ATA_R_ATAPI | ATA_R_DMA)) {

    /* ATA PIO data transfer and control commands */
    default:
        {
            /* record command direction here as our request might be gone later */
            int write = (request->flags & ATA_R_WRITE);

            /* issue command */
            if (ch->hw.command(request)) {
                device_printf(request->parent, "error issuing %s command\n",
                    ata_cmd2str(request));
                request->result = EIO;
                goto begin_finished;
            }

            /* device reset doesn't interrupt */
            if (request->u.ata.command == ATA_DEVICE_RESET) {

                int timeout = 1000000;
                do {
                    DELAY(10);
                    request->status = ATA_IDX_INB(ch, ATA_STATUS);
                } while (request->status & ATA_S_BUSY && timeout--);
                if (request->status & ATA_S_ERROR)
                    request->error = ATA_IDX_INB(ch, ATA_ERROR);
                ch->hw.tf_read(request);
                goto begin_finished;
            }

            /* if write command output the data */
            if (write) {
                if (ata_wait(ch, request->unit, (ATA_S_READY | ATA_S_DRQ)) < 0) {
                    device_printf(request->parent,
                        "timeout waiting for write DRQ\n");
                    request->result = EIO;
                    goto begin_finished;
                }
                ata_pio_write(request, request->transfersize);
            }
        }
        goto begin_continue;

    /* ATA DMA data transfer commands */
    case ATA_R_DMA:
        /* check sanity, setup SG list and DMA engine */
        if ((error = ch->dma.load(request, NULL, &dummy))) {
            device_printf(request->parent, "setting up DMA failed\n");
            request->result = error;
            goto begin_finished;
        }

        /* start DMA engine if necessary */
        if ((ch->flags & ATA_DMA_BEFORE_CMD) &&
            ch->dma.start && ch->dma.start(request)) {
            device_printf(request->parent, "error starting DMA\n");
            request->result = EIO;
            goto begin_finished;
        }

        /* issue command */
        if (ch->hw.command(request)) {
            device_printf(request->parent, "error issuing %s command\n",
                ata_cmd2str(request));
            request->result = EIO;
            goto begin_finished;
        }

        /* start DMA engine */
        if (!(ch->flags & ATA_DMA_BEFORE_CMD) &&
            ch->dma.start && ch->dma.start(request)) {
            device_printf(request->parent, "error starting DMA\n");
            request->result = EIO;
            goto begin_finished;
        }
        goto begin_continue;

    /* ATAPI PIO commands */
    case ATA_R_ATAPI:
        /* is this just a POLL DSC command ? */
        if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(request->unit));
            DELAY(10);
            if (!(ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_DSC))
                request->result = EBUSY;
            goto begin_finished;
        }

        /* start ATAPI operation */
        if (ch->hw.command(request)) {
            device_printf(request->parent, "error issuing ATA PACKET command\n");
            request->result = EIO;
            goto begin_finished;
        }
        goto begin_continue;

    /* ATAPI DMA commands */
    case ATA_R_ATAPI|ATA_R_DMA:
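        /*
         * For ATAPI DMA the SG list is loaded and the PACKET command is
         * issued before the busmaster engine is started, unlike the
         * ATA_R_DMA case above where controllers flagged with
         * ATA_DMA_BEFORE_CMD want the engine running first.
         */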
        /* is this just a POLL DSC command ? */
        if (request->u.atapi.ccb[0] == ATAPI_POLL_DSC) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(request->unit));
            DELAY(10);
            if (!(ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_DSC))
                request->result = EBUSY;
            goto begin_finished;
        }

        /* check sanity, setup SG list and DMA engine */
        if ((error = ch->dma.load(request, NULL, &dummy))) {
            device_printf(request->parent, "setting up DMA failed\n");
            request->result = error;
            goto begin_finished;
        }

        /* start ATAPI operation */
        if (ch->hw.command(request)) {
            device_printf(request->parent, "error issuing ATA PACKET command\n");
            request->result = EIO;
            goto begin_finished;
        }

        /* start DMA engine */
        if (ch->dma.start && ch->dma.start(request)) {
            request->result = EIO;
            goto begin_finished;
        }
        goto begin_continue;
    }
    /* NOT REACHED */
    printf("ata_begin_transaction OOPS!!!\n");

begin_finished:
    if (ch->dma.unload) {
        ch->dma.unload(request);
    }
    return ATA_OP_FINISHED;

begin_continue:
    callout_reset(&request->callout, request->timeout * hz,
        (timeout_t*)ata_timeout, request);
    return ATA_OP_CONTINUES;
}

/* must be called with ATA channel locked and state_mtx held */
int
ata_end_transaction(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    int length;

    ATA_DEBUG_RQ(request, "end transaction");

    /* clear interrupt and get status */
    request->status = ATA_IDX_INB(ch, ATA_STATUS);

    switch (request->flags & (ATA_R_ATAPI | ATA_R_DMA | ATA_R_CONTROL)) {

    /* ATA PIO data transfer and control commands */
    default:

        /* on timeouts we have no data or anything so just return */
        if (request->flags & ATA_R_TIMEOUT)
            goto end_finished;

        /* Read back registers to the request struct. */
        if ((request->status & ATA_S_ERROR) ||
            (request->flags & (ATA_R_CONTROL | ATA_R_NEEDRESULT))) {
            ch->hw.tf_read(request);
        }

        /* if we got an error we are done with the HW */
        if (request->status & ATA_S_ERROR) {
            request->error = ATA_IDX_INB(ch, ATA_ERROR);
            goto end_finished;
        }

        /* are we moving data ? */
        if (request->flags & (ATA_R_READ | ATA_R_WRITE)) {

            /* if read data get it */
            if (request->flags & ATA_R_READ) {
                int flags = ATA_S_DRQ;

                if (request->u.ata.command != ATA_ATAPI_IDENTIFY)
                    flags |= ATA_S_READY;
                if (ata_wait(ch, request->unit, flags) < 0) {
                    device_printf(request->parent,
                        "timeout waiting for read DRQ\n");
                    request->result = EIO;
                    goto end_finished;
                }
                ata_pio_read(request, request->transfersize);
            }

            /* update how far we've gotten */
            request->donecount += request->transfersize;

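            /*
             * transfersize is the size of one DRQ block, bytecount the
             * size of the whole request; a multi-block PIO command keeps
             * generating interrupts until donecount catches up.
             */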
            /* do we need a scoop more ? */
            if (request->bytecount > request->donecount) {

                /* set this transfer size according to HW capabilities */
                request->transfersize =
                    min((request->bytecount - request->donecount),
                        request->transfersize);

                /* if data write command, output the data */
                if (request->flags & ATA_R_WRITE) {

                    /* if we get an error here we are done with the HW */
                    if (ata_wait(ch, request->unit, (ATA_S_READY | ATA_S_DRQ)) < 0) {
                        device_printf(request->parent,
                            "timeout waiting for write DRQ\n");
                        request->status = ATA_IDX_INB(ch, ATA_STATUS);
                        goto end_finished;
                    }

                    /* output data and return waiting for new interrupt */
                    ata_pio_write(request, request->transfersize);
                    goto end_continue;
                }

                /* if data read command, return & wait for interrupt */
                if (request->flags & ATA_R_READ)
                    goto end_continue;
            }
        }
        /* done with HW */
        goto end_finished;

    /* ATA DMA data transfer commands */
    case ATA_R_DMA:

        /* stop DMA engine and get status */
        if (ch->dma.stop)
            request->dma->status = ch->dma.stop(request);

        /* did we get error or data */
        if (request->status & ATA_S_ERROR)
            request->error = ATA_IDX_INB(ch, ATA_ERROR);
        else if (request->dma->status & ATA_BMSTAT_ERROR)
            request->status |= ATA_S_ERROR;
        else if (!(request->flags & ATA_R_TIMEOUT))
            request->donecount = request->bytecount;

        /* Read back registers to the request struct. */
        if ((request->status & ATA_S_ERROR) ||
            (request->flags & (ATA_R_CONTROL | ATA_R_NEEDRESULT))) {
            ch->hw.tf_read(request);
        }

        /* release SG list etc */
        ch->dma.unload(request);

        /* done with HW */
        goto end_finished;

    /* ATAPI PIO commands */
    case ATA_R_ATAPI:
        length = ATA_IDX_INB(ch, ATA_CYL_LSB)|(ATA_IDX_INB(ch, ATA_CYL_MSB)<<8);

        /* on timeouts we have no data or anything so just return */
        if (request->flags & ATA_R_TIMEOUT)
            goto end_finished;

        switch ((ATA_IDX_INB(ch, ATA_IREASON) & (ATA_I_CMD | ATA_I_IN)) |
                (request->status & ATA_S_DRQ)) {

        case ATAPI_P_CMDOUT:
            /* this seems to be needed for some (slow) devices */
            DELAY(10);

            if (!(request->status & ATA_S_DRQ)) {
                device_printf(request->parent, "command interrupt without DRQ\n");
                request->status = ATA_S_ERROR;
                goto end_finished;
            }
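            /*
             * The device is asking for the command packet itself: write
             * the 12-byte (16-byte with ATA_R_ATAPI16) CCB to the data
             * register as 16-bit words and wait for the next interrupt.
             */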
            ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb,
                (request->flags & ATA_R_ATAPI16) ? 8 : 6);
            /* return wait for interrupt */
            goto end_continue;

        case ATAPI_P_WRITE:
            if (request->flags & ATA_R_READ) {
                request->status = ATA_S_ERROR;
                device_printf(request->parent,
                    "%s trying to write on read buffer\n",
                    ata_cmd2str(request));
                goto end_finished;
            }
            ata_pio_write(request, length);
            request->donecount += length;

            /* set next transfer size according to HW capabilities */
            request->transfersize = min((request->bytecount-request->donecount),
                request->transfersize);
            /* return wait for interrupt */
            goto end_continue;

        case ATAPI_P_READ:
            if (request->flags & ATA_R_WRITE) {
                request->status = ATA_S_ERROR;
                device_printf(request->parent,
                    "%s trying to read on write buffer\n",
                    ata_cmd2str(request));
                goto end_finished;
            }
            ata_pio_read(request, length);
            request->donecount += length;

            /* set next transfer size according to HW capabilities */
            request->transfersize = min((request->bytecount-request->donecount),
                request->transfersize);
            /* return wait for interrupt */
            goto end_continue;

        case ATAPI_P_DONEDRQ:
            device_printf(request->parent,
                "WARNING - %s DONEDRQ non conformant device\n",
                ata_cmd2str(request));
            if (request->flags & ATA_R_READ) {
                ata_pio_read(request, length);
                request->donecount += length;
            }
            else if (request->flags & ATA_R_WRITE) {
                ata_pio_write(request, length);
                request->donecount += length;
            }
            else
                request->status = ATA_S_ERROR;
            /* FALLTHROUGH */

        case ATAPI_P_ABORT:
        case ATAPI_P_DONE:
            if (request->status & (ATA_S_ERROR | ATA_S_DWF))
                request->error = ATA_IDX_INB(ch, ATA_ERROR);
            goto end_finished;

        default:
            device_printf(request->parent, "unknown transfer phase\n");
            request->status = ATA_S_ERROR;
        }

        /* done with HW */
        goto end_finished;

    /* ATAPI DMA commands */
    case ATA_R_ATAPI|ATA_R_DMA:

        /* stop DMA engine and get status */
        if (ch->dma.stop)
            request->dma->status = ch->dma.stop(request);

        /* did we get error or data */
        if (request->status & (ATA_S_ERROR | ATA_S_DWF))
            request->error = ATA_IDX_INB(ch, ATA_ERROR);
        else if (request->dma->status & ATA_BMSTAT_ERROR)
            request->status |= ATA_S_ERROR;
        else if (!(request->flags & ATA_R_TIMEOUT))
            request->donecount = request->bytecount;

        /* release SG list etc */
        ch->dma.unload(request);

        /* done with HW */
        goto end_finished;
    }
    /* NOT REACHED */
    printf("ata_end_transaction OOPS!!\n");

end_finished:
    callout_stop(&request->callout);
    return ATA_OP_FINISHED;

end_continue:
    return ATA_OP_CONTINUES;
}

/* must be called with ATA channel locked and state_mtx held */
void
ata_generic_reset(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    u_int8_t ostat0 = 0, stat0 = 0, ostat1 = 0, stat1 = 0;
    u_int8_t err = 0, lsb = 0, msb = 0;
    int mask = 0, timeout;

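    /*
     * Probe each device position by selecting it and reading the status
     * register: a value with all of the top five bits set (unless
     * presence is already known) or the 0xa5 test pattern is taken to
     * mean a floating bus, i.e. nothing connected there.
     */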
    /* do we have any signs of ATA/ATAPI HW being present ? */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(ATA_MASTER));
    DELAY(10);
    ostat0 = ATA_IDX_INB(ch, ATA_STATUS);
    if (((ostat0 & 0xf8) != 0xf8 || (ch->flags & ATA_KNOWN_PRESENCE)) &&
        ostat0 != 0xa5) {
        stat0 = ATA_S_BUSY;
        mask |= 0x01;
    }

    /* in some setups we don't want to test for a slave */
    if (!(ch->flags & ATA_NO_SLAVE)) {
        ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(ATA_SLAVE));
        DELAY(10);
        ostat1 = ATA_IDX_INB(ch, ATA_STATUS);
        if (((ostat1 & 0xf8) != 0xf8 || (ch->flags & ATA_KNOWN_PRESENCE)) &&
            ostat1 != 0xa5) {
            stat1 = ATA_S_BUSY;
            mask |= 0x02;
        }
    }

    if (bootverbose)
        device_printf(dev, "reset tp1 mask=%02x ostat0=%02x ostat1=%02x\n",
            mask, ostat0, ostat1);

    /* if nothing showed up there is no need to get any further */
    /* XXX SOS is that too strong?, we just might lose devices here */
    ch->devices = 0;
    if (!mask)
        return;

    /* reset (both) devices on this channel */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(ATA_MASTER));
    DELAY(10);
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_IDS | ATA_A_RESET);
    ata_udelay(10000);
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_IDS);
    ata_udelay(100000);
    ATA_IDX_INB(ch, ATA_ERROR);

    /* wait for BUSY to go inactive */
    for (timeout = 0; timeout < 310; timeout++) {
        if ((mask & 0x01) && (stat0 & ATA_S_BUSY)) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(ATA_MASTER));
            DELAY(10);
            if (ch->flags & ATA_STATUS_IS_LONG)
                stat0 = ATA_IDX_INL(ch, ATA_STATUS) & 0xff;
            else
                stat0 = ATA_IDX_INB(ch, ATA_STATUS);
            err = ATA_IDX_INB(ch, ATA_ERROR);
            lsb = ATA_IDX_INB(ch, ATA_CYL_LSB);
            msb = ATA_IDX_INB(ch, ATA_CYL_MSB);
            if (bootverbose)
                device_printf(dev,
                    "stat0=0x%02x err=0x%02x lsb=0x%02x msb=0x%02x\n",
                    stat0, err, lsb, msb);
            if (stat0 == err && lsb == err && msb == err &&
                timeout > (stat0 & ATA_S_BUSY ? 100 : 10))
                mask &= ~0x01;
            if (!(stat0 & ATA_S_BUSY)) {
                if ((err & 0x7f) == ATA_E_ILI) {
                    if (lsb == ATAPI_MAGIC_LSB && msb == ATAPI_MAGIC_MSB) {
                        ch->devices |= ATA_ATAPI_MASTER;
                    }
                    else if (lsb == 0 && msb == 0 && (stat0 & ATA_S_READY)) {
                        ch->devices |= ATA_ATA_MASTER;
                    }
                }
                else if ((stat0 & 0x0f) && err == lsb && err == msb) {
                    stat0 |= ATA_S_BUSY;
                }
            }
        }

        if ((mask & 0x02) && (stat1 & ATA_S_BUSY) &&
            !((mask & 0x01) && (stat0 & ATA_S_BUSY))) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(ATA_SLAVE));
            DELAY(10);
            if (ch->flags & ATA_STATUS_IS_LONG)
                stat1 = ATA_IDX_INL(ch, ATA_STATUS) & 0xff;
            else
                stat1 = ATA_IDX_INB(ch, ATA_STATUS);
            err = ATA_IDX_INB(ch, ATA_ERROR);
            lsb = ATA_IDX_INB(ch, ATA_CYL_LSB);
            msb = ATA_IDX_INB(ch, ATA_CYL_MSB);
            if (bootverbose)
                device_printf(dev,
                    "stat1=0x%02x err=0x%02x lsb=0x%02x msb=0x%02x\n",
                    stat1, err, lsb, msb);
            if (stat1 == err && lsb == err && msb == err &&
                timeout > (stat1 & ATA_S_BUSY ? 100 : 10))
                mask &= ~0x02;
            if (!(stat1 & ATA_S_BUSY)) {
                if ((err & 0x7f) == ATA_E_ILI) {
                    if (lsb == ATAPI_MAGIC_LSB && msb == ATAPI_MAGIC_MSB) {
                        ch->devices |= ATA_ATAPI_SLAVE;
                    }
                    else if (lsb == 0 && msb == 0 && (stat1 & ATA_S_READY)) {
                        ch->devices |= ATA_ATA_SLAVE;
                    }
                }
                else if ((stat1 & 0x0f) && err == lsb && err == msb) {
                    stat1 |= ATA_S_BUSY;
                }
            }
        }

        if ((ch->flags & ATA_KNOWN_PRESENCE) == 0 &&
            timeout > ((mask == 0x03) ? 20 : 10)) {
            if ((mask & 0x01) && stat0 == 0xff)
                mask &= ~0x01;
            if ((mask & 0x02) && stat1 == 0xff)
                mask &= ~0x02;
        }
        if (((mask & 0x01) == 0 || !(stat0 & ATA_S_BUSY)) &&
            ((mask & 0x02) == 0 || !(stat1 & ATA_S_BUSY)))
            break;
        ata_udelay(100000);
    }

    if (bootverbose)
        device_printf(dev, "reset tp2 stat0=%02x stat1=%02x devices=0x%x\n",
            stat0, stat1, ch->devices);
}

/* must be called with ATA channel locked and state_mtx held */
static int
ata_generic_status(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) {
        DELAY(100);
        if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY)
            return 0;
    }
    return 1;
}

static int
ata_wait(struct ata_channel *ch, int unit, u_int8_t mask)
{
    u_int8_t status;
    int timeout = 0;

    DELAY(1);

    /* wait at max 1 second for device to get !BUSY */
    while (timeout < 1000000) {
        status = ATA_IDX_INB(ch, ATA_ALTSTAT);

        /* if drive fails status, reselect the drive and try again */
        if (status == 0xff) {
            ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_DEV(unit));
            timeout += 1000;
            DELAY(1000);
            continue;
        }

        /* are we done ? */
        if (!(status & ATA_S_BUSY))
            break;

        if (timeout > 1000) {
            timeout += 1000;
            DELAY(1000);
        }
        else {
            timeout += 10;
            DELAY(10);
        }
    }
    if (timeout >= 1000000)
        return -2;
    if (!mask)
        return (status & ATA_S_ERROR);

    DELAY(1);

    /* wait 50 msec for bits wanted */
    timeout = 5000;
    while (timeout--) {
        status = ATA_IDX_INB(ch, ATA_ALTSTAT);
        if ((status & mask) == mask)
            return (status & ATA_S_ERROR);
        DELAY(10);
    }
    return -3;
}

int
ata_generic_command(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    /* select device */
    ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_IBM | ATA_D_LBA | ATA_DEV(request->unit));

    /* ready to issue command ? */
    if (ata_wait(ch, request->unit, 0) < 0) {
        device_printf(request->parent, "timeout waiting to issue command\n");
        request->flags |= ATA_R_TIMEOUT;
        return (-1);
    }

    /* enable interrupt */
    ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT);

    if (request->flags & ATA_R_ATAPI) {
        int timeout = 5000;
        int res;

        /* issue packet command to controller */
        if (request->flags & ATA_R_DMA) {
            ATA_IDX_OUTB(ch, ATA_FEATURE, ATA_F_DMA);
            ATA_IDX_OUTB(ch, ATA_CYL_LSB, 0);
            ATA_IDX_OUTB(ch, ATA_CYL_MSB, 0);
        }
        else {
            ATA_IDX_OUTB(ch, ATA_FEATURE, 0);
            ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->transfersize);
            ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->transfersize >> 8);
        }
        ATA_IDX_OUTB(ch, ATA_COMMAND, ATA_PACKET_CMD);

        /* if the device interrupts for the command packet, just return and wait for that interrupt */
        if (request->flags & ATA_R_ATAPI_INTR)
            return (0);

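        /*
         * Devices that do not interrupt for the command packet have to
         * be polled until they report the CMDOUT phase (BSY clear, DRQ
         * set, interrupt reason indicating command-out) before the CCB
         * can be written.
         */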
        /* command processed ? */
        res = ata_wait(ch, request->unit, 0);
        if (res != 0) {
            if (res < 0) {
                device_printf(request->parent,
                    "timeout waiting for PACKET command\n");
                request->flags |= ATA_R_TIMEOUT;
            }
            return (-1);
        }
        /* wait for ready to write ATAPI command block */
        while (timeout--) {
            int reason = ATA_IDX_INB(ch, ATA_IREASON);
            int status = ATA_IDX_INB(ch, ATA_STATUS);

            if (((reason & (ATA_I_CMD | ATA_I_IN)) |
                 (status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT)
                break;
            DELAY(20);
        }
        if (timeout <= 0) {
            device_printf(request->parent,
                "timeout waiting for ATAPI ready\n");
            request->flags |= ATA_R_TIMEOUT;
            return (-1);
        }

        /* this seems to be needed for some (slow) devices */
        DELAY(10);

        /* output command block */
        ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (int16_t *)request->u.atapi.ccb,
            (request->flags & ATA_R_ATAPI16) ? 8 : 6);
    }
    else {
        ch->hw.tf_write(request);

        /* issue command to controller */
        ATA_IDX_OUTB(ch, ATA_COMMAND, request->u.ata.command);
    }
    return (0);
}

static void
ata_tf_read(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    if (request->flags & ATA_R_48BIT) {
        ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT | ATA_A_HOB);
        request->u.ata.count = (ATA_IDX_INB(ch, ATA_COUNT) << 8);
        request->u.ata.lba =
            ((u_int64_t)(ATA_IDX_INB(ch, ATA_SECTOR)) << 24) |
            ((u_int64_t)(ATA_IDX_INB(ch, ATA_CYL_LSB)) << 32) |
            ((u_int64_t)(ATA_IDX_INB(ch, ATA_CYL_MSB)) << 40);

        ATA_IDX_OUTB(ch, ATA_CONTROL, ATA_A_4BIT);
        request->u.ata.count |= ATA_IDX_INB(ch, ATA_COUNT);
        request->u.ata.lba |=
            (ATA_IDX_INB(ch, ATA_SECTOR) |
             (ATA_IDX_INB(ch, ATA_CYL_LSB) << 8) |
             (ATA_IDX_INB(ch, ATA_CYL_MSB) << 16));
    }
    else {
        request->u.ata.count = ATA_IDX_INB(ch, ATA_COUNT);
        request->u.ata.lba = ATA_IDX_INB(ch, ATA_SECTOR) |
            (ATA_IDX_INB(ch, ATA_CYL_LSB) << 8) |
            (ATA_IDX_INB(ch, ATA_CYL_MSB) << 16) |
            ((ATA_IDX_INB(ch, ATA_DRIVE) & 0xf) << 24);
    }
}

static void
ata_tf_write(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);

    if (request->flags & ATA_R_48BIT) {
        ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature >> 8);
        ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
        ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count >> 8);
        ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
        ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba >> 24);
        ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
        ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 32);
        ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
        ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 40);
        ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
        ATA_IDX_OUTB(ch, ATA_DRIVE, ATA_D_LBA | ATA_DEV(request->unit));
    }
    else {
        ATA_IDX_OUTB(ch, ATA_FEATURE, request->u.ata.feature);
        ATA_IDX_OUTB(ch, ATA_COUNT, request->u.ata.count);
        ATA_IDX_OUTB(ch, ATA_SECTOR, request->u.ata.lba);
        ATA_IDX_OUTB(ch, ATA_CYL_LSB, request->u.ata.lba >> 8);
        ATA_IDX_OUTB(ch, ATA_CYL_MSB, request->u.ata.lba >> 16);
        ATA_IDX_OUTB(ch, ATA_DRIVE,
            ATA_D_IBM | ATA_D_LBA | ATA_DEV(request->unit) |
            ((request->u.ata.lba >> 24) & 0x0f));
    }
}

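/*
 * PIO data movers.  Both handle plain virtual buffers as well as
 * unmapped BIOs (entered page by page with pmap_quick_enter_page), use
 * 32-bit I/O when size and alignment allow, fall back to 16-bit I/O
 * otherwise, and carry a dangling odd byte over to the next chunk.
 */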
static void
ata_pio_read(struct ata_request *request, int length)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    struct bio *bio;
    uint8_t *addr;
    vm_offset_t page;
    int todo, done, off, moff, resid, size, i;
    uint8_t buf[2] __aligned(2);

    todo = min(request->transfersize, length);
    page = done = resid = 0;
    while (done < todo) {
        size = todo - done;

        /* Prepare data address and limit size (if not sequential). */
        off = request->donecount + done;
        if ((request->flags & ATA_R_DATA_IN_CCB) == 0 ||
            (request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
            addr = (uint8_t *)request->data + off;
        } else if ((request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) {
            bio = (struct bio *)request->data;
            if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
                addr = (uint8_t *)bio->bio_data + off;
            } else {
                moff = bio->bio_ma_offset + off;
                page = pmap_quick_enter_page(
                    bio->bio_ma[moff / PAGE_SIZE]);
                moff %= PAGE_SIZE;
                size = min(size, PAGE_SIZE - moff);
                addr = (void *)(page + moff);
            }
        } else
            panic("ata_pio_read: Unsupported CAM data type %x\n",
                (request->ccb->ccb_h.flags & CAM_DATA_MASK));

        /* We may have an extra byte already read but not stored. */
        if (resid) {
            addr[0] = buf[1];
            addr++;
            done++;
            size--;
        }

        /* Process main part of data. */
        resid = size % 2;
        if (__predict_false((ch->flags & ATA_USE_16BIT) ||
            (size % 4) != 0 || ((uintptr_t)addr % 4) != 0)) {
#ifndef __NO_STRICT_ALIGNMENT
            if (__predict_false((uintptr_t)addr % 2)) {
                for (i = 0; i + 1 < size; i += 2) {
                    *(uint16_t *)&buf =
                        ATA_IDX_INW_STRM(ch, ATA_DATA);
                    addr[i] = buf[0];
                    addr[i + 1] = buf[1];
                }
            } else
#endif
                ATA_IDX_INSW_STRM(ch, ATA_DATA, (void*)addr,
                    size / 2);

            /* If we have an extra byte of data, leave it for later. */
            if (resid) {
                *(uint16_t *)&buf =
                    ATA_IDX_INW_STRM(ch, ATA_DATA);
                addr[size - 1] = buf[0];
            }
        } else
            ATA_IDX_INSL_STRM(ch, ATA_DATA, (void*)addr, size / 4);

        if (page) {
            pmap_quick_remove_page(page);
            page = 0;
        }
        done += size;
    }

    if (length > done) {
        device_printf(request->parent,
            "WARNING - %s read data overrun %d > %d\n",
            ata_cmd2str(request), length, done);
        for (i = done + resid; i < length; i += 2)
            ATA_IDX_INW(ch, ATA_DATA);
    }
}

static void
ata_pio_write(struct ata_request *request, int length)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    struct bio *bio;
    uint8_t *addr;
    vm_offset_t page;
    int todo, done, off, moff, resid, size, i;
    uint8_t buf[2] __aligned(2);

    todo = min(request->transfersize, length);
    page = done = resid = 0;
    while (done < todo) {
        size = todo - done;

        /* Prepare data address and limit size (if not sequential). */
        off = request->donecount + done;
        if ((request->flags & ATA_R_DATA_IN_CCB) == 0 ||
            (request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR) {
            addr = (uint8_t *)request->data + off;
        } else if ((request->ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO) {
            bio = (struct bio *)request->data;
            if ((bio->bio_flags & BIO_UNMAPPED) == 0) {
                addr = (uint8_t *)bio->bio_data + off;
            } else {
                moff = bio->bio_ma_offset + off;
                page = pmap_quick_enter_page(
                    bio->bio_ma[moff / PAGE_SIZE]);
                moff %= PAGE_SIZE;
                size = min(size, PAGE_SIZE - moff);
                addr = (void *)(page + moff);
            }
        } else
            panic("ata_pio_write: Unsupported CAM data type %x\n",
                (request->ccb->ccb_h.flags & CAM_DATA_MASK));

        /* We may have an extra byte to be written first. */
        if (resid) {
            buf[1] = addr[0];
            ATA_IDX_OUTW_STRM(ch, ATA_DATA, *(uint16_t *)&buf);
            addr++;
            done++;
            size--;
        }

        /* Process main part of data. */
        resid = size % 2;
        if (__predict_false((ch->flags & ATA_USE_16BIT) ||
            (size % 4) != 0 || ((uintptr_t)addr % 4) != 0)) {
#ifndef __NO_STRICT_ALIGNMENT
            if (__predict_false((uintptr_t)addr % 2)) {
                for (i = 0; i + 1 < size; i += 2) {
                    buf[0] = addr[i];
                    buf[1] = addr[i + 1];
                    ATA_IDX_OUTW_STRM(ch, ATA_DATA,
                        *(uint16_t *)&buf);
                }
            } else
#endif
                ATA_IDX_OUTSW_STRM(ch, ATA_DATA, (void*)addr,
                    size / 2);

            /* If we have an extra byte of data, save it for later. */
            if (resid)
                buf[0] = addr[size - 1];
        } else
            ATA_IDX_OUTSL_STRM(ch, ATA_DATA,
                (void*)addr, size / sizeof(int32_t));

        if (page) {
            pmap_quick_remove_page(page);
            page = 0;
        }
        done += size;
    }

    /* We may have an extra byte of data to be written.  Pad it with zero. */
    if (resid) {
        buf[1] = 0;
        ATA_IDX_OUTW_STRM(ch, ATA_DATA, *(uint16_t *)&buf);
    }

    if (length > done) {
        device_printf(request->parent,
            "WARNING - %s write data underrun %d > %d\n",
            ata_cmd2str(request), length, done);
        for (i = done + resid; i < length; i += 2)
            ATA_IDX_OUTW(ch, ATA_DATA, 0);
    }
}