1 /*- 2 * Copyright (c) 1999,2000 Jonathan Lemon 3 * All rights reserved. 4 * 5 # Derived from the original IDA Compaq RAID driver, which is 6 * Copyright (c) 1996, 1997, 1998, 1999 7 * Mark Dawson and David James. All rights reserved. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 /* 35 * Generic driver for Compaq SMART RAID adapters. 36 */ 37 38 #include <sys/param.h> 39 #include <sys/kernel.h> 40 #include <sys/systm.h> 41 #include <sys/malloc.h> 42 #include <sys/stat.h> 43 44 #include <sys/bio.h> 45 #include <sys/bus.h> 46 #include <sys/conf.h> 47 #include <sys/endian.h> 48 49 #include <machine/bus.h> 50 #include <sys/rman.h> 51 52 #include <geom/geom_disk.h> 53 54 #include <dev/ida/idareg.h> 55 #include <dev/ida/idavar.h> 56 #include <dev/ida/idaio.h> 57 58 /* prototypes */ 59 static void ida_alloc_qcb(struct ida_softc *ida); 60 static void ida_construct_qcb(struct ida_softc *ida); 61 static void ida_start(struct ida_softc *ida); 62 static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb); 63 static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb); 64 static void ida_timeout (void *arg); 65 66 static d_ioctl_t ida_ioctl; 67 static struct cdevsw ida_cdevsw = { 68 .d_version = D_VERSION, 69 .d_flags = D_NEEDGIANT, 70 .d_ioctl = ida_ioctl, 71 .d_name = "ida", 72 }; 73 74 void 75 ida_free(struct ida_softc *ida) 76 { 77 int i; 78 79 callout_stop(&ida->ch); 80 81 if (ida->buffer_dmat) { 82 for (i = 0; i < ida->num_qcbs; i++) 83 bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap); 84 bus_dma_tag_destroy(ida->buffer_dmat); 85 } 86 87 if (ida->hwqcb_dmat) { 88 if (ida->hwqcb_busaddr) 89 bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap); 90 if (ida->hwqcbs) 91 bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs, 92 ida->hwqcb_dmamap); 93 bus_dma_tag_destroy(ida->hwqcb_dmat); 94 } 95 96 if (ida->qcbs != NULL) 97 free(ida->qcbs, M_DEVBUF); 98 99 if (ida->ih != NULL) 100 bus_teardown_intr(ida->dev, ida->irq, ida->ih); 101 102 if (ida->irq != NULL) 103 bus_release_resource(ida->dev, ida->irq_res_type, 104 0, ida->irq); 105 106 if (ida->parent_dmat != NULL) 107 bus_dma_tag_destroy(ida->parent_dmat); 108 109 if (ida->regs != NULL) 110 bus_release_resource(ida->dev, ida->regs_res_type, 111 ida->regs_res_id, ida->regs); 112 } 113 114 /* 115 * record bus address from bus_dmamap_load 116 */ 117 static void 118 ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error) 119 { 120 bus_addr_t *baddr; 121 122 baddr = (bus_addr_t *)arg; 123 *baddr = segs->ds_addr; 124 } 125 126 static __inline struct ida_qcb * 127 ida_get_qcb(struct ida_softc *ida) 128 { 129 struct ida_qcb *qcb; 130 131 if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) { 132 SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle); 133 } else { 134 ida_alloc_qcb(ida); 135 if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) 136 SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle); 137 } 138 return (qcb); 139 } 140 141 static __inline bus_addr_t 142 idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb) 143 { 144 return (ida->hwqcb_busaddr + 145 ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs)); 146 } 147 148 static __inline struct ida_qcb * 149 idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr) 150 { 151 struct ida_hardware_qcb *hwqcb; 152 153 hwqcb = (struct ida_hardware_qcb *) 154 ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr)); 155 return (hwqcb->qcb); 156 } 157 158 /* 159 * XXX 160 * since we allocate all QCB space up front during initialization, then 161 * why bother with this routine? 162 */ 163 static void 164 ida_alloc_qcb(struct ida_softc *ida) 165 { 166 struct ida_qcb *qcb; 167 int error; 168 169 if (ida->num_qcbs >= IDA_QCB_MAX) 170 return; 171 172 qcb = &ida->qcbs[ida->num_qcbs]; 173 174 error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap); 175 if (error != 0) 176 return; 177 178 qcb->flags = QCB_FREE; 179 qcb->hwqcb = &ida->hwqcbs[ida->num_qcbs]; 180 qcb->hwqcb->qcb = qcb; 181 qcb->hwqcb_busaddr = idahwqcbvtop(ida, qcb->hwqcb); 182 SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle); 183 ida->num_qcbs++; 184 } 185 186 int 187 ida_init(struct ida_softc *ida) 188 { 189 int error; 190 191 ida->unit = device_get_unit(ida->dev); 192 ida->tag = rman_get_bustag(ida->regs); 193 ida->bsh = rman_get_bushandle(ida->regs); 194 195 SLIST_INIT(&ida->free_qcbs); 196 STAILQ_INIT(&ida->qcb_queue); 197 bioq_init(&ida->bio_queue); 198 199 ida->qcbs = (struct ida_qcb *) 200 malloc(IDA_QCB_MAX * sizeof(struct ida_qcb), M_DEVBUF, 201 M_NOWAIT | M_ZERO); 202 if (ida->qcbs == NULL) 203 return (ENOMEM); 204 205 /* 206 * Create our DMA tags 207 */ 208 209 /* DMA tag for our hardware QCB structures */ 210 error = bus_dma_tag_create( 211 /* parent */ ida->parent_dmat, 212 /* alignment */ 1, 213 /* boundary */ 0, 214 /* lowaddr */ BUS_SPACE_MAXADDR, 215 /* highaddr */ BUS_SPACE_MAXADDR, 216 /* filter */ NULL, 217 /* filterarg */ NULL, 218 /* maxsize */ IDA_QCB_MAX * sizeof(struct ida_hardware_qcb), 219 /* nsegments */ 1, 220 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, 221 /* flags */ 0, 222 /* lockfunc */ busdma_lock_mutex, 223 /* lockarg */ &Giant, 224 &ida->hwqcb_dmat); 225 if (error) 226 return (ENOMEM); 227 228 /* DMA tag for mapping buffers into device space */ 229 error = bus_dma_tag_create( 230 /* parent */ ida->parent_dmat, 231 /* alignment */ 1, 232 /* boundary */ 0, 233 /* lowaddr */ BUS_SPACE_MAXADDR, 234 /* highaddr */ BUS_SPACE_MAXADDR, 235 /* filter */ NULL, 236 /* filterarg */ NULL, 237 /* maxsize */ MAXBSIZE, 238 /* nsegments */ IDA_NSEG, 239 /* maxsegsz */ BUS_SPACE_MAXSIZE_32BIT, 240 /* flags */ 0, 241 /* lockfunc */ busdma_lock_mutex, 242 /* lockarg */ &Giant, 243 &ida->buffer_dmat); 244 if (error) 245 return (ENOMEM); 246 247 /* Allocation of hardware QCBs */ 248 /* XXX allocation is rounded to hardware page size */ 249 error = bus_dmamem_alloc(ida->hwqcb_dmat, 250 (void **)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap); 251 if (error) 252 return (ENOMEM); 253 254 /* And permanently map them in */ 255 bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap, 256 ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb), 257 ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0); 258 259 bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb)); 260 261 ida_alloc_qcb(ida); /* allocate an initial qcb */ 262 263 callout_init(&ida->ch, CALLOUT_MPSAFE); 264 265 return (0); 266 } 267 268 void 269 ida_attach(struct ida_softc *ida) 270 { 271 struct ida_controller_info cinfo; 272 int error, i; 273 274 ida->cmd.int_enable(ida, 0); 275 276 error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo), 277 IDA_CONTROLLER, 0, DMA_DATA_IN); 278 if (error) { 279 device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n"); 280 return; 281 } 282 283 device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n", 284 cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1], 285 cinfo.firm_rev[2], cinfo.firm_rev[3]); 286 287 if (ida->flags & IDA_FIRMWARE) { 288 int data; 289 290 error = ida_command(ida, CMD_START_FIRMWARE, 291 &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN); 292 if (error) { 293 device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n"); 294 return; 295 } 296 } 297 298 ida->ida_dev_t = make_dev(&ida_cdevsw, ida->unit, 299 UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, 300 "ida%d", ida->unit); 301 ida->ida_dev_t->si_drv1 = ida; 302 303 ida->num_drives = 0; 304 for (i = 0; i < cinfo.num_drvs; i++) 305 device_add_child(ida->dev, /*"idad"*/NULL, -1); 306 307 bus_generic_attach(ida->dev); 308 309 ida->cmd.int_enable(ida, 1); 310 } 311 312 int 313 ida_detach(device_t dev) 314 { 315 struct ida_softc *ida; 316 int error = 0; 317 318 ida = (struct ida_softc *)device_get_softc(dev); 319 320 /* 321 * XXX 322 * before detaching, we must make sure that the system is 323 * quiescent; nothing mounted, no pending activity. 324 */ 325 326 /* 327 * XXX 328 * now, how are we supposed to maintain a list of our drives? 329 * iterate over our "child devices"? 330 */ 331 332 destroy_dev(ida->ida_dev_t); 333 ida_free(ida); 334 return (error); 335 } 336 337 static void 338 ida_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error) 339 { 340 struct ida_hardware_qcb *hwqcb = (struct ida_hardware_qcb *)arg; 341 int i; 342 343 hwqcb->hdr.size = htole16((sizeof(struct ida_req) + 344 sizeof(struct ida_sgb) * IDA_NSEG) >> 2); 345 346 for (i = 0; i < nsegments; i++) { 347 hwqcb->seg[i].addr = htole32(segs[i].ds_addr); 348 hwqcb->seg[i].length = htole32(segs[i].ds_len); 349 } 350 hwqcb->req.sgcount = nsegments; 351 } 352 353 int 354 ida_command(struct ida_softc *ida, int command, void *data, int datasize, 355 int drive, u_int32_t pblkno, int flags) 356 { 357 struct ida_hardware_qcb *hwqcb; 358 struct ida_qcb *qcb; 359 bus_dmasync_op_t op; 360 int s, error; 361 362 s = splbio(); 363 qcb = ida_get_qcb(ida); 364 splx(s); 365 366 if (qcb == NULL) { 367 printf("ida_command: out of QCBs"); 368 return (EAGAIN); 369 } 370 371 hwqcb = qcb->hwqcb; 372 bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req)); 373 374 bus_dmamap_load(ida->buffer_dmat, qcb->dmamap, 375 (void *)data, datasize, ida_setup_dmamap, hwqcb, 0); 376 op = qcb->flags & DMA_DATA_IN ? 377 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE; 378 bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op); 379 380 hwqcb->hdr.drive = drive; 381 hwqcb->req.blkno = htole32(pblkno); 382 hwqcb->req.bcount = htole16(howmany(datasize, DEV_BSIZE)); 383 hwqcb->req.command = command; 384 385 qcb->flags = flags | IDA_COMMAND; 386 387 s = splbio(); 388 STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe); 389 ida_start(ida); 390 error = ida_wait(ida, qcb); 391 splx(s); 392 393 /* XXX should have status returned here? */ 394 /* XXX have "status pointer" area in QCB? */ 395 396 return (error); 397 } 398 399 void 400 ida_submit_buf(struct ida_softc *ida, struct bio *bp) 401 { 402 bioq_insert_tail(&ida->bio_queue, bp); 403 ida_construct_qcb(ida); 404 ida_start(ida); 405 } 406 407 static void 408 ida_construct_qcb(struct ida_softc *ida) 409 { 410 struct ida_hardware_qcb *hwqcb; 411 struct ida_qcb *qcb; 412 bus_dmasync_op_t op; 413 struct bio *bp; 414 415 bp = bioq_first(&ida->bio_queue); 416 if (bp == NULL) 417 return; /* no more buffers */ 418 419 qcb = ida_get_qcb(ida); 420 if (qcb == NULL) 421 return; /* out of resources */ 422 423 bioq_remove(&ida->bio_queue, bp); 424 qcb->buf = bp; 425 qcb->flags = bp->bio_cmd == BIO_READ ? DMA_DATA_IN : DMA_DATA_OUT; 426 427 hwqcb = qcb->hwqcb; 428 bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req)); 429 430 bus_dmamap_load(ida->buffer_dmat, qcb->dmamap, 431 (void *)bp->bio_data, bp->bio_bcount, ida_setup_dmamap, hwqcb, 0); 432 op = qcb->flags & DMA_DATA_IN ? 433 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE; 434 bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op); 435 436 { 437 struct idad_softc *drv = (struct idad_softc *)bp->bio_driver1; 438 hwqcb->hdr.drive = drv->drive; 439 } 440 441 hwqcb->req.blkno = bp->bio_pblkno; 442 hwqcb->req.bcount = howmany(bp->bio_bcount, DEV_BSIZE); 443 hwqcb->req.command = bp->bio_cmd == BIO_READ ? CMD_READ : CMD_WRITE; 444 445 STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe); 446 } 447 448 /* 449 * This routine will be called from ida_intr in order to queue up more 450 * I/O, meaning that we may be in an interrupt context. Hence, we should 451 * not muck around with spl() in this routine. 452 */ 453 static void 454 ida_start(struct ida_softc *ida) 455 { 456 struct ida_qcb *qcb; 457 458 while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) { 459 if (ida->cmd.fifo_full(ida)) 460 break; 461 STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe); 462 /* 463 * XXX 464 * place the qcb on an active list? 465 */ 466 467 /* Set a timeout. */ 468 if (!ida->qactive) 469 callout_reset(&ida->ch, hz * 5, ida_timeout, ida); 470 ida->qactive++; 471 472 qcb->state = QCB_ACTIVE; 473 ida->cmd.submit(ida, qcb); 474 } 475 } 476 477 static int 478 ida_wait(struct ida_softc *ida, struct ida_qcb *qcb) 479 { 480 struct ida_qcb *qcb_done = NULL; 481 bus_addr_t completed; 482 int delay; 483 484 if (ida->flags & IDA_INTERRUPTS) { 485 if (tsleep(qcb, PRIBIO, "idacmd", 5 * hz)) 486 return (ETIMEDOUT); 487 return (0); 488 } 489 490 again: 491 delay = 5 * 1000 * 100; /* 5 sec delay */ 492 while ((completed = ida->cmd.done(ida)) == 0) { 493 if (delay-- == 0) 494 return (ETIMEDOUT); 495 DELAY(10); 496 } 497 498 qcb_done = idahwqcbptov(ida, completed & ~3); 499 if (qcb_done != qcb) 500 goto again; 501 ida_done(ida, qcb); 502 return (0); 503 } 504 505 void 506 ida_intr(void *data) 507 { 508 struct ida_softc *ida; 509 struct ida_qcb *qcb; 510 bus_addr_t completed; 511 512 ida = (struct ida_softc *)data; 513 514 if (ida->cmd.int_pending(ida) == 0) 515 return; /* not our interrupt */ 516 517 while ((completed = ida->cmd.done(ida)) != 0) { 518 qcb = idahwqcbptov(ida, completed & ~3); 519 520 if (qcb == NULL || qcb->state != QCB_ACTIVE) { 521 device_printf(ida->dev, 522 "ignoring completion %jx\n", (intmax_t)completed); 523 continue; 524 } 525 /* Handle "Bad Command List" errors. */ 526 if ((completed & 3) && (qcb->hwqcb->req.error == 0)) 527 qcb->hwqcb->req.error = CMD_REJECTED; 528 ida_done(ida, qcb); 529 } 530 ida_start(ida); 531 } 532 533 /* 534 * should switch out command type; may be status, not just I/O. 535 */ 536 static void 537 ida_done(struct ida_softc *ida, struct ida_qcb *qcb) 538 { 539 int error = 0; 540 541 /* 542 * finish up command 543 */ 544 if (qcb->flags & DMA_DATA_TRANSFER) { 545 bus_dmasync_op_t op; 546 547 op = qcb->flags & DMA_DATA_IN ? 548 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE; 549 bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op); 550 bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap); 551 } 552 553 if (qcb->hwqcb->req.error & SOFT_ERROR) { 554 if (qcb->buf) 555 device_printf(ida->dev, "soft %s error\n", 556 qcb->buf->bio_cmd == BIO_READ ? 557 "read" : "write"); 558 else 559 device_printf(ida->dev, "soft error\n"); 560 } 561 if (qcb->hwqcb->req.error & HARD_ERROR) { 562 error = 1; 563 if (qcb->buf) 564 device_printf(ida->dev, "hard %s error\n", 565 qcb->buf->bio_cmd == BIO_READ ? 566 "read" : "write"); 567 else 568 device_printf(ida->dev, "hard error\n"); 569 } 570 if (qcb->hwqcb->req.error & CMD_REJECTED) { 571 error = 1; 572 device_printf(ida->dev, "invalid request\n"); 573 } 574 575 if (qcb->flags & IDA_COMMAND) { 576 if (ida->flags & IDA_INTERRUPTS) 577 wakeup(qcb); 578 } else { 579 KASSERT(qcb->buf != NULL, ("ida_done(): qcb->buf is NULL!")); 580 if (error) 581 qcb->buf->bio_flags |= BIO_ERROR; 582 idad_intr(qcb->buf); 583 } 584 585 ida->qactive--; 586 /* Reschedule or cancel timeout */ 587 if (ida->qactive) 588 callout_reset(&ida->ch, hz * 5, ida_timeout, ida); 589 else 590 callout_stop(&ida->ch); 591 592 qcb->state = QCB_FREE; 593 qcb->buf = NULL; 594 SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle); 595 ida_construct_qcb(ida); 596 } 597 598 static void 599 ida_timeout (void *arg) 600 { 601 struct ida_softc *ida; 602 603 ida = (struct ida_softc *)arg; 604 device_printf(ida->dev, "%s() qactive %d\n", __func__, ida->qactive); 605 606 if (ida->flags & IDA_INTERRUPTS) 607 device_printf(ida->dev, "IDA_INTERRUPTS\n"); 608 609 device_printf(ida->dev, "\t R_CMD_FIFO: %08x\n" 610 "\t R_DONE_FIFO: %08x\n" 611 "\t R_INT_MASK: %08x\n" 612 "\t R_STATUS: %08x\n" 613 "\tR_INT_PENDING: %08x\n", 614 ida_inl(ida, R_CMD_FIFO), 615 ida_inl(ida, R_DONE_FIFO), 616 ida_inl(ida, R_INT_MASK), 617 ida_inl(ida, R_STATUS), 618 ida_inl(ida, R_INT_PENDING)); 619 620 return; 621 } 622 623 /* 624 * IOCTL stuff follows. 625 */ 626 struct cmd_info { 627 int cmd; 628 int len; 629 int flags; 630 }; 631 static struct cmd_info *ida_cmd_lookup(int); 632 633 static int 634 ida_ioctl (struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td) 635 { 636 struct ida_softc *sc; 637 struct ida_user_command *uc; 638 struct cmd_info *ci; 639 int len; 640 int flags; 641 int error; 642 int data; 643 void *daddr; 644 645 sc = (struct ida_softc *)dev->si_drv1; 646 uc = (struct ida_user_command *)addr; 647 error = 0; 648 649 switch (cmd) { 650 case IDAIO_COMMAND: 651 ci = ida_cmd_lookup(uc->command); 652 if (ci == NULL) { 653 error = EINVAL; 654 break; 655 } 656 len = ci->len; 657 flags = ci->flags; 658 if (len) 659 daddr = &uc->d.buf; 660 else { 661 daddr = &data; 662 len = sizeof(data); 663 } 664 error = ida_command(sc, uc->command, daddr, len, 665 uc->drive, uc->blkno, flags); 666 break; 667 default: 668 error = ENOIOCTL; 669 break; 670 } 671 return (error); 672 } 673 674 static struct cmd_info ci_list[] = { 675 { CMD_GET_LOG_DRV_INFO, 676 sizeof(struct ida_drive_info), DMA_DATA_IN }, 677 { CMD_GET_CTRL_INFO, 678 sizeof(struct ida_controller_info), DMA_DATA_IN }, 679 { CMD_SENSE_DRV_STATUS, 680 sizeof(struct ida_drive_status), DMA_DATA_IN }, 681 { CMD_START_RECOVERY, 0, 0 }, 682 { CMD_GET_PHYS_DRV_INFO, 683 sizeof(struct ida_phys_drv_info), DMA_DATA_TRANSFER }, 684 { CMD_BLINK_DRV_LEDS, 685 sizeof(struct ida_blink_drv_leds), DMA_DATA_OUT }, 686 { CMD_SENSE_DRV_LEDS, 687 sizeof(struct ida_blink_drv_leds), DMA_DATA_IN }, 688 { CMD_GET_LOG_DRV_EXT, 689 sizeof(struct ida_drive_info_ext), DMA_DATA_IN }, 690 { CMD_RESET_CTRL, 0, 0 }, 691 { CMD_GET_CONFIG, 0, 0 }, 692 { CMD_SET_CONFIG, 0, 0 }, 693 { CMD_LABEL_LOG_DRV, 694 sizeof(struct ida_label_logical), DMA_DATA_OUT }, 695 { CMD_SET_SURFACE_DELAY, 0, 0 }, 696 { CMD_SENSE_BUS_PARAMS, 0, 0 }, 697 { CMD_SENSE_SUBSYS_INFO, 0, 0 }, 698 { CMD_SENSE_SURFACE_ATS, 0, 0 }, 699 { CMD_PASSTHROUGH, 0, 0 }, 700 { CMD_RESET_SCSI_DEV, 0, 0 }, 701 { CMD_PAUSE_BG_ACT, 0, 0 }, 702 { CMD_RESUME_BG_ACT, 0, 0 }, 703 { CMD_START_FIRMWARE, 0, 0 }, 704 { CMD_SENSE_DRV_ERR_LOG, 0, 0 }, 705 { CMD_START_CPM, 0, 0 }, 706 { CMD_SENSE_CP, 0, 0 }, 707 { CMD_STOP_CPM, 0, 0 }, 708 { CMD_FLUSH_CACHE, 0, 0 }, 709 { CMD_ACCEPT_MEDIA_EXCH, 0, 0 }, 710 { 0, 0, 0 } 711 }; 712 713 static struct cmd_info * 714 ida_cmd_lookup (int command) 715 { 716 struct cmd_info *ci; 717 718 ci = ci_list; 719 while (ci->cmd) { 720 if (ci->cmd == command) 721 return (ci); 722 ci++; 723 } 724 return (NULL); 725 } 726