/*-
 * Copyright (c) 1999,2000 Jonathan Lemon
 * All rights reserved.
 *
 * Derived from the original IDA Compaq RAID driver, which is
 * Copyright (c) 1996, 1997, 1998, 1999
 * Mark Dawson and David James. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Generic driver for Compaq SMART RAID adapters.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/stat.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <sys/rman.h>

#include <geom/geom_disk.h>

#include <dev/ida/idareg.h>
#include <dev/ida/idavar.h>
#include <dev/ida/idaio.h>

/* prototypes */
static void ida_alloc_qcb(struct ida_softc *ida);
static void ida_construct_qcb(struct ida_softc *ida);
static void ida_start(struct ida_softc *ida);
static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb);
static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb);

static d_ioctl_t ida_ioctl;
static struct cdevsw ida_cdevsw = {
	.d_ioctl =	ida_ioctl,
	.d_name =	"ida",
};

void
ida_free(struct ida_softc *ida)
{
	int i;

	for (i = 0; i < ida->num_qcbs; i++)
		bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap);

	if (ida->hwqcb_busaddr)
		bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap);

	if (ida->hwqcbs)
		bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs,
		    ida->hwqcb_dmamap);

	if (ida->buffer_dmat)
		bus_dma_tag_destroy(ida->buffer_dmat);

	if (ida->hwqcb_dmat)
		bus_dma_tag_destroy(ida->hwqcb_dmat);

	if (ida->qcbs != NULL)
		free(ida->qcbs, M_DEVBUF);

	if (ida->ih != NULL)
		bus_teardown_intr(ida->dev, ida->irq, ida->ih);

	if (ida->irq != NULL)
		bus_release_resource(ida->dev, ida->irq_res_type,
		    0, ida->irq);

	if (ida->parent_dmat != NULL)
		bus_dma_tag_destroy(ida->parent_dmat);

	if (ida->regs != NULL)
		bus_release_resource(ida->dev, ida->regs_res_type,
		    ida->regs_res_id, ida->regs);
}
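/*
 * QCB (queue control block) management overview.
 *
 * Each command is described by a driver-private struct ida_qcb paired with
 * a struct ida_hardware_qcb that the controller reads from host memory.
 * All hardware QCBs live in a single contiguous allocation that is loaded
 * once, so idahwqcbvtop()/idahwqcbptov() below can translate between a
 * hardware QCB's kernel virtual address and the bus address the controller
 * hands back on completion.  Free QCBs sit on the free_qcbs SLIST;
 * constructed but not yet submitted commands wait on the qcb_queue STAILQ
 * until ida_start() pushes them to the adapter.
 */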
/*
 * record bus address from bus_dmamap_load
 */
static void
ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}

static __inline struct ida_qcb *
ida_get_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) {
		SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	} else {
		ida_alloc_qcb(ida);
		if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL)
			SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	}
	return (qcb);
}

static __inline bus_addr_t
idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb)
{
	return (ida->hwqcb_busaddr +
	    ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs));
}

static __inline struct ida_qcb *
idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr)
{
	struct ida_hardware_qcb *hwqcb;

	hwqcb = (struct ida_hardware_qcb *)
	    ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr));
	return (hwqcb->qcb);
}

/*
 * XXX
 * since we allocate all QCB space up front during initialization, then
 * why bother with this routine?
 */
static void
ida_alloc_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;
	int error;

	if (ida->num_qcbs >= IDA_QCB_MAX)
		return;

	qcb = &ida->qcbs[ida->num_qcbs];

	error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap);
	if (error != 0)
		return;

	qcb->flags = QCB_FREE;
	qcb->hwqcb = &ida->hwqcbs[ida->num_qcbs];
	qcb->hwqcb->qcb = qcb;
	qcb->hwqcb_busaddr = idahwqcbvtop(ida, qcb->hwqcb);
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida->num_qcbs++;
}

int
ida_init(struct ida_softc *ida)
{
	int error;

	ida->unit = device_get_unit(ida->dev);
	ida->tag = rman_get_bustag(ida->regs);
	ida->bsh = rman_get_bushandle(ida->regs);

	SLIST_INIT(&ida->free_qcbs);
	STAILQ_INIT(&ida->qcb_queue);
	bioq_init(&ida->bio_queue);

	ida->qcbs = (struct ida_qcb *)
	    malloc(IDA_QCB_MAX * sizeof(struct ida_qcb), M_DEVBUF,
		M_NOWAIT | M_ZERO);
	if (ida->qcbs == NULL)
		return (ENOMEM);

	/*
	 * Create our DMA tags
	 */

	/* DMA tag for our hardware QCB structures */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
	    /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant,
	    &ida->hwqcb_dmat);
	if (error)
		return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/MAXBSIZE, /*nsegments*/IDA_NSEG,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
	    /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant,
	    &ida->buffer_dmat);
	if (error)
		return (ENOMEM);

	/* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat,
	    (void **)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
		return (ENOMEM);

	/* And permanently map them in */
	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	ida_alloc_qcb(ida);		/* allocate an initial qcb */

	return (0);
}

void
ida_attach(struct ida_softc *ida)
{
	struct ida_controller_info cinfo;
	int error, i;

	ida->cmd.int_enable(ida, 0);

	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
	    IDA_CONTROLLER, 0, DMA_DATA_IN);
	if (error) {
		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
		return;
	}

	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
	    cinfo.firm_rev[2], cinfo.firm_rev[3]);

	if (ida->flags & IDA_FIRMWARE) {
		int data;

		error = ida_command(ida, CMD_START_FIRMWARE,
		    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
		if (error) {
			device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n");
			return;
		}
	}

	ida->ida_dev_t = make_dev(&ida_cdevsw, ida->unit,
	    UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR,
	    "ida%d", ida->unit);
	ida->ida_dev_t->si_drv1 = ida;

	ida->num_drives = 0;
	for (i = 0; i < cinfo.num_drvs; i++)
		device_add_child(ida->dev, /*"idad"*/NULL, -1);

	bus_generic_attach(ida->dev);

	ida->cmd.int_enable(ida, 1);
}

int
ida_detach(device_t dev)
{
	struct ida_softc *ida;
	int error = 0;

	ida = (struct ida_softc *)device_get_softc(dev);

	/*
	 * XXX
	 * before detaching, we must make sure that the system is
	 * quiescent; nothing mounted, no pending activity.
	 */

	/*
	 * XXX
	 * now, how are we supposed to maintain a list of our drives?
	 * iterate over our "child devices"?
	 */

	destroy_dev(ida->ida_dev_t);
	ida_free(ida);
	return (error);
}

static void
ida_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
	struct ida_hardware_qcb *hwqcb = (struct ida_hardware_qcb *)arg;
	int i;

	hwqcb->hdr.size = htole16((sizeof(struct ida_req) +
	    sizeof(struct ida_sgb) * IDA_NSEG) >> 2);

	for (i = 0; i < nsegments; i++) {
		hwqcb->seg[i].addr = htole32(segs[i].ds_addr);
		hwqcb->seg[i].length = htole32(segs[i].ds_len);
	}
	hwqcb->req.sgcount = nsegments;
}

int
ida_command(struct ida_softc *ida, int command, void *data, int datasize,
	int drive, u_int32_t pblkno, int flags)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	int s, error;

	s = splbio();
	qcb = ida_get_qcb(ida);
	splx(s);

	if (qcb == NULL) {
		printf("ida_command: out of QCBs\n");
		return (EAGAIN);
	}

	/*
	 * Record the transfer direction before loading the DMA map so
	 * that the sync below picks the correct pre-I/O operation.
	 */
	qcb->flags = flags | IDA_COMMAND;

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)data, datasize, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	hwqcb->hdr.drive = drive;
	hwqcb->req.blkno = htole32(pblkno);
	hwqcb->req.bcount = htole16(howmany(datasize, DEV_BSIZE));
	hwqcb->req.command = command;

	s = splbio();
	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
	ida_start(ida);
	error = ida_wait(ida, qcb);
	splx(s);

	/* XXX should have status returned here? */
	/* XXX have "status pointer" area in QCB? */

	return (error);
}

void
ida_submit_buf(struct ida_softc *ida, struct bio *bp)
{
	bioq_insert_tail(&ida->bio_queue, bp);
	ida_construct_qcb(ida);
	ida_start(ida);
}

static void
ida_construct_qcb(struct ida_softc *ida)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	struct idad_softc *drv;
	bus_dmasync_op_t op;
	struct bio *bp;

	bp = bioq_first(&ida->bio_queue);
	if (bp == NULL)
		return;				/* no more buffers */

	qcb = ida_get_qcb(ida);
	if (qcb == NULL)
		return;				/* out of resources */

	bioq_remove(&ida->bio_queue, bp);
	qcb->buf = bp;
	qcb->flags = bp->bio_cmd == BIO_READ ? DMA_DATA_IN : DMA_DATA_OUT;

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)bp->bio_data, bp->bio_bcount, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	drv = (struct idad_softc *)bp->bio_driver1;
	hwqcb->hdr.drive = drv->drive;
	hwqcb->req.blkno = htole32(bp->bio_pblkno);
	hwqcb->req.bcount = htole16(howmany(bp->bio_bcount, DEV_BSIZE));
	hwqcb->req.command = bp->bio_cmd == BIO_READ ? CMD_READ : CMD_WRITE;

	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
}

/*
 * This routine will be called from ida_intr in order to queue up more
 * I/O, meaning that we may be in an interrupt context.  Hence, we should
 * not muck around with spl() in this routine.
 */
static void
ida_start(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) {
		if (ida->cmd.fifo_full(ida))
			break;
		STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe);
		/*
		 * XXX
		 * place the qcb on an active list and set a timeout?
		 */
		qcb->state = QCB_ACTIVE;
		ida->cmd.submit(ida, qcb);
	}
}

static int
ida_wait(struct ida_softc *ida, struct ida_qcb *qcb)
{
	struct ida_qcb *qcb_done = NULL;
	bus_addr_t completed;
	int delay;

	if (ida->flags & IDA_INTERRUPTS) {
		if (tsleep(qcb, PRIBIO, "idacmd", 5 * hz))
			return (ETIMEDOUT);
		return (0);
	}

again:
	delay = 5 * 1000 * 100;			/* 5 sec delay */
	while ((completed = ida->cmd.done(ida)) == 0) {
		if (delay-- == 0)
			return (ETIMEDOUT);
		DELAY(10);
	}

	qcb_done = idahwqcbptov(ida, completed & ~3);
	if (qcb_done != qcb)
		goto again;
	ida_done(ida, qcb);
	return (0);
}

void
ida_intr(void *data)
{
	struct ida_softc *ida;
	struct ida_qcb *qcb;
	bus_addr_t completed;

	ida = (struct ida_softc *)data;

	if (ida->cmd.int_pending(ida) == 0)
		return;				/* not our interrupt */

	while ((completed = ida->cmd.done(ida)) != 0) {
		qcb = idahwqcbptov(ida, completed & ~3);

		if (qcb == NULL || qcb->state != QCB_ACTIVE) {
			device_printf(ida->dev,
			    "ignoring completion %jx\n", (uintmax_t)completed);
			continue;
		}
		/* Handle "Bad Command List" errors. */
		if ((completed & 3) && (qcb->hwqcb->req.error == 0))
			qcb->hwqcb->req.error = CMD_REJECTED;
		ida_done(ida, qcb);
	}
	ida_start(ida);
}

/*
 * should switch out command type; may be status, not just I/O.
 */
static void
ida_done(struct ida_softc *ida, struct ida_qcb *qcb)
{
	int error = 0;

	/*
	 * finish up command
	 */
	if (qcb->flags & DMA_DATA_TRANSFER) {
		bus_dmasync_op_t op;

		op = qcb->flags & DMA_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
		bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap);
	}

	if (qcb->hwqcb->req.error & SOFT_ERROR) {
		if (qcb->buf)
			device_printf(ida->dev, "soft %s error\n",
			    qcb->buf->bio_cmd == BIO_READ ?
				"read" : "write");
		else
			device_printf(ida->dev, "soft error\n");
	}
	if (qcb->hwqcb->req.error & HARD_ERROR) {
		error = 1;
		if (qcb->buf)
			device_printf(ida->dev, "hard %s error\n",
			    qcb->buf->bio_cmd == BIO_READ ?
				"read" : "write");
		else
			device_printf(ida->dev, "hard error\n");
	}
	if (qcb->hwqcb->req.error & CMD_REJECTED) {
		error = 1;
		device_printf(ida->dev, "invalid request\n");
	}

	if (qcb->flags & IDA_COMMAND) {
		if (ida->flags & IDA_INTERRUPTS)
			wakeup(qcb);
	} else {
		if (error)
			qcb->buf->bio_flags |= BIO_ERROR;
		idad_intr(qcb->buf);
	}

	qcb->state = QCB_FREE;
	qcb->buf = NULL;
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida_construct_qcb(ida);
}

/*
 * IOCTL stuff follows.
 */
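/*
 * The only ioctl is IDAIO_COMMAND, which lets a management utility push an
 * arbitrary controller command through ida_command().  A rough, hypothetical
 * sketch of a userland caller (the device node name and the way the header
 * is pulled in are assumptions, not part of this file):
 *
 *	struct ida_user_command uc;
 *	int fd = open("/dev/ida0", O_RDWR);
 *
 *	bzero(&uc, sizeof(uc));
 *	uc.command = CMD_GET_CTRL_INFO;
 *	uc.drive = IDA_CONTROLLER;
 *	uc.blkno = 0;
 *	if (ioctl(fd, IDAIO_COMMAND, &uc) == 0)
 *		... inspect the data returned in uc.d.buf ...
 */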
struct cmd_info {
	int	cmd;
	int	len;
	int	flags;
};
static struct cmd_info *ida_cmd_lookup(int);

static int
ida_ioctl(dev_t dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td)
{
	struct ida_softc *sc;
	struct ida_user_command *uc;
	struct cmd_info *ci;
	int len;
	int flags;
	int error;
	int data;
	void *daddr;

	sc = (struct ida_softc *)dev->si_drv1;
	uc = (struct ida_user_command *)addr;
	error = 0;

	switch (cmd) {
	case IDAIO_COMMAND:
		ci = ida_cmd_lookup(uc->command);
		if (ci == NULL) {
			error = EINVAL;
			break;
		}
		len = ci->len;
		flags = ci->flags;
		if (len)
			daddr = &uc->d.buf;
		else {
			daddr = &data;
			len = sizeof(data);
		}
		error = ida_command(sc, uc->command, daddr, len,
		    uc->drive, uc->blkno, flags);
		break;
	default:
		error = ENOIOCTL;
		break;
	}
	return (error);
}

static struct cmd_info ci_list[] = {
	{ CMD_GET_LOG_DRV_INFO,
	    sizeof(struct ida_drive_info), DMA_DATA_IN },
	{ CMD_GET_CTRL_INFO,
	    sizeof(struct ida_controller_info), DMA_DATA_IN },
	{ CMD_SENSE_DRV_STATUS,
	    sizeof(struct ida_drive_status), DMA_DATA_IN },
	{ CMD_START_RECOVERY, 0, 0 },
	{ CMD_GET_PHYS_DRV_INFO,
	    sizeof(struct ida_phys_drv_info), DMA_DATA_TRANSFER },
	{ CMD_BLINK_DRV_LEDS,
	    sizeof(struct ida_blink_drv_leds), DMA_DATA_OUT },
	{ CMD_SENSE_DRV_LEDS,
	    sizeof(struct ida_blink_drv_leds), DMA_DATA_IN },
	{ CMD_GET_LOG_DRV_EXT,
	    sizeof(struct ida_drive_info_ext), DMA_DATA_IN },
	{ CMD_RESET_CTRL, 0, 0 },
	{ CMD_GET_CONFIG, 0, 0 },
	{ CMD_SET_CONFIG, 0, 0 },
	{ CMD_LABEL_LOG_DRV,
	    sizeof(struct ida_label_logical), DMA_DATA_OUT },
	{ CMD_SET_SURFACE_DELAY, 0, 0 },
	{ CMD_SENSE_BUS_PARAMS, 0, 0 },
	{ CMD_SENSE_SUBSYS_INFO, 0, 0 },
	{ CMD_SENSE_SURFACE_ATS, 0, 0 },
	{ CMD_PASSTHROUGH, 0, 0 },
	{ CMD_RESET_SCSI_DEV, 0, 0 },
	{ CMD_PAUSE_BG_ACT, 0, 0 },
	{ CMD_RESUME_BG_ACT, 0, 0 },
	{ CMD_START_FIRMWARE, 0, 0 },
	{ CMD_SENSE_DRV_ERR_LOG, 0, 0 },
	{ CMD_START_CPM, 0, 0 },
	{ CMD_SENSE_CP, 0, 0 },
	{ CMD_STOP_CPM, 0, 0 },
	{ CMD_FLUSH_CACHE, 0, 0 },
	{ CMD_ACCEPT_MEDIA_EXCH, 0, 0 },
	{ 0, 0, 0 }
};

static struct cmd_info *
ida_cmd_lookup(int command)
{
	struct cmd_info *ci;

	ci = ci_list;
	while (ci->cmd) {
		if (ci->cmd == command)
			return (ci);
		ci++;
	}
	return (NULL);
}