/*-
 * Copyright (c) 1999,2000 Jonathan Lemon
 * All rights reserved.
 *
 * Derived from the original IDA Compaq RAID driver, which is
 * Copyright (c) 1996, 1997, 1998, 1999
 *	Mark Dawson and David James. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * Generic driver for Compaq SMART RAID adapters.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>

#include <machine/bus_memio.h>
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <sys/rman.h>

#include <geom/geom_disk.h>

#include <dev/ida/idareg.h>
#include <dev/ida/idavar.h>

/* prototypes */
static void ida_alloc_qcb(struct ida_softc *ida);
static void ida_construct_qcb(struct ida_softc *ida);
static void ida_start(struct ida_softc *ida);
static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb);
static int ida_wait(struct ida_softc *ida, struct ida_qcb *qcb);

void
ida_free(struct ida_softc *ida)
{
	int i;

	for (i = 0; i < ida->num_qcbs; i++)
		bus_dmamap_destroy(ida->buffer_dmat, ida->qcbs[i].dmamap);

	if (ida->hwqcb_busaddr)
		bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap);

	if (ida->hwqcbs)
		bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs,
		    ida->hwqcb_dmamap);

	if (ida->buffer_dmat)
		bus_dma_tag_destroy(ida->buffer_dmat);

	if (ida->hwqcb_dmat)
		bus_dma_tag_destroy(ida->hwqcb_dmat);

	if (ida->qcbs != NULL)
		free(ida->qcbs, M_DEVBUF);

	if (ida->ih != NULL)
		bus_teardown_intr(ida->dev, ida->irq, ida->ih);

	if (ida->irq != NULL)
		bus_release_resource(ida->dev, ida->irq_res_type,
		    0, ida->irq);

	if (ida->parent_dmat != NULL)
		bus_dma_tag_destroy(ida->parent_dmat);

	if (ida->regs != NULL)
		bus_release_resource(ida->dev, ida->regs_res_type,
		    ida->regs_res_id, ida->regs);
}

/*
 * record bus address from bus_dmamap_load
 */
static void
ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}
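
/*
 * Note: the hardware QCB area is mapped through a tag created with a
 * single segment (nsegments = 1 in ida_init() below), so this callback
 * only ever sees one segment and recording segs[0].ds_addr is sufficient.
 */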

static __inline struct ida_qcb *
ida_get_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) {
		SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	} else {
		ida_alloc_qcb(ida);
		if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL)
			SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	}
	return (qcb);
}

static __inline bus_addr_t
idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb)
{
	return (ida->hwqcb_busaddr +
	    ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs));
}

static __inline struct ida_qcb *
idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr)
{
	struct ida_hardware_qcb *hwqcb;

	hwqcb = (struct ida_hardware_qcb *)
	    ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr));
	return (hwqcb->qcb);
}

/*
 * XXX
 * since we allocate all QCB space up front during initialization, then
 * why bother with this routine?
 */
static void
ida_alloc_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;
	int error;

	if (ida->num_qcbs >= IDA_QCB_MAX)
		return;

	qcb = &ida->qcbs[ida->num_qcbs];

	error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap);
	if (error != 0)
		return;

	qcb->flags = QCB_FREE;
	qcb->hwqcb = &ida->hwqcbs[ida->num_qcbs];
	qcb->hwqcb->qcb = qcb;
	qcb->hwqcb_busaddr = idahwqcbvtop(ida, qcb->hwqcb);
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida->num_qcbs++;
}

int
ida_init(struct ida_softc *ida)
{
	int error;

	ida->unit = device_get_unit(ida->dev);
	ida->tag = rman_get_bustag(ida->regs);
	ida->bsh = rman_get_bushandle(ida->regs);

	SLIST_INIT(&ida->free_qcbs);
	STAILQ_INIT(&ida->qcb_queue);
	bioq_init(&ida->bio_queue);

	ida->qcbs = (struct ida_qcb *)
	    malloc(IDA_QCB_MAX * sizeof(struct ida_qcb), M_DEVBUF,
		M_NOWAIT | M_ZERO);
	if (ida->qcbs == NULL)
		return (ENOMEM);

	/*
	 * Create our DMA tags
	 */

	/* DMA tag for our hardware QCB structures */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
	    /*flags*/0, /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant,
	    &ida->hwqcb_dmat);
	if (error)
		return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/MAXBSIZE, /*nsegments*/IDA_NSEG,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0,
	    /*lockfunc*/busdma_lock_mutex, /*lockarg*/&Giant,
	    &ida->buffer_dmat);
	if (error)
		return (ENOMEM);

	/* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat,
	    (void **)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
		return (ENOMEM);

	/* And permanently map them in */
	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);
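
	/*
	 * ida_dma_map_cb() records the bus address of the QCB block in
	 * ida->hwqcb_busaddr.  Because the memory was obtained from
	 * bus_dmamem_alloc(), the load is expected to complete (and invoke
	 * the callback) before bus_dmamap_load() returns rather than being
	 * deferred.
	 */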

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	ida_alloc_qcb(ida);		/* allocate an initial qcb */

	return (0);
}

void
ida_attach(struct ida_softc *ida)
{
	struct ida_controller_info cinfo;
	int error, i;

	ida->cmd.int_enable(ida, 0);

	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
	    IDA_CONTROLLER, 0, DMA_DATA_IN);
	if (error) {
		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
		return;
	}

	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
	    cinfo.firm_rev[2], cinfo.firm_rev[3]);

	if (ida->flags & IDA_FIRMWARE) {
		int data;

		error = ida_command(ida, CMD_START_FIRMWARE,
		    &data, sizeof(data), IDA_CONTROLLER, 0, DMA_DATA_IN);
		if (error) {
			device_printf(ida->dev, "CMD_START_FIRMWARE failed.\n");
			return;
		}
	}

	ida->num_drives = 0;
	for (i = 0; i < cinfo.num_drvs; i++)
		device_add_child(ida->dev, /*"idad"*/NULL, -1);

	bus_generic_attach(ida->dev);

	ida->cmd.int_enable(ida, 1);
}

int
ida_detach(device_t dev)
{
	struct ida_softc *ida;
	int error = 0;

	ida = (struct ida_softc *)device_get_softc(dev);

	/*
	 * XXX
	 * before detaching, we must make sure that the system is
	 * quiescent; nothing mounted, no pending activity.
	 */

	/*
	 * XXX
	 * now, how are we supposed to maintain a list of our drives?
	 * iterate over our "child devices"?
	 */

	ida_free(ida);
	return (error);
}

static void
ida_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
	struct ida_hardware_qcb *hwqcb = (struct ida_hardware_qcb *)arg;
	int i;

	hwqcb->hdr.size = htole16((sizeof(struct ida_req) +
	    sizeof(struct ida_sgb) * IDA_NSEG) >> 2);

	for (i = 0; i < nsegments; i++) {
		hwqcb->seg[i].addr = htole32(segs[i].ds_addr);
		hwqcb->seg[i].length = htole32(segs[i].ds_len);
	}
	hwqcb->req.sgcount = nsegments;
}

int
ida_command(struct ida_softc *ida, int command, void *data, int datasize,
	int drive, u_int32_t pblkno, int flags)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	int s, error;

	s = splbio();
	qcb = ida_get_qcb(ida);
	splx(s);

	if (qcb == NULL) {
		printf("ida_command: out of QCBs\n");
		return (EAGAIN);
	}

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	/* Set the flags before they are used to pick the sync direction. */
	qcb->flags = flags | IDA_COMMAND;

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)data, datasize, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	hwqcb->hdr.drive = drive;
	hwqcb->req.blkno = htole32(pblkno);
	hwqcb->req.bcount = htole16(howmany(datasize, DEV_BSIZE));
	hwqcb->req.command = command;

	s = splbio();
	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
	ida_start(ida);
	error = ida_wait(ida, qcb);
	splx(s);

	/* XXX should have status returned here? */
	/* XXX have "status pointer" area in QCB? */

	return (error);
}
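
/*
 * Buffered I/O path: ida_submit_buf() queues an incoming bio,
 * ida_construct_qcb() maps it onto a free hardware QCB, and ida_start()
 * feeds queued QCBs to the controller until its command FIFO fills.
 * Completions are collected either by polling in ida_wait() or by
 * ida_intr(), both of which hand the finished QCB to ida_done().
 */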

void
ida_submit_buf(struct ida_softc *ida, struct bio *bp)
{
	bioq_insert_tail(&ida->bio_queue, bp);
	ida_construct_qcb(ida);
	ida_start(ida);
}

static void
ida_construct_qcb(struct ida_softc *ida)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	struct bio *bp;

	bp = bioq_first(&ida->bio_queue);
	if (bp == NULL)
		return;				/* no more buffers */

	qcb = ida_get_qcb(ida);
	if (qcb == NULL)
		return;				/* out of resources */

	bioq_remove(&ida->bio_queue, bp);
	qcb->buf = bp;
	qcb->flags = bp->bio_cmd == BIO_READ ? DMA_DATA_IN : DMA_DATA_OUT;

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)bp->bio_data, bp->bio_bcount, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	{
		struct idad_softc *drv = (struct idad_softc *)bp->bio_driver1;
		hwqcb->hdr.drive = drv->drive;
	}

	hwqcb->req.blkno = htole32(bp->bio_pblkno);
	hwqcb->req.bcount = htole16(howmany(bp->bio_bcount, DEV_BSIZE));
	hwqcb->req.command = bp->bio_cmd == BIO_READ ? CMD_READ : CMD_WRITE;

	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
}

/*
 * This routine will be called from ida_intr in order to queue up more
 * I/O, meaning that we may be in an interrupt context.  Hence, we should
 * not muck around with spl() in this routine.
 */
static void
ida_start(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) {
		if (ida->cmd.fifo_full(ida))
			break;
		STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe);
		/*
		 * XXX
		 * place the qcb on an active list and set a timeout?
		 */
		qcb->state = QCB_ACTIVE;
		ida->cmd.submit(ida, qcb);
	}
}

static int
ida_wait(struct ida_softc *ida, struct ida_qcb *qcb)
{
	struct ida_qcb *qcb_done = NULL;
	bus_addr_t completed;
	int delay;

	if (ida->flags & IDA_INTERRUPTS) {
		if (tsleep(qcb, PRIBIO, "idacmd", 5 * hz))
			return (ETIMEDOUT);
		return (0);
	}

again:
	delay = 5 * 1000 * 100;			/* 5 sec delay */
	while ((completed = ida->cmd.done(ida)) == 0) {
		if (delay-- == 0)
			return (ETIMEDOUT);
		DELAY(10);
	}

	qcb_done = idahwqcbptov(ida, completed & ~3);
	if (qcb_done != qcb)
		goto again;
	ida_done(ida, qcb);
	return (0);
}
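
/*
 * Completion values read from the controller's done FIFO encode both the
 * hardware QCB bus address and, in the low two bits, a status tag: the
 * address is recovered with (completed & ~3), and a nonzero tag is treated
 * as a rejected ("Bad Command List") command below.
 */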

void
ida_intr(void *data)
{
	struct ida_softc *ida;
	struct ida_qcb *qcb;
	bus_addr_t completed;

	ida = (struct ida_softc *)data;

	if (ida->cmd.int_pending(ida) == 0)
		return;				/* not our interrupt */

	while ((completed = ida->cmd.done(ida)) != 0) {
		qcb = idahwqcbptov(ida, completed & ~3);

		if (qcb == NULL || qcb->state != QCB_ACTIVE) {
			device_printf(ida->dev,
			    "ignoring completion %jx\n", (intmax_t)completed);
			continue;
		}
		/* Handle "Bad Command List" errors. */
		if ((completed & 3) && (qcb->hwqcb->req.error == 0))
			qcb->hwqcb->req.error = CMD_REJECTED;
		ida_done(ida, qcb);
	}
	ida_start(ida);
}

/*
 * should switch out command type; may be status, not just I/O.
 */
static void
ida_done(struct ida_softc *ida, struct ida_qcb *qcb)
{
	int error = 0;

	/*
	 * finish up command
	 */
	if (qcb->flags & DMA_DATA_TRANSFER) {
		bus_dmasync_op_t op;

		op = qcb->flags & DMA_DATA_IN ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE;
		bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);
		bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap);
	}

	if (qcb->hwqcb->req.error & SOFT_ERROR) {
		if (qcb->buf)
			device_printf(ida->dev, "soft %s error\n",
			    qcb->buf->bio_cmd == BIO_READ ?
				"read" : "write");
		else
			device_printf(ida->dev, "soft error\n");
	}
	if (qcb->hwqcb->req.error & HARD_ERROR) {
		error = 1;
		if (qcb->buf)
			device_printf(ida->dev, "hard %s error\n",
			    qcb->buf->bio_cmd == BIO_READ ?
				"read" : "write");
		else
			device_printf(ida->dev, "hard error\n");
	}
	if (qcb->hwqcb->req.error & CMD_REJECTED) {
		error = 1;
		device_printf(ida->dev, "invalid request\n");
	}

	if (qcb->flags & IDA_COMMAND) {
		if (ida->flags & IDA_INTERRUPTS)
			wakeup(qcb);
	} else {
		if (error)
			qcb->buf->bio_flags |= BIO_ERROR;
		idad_intr(qcb->buf);
	}

	qcb->state = QCB_FREE;
	qcb->buf = NULL;
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida_construct_qcb(ida);
}