/*-
 * Copyright (c) 1999 Jonathan Lemon
 * All rights reserved.
 *
 * Derived from the original IDA Compaq RAID driver, which is
 * Copyright (c) 1996, 1997, 1998, 1999
 * Mark Dawson and David James. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Generic driver for Compaq SMART RAID adapters.
 *
 * Specific probe routines are in:
 *	pci/ida_pci.c
 *	i386/eisa/ida_eisa.c
 */

#include <pci.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>

#include <sys/buf.h>
#include <sys/bus.h>
#include <sys/device.h>
#include <sys/devicestat.h>

#if NPCI > 0
#include <machine/bus_memio.h>
#endif
#include <machine/bus_pio.h>
#include <machine/bus.h>
#include <machine/clock.h>
#include <sys/rman.h>

#include <dev/ida/idareg.h>
#include <dev/ida/idavar.h>

#define ida_inl(ida, port) \
	bus_space_read_4((ida)->tag, (ida)->bsh, port)

#define ida_outl(ida, port, val) \
	bus_space_write_4((ida)->tag, (ida)->bsh, port, val)

/* prototypes */
static void ida_alloc_qcb(struct ida_softc *ida);
static void ida_construct_qcb(struct ida_softc *ida);
static void ida_start(struct ida_softc *ida);
static void ida_done(struct ida_softc *ida, struct ida_qcb *qcb);
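
/*
 * Rough calling sequence expected from the bus front ends (ida_pci.c,
 * ida_eisa.c).  This is a sketch only; the exact resource setup lives in
 * those files and is not shown here:
 *
 *	allocate registers/irq, fill in softc	(front end)
 *	ida_init(ida)				set up DMA tags and QCBs
 *	bus_setup_intr(..., ida_intr, ...)	hook the interrupt handler
 *	ida_attach(ida)				query controller, add "id" children
 *	(the front end presumably sets IDA_ATTACHED once interrupt-driven
 *	 completion via ida_wait()/tsleep() is safe)
 *	ida_free(ida)				teardown on failure or detach
 */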

void
ida_free(struct ida_softc *ida)
{

	/*
	 * still need to call bus_dmamap_destroy() for each map created
	 * in ida_alloc_qcb().
	 */

	if (ida->hwqcb_busaddr)
		bus_dmamap_unload(ida->hwqcb_dmat, ida->hwqcb_dmamap);

	if (ida->hwqcbs)
		bus_dmamem_free(ida->hwqcb_dmat, ida->hwqcbs,
		    ida->hwqcb_dmamap);

	if (ida->buffer_dmat)
		bus_dma_tag_destroy(ida->buffer_dmat);

	if (ida->hwqcb_dmat)
		bus_dma_tag_destroy(ida->hwqcb_dmat);

	if (ida->qcbs != NULL)
		free(ida->qcbs, M_DEVBUF);

	if (ida->ih != NULL)
		bus_teardown_intr(ida->dev, ida->irq, ida->ih);

	if (ida->irq != NULL)
		bus_release_resource(ida->dev, ida->irq_res_type,
		    0, ida->irq);

	if (ida->parent_dmat != NULL)
		bus_dma_tag_destroy(ida->parent_dmat);

	if (ida->regs != NULL)
		bus_release_resource(ida->dev, ida->regs_res_type,
		    ida->regs_res_id, ida->regs);
}

/*
 * record bus address from bus_dmamap_load
 */
static void
ida_dma_map_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	baddr = (bus_addr_t *)arg;
	*baddr = segs->ds_addr;
}

static __inline struct ida_qcb *
ida_get_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL) {
		SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	} else {
		ida_alloc_qcb(ida);
		if ((qcb = SLIST_FIRST(&ida->free_qcbs)) != NULL)
			SLIST_REMOVE_HEAD(&ida->free_qcbs, link.sle);
	}
	return (qcb);
}

/*
 * XXX
 * since we allocate all QCB space up front during initialization, then
 * why bother with this routine?
 */
static void
ida_alloc_qcb(struct ida_softc *ida)
{
	struct ida_qcb *qcb;
	int error;

	if (ida->num_qcbs >= IDA_QCB_MAX)
		return;

	qcb = &ida->qcbs[ida->num_qcbs];

	error = bus_dmamap_create(ida->buffer_dmat, /*flags*/0, &qcb->dmamap);
	if (error != 0)
		return;

	qcb->flags = QCB_FREE;
	qcb->hwqcb = &ida->hwqcbs[ida->num_qcbs];
	qcb->hwqcb->qcb = qcb;
	SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle);
	ida->num_qcbs++;
}
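
/*
 * ida_init() builds two DMA tags: hwqcb_dmat covers the single contiguous
 * array of IDA_QCB_MAX hardware QCBs, which is loaded exactly once and has
 * its bus address recorded in hwqcb_busaddr by ida_dma_map_cb(), while
 * buffer_dmat is used per-request to map data buffers for the controller.
 * Since the hardware QCB array never moves, idahwqcbvtop()/idahwqcbptov()
 * can translate between kernel virtual and bus addresses with simple offset
 * arithmetic from hwqcb_busaddr.
 */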

int
ida_init(struct ida_softc *ida)
{
	int error;

	ida->unit = device_get_unit(ida->dev);
	ida->tag = rman_get_bustag(ida->regs);
	ida->bsh = rman_get_bushandle(ida->regs);

	SLIST_INIT(&ida->free_qcbs);
	STAILQ_INIT(&ida->qcb_queue);
	bufq_init(&ida->buf_queue);

	ida->qcbs = (struct ida_qcb *)
	    malloc(IDA_QCB_MAX * sizeof(struct ida_qcb), M_DEVBUF, M_NOWAIT);
	if (ida->qcbs == NULL)
		return (ENOMEM);
	bzero(ida->qcbs, IDA_QCB_MAX * sizeof(struct ida_qcb));

	/*
	 * Create our DMA tags
	 */

	/* DMA tag for our hardware QCB structures */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
	    /*flags*/0, &ida->hwqcb_dmat);
	if (error)
		return (ENOMEM);

	/* DMA tag for mapping buffers into device space */
	error = bus_dma_tag_create(ida->parent_dmat,
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR, /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/MAXBSIZE, /*nsegments*/IDA_NSEG,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ida->buffer_dmat);
	if (error)
		return (ENOMEM);

	/* Allocation of hardware QCBs */
	/* XXX allocation is rounded to hardware page size */
	error = bus_dmamem_alloc(ida->hwqcb_dmat,
	    (void **)&ida->hwqcbs, BUS_DMA_NOWAIT, &ida->hwqcb_dmamap);
	if (error)
		return (ENOMEM);

	/* And permanently map them in */
	bus_dmamap_load(ida->hwqcb_dmat, ida->hwqcb_dmamap,
	    ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb),
	    ida_dma_map_cb, &ida->hwqcb_busaddr, /*flags*/0);

	bzero(ida->hwqcbs, IDA_QCB_MAX * sizeof(struct ida_hardware_qcb));

	ida_alloc_qcb(ida);		/* allocate an initial qcb */

	return (0);
}

void
ida_attach(struct ida_softc *ida)
{
	struct ida_controller_info cinfo;
	int error, i;

	ida_outl(ida, R_INT_MASK, INT_DISABLE);

	error = ida_command(ida, CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo),
	    IDA_CONTROLLER, DMA_DATA_IN);
	if (error) {
		device_printf(ida->dev, "CMD_GET_CTRL_INFO failed.\n");
		return;
	}

	device_printf(ida->dev, "drives=%d firm_rev=%c%c%c%c\n",
	    cinfo.num_drvs, cinfo.firm_rev[0], cinfo.firm_rev[1],
	    cinfo.firm_rev[2], cinfo.firm_rev[3]);

	ida->num_drives = cinfo.num_drvs;

	for (i = 0; i < ida->num_drives; i++)
		device_add_child(ida->dev, "id", i, NULL);

	bus_generic_attach(ida->dev);

	ida_outl(ida, R_INT_MASK, INT_ENABLE);
}

static void
ida_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
{
	struct ida_hardware_qcb *hwqcb = (struct ida_hardware_qcb *)arg;
	int i;

	hwqcb->hdr.size = (sizeof(struct ida_req) +
	    sizeof(struct ida_sgb) * IDA_NSEG) >> 2;

	for (i = 0; i < nsegments; i++) {
		hwqcb->seg[i].addr = segs[i].ds_addr;
		hwqcb->seg[i].length = segs[i].ds_len;
	}
	hwqcb->req.sgcount = nsegments;
}

int
ida_command(struct ida_softc *ida, int command, void *data, int datasize,
	int drive, int flags)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	int s;

	s = splbio();
	qcb = ida_get_qcb(ida);
	splx(s);

	if (qcb == NULL) {
		printf("ida_command: out of QCBs\n");
		return (1);
	}

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)data, datasize, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	hwqcb->hdr.drive = drive;		/* XXX */
	hwqcb->req.bcount = howmany(datasize, DEV_BSIZE);
	hwqcb->req.command = command;

	qcb->flags = flags | IDA_COMMAND;

	s = splbio();
	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
	ida_start(ida);
	ida_wait(ida, qcb, 500);
	splx(s);

	/* XXX should have status returned here? */
	/* XXX have "status pointer" area in QCB? */

	return (0);
}
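
/*
 * Normal I/O path: ida_submit_buf() queues the buf, ida_construct_qcb()
 * turns the head of the buf queue into a QCB (mapping its data through
 * buffer_dmat), and ida_start() feeds queued QCBs to the command FIFO.
 * Completions come back through the done FIFO in ida_intr()/ida_wait()
 * and are retired by ida_done(), which hands the buf to id_intr().
 */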

void
ida_submit_buf(struct ida_softc *ida, struct buf *bp)
{
	bufq_insert_tail(&ida->buf_queue, bp);
	ida_construct_qcb(ida);
	ida_start(ida);
}

static void
ida_construct_qcb(struct ida_softc *ida)
{
	struct ida_hardware_qcb *hwqcb;
	struct ida_qcb *qcb;
	bus_dmasync_op_t op;
	struct buf *bp;

	bp = bufq_first(&ida->buf_queue);
	if (bp == NULL)
		return;				/* no more buffers */

	qcb = ida_get_qcb(ida);
	if (qcb == NULL)
		return;				/* out of resources */

	bufq_remove(&ida->buf_queue, bp);
	qcb->buf = bp;
	qcb->flags = 0;

	hwqcb = qcb->hwqcb;
	bzero(hwqcb, sizeof(struct ida_hdr) + sizeof(struct ida_req));

	bus_dmamap_load(ida->buffer_dmat, qcb->dmamap,
	    (void *)bp->b_data, bp->b_bcount, ida_setup_dmamap, hwqcb, 0);
	op = qcb->flags & DMA_DATA_IN ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE;
	bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op);

	/*
	 * XXX
	 */
	{
		struct id_softc *drv = (struct id_softc *)bp->b_driver1;
		hwqcb->hdr.drive = drv->unit;
	}

	hwqcb->req.blkno = bp->b_pblkno;
	hwqcb->req.bcount = howmany(bp->b_bcount, DEV_BSIZE);
	hwqcb->req.command = bp->b_flags & B_READ ? CMD_READ : CMD_WRITE;

	STAILQ_INSERT_TAIL(&ida->qcb_queue, qcb, link.stqe);
}

static __inline bus_addr_t
idahwqcbvtop(struct ida_softc *ida, struct ida_hardware_qcb *hwqcb)
{
	return (ida->hwqcb_busaddr +
	    ((bus_addr_t)hwqcb - (bus_addr_t)ida->hwqcbs));
}

static __inline struct ida_qcb *
idahwqcbptov(struct ida_softc *ida, bus_addr_t hwqcb_addr)
{
	struct ida_hardware_qcb *hwqcb;

	hwqcb = (struct ida_hardware_qcb *)
	    ((bus_addr_t)ida->hwqcbs + (hwqcb_addr - ida->hwqcb_busaddr));
	return (hwqcb->qcb);
}

/*
 * This routine will be called from ida_intr in order to queue up more
 * I/O, meaning that we may be in an interrupt context.  Hence, we should
 * not muck around with spl() in this routine.
 */
static void
ida_start(struct ida_softc *ida)
{
	struct ida_qcb *qcb;

	while ((qcb = STAILQ_FIRST(&ida->qcb_queue)) != NULL) {
		if (ida_inl(ida, R_CMD_FIFO) == 0)
			break;			/* fifo is full */
		STAILQ_REMOVE_HEAD(&ida->qcb_queue, link.stqe);
		/*
		 * XXX
		 * place the qcb on an active list and set a timeout?
		 */
		qcb->state = QCB_ACTIVE;
		/*
		 * XXX
		 * cache the physaddr so we don't keep doing this?
		 */
		ida_outl(ida, R_CMD_FIFO, idahwqcbvtop(ida, qcb->hwqcb));
	}
}
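
/*
 * Completion handling comes in two flavors: before the controller is fully
 * attached (IDA_ATTACHED not yet set), ida_wait() polls the done FIFO
 * directly; afterwards it simply tsleep()s on the qcb and relies on
 * ida_intr()/ida_done() to wake it up.  The low two bits of a done-FIFO
 * entry are masked off (completed & ~3) before the bus address is turned
 * back into a QCB pointer.
 */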
414 */ 415 ida_outl(ida, R_CMD_FIFO, idahwqcbvtop(ida, qcb->hwqcb)); 416 } 417 } 418 419 void 420 ida_wait(struct ida_softc *ida, struct ida_qcb *qcb, int delay) 421 { 422 struct ida_qcb *qcb_done = NULL; 423 bus_addr_t completed; 424 425 if (ida->flags & IDA_ATTACHED) { 426 if (tsleep((caddr_t)qcb, PRIBIO, "idacmd", delay)) 427 panic("ida_command: timeout waiting for interrupt"); 428 return; 429 } 430 431 while ((completed = ida_inl(ida, R_DONE_FIFO)) == 0) { 432 if (delay-- == 0) 433 panic("ida_wait: timeout waiting for completion"); 434 DELAY(10); 435 } 436 437 qcb_done = idahwqcbptov(ida, completed & ~3); 438 if (qcb_done != qcb) 439 panic("ida_wait: incorrect qcb returned"); 440 ida_done(ida, qcb); 441 return; 442 } 443 444 void 445 ida_intr(void *data) 446 { 447 struct ida_softc *ida; 448 struct ida_qcb *qcb; 449 bus_addr_t completed; 450 451 ida = (struct ida_softc *)data; 452 453 if (ida_inl(ida, R_INT_PENDING) == 0) 454 return; /* not our interrupt */ 455 456 while ((completed = ida_inl(ida, R_DONE_FIFO)) != 0) { 457 qcb = idahwqcbptov(ida, completed & ~3); 458 459 if (qcb == NULL || qcb->state != QCB_ACTIVE) { 460 device_printf(ida->dev, 461 "ignoring completion %x\n", completed); 462 continue; 463 } 464 ida_done(ida, qcb); 465 } 466 ida_start(ida); 467 } 468 469 /* 470 * should switch out command type; may be status, not just I/O. 471 */ 472 static void 473 ida_done(struct ida_softc *ida, struct ida_qcb *qcb) 474 { 475 int error = 0; 476 477 /* 478 * finish up command 479 */ 480 if (qcb->flags & DMA_DATA_TRANSFER) { 481 bus_dmasync_op_t op; 482 483 op = qcb->flags & DMA_DATA_IN ? 484 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE; 485 bus_dmamap_sync(ida->buffer_dmat, qcb->dmamap, op); 486 bus_dmamap_unload(ida->buffer_dmat, qcb->dmamap); 487 } 488 489 if (qcb->hwqcb->req.error & SOFT_ERROR) 490 device_printf(ida->dev, "soft error\n"); 491 if (qcb->hwqcb->req.error & HARD_ERROR) { 492 error = 1; 493 device_printf(ida->dev, "hard error\n"); 494 } 495 if (qcb->hwqcb->req.error & CMD_REJECTED) { 496 error = 1; 497 device_printf(ida->dev, "invalid request\n"); 498 } 499 500 if (qcb->flags & IDA_COMMAND) { 501 if (ida->flags & IDA_ATTACHED) 502 wakeup(qcb); 503 } else { 504 if (error) 505 qcb->buf->b_flags |= B_ERROR; 506 id_intr(qcb->buf); 507 } 508 509 qcb->state = QCB_FREE; 510 SLIST_INSERT_HEAD(&ida->free_qcbs, qcb, link.sle); 511 ida_construct_qcb(ida); 512 } 513