1 /*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 */ 27 /*- 28 * Copyright (c) 2011 LSI Corp. 29 * All rights reserved. 30 * 31 * Redistribution and use in source and binary forms, with or without 32 * modification, are permitted provided that the following conditions 33 * are met: 34 * 1. Redistributions of source code must retain the above copyright 35 * notice, this list of conditions and the following disclaimer. 36 * 2. Redistributions in binary form must reproduce the above copyright 37 * notice, this list of conditions and the following disclaimer in the 38 * documentation and/or other materials provided with the distribution. 39 * 40 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 41 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 42 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 43 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 44 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 45 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 46 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 47 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 48 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 49 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 50 * SUCH DAMAGE. 
51 * 52 * LSI MPT-Fusion Host Adapter FreeBSD 53 * 54 * $FreeBSD$ 55 */ 56 57 #include <sys/cdefs.h> 58 __FBSDID("$FreeBSD$"); 59 60 /* Communications core for LSI MPT2 */ 61 62 /* TODO Move headers to mpsvar */ 63 #include <sys/types.h> 64 #include <sys/param.h> 65 #include <sys/systm.h> 66 #include <sys/kernel.h> 67 #include <sys/selinfo.h> 68 #include <sys/lock.h> 69 #include <sys/mutex.h> 70 #include <sys/module.h> 71 #include <sys/bus.h> 72 #include <sys/conf.h> 73 #include <sys/bio.h> 74 #include <sys/malloc.h> 75 #include <sys/uio.h> 76 #include <sys/sysctl.h> 77 #include <sys/queue.h> 78 #include <sys/kthread.h> 79 #include <sys/endian.h> 80 #include <sys/eventhandler.h> 81 82 #include <machine/bus.h> 83 #include <machine/resource.h> 84 #include <sys/rman.h> 85 86 #include <dev/pci/pcivar.h> 87 88 #include <cam/scsi/scsi_all.h> 89 90 #include <dev/mps/mpi/mpi2_type.h> 91 #include <dev/mps/mpi/mpi2.h> 92 #include <dev/mps/mpi/mpi2_ioc.h> 93 #include <dev/mps/mpi/mpi2_sas.h> 94 #include <dev/mps/mpi/mpi2_cnfg.h> 95 #include <dev/mps/mpi/mpi2_init.h> 96 #include <dev/mps/mpi/mpi2_tool.h> 97 #include <dev/mps/mps_ioctl.h> 98 #include <dev/mps/mpsvar.h> 99 #include <dev/mps/mps_table.h> 100 101 static int mps_diag_reset(struct mps_softc *sc); 102 static int mps_init_queues(struct mps_softc *sc); 103 static int mps_message_unit_reset(struct mps_softc *sc); 104 static int mps_transition_operational(struct mps_softc *sc); 105 static void mps_startup(void *arg); 106 static int mps_send_iocinit(struct mps_softc *sc); 107 static int mps_attach_log(struct mps_softc *sc); 108 static __inline void mps_complete_command(struct mps_command *cm); 109 static void mps_dispatch_event(struct mps_softc *sc, uintptr_t data, 110 MPI2_EVENT_NOTIFICATION_REPLY *reply); 111 static void mps_config_complete(struct mps_softc *sc, struct mps_command *cm); 112 static void mps_periodic(void *); 113 static int mps_reregister_events(struct mps_softc *sc); 114 static void mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm); 115 116 SYSCTL_NODE(_hw, OID_AUTO, mps, CTLFLAG_RD, 0, "MPS Driver Parameters"); 117 118 MALLOC_DEFINE(M_MPT2, "mps", "mpt2 driver memory"); 119 120 /* 121 * Do a "Diagnostic Reset" aka a hard reset. This should get the chip out of 122 * any state and back to its initialization state machine. 123 */ 124 static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d }; 125 126 static int 127 mps_diag_reset(struct mps_softc *sc) 128 { 129 uint32_t reg; 130 int i, error, tries = 0; 131 132 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 133 134 /* Clear any pending interrupts */ 135 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 136 137 /* Push the magic sequence */ 138 error = ETIMEDOUT; 139 while (tries++ < 20) { 140 for (i = 0; i < sizeof(mpt2_reset_magic); i++) 141 mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 142 mpt2_reset_magic[i]); 143 144 DELAY(100 * 1000); 145 146 reg = mps_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET); 147 if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) { 148 error = 0; 149 break; 150 } 151 } 152 if (error) 153 return (error); 154 155 /* Send the actual reset. XXX need to refresh the reg? 
*/ 156 mps_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, 157 reg | MPI2_DIAG_RESET_ADAPTER); 158 159 /* Wait up to 300 seconds in 50ms intervals */ 160 error = ETIMEDOUT; 161 for (i = 0; i < 60000; i++) { 162 DELAY(50000); 163 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); 164 if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) { 165 error = 0; 166 break; 167 } 168 } 169 if (error) 170 return (error); 171 172 mps_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0); 173 174 return (0); 175 } 176 177 static int 178 mps_message_unit_reset(struct mps_softc *sc) 179 { 180 181 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 182 183 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, 184 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET << 185 MPI2_DOORBELL_FUNCTION_SHIFT); 186 DELAY(50000); 187 188 return (0); 189 } 190 191 static int 192 mps_transition_ready(struct mps_softc *sc) 193 { 194 uint32_t reg, state; 195 int error, tries = 0; 196 197 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 198 199 error = 0; 200 while (tries++ < 5) { 201 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); 202 mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg); 203 204 /* 205 * Ensure the IOC is ready to talk. If it's not, try 206 * resetting it. 207 */ 208 if (reg & MPI2_DOORBELL_USED) { 209 mps_diag_reset(sc); 210 DELAY(50000); 211 continue; 212 } 213 214 /* Is the adapter owned by another peer? */ 215 if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) == 216 (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) { 217 device_printf(sc->mps_dev, "IOC is under the control " 218 "of another peer host, aborting initialization.\n"); 219 return (ENXIO); 220 } 221 222 state = reg & MPI2_IOC_STATE_MASK; 223 if (state == MPI2_IOC_STATE_READY) { 224 /* Ready to go! */ 225 error = 0; 226 break; 227 } else if (state == MPI2_IOC_STATE_FAULT) { 228 mps_dprint(sc, MPS_INFO, "IOC in fault state 0x%x\n", 229 state & MPI2_DOORBELL_FAULT_CODE_MASK); 230 mps_diag_reset(sc); 231 } else if (state == MPI2_IOC_STATE_OPERATIONAL) { 232 /* Need to take ownership */ 233 mps_message_unit_reset(sc); 234 } else if (state == MPI2_IOC_STATE_RESET) { 235 /* Wait a bit, IOC might be in transition */ 236 mps_dprint(sc, MPS_FAULT, 237 "IOC in unexpected reset state\n"); 238 } else { 239 mps_dprint(sc, MPS_FAULT, 240 "IOC in unknown state 0x%x\n", state); 241 error = EINVAL; 242 break; 243 } 244 245 /* Wait 50ms for things to settle down. */ 246 DELAY(50000); 247 } 248 249 if (error) 250 device_printf(sc->mps_dev, "Cannot transition IOC to ready\n"); 251 252 return (error); 253 } 254 255 static int 256 mps_transition_operational(struct mps_softc *sc) 257 { 258 uint32_t reg, state; 259 int error; 260 261 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 262 263 error = 0; 264 reg = mps_regread(sc, MPI2_DOORBELL_OFFSET); 265 mps_dprint(sc, MPS_INFO, "Doorbell= 0x%x\n", reg); 266 267 state = reg & MPI2_IOC_STATE_MASK; 268 if (state != MPI2_IOC_STATE_READY) { 269 if ((error = mps_transition_ready(sc)) != 0) { 270 mps_dprint(sc, MPS_FAULT, 271 "%s failed to transition ready\n", __func__); 272 return (error); 273 } 274 } 275 276 error = mps_send_iocinit(sc); 277 return (error); 278 } 279 280 /* 281 * XXX Some of this should probably move to mps.c 282 * 283 * The terms diag reset and hard reset are used interchangeably in the MPI 284 * docs to mean resetting the controller chip. In this code diag reset 285 * cleans everything up, and the hard reset function just sends the reset 286 * sequence to the chip. 
This should probably be refactored so that every 287 * subsystem gets a reset notification of some sort, and can clean up 288 * appropriately. 289 */ 290 int 291 mps_reinit(struct mps_softc *sc) 292 { 293 int error; 294 uint32_t db; 295 296 mps_printf(sc, "%s sc %p\n", __func__, sc); 297 298 mtx_assert(&sc->mps_mtx, MA_OWNED); 299 300 if (sc->mps_flags & MPS_FLAGS_DIAGRESET) { 301 mps_printf(sc, "%s reset already in progress\n", __func__); 302 return 0; 303 } 304 305 /* make sure the completion callbacks can recognize they're getting 306 * a NULL cm_reply due to a reset. 307 */ 308 sc->mps_flags |= MPS_FLAGS_DIAGRESET; 309 310 mps_printf(sc, "%s mask interrupts\n", __func__); 311 mps_mask_intr(sc); 312 313 error = mps_diag_reset(sc); 314 if (error != 0) { 315 panic("%s hard reset failed with error %d\n", 316 __func__, error); 317 } 318 319 /* Restore the PCI state, including the MSI-X registers */ 320 mps_pci_restore(sc); 321 322 /* Give the I/O subsystem special priority to get itself prepared */ 323 mpssas_handle_reinit(sc); 324 325 /* reinitialize queues after the reset */ 326 bzero(sc->free_queue, sc->fqdepth * 4); 327 mps_init_queues(sc); 328 329 /* get the chip out of the reset state */ 330 error = mps_transition_operational(sc); 331 if (error != 0) 332 panic("%s transition operational failed with error %d\n", 333 __func__, error); 334 335 /* Reinitialize the reply queue. This is delicate because this 336 * function is typically invoked by task mgmt completion callbacks, 337 * which are called by the interrupt thread. We need to make sure 338 * the interrupt handler loop will exit when we return to it, and 339 * that it will recognize the indexes we've changed. 340 */ 341 sc->replypostindex = 0; 342 mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); 343 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex); 344 345 db = mps_regread(sc, MPI2_DOORBELL_OFFSET); 346 mps_printf(sc, "%s doorbell 0x%08x\n", __func__, db); 347 348 mps_printf(sc, "%s unmask interrupts post %u free %u\n", __func__, 349 sc->replypostindex, sc->replyfreeindex); 350 351 mps_unmask_intr(sc); 352 353 mps_printf(sc, "%s restarting post %u free %u\n", __func__, 354 sc->replypostindex, sc->replyfreeindex); 355 356 /* restart will reload the event masks clobbered by the reset, and 357 * then enable the port. 358 */ 359 mps_reregister_events(sc); 360 361 /* the end of discovery will release the simq, so we're done. */ 362 mps_printf(sc, "%s finished sc %p post %u free %u\n", 363 __func__, sc, 364 sc->replypostindex, sc->replyfreeindex); 365 366 sc->mps_flags &= ~MPS_FLAGS_DIAGRESET; 367 368 return 0; 369 } 370 371 /* Wait for the chip to ACK a word that we've put into its FIFO */ 372 static int 373 mps_wait_db_ack(struct mps_softc *sc) 374 { 375 int retry; 376 377 for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) { 378 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) & 379 MPI2_HIS_SYS2IOC_DB_STATUS) == 0) 380 return (0); 381 DELAY(2000); 382 } 383 return (ETIMEDOUT); 384 } 385 386 /* Wait for the chip to signal that the next word in its FIFO can be fetched */ 387 static int 388 mps_wait_db_int(struct mps_softc *sc) 389 { 390 int retry; 391 392 for (retry = 0; retry < MPS_DB_MAX_WAIT; retry++) { 393 if ((mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) & 394 MPI2_HIS_IOC2SYS_DB_STATUS) != 0) 395 return (0); 396 DELAY(2000); 397 } 398 return (ETIMEDOUT); 399 } 400 401 /* Step through the synchronous command state machine, i.e. 
"Doorbell mode" */ 402 static int 403 mps_request_sync(struct mps_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply, 404 int req_sz, int reply_sz, int timeout) 405 { 406 uint32_t *data32; 407 uint16_t *data16; 408 int i, count, ioc_sz, residual; 409 410 /* Step 1 */ 411 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 412 413 /* Step 2 */ 414 if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) 415 return (EBUSY); 416 417 /* Step 3 418 * Announce that a message is coming through the doorbell. Messages 419 * are pushed at 32bit words, so round up if needed. 420 */ 421 count = (req_sz + 3) / 4; 422 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, 423 (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) | 424 (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT)); 425 426 /* Step 4 */ 427 if (mps_wait_db_int(sc) || 428 (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) { 429 mps_dprint(sc, MPS_FAULT, "Doorbell failed to activate\n"); 430 return (ENXIO); 431 } 432 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 433 if (mps_wait_db_ack(sc) != 0) { 434 mps_dprint(sc, MPS_FAULT, "Doorbell handshake failed\n"); 435 return (ENXIO); 436 } 437 438 /* Step 5 */ 439 /* Clock out the message data synchronously in 32-bit dwords*/ 440 data32 = (uint32_t *)req; 441 for (i = 0; i < count; i++) { 442 mps_regwrite(sc, MPI2_DOORBELL_OFFSET, data32[i]); 443 if (mps_wait_db_ack(sc) != 0) { 444 mps_dprint(sc, MPS_FAULT, 445 "Timeout while writing doorbell\n"); 446 return (ENXIO); 447 } 448 } 449 450 /* Step 6 */ 451 /* Clock in the reply in 16-bit words. The total length of the 452 * message is always in the 4th byte, so clock out the first 2 words 453 * manually, then loop the rest. 454 */ 455 data16 = (uint16_t *)reply; 456 if (mps_wait_db_int(sc) != 0) { 457 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 0\n"); 458 return (ENXIO); 459 } 460 data16[0] = 461 mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; 462 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 463 if (mps_wait_db_int(sc) != 0) { 464 mps_dprint(sc, MPS_FAULT, "Timeout reading doorbell 1\n"); 465 return (ENXIO); 466 } 467 data16[1] = 468 mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; 469 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 470 471 /* Number of 32bit words in the message */ 472 ioc_sz = reply->MsgLength; 473 474 /* 475 * Figure out how many 16bit words to clock in without overrunning. 476 * The precision loss with dividing reply_sz can safely be 477 * ignored because the messages can only be multiples of 32bits. 478 */ 479 residual = 0; 480 count = MIN((reply_sz / 4), ioc_sz) * 2; 481 if (count < ioc_sz * 2) { 482 residual = ioc_sz * 2 - count; 483 mps_dprint(sc, MPS_FAULT, "Driver error, throwing away %d " 484 "residual message words\n", residual); 485 } 486 487 for (i = 2; i < count; i++) { 488 if (mps_wait_db_int(sc) != 0) { 489 mps_dprint(sc, MPS_FAULT, 490 "Timeout reading doorbell %d\n", i); 491 return (ENXIO); 492 } 493 data16[i] = mps_regread(sc, MPI2_DOORBELL_OFFSET) & 494 MPI2_DOORBELL_DATA_MASK; 495 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 496 } 497 498 /* 499 * Pull out residual words that won't fit into the provided buffer. 500 * This keeps the chip from hanging due to a driver programming 501 * error. 
502 */ 503 while (residual--) { 504 if (mps_wait_db_int(sc) != 0) { 505 mps_dprint(sc, MPS_FAULT, 506 "Timeout reading doorbell\n"); 507 return (ENXIO); 508 } 509 (void)mps_regread(sc, MPI2_DOORBELL_OFFSET); 510 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 511 } 512 513 /* Step 7 */ 514 if (mps_wait_db_int(sc) != 0) { 515 mps_dprint(sc, MPS_FAULT, "Timeout waiting to exit doorbell\n"); 516 return (ENXIO); 517 } 518 if (mps_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) 519 mps_dprint(sc, MPS_FAULT, "Warning, doorbell still active\n"); 520 mps_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 521 522 return (0); 523 } 524 525 static void 526 mps_enqueue_request(struct mps_softc *sc, struct mps_command *cm) 527 { 528 529 mps_dprint(sc, MPS_TRACE, "%s SMID %u cm %p ccb %p\n", __func__, 530 cm->cm_desc.Default.SMID, cm, cm->cm_ccb); 531 532 if (sc->mps_flags & MPS_FLAGS_ATTACH_DONE) 533 mtx_assert(&sc->mps_mtx, MA_OWNED); 534 535 if (++sc->io_cmds_active > sc->io_cmds_highwater) 536 sc->io_cmds_highwater++; 537 538 mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET, 539 cm->cm_desc.Words.Low); 540 mps_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET, 541 cm->cm_desc.Words.High); 542 } 543 544 /* 545 * Just the FACTS, ma'am. 546 */ 547 static int 548 mps_get_iocfacts(struct mps_softc *sc, MPI2_IOC_FACTS_REPLY *facts) 549 { 550 MPI2_DEFAULT_REPLY *reply; 551 MPI2_IOC_FACTS_REQUEST request; 552 int error, req_sz, reply_sz; 553 554 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 555 556 req_sz = sizeof(MPI2_IOC_FACTS_REQUEST); 557 reply_sz = sizeof(MPI2_IOC_FACTS_REPLY); 558 reply = (MPI2_DEFAULT_REPLY *)facts; 559 560 bzero(&request, req_sz); 561 request.Function = MPI2_FUNCTION_IOC_FACTS; 562 error = mps_request_sync(sc, &request, reply, req_sz, reply_sz, 5); 563 564 return (error); 565 } 566 567 static int 568 mps_get_portfacts(struct mps_softc *sc, MPI2_PORT_FACTS_REPLY *facts, int port) 569 { 570 MPI2_PORT_FACTS_REQUEST *request; 571 MPI2_PORT_FACTS_REPLY *reply; 572 struct mps_command *cm; 573 int error; 574 575 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 576 577 if ((cm = mps_alloc_command(sc)) == NULL) 578 return (EBUSY); 579 request = (MPI2_PORT_FACTS_REQUEST *)cm->cm_req; 580 request->Function = MPI2_FUNCTION_PORT_FACTS; 581 request->PortNumber = port; 582 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 583 cm->cm_data = NULL; 584 error = mps_request_polled(sc, cm); 585 reply = (MPI2_PORT_FACTS_REPLY *)cm->cm_reply; 586 if (reply == NULL) { 587 mps_printf(sc, "%s NULL reply\n", __func__); 588 goto done; 589 } 590 if ((reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) { 591 mps_printf(sc, 592 "%s error %d iocstatus 0x%x iocloginfo 0x%x type 0x%x\n", 593 __func__, error, reply->IOCStatus, reply->IOCLogInfo, 594 reply->PortType); 595 error = ENXIO; 596 } 597 bcopy(reply, facts, sizeof(MPI2_PORT_FACTS_REPLY)); 598 done: 599 mps_free_command(sc, cm); 600 601 return (error); 602 } 603 604 static int 605 mps_send_iocinit(struct mps_softc *sc) 606 { 607 MPI2_IOC_INIT_REQUEST init; 608 MPI2_DEFAULT_REPLY reply; 609 int req_sz, reply_sz, error; 610 611 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 612 613 req_sz = sizeof(MPI2_IOC_INIT_REQUEST); 614 reply_sz = sizeof(MPI2_IOC_INIT_REPLY); 615 bzero(&init, req_sz); 616 bzero(&reply, reply_sz); 617 618 /* 619 * Fill in the init block. Note that most addresses are 620 * deliberately in the lower 32bits of memory. 
This is a micro-
 * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
 */
        init.Function = MPI2_FUNCTION_IOC_INIT;
        init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
        init.MsgVersion = MPI2_VERSION;
        init.HeaderVersion = MPI2_HEADER_VERSION;
        init.SystemRequestFrameSize = sc->facts->IOCRequestFrameSize;
        init.ReplyDescriptorPostQueueDepth = sc->pqdepth;
        init.ReplyFreeQueueDepth = sc->fqdepth;
        init.SenseBufferAddressHigh = 0;
        init.SystemReplyAddressHigh = 0;
        init.SystemRequestFrameBaseAddress.High = 0;
        init.SystemRequestFrameBaseAddress.Low = (uint32_t)sc->req_busaddr;
        init.ReplyDescriptorPostQueueAddress.High = 0;
        init.ReplyDescriptorPostQueueAddress.Low = (uint32_t)sc->post_busaddr;
        init.ReplyFreeQueueAddress.High = 0;
        init.ReplyFreeQueueAddress.Low = (uint32_t)sc->free_busaddr;
        init.TimeStamp.High = 0;
        init.TimeStamp.Low = (uint32_t)time_uptime;

        error = mps_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
        if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
                error = ENXIO;

        mps_dprint(sc, MPS_INFO, "IOCInit status= 0x%x\n", reply.IOCStatus);
        return (error);
}

void
mps_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
        bus_addr_t *addr;

        addr = arg;
        *addr = segs[0].ds_addr;
}

static int
mps_alloc_queues(struct mps_softc *sc)
{
        bus_addr_t queues_busaddr;
        uint8_t *queues;
        int qsize, fqsize, pqsize;

        /*
         * The reply free queue contains 4 byte entries in multiples of 16 and
         * aligned on a 16 byte boundary. There must always be an unused entry.
         * This queue supplies fresh reply frames for the firmware to use.
         *
         * The reply descriptor post queue contains 8 byte entries in
         * multiples of 16 and aligned on a 16 byte boundary. This queue
         * contains filled-in reply frames sent from the firmware to the host.
         *
         * These two queues are allocated together for simplicity.
         */
        sc->fqdepth = roundup2((sc->num_replies + 1), 16);
        sc->pqdepth = roundup2((sc->num_replies + 1), 16);
        fqsize = sc->fqdepth * 4;
        pqsize = sc->pqdepth * 8;
        qsize = fqsize + pqsize;

        if (bus_dma_tag_create( sc->mps_parent_dmat,    /* parent */
                                16, 0,                  /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                NULL, NULL,             /* filter, filterarg */
                                qsize,                  /* maxsize */
                                1,                      /* nsegments */
                                qsize,                  /* maxsegsize */
                                0,                      /* flags */
                                NULL, NULL,             /* lockfunc, lockarg */
                                &sc->queues_dmat)) {
                device_printf(sc->mps_dev, "Cannot allocate queues DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
            &sc->queues_map)) {
                device_printf(sc->mps_dev, "Cannot allocate queues memory\n");
                return (ENOMEM);
        }
        bzero(queues, qsize);
        bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
            mps_memaddr_cb, &queues_busaddr, 0);

        sc->free_queue = (uint32_t *)queues;
        sc->free_busaddr = queues_busaddr;
        sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
        sc->post_busaddr = queues_busaddr + fqsize;

        return (0);
}

static int
mps_alloc_replies(struct mps_softc *sc)
{
        int rsize, num_replies;

        /*
         * sc->num_replies should be one less than sc->fqdepth.
We need to 720 * allocate space for sc->fqdepth replies, but only sc->num_replies 721 * replies can be used at once. 722 */ 723 num_replies = max(sc->fqdepth, sc->num_replies); 724 725 rsize = sc->facts->ReplyFrameSize * num_replies * 4; 726 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 727 4, 0, /* algnmnt, boundary */ 728 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 729 BUS_SPACE_MAXADDR, /* highaddr */ 730 NULL, NULL, /* filter, filterarg */ 731 rsize, /* maxsize */ 732 1, /* nsegments */ 733 rsize, /* maxsegsize */ 734 0, /* flags */ 735 NULL, NULL, /* lockfunc, lockarg */ 736 &sc->reply_dmat)) { 737 device_printf(sc->mps_dev, "Cannot allocate replies DMA tag\n"); 738 return (ENOMEM); 739 } 740 if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames, 741 BUS_DMA_NOWAIT, &sc->reply_map)) { 742 device_printf(sc->mps_dev, "Cannot allocate replies memory\n"); 743 return (ENOMEM); 744 } 745 bzero(sc->reply_frames, rsize); 746 bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize, 747 mps_memaddr_cb, &sc->reply_busaddr, 0); 748 749 return (0); 750 } 751 752 static int 753 mps_alloc_requests(struct mps_softc *sc) 754 { 755 struct mps_command *cm; 756 struct mps_chain *chain; 757 int i, rsize, nsegs; 758 759 rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4; 760 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 761 16, 0, /* algnmnt, boundary */ 762 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 763 BUS_SPACE_MAXADDR, /* highaddr */ 764 NULL, NULL, /* filter, filterarg */ 765 rsize, /* maxsize */ 766 1, /* nsegments */ 767 rsize, /* maxsegsize */ 768 0, /* flags */ 769 NULL, NULL, /* lockfunc, lockarg */ 770 &sc->req_dmat)) { 771 device_printf(sc->mps_dev, "Cannot allocate request DMA tag\n"); 772 return (ENOMEM); 773 } 774 if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames, 775 BUS_DMA_NOWAIT, &sc->req_map)) { 776 device_printf(sc->mps_dev, "Cannot allocate request memory\n"); 777 return (ENOMEM); 778 } 779 bzero(sc->req_frames, rsize); 780 bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize, 781 mps_memaddr_cb, &sc->req_busaddr, 0); 782 783 rsize = sc->facts->IOCRequestFrameSize * sc->max_chains * 4; 784 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 785 16, 0, /* algnmnt, boundary */ 786 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 787 BUS_SPACE_MAXADDR, /* highaddr */ 788 NULL, NULL, /* filter, filterarg */ 789 rsize, /* maxsize */ 790 1, /* nsegments */ 791 rsize, /* maxsegsize */ 792 0, /* flags */ 793 NULL, NULL, /* lockfunc, lockarg */ 794 &sc->chain_dmat)) { 795 device_printf(sc->mps_dev, "Cannot allocate chain DMA tag\n"); 796 return (ENOMEM); 797 } 798 if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames, 799 BUS_DMA_NOWAIT, &sc->chain_map)) { 800 device_printf(sc->mps_dev, "Cannot allocate chain memory\n"); 801 return (ENOMEM); 802 } 803 bzero(sc->chain_frames, rsize); 804 bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize, 805 mps_memaddr_cb, &sc->chain_busaddr, 0); 806 807 rsize = MPS_SENSE_LEN * sc->num_reqs; 808 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 809 1, 0, /* algnmnt, boundary */ 810 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 811 BUS_SPACE_MAXADDR, /* highaddr */ 812 NULL, NULL, /* filter, filterarg */ 813 rsize, /* maxsize */ 814 1, /* nsegments */ 815 rsize, /* maxsegsize */ 816 0, /* flags */ 817 NULL, NULL, /* lockfunc, lockarg */ 818 &sc->sense_dmat)) { 819 device_printf(sc->mps_dev, "Cannot allocate sense DMA tag\n"); 820 return (ENOMEM); 821 } 822 if 
(bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames, 823 BUS_DMA_NOWAIT, &sc->sense_map)) { 824 device_printf(sc->mps_dev, "Cannot allocate sense memory\n"); 825 return (ENOMEM); 826 } 827 bzero(sc->sense_frames, rsize); 828 bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize, 829 mps_memaddr_cb, &sc->sense_busaddr, 0); 830 831 sc->chains = malloc(sizeof(struct mps_chain) * sc->max_chains, M_MPT2, 832 M_WAITOK | M_ZERO); 833 for (i = 0; i < sc->max_chains; i++) { 834 chain = &sc->chains[i]; 835 chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + 836 i * sc->facts->IOCRequestFrameSize * 4); 837 chain->chain_busaddr = sc->chain_busaddr + 838 i * sc->facts->IOCRequestFrameSize * 4; 839 mps_free_chain(sc, chain); 840 sc->chain_free_lowwater++; 841 } 842 843 /* XXX Need to pick a more precise value */ 844 nsegs = (MAXPHYS / PAGE_SIZE) + 1; 845 if (bus_dma_tag_create( sc->mps_parent_dmat, /* parent */ 846 1, 0, /* algnmnt, boundary */ 847 BUS_SPACE_MAXADDR, /* lowaddr */ 848 BUS_SPACE_MAXADDR, /* highaddr */ 849 NULL, NULL, /* filter, filterarg */ 850 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 851 nsegs, /* nsegments */ 852 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 853 BUS_DMA_ALLOCNOW, /* flags */ 854 busdma_lock_mutex, /* lockfunc */ 855 &sc->mps_mtx, /* lockarg */ 856 &sc->buffer_dmat)) { 857 device_printf(sc->mps_dev, "Cannot allocate buffer DMA tag\n"); 858 return (ENOMEM); 859 } 860 861 /* 862 * SMID 0 cannot be used as a free command per the firmware spec. 863 * Just drop that command instead of risking accounting bugs. 864 */ 865 sc->commands = malloc(sizeof(struct mps_command) * sc->num_reqs, 866 M_MPT2, M_WAITOK | M_ZERO); 867 for (i = 1; i < sc->num_reqs; i++) { 868 cm = &sc->commands[i]; 869 cm->cm_req = sc->req_frames + 870 i * sc->facts->IOCRequestFrameSize * 4; 871 cm->cm_req_busaddr = sc->req_busaddr + 872 i * sc->facts->IOCRequestFrameSize * 4; 873 cm->cm_sense = &sc->sense_frames[i]; 874 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPS_SENSE_LEN; 875 cm->cm_desc.Default.SMID = i; 876 cm->cm_sc = sc; 877 TAILQ_INIT(&cm->cm_chain_list); 878 callout_init_mtx(&cm->cm_callout, &sc->mps_mtx, 0); 879 880 /* XXX Is a failure here a critical problem? */ 881 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) == 0) 882 if (i <= sc->facts->HighPriorityCredit) 883 mps_free_high_priority_command(sc, cm); 884 else 885 mps_free_command(sc, cm); 886 else { 887 panic("failed to allocate command %d\n", i); 888 sc->num_reqs = i; 889 break; 890 } 891 } 892 893 return (0); 894 } 895 896 static int 897 mps_init_queues(struct mps_softc *sc) 898 { 899 int i; 900 901 memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8); 902 903 /* 904 * According to the spec, we need to use one less reply than we 905 * have space for on the queue. So sc->num_replies (the number we 906 * use) should be less than sc->fqdepth (allocated size). 907 */ 908 if (sc->num_replies >= sc->fqdepth) 909 return (EINVAL); 910 911 /* 912 * Initialize all of the free queue entries. 913 */ 914 for (i = 0; i < sc->fqdepth; i++) 915 sc->free_queue[i] = sc->reply_busaddr + (i * sc->facts->ReplyFrameSize * 4); 916 sc->replyfreeindex = sc->num_replies; 917 918 return (0); 919 } 920 921 /* Get the driver parameter tunables. Lowest priority are the driver defaults. 922 * Next are the global settings, if they exist. Highest are the per-unit 923 * settings, if they exist. 
924 */ 925 static void 926 mps_get_tunables(struct mps_softc *sc) 927 { 928 char tmpstr[80]; 929 930 /* XXX default to some debugging for now */ 931 sc->mps_debug = MPS_FAULT; 932 sc->disable_msix = 0; 933 sc->disable_msi = 0; 934 sc->max_chains = MPS_CHAIN_FRAMES; 935 936 /* 937 * Grab the global variables. 938 */ 939 TUNABLE_INT_FETCH("hw.mps.debug_level", &sc->mps_debug); 940 TUNABLE_INT_FETCH("hw.mps.disable_msix", &sc->disable_msix); 941 TUNABLE_INT_FETCH("hw.mps.disable_msi", &sc->disable_msi); 942 TUNABLE_INT_FETCH("hw.mps.max_chains", &sc->max_chains); 943 944 /* Grab the unit-instance variables */ 945 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.debug_level", 946 device_get_unit(sc->mps_dev)); 947 TUNABLE_INT_FETCH(tmpstr, &sc->mps_debug); 948 949 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msix", 950 device_get_unit(sc->mps_dev)); 951 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix); 952 953 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.disable_msi", 954 device_get_unit(sc->mps_dev)); 955 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi); 956 957 snprintf(tmpstr, sizeof(tmpstr), "dev.mps.%d.max_chains", 958 device_get_unit(sc->mps_dev)); 959 TUNABLE_INT_FETCH(tmpstr, &sc->max_chains); 960 } 961 962 static void 963 mps_setup_sysctl(struct mps_softc *sc) 964 { 965 struct sysctl_ctx_list *sysctl_ctx = NULL; 966 struct sysctl_oid *sysctl_tree = NULL; 967 char tmpstr[80], tmpstr2[80]; 968 969 /* 970 * Setup the sysctl variable so the user can change the debug level 971 * on the fly. 972 */ 973 snprintf(tmpstr, sizeof(tmpstr), "MPS controller %d", 974 device_get_unit(sc->mps_dev)); 975 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mps_dev)); 976 977 sysctl_ctx = device_get_sysctl_ctx(sc->mps_dev); 978 if (sysctl_ctx != NULL) 979 sysctl_tree = device_get_sysctl_tree(sc->mps_dev); 980 981 if (sysctl_tree == NULL) { 982 sysctl_ctx_init(&sc->sysctl_ctx); 983 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 984 SYSCTL_STATIC_CHILDREN(_hw_mps), OID_AUTO, tmpstr2, 985 CTLFLAG_RD, 0, tmpstr); 986 if (sc->sysctl_tree == NULL) 987 return; 988 sysctl_ctx = &sc->sysctl_ctx; 989 sysctl_tree = sc->sysctl_tree; 990 } 991 992 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 993 OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mps_debug, 0, 994 "mps debug level"); 995 996 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 997 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0, 998 "Disable the use of MSI-X interrupts"); 999 1000 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1001 OID_AUTO, "disable_msi", CTLFLAG_RD, &sc->disable_msi, 0, 1002 "Disable the use of MSI interrupts"); 1003 1004 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1005 OID_AUTO, "firmware_version", CTLFLAG_RW, &sc->fw_version, 1006 strlen(sc->fw_version), "firmware version"); 1007 1008 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1009 OID_AUTO, "driver_version", CTLFLAG_RW, MPS_DRIVER_VERSION, 1010 strlen(MPS_DRIVER_VERSION), "driver version"); 1011 1012 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1013 OID_AUTO, "io_cmds_active", CTLFLAG_RD, 1014 &sc->io_cmds_active, 0, "number of currently active commands"); 1015 1016 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1017 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, 1018 &sc->io_cmds_highwater, 0, "maximum active commands seen"); 1019 1020 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1021 OID_AUTO, "chain_free", CTLFLAG_RD, 1022 &sc->chain_free, 0, "number of free chain elements"); 
1023 1024 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1025 OID_AUTO, "chain_free_lowwater", CTLFLAG_RD, 1026 &sc->chain_free_lowwater, 0,"lowest number of free chain elements"); 1027 1028 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1029 OID_AUTO, "max_chains", CTLFLAG_RD, 1030 &sc->max_chains, 0,"maximum chain frames that will be allocated"); 1031 1032 #if __FreeBSD_version >= 900030 1033 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1034 OID_AUTO, "chain_alloc_fail", CTLFLAG_RD, 1035 &sc->chain_alloc_fail, "chain allocation failures"); 1036 #endif //FreeBSD_version >= 900030 1037 } 1038 1039 int 1040 mps_attach(struct mps_softc *sc) 1041 { 1042 int i, error; 1043 1044 mps_get_tunables(sc); 1045 1046 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1047 1048 mtx_init(&sc->mps_mtx, "MPT2SAS lock", NULL, MTX_DEF); 1049 callout_init_mtx(&sc->periodic, &sc->mps_mtx, 0); 1050 TAILQ_INIT(&sc->event_list); 1051 1052 if ((error = mps_transition_ready(sc)) != 0) { 1053 mps_printf(sc, "%s failed to transition ready\n", __func__); 1054 return (error); 1055 } 1056 1057 sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPT2, 1058 M_ZERO|M_NOWAIT); 1059 if ((error = mps_get_iocfacts(sc, sc->facts)) != 0) 1060 return (error); 1061 1062 mps_print_iocfacts(sc, sc->facts); 1063 1064 snprintf(sc->fw_version, sizeof(sc->fw_version), 1065 "%02d.%02d.%02d.%02d", 1066 sc->facts->FWVersion.Struct.Major, 1067 sc->facts->FWVersion.Struct.Minor, 1068 sc->facts->FWVersion.Struct.Unit, 1069 sc->facts->FWVersion.Struct.Dev); 1070 1071 mps_printf(sc, "Firmware: %s, Driver: %s\n", sc->fw_version, 1072 MPS_DRIVER_VERSION); 1073 mps_printf(sc, "IOCCapabilities: %b\n", sc->facts->IOCCapabilities, 1074 "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf" 1075 "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR" 1076 "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"); 1077 1078 /* 1079 * If the chip doesn't support event replay then a hard reset will be 1080 * required to trigger a full discovery. Do the reset here then 1081 * retransition to Ready. A hard reset might have already been done, 1082 * but it doesn't hurt to do it again. 1083 */ 1084 if ((sc->facts->IOCCapabilities & 1085 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0) { 1086 mps_diag_reset(sc); 1087 if ((error = mps_transition_ready(sc)) != 0) 1088 return (error); 1089 } 1090 1091 /* 1092 * Set flag if IR Firmware is loaded. 1093 */ 1094 if (sc->facts->IOCCapabilities & 1095 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) 1096 sc->ir_firmware = 1; 1097 1098 /* 1099 * Check if controller supports FW diag buffers and set flag to enable 1100 * each type. 1101 */ 1102 if (sc->facts->IOCCapabilities & 1103 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) 1104 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].enabled = 1105 TRUE; 1106 if (sc->facts->IOCCapabilities & 1107 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) 1108 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].enabled = 1109 TRUE; 1110 if (sc->facts->IOCCapabilities & 1111 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) 1112 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].enabled = 1113 TRUE; 1114 1115 /* 1116 * Set flag if EEDP is supported and if TLR is supported. 1117 */ 1118 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) 1119 sc->eedp_enabled = TRUE; 1120 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) 1121 sc->control_TLR = TRUE; 1122 1123 /* 1124 * Size the queues. 
Since the reply queues always need one free entry, 1125 * we'll just deduct one reply message here. 1126 */ 1127 sc->num_reqs = MIN(MPS_REQ_FRAMES, sc->facts->RequestCredit); 1128 sc->num_replies = MIN(MPS_REPLY_FRAMES + MPS_EVT_REPLY_FRAMES, 1129 sc->facts->MaxReplyDescriptorPostQueueDepth) - 1; 1130 TAILQ_INIT(&sc->req_list); 1131 TAILQ_INIT(&sc->high_priority_req_list); 1132 TAILQ_INIT(&sc->chain_list); 1133 TAILQ_INIT(&sc->tm_list); 1134 1135 if (((error = mps_alloc_queues(sc)) != 0) || 1136 ((error = mps_alloc_replies(sc)) != 0) || 1137 ((error = mps_alloc_requests(sc)) != 0)) { 1138 mps_printf(sc, "%s failed to alloc\n", __func__); 1139 mps_free(sc); 1140 return (error); 1141 } 1142 1143 if (((error = mps_init_queues(sc)) != 0) || 1144 ((error = mps_transition_operational(sc)) != 0)) { 1145 mps_printf(sc, "%s failed to transition operational\n", __func__); 1146 mps_free(sc); 1147 return (error); 1148 } 1149 1150 /* 1151 * Finish the queue initialization. 1152 * These are set here instead of in mps_init_queues() because the 1153 * IOC resets these values during the state transition in 1154 * mps_transition_operational(). The free index is set to 1 1155 * because the corresponding index in the IOC is set to 0, and the 1156 * IOC treats the queues as full if both are set to the same value. 1157 * Hence the reason that the queue can't hold all of the possible 1158 * replies. 1159 */ 1160 sc->replypostindex = 0; 1161 mps_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); 1162 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0); 1163 1164 sc->pfacts = malloc(sizeof(MPI2_PORT_FACTS_REPLY) * 1165 sc->facts->NumberOfPorts, M_MPT2, M_ZERO|M_WAITOK); 1166 for (i = 0; i < sc->facts->NumberOfPorts; i++) { 1167 if ((error = mps_get_portfacts(sc, &sc->pfacts[i], i)) != 0) { 1168 mps_printf(sc, "%s failed to get portfacts for port %d\n", 1169 __func__, i); 1170 mps_free(sc); 1171 return (error); 1172 } 1173 mps_print_portfacts(sc, &sc->pfacts[i]); 1174 } 1175 1176 /* Attach the subsystems so they can prepare their event masks. */ 1177 /* XXX Should be dynamic so that IM/IR and user modules can attach */ 1178 if (((error = mps_attach_log(sc)) != 0) || 1179 ((error = mps_attach_sas(sc)) != 0) || 1180 ((error = mps_attach_user(sc)) != 0)) { 1181 mps_printf(sc, "%s failed to attach all subsystems: error %d\n", 1182 __func__, error); 1183 mps_free(sc); 1184 return (error); 1185 } 1186 1187 if ((error = mps_pci_setup_interrupts(sc)) != 0) { 1188 mps_printf(sc, "%s failed to setup interrupts\n", __func__); 1189 mps_free(sc); 1190 return (error); 1191 } 1192 1193 /* 1194 * The static page function currently read is ioc page8. Others can be 1195 * added in future. 1196 */ 1197 mps_base_static_config_pages(sc); 1198 1199 /* Start the periodic watchdog check on the IOC Doorbell */ 1200 mps_periodic(sc); 1201 1202 /* 1203 * The portenable will kick off discovery events that will drive the 1204 * rest of the initialization process. The CAM/SAS module will 1205 * hold up the boot sequence until discovery is complete. 1206 */ 1207 sc->mps_ich.ich_func = mps_startup; 1208 sc->mps_ich.ich_arg = sc; 1209 if (config_intrhook_establish(&sc->mps_ich) != 0) { 1210 mps_dprint(sc, MPS_FAULT, "Cannot establish MPS config hook\n"); 1211 error = EINVAL; 1212 } 1213 1214 /* 1215 * Allow IR to shutdown gracefully when shutdown occurs. 
1216 */ 1217 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, 1218 mpssas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT); 1219 1220 if (sc->shutdown_eh == NULL) 1221 mps_dprint(sc, MPS_FAULT, "shutdown event registration " 1222 "failed\n"); 1223 1224 mps_setup_sysctl(sc); 1225 1226 sc->mps_flags |= MPS_FLAGS_ATTACH_DONE; 1227 1228 return (error); 1229 } 1230 1231 /* Run through any late-start handlers. */ 1232 static void 1233 mps_startup(void *arg) 1234 { 1235 struct mps_softc *sc; 1236 1237 sc = (struct mps_softc *)arg; 1238 1239 mps_lock(sc); 1240 mps_unmask_intr(sc); 1241 /* initialize device mapping tables */ 1242 mps_mapping_initialize(sc); 1243 mpssas_startup(sc); 1244 mps_unlock(sc); 1245 } 1246 1247 /* Periodic watchdog. Is called with the driver lock already held. */ 1248 static void 1249 mps_periodic(void *arg) 1250 { 1251 struct mps_softc *sc; 1252 uint32_t db; 1253 1254 sc = (struct mps_softc *)arg; 1255 if (sc->mps_flags & MPS_FLAGS_SHUTDOWN) 1256 return; 1257 1258 db = mps_regread(sc, MPI2_DOORBELL_OFFSET); 1259 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 1260 device_printf(sc->mps_dev, "IOC Fault 0x%08x, Resetting\n", db); 1261 1262 mps_reinit(sc); 1263 } 1264 1265 callout_reset(&sc->periodic, MPS_PERIODIC_DELAY * hz, mps_periodic, sc); 1266 } 1267 1268 static void 1269 mps_log_evt_handler(struct mps_softc *sc, uintptr_t data, 1270 MPI2_EVENT_NOTIFICATION_REPLY *event) 1271 { 1272 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry; 1273 1274 mps_print_event(sc, event); 1275 1276 switch (event->Event) { 1277 case MPI2_EVENT_LOG_DATA: 1278 device_printf(sc->mps_dev, "MPI2_EVENT_LOG_DATA:\n"); 1279 hexdump(event->EventData, event->EventDataLength, NULL, 0); 1280 break; 1281 case MPI2_EVENT_LOG_ENTRY_ADDED: 1282 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; 1283 mps_dprint(sc, MPS_INFO, "MPI2_EVENT_LOG_ENTRY_ADDED event " 1284 "0x%x Sequence %d:\n", entry->LogEntryQualifier, 1285 entry->LogSequence); 1286 break; 1287 default: 1288 break; 1289 } 1290 return; 1291 } 1292 1293 static int 1294 mps_attach_log(struct mps_softc *sc) 1295 { 1296 uint8_t events[16]; 1297 1298 bzero(events, 16); 1299 setbit(events, MPI2_EVENT_LOG_DATA); 1300 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); 1301 1302 mps_register_events(sc, events, mps_log_evt_handler, NULL, 1303 &sc->mps_log_eh); 1304 1305 return (0); 1306 } 1307 1308 static int 1309 mps_detach_log(struct mps_softc *sc) 1310 { 1311 1312 if (sc->mps_log_eh != NULL) 1313 mps_deregister_events(sc, sc->mps_log_eh); 1314 return (0); 1315 } 1316 1317 /* 1318 * Free all of the driver resources and detach submodules. Should be called 1319 * without the lock held. 1320 */ 1321 int 1322 mps_free(struct mps_softc *sc) 1323 { 1324 struct mps_command *cm; 1325 int i, error; 1326 1327 /* Turn off the watchdog */ 1328 mps_lock(sc); 1329 sc->mps_flags |= MPS_FLAGS_SHUTDOWN; 1330 mps_unlock(sc); 1331 /* Lock must not be held for this */ 1332 callout_drain(&sc->periodic); 1333 1334 if (((error = mps_detach_log(sc)) != 0) || 1335 ((error = mps_detach_sas(sc)) != 0)) 1336 return (error); 1337 1338 /* Put the IOC back in the READY state. 
*/ 1339 mps_lock(sc); 1340 if ((error = mps_transition_ready(sc)) != 0) { 1341 mps_unlock(sc); 1342 return (error); 1343 } 1344 mps_unlock(sc); 1345 1346 if (sc->facts != NULL) 1347 free(sc->facts, M_MPT2); 1348 1349 if (sc->pfacts != NULL) 1350 free(sc->pfacts, M_MPT2); 1351 1352 if (sc->post_busaddr != 0) 1353 bus_dmamap_unload(sc->queues_dmat, sc->queues_map); 1354 if (sc->post_queue != NULL) 1355 bus_dmamem_free(sc->queues_dmat, sc->post_queue, 1356 sc->queues_map); 1357 if (sc->queues_dmat != NULL) 1358 bus_dma_tag_destroy(sc->queues_dmat); 1359 1360 if (sc->chain_busaddr != 0) 1361 bus_dmamap_unload(sc->chain_dmat, sc->chain_map); 1362 if (sc->chain_frames != NULL) 1363 bus_dmamem_free(sc->chain_dmat, sc->chain_frames,sc->chain_map); 1364 if (sc->chain_dmat != NULL) 1365 bus_dma_tag_destroy(sc->chain_dmat); 1366 1367 if (sc->sense_busaddr != 0) 1368 bus_dmamap_unload(sc->sense_dmat, sc->sense_map); 1369 if (sc->sense_frames != NULL) 1370 bus_dmamem_free(sc->sense_dmat, sc->sense_frames,sc->sense_map); 1371 if (sc->sense_dmat != NULL) 1372 bus_dma_tag_destroy(sc->sense_dmat); 1373 1374 if (sc->reply_busaddr != 0) 1375 bus_dmamap_unload(sc->reply_dmat, sc->reply_map); 1376 if (sc->reply_frames != NULL) 1377 bus_dmamem_free(sc->reply_dmat, sc->reply_frames,sc->reply_map); 1378 if (sc->reply_dmat != NULL) 1379 bus_dma_tag_destroy(sc->reply_dmat); 1380 1381 if (sc->req_busaddr != 0) 1382 bus_dmamap_unload(sc->req_dmat, sc->req_map); 1383 if (sc->req_frames != NULL) 1384 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map); 1385 if (sc->req_dmat != NULL) 1386 bus_dma_tag_destroy(sc->req_dmat); 1387 1388 if (sc->chains != NULL) 1389 free(sc->chains, M_MPT2); 1390 if (sc->commands != NULL) { 1391 for (i = 1; i < sc->num_reqs; i++) { 1392 cm = &sc->commands[i]; 1393 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap); 1394 } 1395 free(sc->commands, M_MPT2); 1396 } 1397 if (sc->buffer_dmat != NULL) 1398 bus_dma_tag_destroy(sc->buffer_dmat); 1399 1400 if (sc->sysctl_tree != NULL) 1401 sysctl_ctx_free(&sc->sysctl_ctx); 1402 1403 mps_mapping_free_memory(sc); 1404 1405 /* Deregister the shutdown function */ 1406 if (sc->shutdown_eh != NULL) 1407 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); 1408 1409 mtx_destroy(&sc->mps_mtx); 1410 1411 return (0); 1412 } 1413 1414 static __inline void 1415 mps_complete_command(struct mps_command *cm) 1416 { 1417 if (cm->cm_flags & MPS_CM_FLAGS_POLLED) 1418 cm->cm_flags |= MPS_CM_FLAGS_COMPLETE; 1419 1420 if (cm->cm_complete != NULL) { 1421 mps_dprint(cm->cm_sc, MPS_TRACE, 1422 "%s cm %p calling cm_complete %p data %p reply %p\n", 1423 __func__, cm, cm->cm_complete, cm->cm_complete_data, 1424 cm->cm_reply); 1425 cm->cm_complete(cm->cm_sc, cm); 1426 } 1427 1428 if (cm->cm_flags & MPS_CM_FLAGS_WAKEUP) { 1429 mps_dprint(cm->cm_sc, MPS_TRACE, "%s: waking up %p\n", 1430 __func__, cm); 1431 wakeup(cm); 1432 } 1433 1434 if (cm->cm_sc->io_cmds_active != 0) { 1435 cm->cm_sc->io_cmds_active--; 1436 } else { 1437 mps_dprint(cm->cm_sc, MPS_INFO, "Warning: io_cmds_active is " 1438 "out of sync - resynching to 0\n"); 1439 } 1440 } 1441 1442 void 1443 mps_intr(void *data) 1444 { 1445 struct mps_softc *sc; 1446 uint32_t status; 1447 1448 sc = (struct mps_softc *)data; 1449 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1450 1451 /* 1452 * Check interrupt status register to flush the bus. 
This is
 * needed for both INTx interrupts and driver-driven polling.
 */
        status = mps_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
        if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
                return;

        mps_lock(sc);
        mps_intr_locked(data);
        mps_unlock(sc);
        return;
}

/*
 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
 * chip.  Hopefully this theory is correct.
 */
void
mps_intr_msi(void *data)
{
        struct mps_softc *sc;

        sc = (struct mps_softc *)data;
        mps_dprint(sc, MPS_TRACE, "%s\n", __func__);
        mps_lock(sc);
        mps_intr_locked(data);
        mps_unlock(sc);
        return;
}

/*
 * The locking is overly broad and simplistic, but easy to deal with for now.
 */
void
mps_intr_locked(void *data)
{
        MPI2_REPLY_DESCRIPTORS_UNION *desc;
        struct mps_softc *sc;
        struct mps_command *cm = NULL;
        uint8_t flags;
        u_int pq;
        MPI2_DIAG_RELEASE_REPLY *rel_rep;
        mps_fw_diagnostic_buffer_t *pBuffer;

        sc = (struct mps_softc *)data;

        pq = sc->replypostindex;
        mps_dprint(sc, MPS_TRACE,
            "%s sc %p starting with replypostindex %u\n",
            __func__, sc, sc->replypostindex);

        for ( ;; ) {
                cm = NULL;
                desc = &sc->post_queue[sc->replypostindex];
                flags = desc->Default.ReplyFlags &
                    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
                if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
                    || (desc->Words.High == 0xffffffff))
                        break;

                /* increment the replypostindex now, so that event handlers
                 * and cm completion handlers which decide to do a diag
                 * reset can zero it without it getting incremented again
                 * afterwards, and we break out of this loop on the next
                 * iteration since the reply post queue has been cleared to
                 * 0xFF and all descriptors look unused (which they are).
                 */
                if (++sc->replypostindex >= sc->pqdepth)
                        sc->replypostindex = 0;

                switch (flags) {
                case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
                        cm = &sc->commands[desc->SCSIIOSuccess.SMID];
                        cm->cm_reply = NULL;
                        break;
                case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
                {
                        uint32_t baddr;
                        uint8_t *reply;

                        /*
                         * Re-compose the reply address from the address
                         * sent back from the chip.  The ReplyFrameAddress
                         * is the lower 32 bits of the physical address of
                         * the particular reply frame.  Convert that address
                         * to host format, and then use that to provide the
                         * offset against the virtual address base
                         * (sc->reply_frames).
                         */
                        baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
                        reply = sc->reply_frames +
                            (baddr - ((uint32_t)sc->reply_busaddr));
                        /*
                         * Make sure the reply we got back is in a valid
                         * range.  If not, go ahead and panic here, since
                         * we'll probably panic as soon as we dereference
                         * the reply pointer anyway.
1549 */ 1550 if ((reply < sc->reply_frames) 1551 || (reply > (sc->reply_frames + 1552 (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) { 1553 printf("%s: WARNING: reply %p out of range!\n", 1554 __func__, reply); 1555 printf("%s: reply_frames %p, fqdepth %d, " 1556 "frame size %d\n", __func__, 1557 sc->reply_frames, sc->fqdepth, 1558 sc->facts->ReplyFrameSize * 4); 1559 printf("%s: baddr %#x,\n", __func__, baddr); 1560 panic("Reply address out of range"); 1561 } 1562 if (desc->AddressReply.SMID == 0) { 1563 if (((MPI2_DEFAULT_REPLY *)reply)->Function == 1564 MPI2_FUNCTION_DIAG_BUFFER_POST) { 1565 /* 1566 * If SMID is 0 for Diag Buffer Post, 1567 * this implies that the reply is due to 1568 * a release function with a status that 1569 * the buffer has been released. Set 1570 * the buffer flags accordingly. 1571 */ 1572 rel_rep = 1573 (MPI2_DIAG_RELEASE_REPLY *)reply; 1574 if (rel_rep->IOCStatus == 1575 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) 1576 { 1577 pBuffer = 1578 &sc->fw_diag_buffer_list[ 1579 rel_rep->BufferType]; 1580 pBuffer->valid_data = TRUE; 1581 pBuffer->owned_by_firmware = 1582 FALSE; 1583 pBuffer->immediate = FALSE; 1584 } 1585 } else 1586 mps_dispatch_event(sc, baddr, 1587 (MPI2_EVENT_NOTIFICATION_REPLY *) 1588 reply); 1589 } else { 1590 cm = &sc->commands[desc->AddressReply.SMID]; 1591 cm->cm_reply = reply; 1592 cm->cm_reply_data = 1593 desc->AddressReply.ReplyFrameAddress; 1594 } 1595 break; 1596 } 1597 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS: 1598 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER: 1599 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS: 1600 default: 1601 /* Unhandled */ 1602 device_printf(sc->mps_dev, "Unhandled reply 0x%x\n", 1603 desc->Default.ReplyFlags); 1604 cm = NULL; 1605 break; 1606 } 1607 1608 if (cm != NULL) 1609 mps_complete_command(cm); 1610 1611 desc->Words.Low = 0xffffffff; 1612 desc->Words.High = 0xffffffff; 1613 } 1614 1615 if (pq != sc->replypostindex) { 1616 mps_dprint(sc, MPS_TRACE, 1617 "%s sc %p writing postindex %d\n", 1618 __func__, sc, sc->replypostindex); 1619 mps_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, sc->replypostindex); 1620 } 1621 1622 return; 1623 } 1624 1625 static void 1626 mps_dispatch_event(struct mps_softc *sc, uintptr_t data, 1627 MPI2_EVENT_NOTIFICATION_REPLY *reply) 1628 { 1629 struct mps_event_handle *eh; 1630 int event, handled = 0; 1631 1632 event = reply->Event; 1633 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 1634 if (isset(eh->mask, event)) { 1635 eh->callback(sc, data, reply); 1636 handled++; 1637 } 1638 } 1639 1640 if (handled == 0) 1641 device_printf(sc->mps_dev, "Unhandled event 0x%x\n", event); 1642 1643 /* 1644 * This is the only place that the event/reply should be freed. 1645 * Anything wanting to hold onto the event data should have 1646 * already copied it into their own storage. 1647 */ 1648 mps_free_reply(sc, data); 1649 } 1650 1651 static void 1652 mps_reregister_events_complete(struct mps_softc *sc, struct mps_command *cm) 1653 { 1654 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1655 1656 if (cm->cm_reply) 1657 mps_print_event(sc, 1658 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); 1659 1660 mps_free_command(sc, cm); 1661 1662 /* next, send a port enable */ 1663 mpssas_startup(sc); 1664 } 1665 1666 /* 1667 * For both register_events and update_events, the caller supplies a bitmap 1668 * of events that it _wants_. These functions then turn that into a bitmask 1669 * suitable for the controller. 
1670 */ 1671 int 1672 mps_register_events(struct mps_softc *sc, uint8_t *mask, 1673 mps_evt_callback_t *cb, void *data, struct mps_event_handle **handle) 1674 { 1675 struct mps_event_handle *eh; 1676 int error = 0; 1677 1678 eh = malloc(sizeof(struct mps_event_handle), M_MPT2, M_WAITOK|M_ZERO); 1679 eh->callback = cb; 1680 eh->data = data; 1681 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); 1682 if (mask != NULL) 1683 error = mps_update_events(sc, eh, mask); 1684 *handle = eh; 1685 1686 return (error); 1687 } 1688 1689 int 1690 mps_update_events(struct mps_softc *sc, struct mps_event_handle *handle, 1691 uint8_t *mask) 1692 { 1693 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 1694 MPI2_EVENT_NOTIFICATION_REPLY *reply; 1695 struct mps_command *cm; 1696 struct mps_event_handle *eh; 1697 int error, i; 1698 1699 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1700 1701 if ((mask != NULL) && (handle != NULL)) 1702 bcopy(mask, &handle->mask[0], 16); 1703 memset(sc->event_mask, 0xff, 16); 1704 1705 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 1706 for (i = 0; i < 16; i++) 1707 sc->event_mask[i] &= ~eh->mask[i]; 1708 } 1709 1710 if ((cm = mps_alloc_command(sc)) == NULL) 1711 return (EBUSY); 1712 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 1713 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 1714 evtreq->MsgFlags = 0; 1715 evtreq->SASBroadcastPrimitiveMasks = 0; 1716 #ifdef MPS_DEBUG_ALL_EVENTS 1717 { 1718 u_char fullmask[16]; 1719 memset(fullmask, 0x00, 16); 1720 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 1721 } 1722 #else 1723 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 1724 #endif 1725 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1726 cm->cm_data = NULL; 1727 1728 error = mps_request_polled(sc, cm); 1729 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; 1730 if ((reply == NULL) || 1731 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 1732 error = ENXIO; 1733 mps_print_event(sc, reply); 1734 mps_dprint(sc, MPS_TRACE, "%s finished error %d\n", __func__, error); 1735 1736 mps_free_command(sc, cm); 1737 return (error); 1738 } 1739 1740 static int 1741 mps_reregister_events(struct mps_softc *sc) 1742 { 1743 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 1744 struct mps_command *cm; 1745 struct mps_event_handle *eh; 1746 int error, i; 1747 1748 mps_dprint(sc, MPS_TRACE, "%s\n", __func__); 1749 1750 /* first, reregister events */ 1751 1752 memset(sc->event_mask, 0xff, 16); 1753 1754 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 1755 for (i = 0; i < 16; i++) 1756 sc->event_mask[i] &= ~eh->mask[i]; 1757 } 1758 1759 if ((cm = mps_alloc_command(sc)) == NULL) 1760 return (EBUSY); 1761 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 1762 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 1763 evtreq->MsgFlags = 0; 1764 evtreq->SASBroadcastPrimitiveMasks = 0; 1765 #ifdef MPS_DEBUG_ALL_EVENTS 1766 { 1767 u_char fullmask[16]; 1768 memset(fullmask, 0x00, 16); 1769 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 1770 } 1771 #else 1772 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 1773 #endif 1774 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 1775 cm->cm_data = NULL; 1776 cm->cm_complete = mps_reregister_events_complete; 1777 1778 error = mps_map_command(sc, cm); 1779 1780 mps_dprint(sc, MPS_TRACE, "%s finished with error %d\n", __func__, error); 1781 return (error); 1782 } 1783 1784 int 1785 mps_deregister_events(struct mps_softc *sc, struct mps_event_handle *handle) 1786 { 
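        /*
         * Drop this handle from the event list and free it, then have
         * mps_update_events() recompute the controller event mask from
         * the handlers that remain registered (passing a NULL mask only
         * triggers the recalculation from sc->event_list).
         */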
1787 1788 TAILQ_REMOVE(&sc->event_list, handle, eh_list); 1789 free(handle, M_MPT2); 1790 return (mps_update_events(sc, NULL, NULL)); 1791 } 1792 1793 /* 1794 * Add a chain element as the next SGE for the specified command. 1795 * Reset cm_sge and cm_sgesize to indicate all the available space. 1796 */ 1797 static int 1798 mps_add_chain(struct mps_command *cm) 1799 { 1800 MPI2_SGE_CHAIN32 *sgc; 1801 struct mps_chain *chain; 1802 int space; 1803 1804 if (cm->cm_sglsize < MPS_SGC_SIZE) 1805 panic("MPS: Need SGE Error Code\n"); 1806 1807 chain = mps_alloc_chain(cm->cm_sc); 1808 if (chain == NULL) 1809 return (ENOBUFS); 1810 1811 space = (int)cm->cm_sc->facts->IOCRequestFrameSize * 4; 1812 1813 /* 1814 * Note: a double-linked list is used to make it easier to 1815 * walk for debugging. 1816 */ 1817 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); 1818 1819 sgc = (MPI2_SGE_CHAIN32 *)&cm->cm_sge->MpiChain; 1820 sgc->Length = space; 1821 sgc->NextChainOffset = 0; 1822 sgc->Flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT; 1823 sgc->Address = chain->chain_busaddr; 1824 1825 cm->cm_sge = (MPI2_SGE_IO_UNION *)&chain->chain->MpiSimple; 1826 cm->cm_sglsize = space; 1827 return (0); 1828 } 1829 1830 /* 1831 * Add one scatter-gather element (chain, simple, transaction context) 1832 * to the scatter-gather list for a command. Maintain cm_sglsize and 1833 * cm_sge as the remaining size and pointer to the next SGE to fill 1834 * in, respectively. 1835 */ 1836 int 1837 mps_push_sge(struct mps_command *cm, void *sgep, size_t len, int segsleft) 1838 { 1839 MPI2_SGE_TRANSACTION_UNION *tc = sgep; 1840 MPI2_SGE_SIMPLE64 *sge = sgep; 1841 int error, type; 1842 uint32_t saved_buf_len, saved_address_low, saved_address_high; 1843 1844 type = (tc->Flags & MPI2_SGE_FLAGS_ELEMENT_MASK); 1845 1846 #ifdef INVARIANTS 1847 switch (type) { 1848 case MPI2_SGE_FLAGS_TRANSACTION_ELEMENT: { 1849 if (len != tc->DetailsLength + 4) 1850 panic("TC %p length %u or %zu?", tc, 1851 tc->DetailsLength + 4, len); 1852 } 1853 break; 1854 case MPI2_SGE_FLAGS_CHAIN_ELEMENT: 1855 /* Driver only uses 32-bit chain elements */ 1856 if (len != MPS_SGC_SIZE) 1857 panic("CHAIN %p length %u or %zu?", sgep, 1858 MPS_SGC_SIZE, len); 1859 break; 1860 case MPI2_SGE_FLAGS_SIMPLE_ELEMENT: 1861 /* Driver only uses 64-bit SGE simple elements */ 1862 sge = sgep; 1863 if (len != MPS_SGE64_SIZE) 1864 panic("SGE simple %p length %u or %zu?", sge, 1865 MPS_SGE64_SIZE, len); 1866 if (((sge->FlagsLength >> MPI2_SGE_FLAGS_SHIFT) & 1867 MPI2_SGE_FLAGS_ADDRESS_SIZE) == 0) 1868 panic("SGE simple %p flags %02x not marked 64-bit?", 1869 sge, sge->FlagsLength >> MPI2_SGE_FLAGS_SHIFT); 1870 1871 break; 1872 default: 1873 panic("Unexpected SGE %p, flags %02x", tc, tc->Flags); 1874 } 1875 #endif 1876 1877 /* 1878 * case 1: 1 more segment, enough room for it 1879 * case 2: 2 more segments, enough room for both 1880 * case 3: >=2 more segments, only enough room for 1 and a chain 1881 * case 4: >=1 more segment, enough room for only a chain 1882 * case 5: >=1 more segment, no room for anything (error) 1883 */ 1884 1885 /* 1886 * There should be room for at least a chain element, or this 1887 * code is buggy. Case (5). 1888 */ 1889 if (cm->cm_sglsize < MPS_SGC_SIZE) 1890 panic("MPS: Need SGE Error Code\n"); 1891 1892 if (segsleft >= 2 && 1893 cm->cm_sglsize < len + MPS_SGC_SIZE + MPS_SGE64_SIZE) { 1894 /* 1895 * There are 2 or more segments left to add, and only 1896 * enough room for 1 and a chain. Case (3). 1897 * 1898 * Mark as last element in this chain if necessary. 
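 * When the element being copied in is a simple SGE it is the last one
 * that will land in the current frame, so it gets
 * MPI2_SGE_FLAGS_LAST_ELEMENT before mps_add_chain() repoints cm_sge
 * at a fresh chain frame.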
1899 */ 1900 if (type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) { 1901 sge->FlagsLength |= 1902 (MPI2_SGE_FLAGS_LAST_ELEMENT << MPI2_SGE_FLAGS_SHIFT); 1903 } 1904 1905 /* 1906 * Add the item then a chain. Do the chain now, 1907 * rather than on the next iteration, to simplify 1908 * understanding the code. 1909 */ 1910 cm->cm_sglsize -= len; 1911 bcopy(sgep, cm->cm_sge, len); 1912 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 1913 return (mps_add_chain(cm)); 1914 } 1915 1916 if (segsleft >= 1 && cm->cm_sglsize < len + MPS_SGC_SIZE) { 1917 /* 1918 * 1 or more segment, enough room for only a chain. 1919 * Hope the previous element wasn't a Simple entry 1920 * that needed to be marked with 1921 * MPI2_SGE_FLAGS_LAST_ELEMENT. Case (4). 1922 */ 1923 if ((error = mps_add_chain(cm)) != 0) 1924 return (error); 1925 } 1926 1927 #ifdef INVARIANTS 1928 /* Case 1: 1 more segment, enough room for it. */ 1929 if (segsleft == 1 && cm->cm_sglsize < len) 1930 panic("1 seg left and no room? %u versus %zu", 1931 cm->cm_sglsize, len); 1932 1933 /* Case 2: 2 more segments, enough room for both */ 1934 if (segsleft == 2 && cm->cm_sglsize < len + MPS_SGE64_SIZE) 1935 panic("2 segs left and no room? %u versus %zu", 1936 cm->cm_sglsize, len); 1937 #endif 1938 1939 if (segsleft == 1 && type == MPI2_SGE_FLAGS_SIMPLE_ELEMENT) { 1940 /* 1941 * If this is a bi-directional request, need to account for that 1942 * here. Save the pre-filled sge values. These will be used 1943 * either for the 2nd SGL or for a single direction SGL. If 1944 * cm_out_len is non-zero, this is a bi-directional request, so 1945 * fill in the OUT SGL first, then the IN SGL, otherwise just 1946 * fill in the IN SGL. Note that at this time, when filling in 1947 * 2 SGL's for a bi-directional request, they both use the same 1948 * DMA buffer (same cm command). 1949 */ 1950 saved_buf_len = sge->FlagsLength & 0x00FFFFFF; 1951 saved_address_low = sge->Address.Low; 1952 saved_address_high = sge->Address.High; 1953 if (cm->cm_out_len) { 1954 sge->FlagsLength = cm->cm_out_len | 1955 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 1956 MPI2_SGE_FLAGS_END_OF_BUFFER | 1957 MPI2_SGE_FLAGS_HOST_TO_IOC | 1958 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 1959 MPI2_SGE_FLAGS_SHIFT); 1960 cm->cm_sglsize -= len; 1961 bcopy(sgep, cm->cm_sge, len); 1962 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge 1963 + len); 1964 } 1965 sge->FlagsLength = saved_buf_len | 1966 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 1967 MPI2_SGE_FLAGS_END_OF_BUFFER | 1968 MPI2_SGE_FLAGS_LAST_ELEMENT | 1969 MPI2_SGE_FLAGS_END_OF_LIST | 1970 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 1971 MPI2_SGE_FLAGS_SHIFT); 1972 if (cm->cm_flags & MPS_CM_FLAGS_DATAIN) { 1973 sge->FlagsLength |= 1974 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << 1975 MPI2_SGE_FLAGS_SHIFT); 1976 } else { 1977 sge->FlagsLength |= 1978 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << 1979 MPI2_SGE_FLAGS_SHIFT); 1980 } 1981 sge->Address.Low = saved_address_low; 1982 sge->Address.High = saved_address_high; 1983 } 1984 1985 cm->cm_sglsize -= len; 1986 bcopy(sgep, cm->cm_sge, len); 1987 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 1988 return (0); 1989 } 1990 1991 /* 1992 * Add one dma segment to the scatter-gather list for a command. 1993 */ 1994 int 1995 mps_add_dmaseg(struct mps_command *cm, vm_paddr_t pa, size_t len, u_int flags, 1996 int segsleft) 1997 { 1998 MPI2_SGE_SIMPLE64 sge; 1999 2000 /* 2001 * This driver always uses 64-bit address elements for simplicity. 
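 * The length ends up in the low 24 bits of FlagsLength and the flags in
 * the byte above them (MPI2_SGE_FLAGS_SHIFT), which is why callers pass
 * their flags unshifted and this routine does the shift.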
2002 */
2003 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2004 MPI2_SGE_FLAGS_64_BIT_ADDRESSING;
2005 sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT);
2006 mps_from_u64(pa, &sge.Address);
2007 
2008 return (mps_push_sge(cm, &sge, sizeof sge, segsleft));
2009 }
2010 
2011 static void
2012 mps_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
2013 {
2014 struct mps_softc *sc;
2015 struct mps_command *cm;
2016 u_int i, dir, sflags;
2017 
2018 cm = (struct mps_command *)arg;
2019 sc = cm->cm_sc;
2020 
2021 /*
2022 * If busdma returned more segments than this command allows, just
2023 * print a warning and let the chip tell the user they did the wrong thing.
2024 */
2025 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
2026 mps_printf(sc, "%s: warning: busdma returned %d segments, "
2027 "more than the %d allowed\n", __func__, nsegs,
2028 cm->cm_max_segs);
2029 }
2030 
2031 /*
2032 * Set up DMA direction flags. Bi-directional requests are also handled
2033 * here. In that case, both direction flags will be set.
2034 */
2035 sflags = 0;
2036 if (cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) {
2037 /*
2038 * We have to add a special case for SMP passthrough; there
2039 * is no easy way to handle it generically. The first
2040 * S/G element is used for the command (therefore the
2041 * direction bit needs to be set). The second one is used
2042 * for the reply. We'll leave it to the caller to make
2043 * sure we only have two buffers.
2044 */
2045 /*
2046 * Even though the busdma man page says it doesn't make
2047 * sense to have both direction flags, it does in this case.
2048 * We have one s/g element being accessed in each direction.
2049 */
2050 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
2051 
2052 /*
2053 * Set the direction flag on the first buffer in the SMP
2054 * passthrough request. We'll clear it for the second one.
2055 */
2056 sflags |= MPI2_SGE_FLAGS_DIRECTION |
2057 MPI2_SGE_FLAGS_END_OF_BUFFER;
2058 } else if (cm->cm_flags & MPS_CM_FLAGS_DATAOUT) {
2059 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2060 dir = BUS_DMASYNC_PREWRITE;
2061 } else
2062 dir = BUS_DMASYNC_PREREAD;
2063 
2064 for (i = 0; i < nsegs; i++) {
2065 if ((cm->cm_flags & MPS_CM_FLAGS_SMP_PASS) && (i != 0)) {
2066 sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
2067 }
2068 error = mps_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
2069 sflags, nsegs - i);
2070 if (error != 0) {
2071 /* Resource shortage, roll back! */
2072 mps_dprint(sc, MPS_INFO, "out of chain frames\n");
2073 cm->cm_flags |= MPS_CM_FLAGS_CHAIN_FAILED;
2074 mps_complete_command(cm);
2075 return;
2076 }
2077 }
2078 
2079 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
2080 mps_enqueue_request(sc, cm);
2081 
2082 return;
2083 }
2084 
2085 static void
2086 mps_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
2087 int error)
2088 {
2089 mps_data_cb(arg, segs, nsegs, error);
2090 }
2091 
2092 /*
2093 * This is the routine to enqueue commands asynchronously.
2094 * Note that the only error path here is from bus_dmamap_load(), which can
2095 * return EINPROGRESS if it is waiting for resources. Other than this, it's
2096 * assumed that if you have a command in-hand, then you have enough credits
2097 * to use it.
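 *
 * A minimal sketch of the asynchronous pattern, as the callers in this
 * file use it (illustrative only; my_done, buf, and len are placeholders
 * and the request setup is elided):
 *
 *	if ((cm = mps_alloc_command(sc)) == NULL)
 *		return (EBUSY);
 *	... fill in cm->cm_req, cm->cm_desc, and cm->cm_sge/cm_sglsize ...
 *	cm->cm_data = buf;
 *	cm->cm_length = len;
 *	cm->cm_flags = MPS_CM_FLAGS_DATAIN;
 *	cm->cm_complete = my_done;
 *	error = mps_map_command(sc, cm);
 *
 * Once the reply is processed, cm->cm_complete is invoked and is
 * expected to release the command, as mps_config_complete() and
 * mps_reregister_events_complete() do.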
2098 */
2099 int
2100 mps_map_command(struct mps_softc *sc, struct mps_command *cm)
2101 {
2102 MPI2_SGE_SIMPLE32 *sge;
2103 int error = 0;
2104 
2105 if (cm->cm_flags & MPS_CM_FLAGS_USE_UIO) {
2106 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
2107 &cm->cm_uio, mps_data_cb2, cm, 0);
2108 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
2109 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
2110 cm->cm_data, cm->cm_length, mps_data_cb, cm, 0);
2111 } else {
2112 /* Add a zero-length element as needed */
2113 if (cm->cm_sge != NULL) {
2114 sge = (MPI2_SGE_SIMPLE32 *)cm->cm_sge;
2115 sge->FlagsLength = (MPI2_SGE_FLAGS_LAST_ELEMENT |
2116 MPI2_SGE_FLAGS_END_OF_BUFFER |
2117 MPI2_SGE_FLAGS_END_OF_LIST |
2118 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
2119 MPI2_SGE_FLAGS_SHIFT;
2120 sge->Address = 0;
2121 }
2122 mps_enqueue_request(sc, cm);
2123 }
2124 
2125 return (error);
2126 }
2127 
2128 /*
2129 * This is the routine to enqueue commands synchronously. An error of
2130 * EINPROGRESS from mps_map_command() is ignored since the command will
2131 * be executed and enqueued automatically. Other errors come from msleep().
2132 */
2133 int
2134 mps_wait_command(struct mps_softc *sc, struct mps_command *cm, int timeout)
2135 {
2136 int error;
2137 
2138 mtx_assert(&sc->mps_mtx, MA_OWNED);
2139 
2140 cm->cm_complete = NULL;
2141 cm->cm_flags |= MPS_CM_FLAGS_WAKEUP;
2142 error = mps_map_command(sc, cm);
2143 if ((error != 0) && (error != EINPROGRESS))
2144 return (error);
2145 error = msleep(cm, &sc->mps_mtx, 0, "mpswait", timeout);
2146 if (error == EWOULDBLOCK)
2147 error = ETIMEDOUT;
2148 return (error);
2149 }
2150 
2151 /*
2152 * This is the routine to enqueue a command synchronously and poll for
2153 * completion. Its use should be rare.
2154 */
2155 int
2156 mps_request_polled(struct mps_softc *sc, struct mps_command *cm)
2157 {
2158 int error, timeout = 0;
2159 
2160 error = 0;
2161 
2162 cm->cm_flags |= MPS_CM_FLAGS_POLLED;
2163 cm->cm_complete = NULL;
2164 mps_map_command(sc, cm);
2165 
2166 while ((cm->cm_flags & MPS_CM_FLAGS_COMPLETE) == 0) {
2167 mps_intr_locked(sc);
2168 DELAY(50 * 1000);
2169 if (timeout++ > 1000) {
2170 mps_dprint(sc, MPS_FAULT, "polling failed\n");
2171 error = ETIMEDOUT;
2172 break;
2173 }
2174 }
2175 
2176 return (error);
2177 }
2178 
2179 /*
2180 * The MPT driver had a verbose interface for config pages. In this driver,
2181 * it is reduced to much simpler terms, similar to the Linux driver.
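 *
 * A rough sketch of a synchronous read (hypothetical caller; the page
 * type, page number, and destination buffer are placeholders, and a real
 * caller would normally fetch the page header first to fill in
 * PageLength/PageVersion):
 *
 *	struct mps_config_params params;
 *
 *	bzero(&params, sizeof(params));
 *	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
 *	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_IO_UNIT;
 *	params.hdr.Struct.PageNumber = 1;
 *	params.page_address = 0;
 *	params.buffer = &page_buf;
 *	params.length = sizeof(page_buf);
 *	params.callback = NULL;	/* NULL means wait for completion */
 *	error = mps_read_config_page(sc, &params);
 *
 * With a non-NULL callback the call returns after queueing the request
 * and the callback later runs from mps_config_complete().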
2182 */
2183 int
2184 mps_read_config_page(struct mps_softc *sc, struct mps_config_params *params)
2185 {
2186 MPI2_CONFIG_REQUEST *req;
2187 struct mps_command *cm;
2188 int error;
2189 
2190 if (sc->mps_flags & MPS_FLAGS_BUSY) {
2191 return (EBUSY);
2192 }
2193 
2194 cm = mps_alloc_command(sc);
2195 if (cm == NULL) {
2196 return (EBUSY);
2197 }
2198 
2199 req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
2200 req->Function = MPI2_FUNCTION_CONFIG;
2201 req->Action = params->action;
2202 req->SGLFlags = 0;
2203 req->ChainOffset = 0;
2204 req->PageAddress = params->page_address;
2205 if (params->hdr.Ext.ExtPageType != 0) {
2206 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
2207 
2208 hdr = &params->hdr.Ext;
2209 req->ExtPageType = hdr->ExtPageType;
2210 req->ExtPageLength = hdr->ExtPageLength;
2211 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
2212 req->Header.PageLength = 0; /* Must be set to zero */
2213 req->Header.PageNumber = hdr->PageNumber;
2214 req->Header.PageVersion = hdr->PageVersion;
2215 } else {
2216 MPI2_CONFIG_PAGE_HEADER *hdr;
2217 
2218 hdr = &params->hdr.Struct;
2219 req->Header.PageType = hdr->PageType;
2220 req->Header.PageNumber = hdr->PageNumber;
2221 req->Header.PageLength = hdr->PageLength;
2222 req->Header.PageVersion = hdr->PageVersion;
2223 }
2224 
2225 cm->cm_data = params->buffer;
2226 cm->cm_length = params->length;
2227 cm->cm_sge = &req->PageBufferSGE;
2228 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
2229 cm->cm_flags = MPS_CM_FLAGS_SGE_SIMPLE | MPS_CM_FLAGS_DATAIN;
2230 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
2231 
2232 cm->cm_complete_data = params;
2233 if (params->callback != NULL) {
2234 cm->cm_complete = mps_config_complete;
2235 return (mps_map_command(sc, cm));
2236 } else {
2237 error = mps_wait_command(sc, cm, 0);
2238 if (error) {
2239 mps_dprint(sc, MPS_FAULT,
2240 "Error %d reading config page\n", error);
2241 mps_free_command(sc, cm);
2242 return (error);
2243 }
2244 mps_config_complete(sc, cm);
2245 }
2246 
2247 return (0);
2248 }
2249 
2250 int
2251 mps_write_config_page(struct mps_softc *sc, struct mps_config_params *params)
2252 {
2253 return (EINVAL);
2254 }
2255 
2256 static void
2257 mps_config_complete(struct mps_softc *sc, struct mps_command *cm)
2258 {
2259 MPI2_CONFIG_REPLY *reply;
2260 struct mps_config_params *params;
2261 
2262 params = cm->cm_complete_data;
2263 
2264 if (cm->cm_data != NULL) {
2265 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
2266 BUS_DMASYNC_POSTREAD);
2267 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
2268 }
2269 
2270 /*
2271 * XXX KDM need to do more error recovery? This results in the
2272 * device in question not getting probed.
2273 */ 2274 if ((cm->cm_flags & MPS_CM_FLAGS_ERROR_MASK) != 0) { 2275 params->status = MPI2_IOCSTATUS_BUSY; 2276 goto done; 2277 } 2278 2279 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; 2280 if (reply == NULL) { 2281 params->status = MPI2_IOCSTATUS_BUSY; 2282 goto done; 2283 } 2284 params->status = reply->IOCStatus; 2285 if (params->hdr.Ext.ExtPageType != 0) { 2286 params->hdr.Ext.ExtPageType = reply->ExtPageType; 2287 params->hdr.Ext.ExtPageLength = reply->ExtPageLength; 2288 } else { 2289 params->hdr.Struct.PageType = reply->Header.PageType; 2290 params->hdr.Struct.PageNumber = reply->Header.PageNumber; 2291 params->hdr.Struct.PageLength = reply->Header.PageLength; 2292 params->hdr.Struct.PageVersion = reply->Header.PageVersion; 2293 } 2294 2295 done: 2296 mps_free_command(sc, cm); 2297 if (params->callback != NULL) 2298 params->callback(sc, params); 2299 2300 return; 2301 } 2302