/*-
 * Copyright (c) 2009 Yahoo! Inc.
 * Copyright (c) 2011-2015 LSI Corp.
 * Copyright (c) 2013-2016 Avago Technologies
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Communications core for Avago Technologies (LSI) MPT3 */

/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/scsi/scsi_all.h>

#include <dev/mpr/mpi/mpi2_type.h>
#include <dev/mpr/mpi/mpi2.h>
#include <dev/mpr/mpi/mpi2_ioc.h>
#include <dev/mpr/mpi/mpi2_sas.h>
#include <dev/mpr/mpi/mpi2_pci.h>
#include <dev/mpr/mpi/mpi2_cnfg.h>
#include <dev/mpr/mpi/mpi2_init.h>
#include <dev/mpr/mpi/mpi2_tool.h>
#include <dev/mpr/mpr_ioctl.h>
#include <dev/mpr/mprvar.h>
#include <dev/mpr/mpr_table.h>
#include <dev/mpr/mpr_sas.h>

static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_init_queues(struct mpr_softc *sc);
static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_transition_operational(struct mpr_softc *sc);
static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching);
static void mpr_iocfacts_free(struct mpr_softc *sc);
static void mpr_startup(void *arg);
static int mpr_send_iocinit(struct mpr_softc *sc);
static int mpr_alloc_queues(struct mpr_softc *sc);
static int mpr_alloc_replies(struct mpr_softc *sc);
static int mpr_alloc_requests(struct mpr_softc *sc);
static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc);
static int mpr_attach_log(struct mpr_softc *sc);
static __inline void mpr_complete_command(struct mpr_softc *sc,
    struct mpr_command *cm);
static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
    MPI2_EVENT_NOTIFICATION_REPLY *reply);
static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mpr_periodic(void *);
static int mpr_reregister_events(struct mpr_softc *sc);
static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag);
SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD, 0, "MPR Driver Parameters");

MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory");

/*
 * Do a "Diagnostic Reset" aka a hard reset.  This should get the chip out of
 * any state and back to its initialization state machine.
 */
static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };

/*
 * Added this union to smoothly convert le64toh(cm->cm_desc.Words).
 * The compiler only supports a uint64_t being passed as the argument.
 * Otherwise it will throw this error:
 * "aggregate value used where an integer was expected"
 */
typedef union _reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
} reply_descriptor, request_descriptor;

/* Rate limit chain-fail messages to 1 per minute */
static struct timeval mpr_chainfail_interval = { 60, 0 };

/*
 * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
 * If this function is called from process context, it can sleep and there
 * is no harm in sleeping.  If it is called from an interrupt handler, it
 * cannot sleep, so the NO_SLEEP flag must be set.  Based on the sleep flag,
 * the driver will call msleep, pause, or DELAY.  msleep and pause are
 * similar, but pause is used when mpr_mtx is not held by the driver.
 */
static int
mpr_diag_reset(struct mpr_softc *sc, int sleep_flag)
{
	uint32_t reg;
	int i, error, tries = 0;
	uint8_t first_wait_done = FALSE;

	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

	/* Clear any pending interrupts */
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/*
	 * Force NO_SLEEP for threads that are prohibited from sleeping,
	 * e.g. threads running in an interrupt handler.
	 */
#if __FreeBSD_version >= 1000029
	if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
	if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
		sleep_flag = NO_SLEEP;

	/* Push the magic sequence */
	error = ETIMEDOUT;
	while (tries++ < 20) {
		for (i = 0; i < sizeof(mpt2_reset_magic); i++)
			mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
			    mpt2_reset_magic[i]);

		/* wait 100 msec */
		if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
			    "mprdiag", hz/10);
		else if (sleep_flag == CAN_SLEEP)
			pause("mprdiag", hz/10);
		else
			DELAY(100 * 1000);

		reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
		if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
			error = 0;
			break;
		}
	}
	if (error)
		return (error);
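
	/*
	 * Timing note (derived from the loop above): each unlock attempt
	 * writes the 7-byte magic key and then waits roughly 100ms, so with
	 * 20 tries the unlock phase alone can take about 2 seconds before
	 * giving up with ETIMEDOUT.
	 */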

	/* Send the actual reset.  XXX need to refresh the reg? */
	mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET,
	    reg | MPI2_DIAG_RESET_ADAPTER);

	/* Wait up to 300 seconds in 50ms intervals */
	error = ETIMEDOUT;
	for (i = 0; i < 6000; i++) {
		/*
		 * Wait 50 msec.  If this is the first time through, wait 256
		 * msec to satisfy Diag Reset timing requirements.
		 */
		if (first_wait_done) {
			if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
				msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
				    "mprdiag", hz/20);
			else if (sleep_flag == CAN_SLEEP)
				pause("mprdiag", hz/20);
			else
				DELAY(50 * 1000);
		} else {
			DELAY(256 * 1000);
			first_wait_done = TRUE;
		}
		/*
		 * Check for the RESET_ADAPTER bit to be cleared first, then
		 * wait for the RESET state to be cleared, which takes a little
		 * longer.
		 */
		reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
		if (reg & MPI2_DIAG_RESET_ADAPTER) {
			continue;
		}
		reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
			error = 0;
			break;
		}
	}
	if (error)
		return (error);

	mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);

	return (0);
}

static int
mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag)
{

	MPR_FUNCTRACE(sc);

	mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
	    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
	    MPI2_DOORBELL_FUNCTION_SHIFT);

	if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed : <%s>\n",
		    __func__);
		return (ETIMEDOUT);
	}

	return (0);
}

static int
mpr_transition_ready(struct mpr_softc *sc)
{
	uint32_t reg, state;
	int error, tries = 0;
	int sleep_flags;

	MPR_FUNCTRACE(sc);
	/* If we are in attach call, do not sleep */
	sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE)
	    ? CAN_SLEEP : NO_SLEEP;

	error = 0;
	while (tries++ < 1200) {
		reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		mpr_dprint(sc, MPR_INIT, "Doorbell= 0x%x\n", reg);

		/*
		 * Ensure the IOC is ready to talk.  If it's not, try
		 * resetting it.
		 */
		if (reg & MPI2_DOORBELL_USED) {
			mpr_diag_reset(sc, sleep_flags);
			DELAY(50000);
			continue;
		}

		/* Is the adapter owned by another peer? */
		if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
		    (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
			device_printf(sc->mpr_dev, "IOC is under the control "
			    "of another peer host, aborting initialization.\n");
			return (ENXIO);
		}

		state = reg & MPI2_IOC_STATE_MASK;
		if (state == MPI2_IOC_STATE_READY) {
			/* Ready to go! */
			error = 0;
			break;
		} else if (state == MPI2_IOC_STATE_FAULT) {
			mpr_dprint(sc, MPR_FAULT, "IOC in fault state 0x%x\n",
			    state & MPI2_DOORBELL_FAULT_CODE_MASK);
			mpr_diag_reset(sc, sleep_flags);
		} else if (state == MPI2_IOC_STATE_OPERATIONAL) {
			/* Need to take ownership */
			mpr_message_unit_reset(sc, sleep_flags);
		} else if (state == MPI2_IOC_STATE_RESET) {
			/* Wait a bit, IOC might be in transition */
			mpr_dprint(sc, MPR_FAULT,
			    "IOC in unexpected reset state\n");
		} else {
			mpr_dprint(sc, MPR_FAULT,
			    "IOC in unknown state 0x%x\n", state);
			error = EINVAL;
			break;
		}

		/* Wait 50ms for things to settle down. */
		DELAY(50000);
	}
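
	/*
	 * Note: at ~50ms per pass and a cap of 1200 tries, the loop above
	 * bounds the wait at roughly 60 seconds, not counting any time
	 * spent inside mpr_diag_reset() itself.
	 */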

	if (error)
		device_printf(sc->mpr_dev, "Cannot transition IOC to ready\n");
	return (error);
}

static int
mpr_transition_operational(struct mpr_softc *sc)
{
	uint32_t reg, state;
	int error;

	MPR_FUNCTRACE(sc);

	error = 0;
	reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
	mpr_dprint(sc, MPR_INIT, "Doorbell= 0x%x\n", reg);

	state = reg & MPI2_IOC_STATE_MASK;
	if (state != MPI2_IOC_STATE_READY) {
		if ((error = mpr_transition_ready(sc)) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "%s failed to transition ready\n", __func__);
			return (error);
		}
	}

	error = mpr_send_iocinit(sc);
	return (error);
}

/*
 * This is called during attach and when re-initializing due to a Diag Reset.
 * IOC Facts is used to allocate many of the structures needed by the driver.
 * If called from attach, de-allocation is not required because the driver has
 * not allocated any structures yet, but if called from a Diag Reset, previously
 * allocated structures based on IOC Facts will need to be freed and re-
 * allocated based on the latest IOC Facts.
 */
static int
mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching)
{
	int error;
	Mpi2IOCFactsReply_t saved_facts;
	uint8_t saved_mode, reallocating;

	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

	/* Save old IOC Facts and then only reallocate if Facts have changed */
	if (!attaching) {
		bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
	}

	/*
	 * Get IOC Facts.  In all cases throughout this function, panic if doing
	 * a re-initialization and only return the error if attaching so the OS
	 * can handle it.
	 */
	if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) {
		if (attaching) {
			mpr_dprint(sc, MPR_FAULT, "%s failed to get IOC Facts "
			    "with error %d\n", __func__, error);
			return (error);
		} else {
			panic("%s failed to get IOC Facts with error %d\n",
			    __func__, error);
		}
	}

	mpr_print_iocfacts(sc, sc->facts);

	snprintf(sc->fw_version, sizeof(sc->fw_version),
	    "%02d.%02d.%02d.%02d",
	    sc->facts->FWVersion.Struct.Major,
	    sc->facts->FWVersion.Struct.Minor,
	    sc->facts->FWVersion.Struct.Unit,
	    sc->facts->FWVersion.Struct.Dev);

	mpr_printf(sc, "Firmware: %s, Driver: %s\n", sc->fw_version,
	    MPR_DRIVER_VERSION);
	mpr_printf(sc, "IOCCapabilities: %b\n", sc->facts->IOCCapabilities,
	    "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
	    "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
	    "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"
	    "\22FastPath" "\23RDPQArray" "\24AtomicReqDesc" "\25PCIeSRIOV");

	/*
	 * If the chip doesn't support event replay then a hard reset will be
	 * required to trigger a full discovery.  Do the reset here then
	 * retransition to Ready.  A hard reset might have already been done,
	 * but it doesn't hurt to do it again.  Only do this if attaching, not
	 * for a Diag Reset.
	 */
	if (attaching) {
		if ((sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0) {
			mpr_diag_reset(sc, NO_SLEEP);
			if ((error = mpr_transition_ready(sc)) != 0) {
				mpr_dprint(sc, MPR_FAULT, "%s failed to "
				    "transition to ready with error %d\n",
				    __func__, error);
				return (error);
			}
		}
	}

	/*
	 * Set flag if IR Firmware is loaded.  If the RAID Capability has
	 * changed from the previous IOC Facts, log a warning, but only if
	 * checking this after a Diag Reset and not during attach.
	 */
	saved_mode = sc->ir_firmware;
	if (sc->facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
		sc->ir_firmware = 1;
	if (!attaching) {
		if (sc->ir_firmware != saved_mode) {
			mpr_dprint(sc, MPR_FAULT, "%s new IR/IT mode in IOC "
			    "Facts does not match previous mode\n", __func__);
		}
	}

	/* Only deallocate and reallocate if relevant IOC Facts have changed */
	reallocating = FALSE;
	if ((!attaching) &&
	    ((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
	    (saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
	    (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
	    (saved_facts.RequestCredit != sc->facts->RequestCredit) ||
	    (saved_facts.ProductID != sc->facts->ProductID) ||
	    (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
	    (saved_facts.IOCRequestFrameSize !=
	    sc->facts->IOCRequestFrameSize) ||
	    (saved_facts.IOCMaxChainSegmentSize !=
	    sc->facts->IOCMaxChainSegmentSize) ||
	    (saved_facts.MaxTargets != sc->facts->MaxTargets) ||
	    (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
	    (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
	    (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
	    (saved_facts.MaxReplyDescriptorPostQueueDepth !=
	    sc->facts->MaxReplyDescriptorPostQueueDepth) ||
	    (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
	    (saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
	    (saved_facts.MaxPersistentEntries !=
	    sc->facts->MaxPersistentEntries))) {
		reallocating = TRUE;
	}

	/*
	 * Some things should be done if attaching or re-allocating after a Diag
	 * Reset, but are not needed after a Diag Reset if the FW has not
	 * changed.
	 */
	if (attaching || reallocating) {
		/*
		 * Check if controller supports FW diag buffers and set flag to
		 * enable each type.
		 */
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
			    enabled = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
			    enabled = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
			    enabled = TRUE;

		/*
		 * Set flags for some supported items.
		 */
		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
			sc->eedp_enabled = TRUE;
		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
			sc->control_TLR = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
			sc->atomic_desc_capable = TRUE;

		/*
		 * Size the queues.  Since the reply queues always need one free
		 * entry, we'll just deduct one reply message here.
		 */
		sc->num_reqs = MIN(MPR_REQ_FRAMES, sc->facts->RequestCredit);
		sc->num_replies = MIN(MPR_REPLY_FRAMES + MPR_EVT_REPLY_FRAMES,
		    sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;
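
		/*
		 * Worked example (illustrative values only): if the IOC
		 * reports a MaxReplyDescriptorPostQueueDepth of 1024 and the
		 * driver's compile-time frame counts exceed that, num_replies
		 * becomes 1024 - 1 = 1023.  mpr_alloc_queues() later rounds
		 * the queue depths back up to a multiple of 16.
		 */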

		/*
		 * Initialize all Tail Queues
		 */
		TAILQ_INIT(&sc->req_list);
		TAILQ_INIT(&sc->high_priority_req_list);
		TAILQ_INIT(&sc->chain_list);
		TAILQ_INIT(&sc->prp_page_list);
		TAILQ_INIT(&sc->tm_list);
	}

	/*
	 * If doing a Diag Reset and the FW is significantly different
	 * (reallocating will be set above in IOC Facts comparison), then all
	 * buffers based on the IOC Facts will need to be freed before they are
	 * reallocated.
	 */
	if (reallocating) {
		mpr_iocfacts_free(sc);
		mprsas_realloc_targets(sc, saved_facts.MaxTargets);
	}

	/*
	 * Any deallocation has been completed.  Now start reallocating
	 * if needed.  Will only need to reallocate if attaching or if the new
	 * IOC Facts are different from the previous IOC Facts after a Diag
	 * Reset.  Targets have already been allocated above if needed.
	 */
	if (attaching || reallocating) {
		if (((error = mpr_alloc_queues(sc)) != 0) ||
		    ((error = mpr_alloc_replies(sc)) != 0) ||
		    ((error = mpr_alloc_requests(sc)) != 0)) {
			if (attaching) {
				mpr_dprint(sc, MPR_FAULT, "%s failed to alloc "
				    "queues with error %d\n", __func__, error);
				mpr_free(sc);
				return (error);
			} else {
				panic("%s failed to alloc queues with error "
				    "%d\n", __func__, error);
			}
		}
	}

	/* Always initialize the queues */
	bzero(sc->free_queue, sc->fqdepth * 4);
	mpr_init_queues(sc);

	/*
	 * Always get the chip out of the reset state, but only panic if not
	 * attaching.  If attaching and there is an error, that is handled by
	 * the OS.
	 */
	error = mpr_transition_operational(sc);
	if (error != 0) {
		if (attaching) {
			mpr_printf(sc, "%s failed to transition to operational "
			    "with error %d\n", __func__, error);
			mpr_free(sc);
			return (error);
		} else {
			panic("%s failed to transition to operational with "
			    "error %d\n", __func__, error);
		}
	}

	/*
	 * Finish the queue initialization.
	 * These are set here instead of in mpr_init_queues() because the
	 * IOC resets these values during the state transition in
	 * mpr_transition_operational().  The free index is set to 1
	 * because the corresponding index in the IOC is set to 0, and the
	 * IOC treats the queues as full if both are set to the same value.
	 * Hence the reason that the queue can't hold all of the possible
	 * replies.
	 */
	sc->replypostindex = 0;
	mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
	mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);

	/*
	 * Attach the subsystems so they can prepare their event masks.
	 */
	/* XXX Should be dynamic so that IM/IR and user modules can attach */
	if (attaching) {
		if (((error = mpr_attach_log(sc)) != 0) ||
		    ((error = mpr_attach_sas(sc)) != 0) ||
		    ((error = mpr_attach_user(sc)) != 0)) {
			mpr_printf(sc, "%s failed to attach all subsystems: "
			    "error %d\n", __func__, error);
			mpr_free(sc);
			return (error);
		}

		if ((error = mpr_pci_setup_interrupts(sc)) != 0) {
			mpr_printf(sc, "%s failed to setup interrupts\n",
			    __func__);
			mpr_free(sc);
			return (error);
		}
	}

	return (error);
}

/*
 * This is called when memory is being freed (during detach, for example) and
 * when buffers need to be reallocated due to a Diag Reset.
 */
static void
mpr_iocfacts_free(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;

	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

	if (sc->free_busaddr != 0)
		bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
	if (sc->free_queue != NULL)
		bus_dmamem_free(sc->queues_dmat, sc->free_queue,
		    sc->queues_map);
	if (sc->queues_dmat != NULL)
		bus_dma_tag_destroy(sc->queues_dmat);

	if (sc->chain_busaddr != 0)
		bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
	if (sc->chain_frames != NULL)
		bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
		    sc->chain_map);
	if (sc->chain_dmat != NULL)
		bus_dma_tag_destroy(sc->chain_dmat);

	if (sc->sense_busaddr != 0)
		bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
	if (sc->sense_frames != NULL)
		bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
		    sc->sense_map);
	if (sc->sense_dmat != NULL)
		bus_dma_tag_destroy(sc->sense_dmat);

	if (sc->prp_page_busaddr != 0)
		bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map);
	if (sc->prp_pages != NULL)
		bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages,
		    sc->prp_page_map);
	if (sc->prp_page_dmat != NULL)
		bus_dma_tag_destroy(sc->prp_page_dmat);

	if (sc->reply_busaddr != 0)
		bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
	if (sc->reply_frames != NULL)
		bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
		    sc->reply_map);
	if (sc->reply_dmat != NULL)
		bus_dma_tag_destroy(sc->reply_dmat);

	if (sc->req_busaddr != 0)
		bus_dmamap_unload(sc->req_dmat, sc->req_map);
	if (sc->req_frames != NULL)
		bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
	if (sc->req_dmat != NULL)
		bus_dma_tag_destroy(sc->req_dmat);

	if (sc->chains != NULL)
		free(sc->chains, M_MPR);
	if (sc->prps != NULL)
		free(sc->prps, M_MPR);
	if (sc->commands != NULL) {
		for (i = 1; i < sc->num_reqs; i++) {
			cm = &sc->commands[i];
			bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
		}
		free(sc->commands, M_MPR);
	}
	if (sc->buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->buffer_dmat);
}

/*
 * The terms diag reset and hard reset are used interchangeably in the MPI
 * docs to mean resetting the controller chip.  In this code diag reset
 * cleans everything up, and the hard reset function just sends the reset
 * sequence to the chip.  This should probably be refactored so that every
 * subsystem gets a reset notification of some sort, and can clean up
 * appropriately.
 */
int
mpr_reinit(struct mpr_softc *sc)
{
	int error;
	struct mprsas_softc *sassc;

	sassc = sc->sassc;

	MPR_FUNCTRACE(sc);

	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) {
		mpr_dprint(sc, MPR_INIT, "%s reset already in progress\n",
		    __func__);
		return 0;
	}

	mpr_dprint(sc, MPR_INFO, "Reinitializing controller\n");
	/* make sure the completion callbacks can recognize they're getting
	 * a NULL cm_reply due to a reset.
	 */
	sc->mpr_flags |= MPR_FLAGS_DIAGRESET;

	/*
	 * Mask interrupts here.
	 */
	mpr_dprint(sc, MPR_INIT, "%s mask interrupts\n", __func__);
	mpr_mask_intr(sc);

	error = mpr_diag_reset(sc, CAN_SLEEP);
	if (error != 0) {
		panic("%s hard reset failed with error %d\n", __func__, error);
	}

	/* Restore the PCI state, including the MSI-X registers */
	mpr_pci_restore(sc);

	/* Give the I/O subsystem special priority to get itself prepared */
	mprsas_handle_reinit(sc);

	/*
	 * Get IOC Facts and allocate all structures based on this information.
	 * The attach function will also call mpr_iocfacts_allocate at startup.
	 * If relevant values have changed in IOC Facts, this function will free
	 * all of the memory based on IOC Facts and reallocate that memory.
	 */
	if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) {
		panic("%s IOC Facts based allocation failed with error %d\n",
		    __func__, error);
	}

	/*
	 * Mapping structures will be re-allocated after getting IOC Page8, so
	 * free these structures here.
	 */
	mpr_mapping_exit(sc);

	/*
	 * The static page function currently read is IOC Page8.  Others can be
	 * added in future.  It's possible that the values in IOC Page8 have
	 * changed after a Diag Reset due to user modification, so always read
	 * these.  Interrupts are masked, so unmask them before getting config
	 * pages.
	 */
	mpr_unmask_intr(sc);
	sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET;
	mpr_base_static_config_pages(sc);

	/*
	 * Some mapping info is based in IOC Page8 data, so re-initialize the
	 * mapping tables.
	 */
	mpr_mapping_initialize(sc);

	/*
	 * Restart will reload the event masks clobbered by the reset, and
	 * then enable the port.
	 */
	mpr_reregister_events(sc);

	/* the end of discovery will release the simq, so we're done. */
	mpr_dprint(sc, MPR_INFO, "%s finished sc %p post %u free %u\n",
	    __func__, sc, sc->replypostindex, sc->replyfreeindex);
	mprsas_release_simq_reinit(sassc);

	return 0;
}

/* Wait for the chip to ACK a word that we've put into its FIFO.
 * Wait for <timeout> seconds.  Each loop iteration busy-waits for
 * 500 microseconds, so the total is [0.5 * (2000 * <timeout>)]
 * milliseconds, i.e. <timeout> seconds.
 */
static int
mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
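
	/*
	 * The bound works out the same either way: with CAN_SLEEP this is
	 * 1000 iterations of ~1ms sleeps per second of timeout; with
	 * NO_SLEEP it is 2000 iterations of ~0.5ms busy-waits.
	 */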
	do {
		int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			mpr_dprint(sc, MPR_INIT, "%s: successful count(%d), "
			    "timeout(%d)\n", __func__, count, timeout);
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpr_dprint(sc, MPR_FAULT,
				    "fault_state(0x%04x)!\n", doorbell);
				return (EFAULT);
			}
		} else if (int_status == 0xFFFFFFFF)
			goto out;

		/*
		 * If it can sleep, sleep for 1 millisecond, else busy loop for
		 * 0.5 millisecond
		 */
		if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba",
			    hz/1000);
		else if (sleep_flag == CAN_SLEEP)
			pause("mprdba", hz/1000);
		else
			DELAY(500);
		count++;
	} while (--cntdn);

out:
	mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), "
	    "int_status(%x)!\n", __func__, count, int_status);
	return (ETIMEDOUT);
}

/* Wait for the chip to signal that the next word in its FIFO can be fetched */
static int
mpr_wait_db_int(struct mpr_softc *sc)
{
	int retry;

	for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) {
		if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
		    MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
			return (0);
		DELAY(2000);
	}
	return (ETIMEDOUT);
}

/* Step through the synchronous command state machine, i.e. "Doorbell mode" */
static int
mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
    int req_sz, int reply_sz, int timeout)
{
	uint32_t *data32;
	uint16_t *data16;
	int i, count, ioc_sz, residual;
	int sleep_flags = CAN_SLEEP;

#if __FreeBSD_version >= 1000029
	if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
	if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
		sleep_flags = NO_SLEEP;

	/* Step 1 */
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/* Step 2 */
	if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
		return (EBUSY);

	/* Step 3
	 * Announce that a message is coming through the doorbell.  Messages
	 * are pushed at 32bit words, so round up if needed.
	 */
	count = (req_sz + 3) / 4;
	mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
	    (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
	    (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));

	/* Step 4 */
	if (mpr_wait_db_int(sc) ||
	    (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n");
		return (ENXIO);
	}
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n");
		return (ENXIO);
	}

	/* Step 5 */
	/* Clock out the message data synchronously in 32-bit dwords*/
	data32 = (uint32_t *)req;
	for (i = 0; i < count; i++) {
		mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
		if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "Timeout while writing doorbell\n");
			return (ENXIO);
		}
	}

	/* Step 6 */
	/* Clock in the reply in 16-bit words.  The total length of the
	 * message is always in the 4th byte, so clock out the first 2 words
	 * manually, then loop the rest.
	 */
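	/*
	 * Replies come back 16 bits at a time because the doorbell register
	 * only presents 16 bits of reply data per handshake (hence the
	 * MPI2_DOORBELL_DATA_MASK below); requests, by contrast, are pushed
	 * as full 32-bit dwords.
	 */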
	data16 = (uint16_t *)reply;
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n");
		return (ENXIO);
	}
	data16[0] =
	    mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n");
		return (ENXIO);
	}
	data16[1] =
	    mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/* Number of 32bit words in the message */
	ioc_sz = reply->MsgLength;

	/*
	 * Figure out how many 16bit words to clock in without overrunning.
	 * The precision loss with dividing reply_sz can safely be
	 * ignored because the messages can only be multiples of 32bits.
	 */
	residual = 0;
	count = MIN((reply_sz / 4), ioc_sz) * 2;
	if (count < ioc_sz * 2) {
		residual = ioc_sz * 2 - count;
		mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d "
		    "residual message words\n", residual);
	}

	for (i = 2; i < count; i++) {
		if (mpr_wait_db_int(sc) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "Timeout reading doorbell %d\n", i);
			return (ENXIO);
		}
		data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) &
		    MPI2_DOORBELL_DATA_MASK;
		mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	}

	/*
	 * Pull out residual words that won't fit into the provided buffer.
	 * This keeps the chip from hanging due to a driver programming
	 * error.
	 */
	while (residual--) {
		if (mpr_wait_db_int(sc) != 0) {
			mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n");
			return (ENXIO);
		}
		(void)mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	}

	/* Step 7 */
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n");
		return (ENXIO);
	}
	if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
		mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n");
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	return (0);
}

static void
mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm)
{
	request_descriptor rd;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n",
	    cm->cm_desc.Default.SMID, cm, cm->cm_ccb);

	if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags &
	    MPR_FLAGS_SHUTDOWN))
		mtx_assert(&sc->mpr_mtx, MA_OWNED);

	if (++sc->io_cmds_active > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	if (sc->atomic_desc_capable) {
		rd.u.low = cm->cm_desc.Words.Low;
		mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET,
		    rd.u.low);
	} else {
		rd.u.low = cm->cm_desc.Words.Low;
		rd.u.high = cm->cm_desc.Words.High;
		rd.word = htole64(rd.word);
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
		    rd.u.low);
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
		    rd.u.high);
	}
}
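
/*
 * Note on the two posting paths above: atomic-capable IOCs accept an
 * abbreviated request descriptor in a single 32-bit register write.
 * Older IOCs take the full 64-bit descriptor as two writes; the low
 * dword is written first, as the IOC is expected to consume the
 * descriptor once the high dword arrives.
 */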

/*
 * Just the FACTS, ma'am.
 */
static int
mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
{
	MPI2_DEFAULT_REPLY *reply;
	MPI2_IOC_FACTS_REQUEST request;
	int error, req_sz, reply_sz;

	MPR_FUNCTRACE(sc);

	req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
	reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
	reply = (MPI2_DEFAULT_REPLY *)facts;

	bzero(&request, req_sz);
	request.Function = MPI2_FUNCTION_IOC_FACTS;
	error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);

	return (error);
}

static int
mpr_send_iocinit(struct mpr_softc *sc)
{
	MPI2_IOC_INIT_REQUEST init;
	MPI2_DEFAULT_REPLY reply;
	int req_sz, reply_sz, error;
	struct timeval now;
	uint64_t time_in_msec;

	MPR_FUNCTRACE(sc);

	req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
	reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
	bzero(&init, req_sz);
	bzero(&reply, reply_sz);

	/*
	 * Fill in the init block.  Note that most addresses are
	 * deliberately in the lower 32bits of memory.  This is a micro-
	 * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
	 */
	init.Function = MPI2_FUNCTION_IOC_INIT;
	init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	init.MsgVersion = htole16(MPI2_VERSION);
	init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
	init.SystemRequestFrameSize = htole16(sc->facts->IOCRequestFrameSize);
	init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
	init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
	init.SenseBufferAddressHigh = 0;
	init.SystemReplyAddressHigh = 0;
	init.SystemRequestFrameBaseAddress.High = 0;
	init.SystemRequestFrameBaseAddress.Low =
	    htole32((uint32_t)sc->req_busaddr);
	init.ReplyDescriptorPostQueueAddress.High = 0;
	init.ReplyDescriptorPostQueueAddress.Low =
	    htole32((uint32_t)sc->post_busaddr);
	init.ReplyFreeQueueAddress.High = 0;
	init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
	getmicrotime(&now);
	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
	init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
	init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
	init.HostPageSize = HOST_PAGE_SIZE_4K;

	error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
	if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
		error = ENXIO;

	mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
	return (error);
}

void
mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

static int
mpr_alloc_queues(struct mpr_softc *sc)
{
	bus_addr_t queues_busaddr;
	uint8_t *queues;
	int qsize, fqsize, pqsize;

	/*
	 * The reply free queue contains 4 byte entries in multiples of 16 and
	 * aligned on a 16 byte boundary.  There must always be an unused entry.
	 * This queue supplies fresh reply frames for the firmware to use.
	 *
	 * The reply descriptor post queue contains 8 byte entries in
	 * multiples of 16 and aligned on a 16 byte boundary.  This queue
	 * contains filled-in reply frames sent from the firmware to the host.
	 *
	 * These two queues are allocated together for simplicity.
	 */
	sc->fqdepth = roundup2(sc->num_replies + 1, 16);
	sc->pqdepth = roundup2(sc->num_replies + 1, 16);
	fqsize = sc->fqdepth * 4;
	pqsize = sc->pqdepth * 8;
	qsize = fqsize + pqsize;
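
	/*
	 * Layout sketch, continuing the illustrative num_replies = 1023
	 * example: fqdepth = pqdepth = roundup2(1024, 16) = 1024, so the
	 * single allocation below holds 4KB of reply free queue followed
	 * by 8KB of reply descriptor post queue, all 16-byte aligned.
	 */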

	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				16, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				qsize,			/* maxsize */
				1,			/* nsegments */
				qsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->queues_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate queues DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
	    &sc->queues_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate queues memory\n");
		return (ENOMEM);
	}
	bzero(queues, qsize);
	bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
	    mpr_memaddr_cb, &queues_busaddr, 0);

	sc->free_queue = (uint32_t *)queues;
	sc->free_busaddr = queues_busaddr;
	sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
	sc->post_busaddr = queues_busaddr + fqsize;

	return (0);
}

static int
mpr_alloc_replies(struct mpr_softc *sc)
{
	int rsize, num_replies;

	/*
	 * sc->num_replies should be one less than sc->fqdepth.  We need to
	 * allocate space for sc->fqdepth replies, but only sc->num_replies
	 * replies can be used at once.
	 */
	num_replies = max(sc->fqdepth, sc->num_replies);

	rsize = sc->facts->ReplyFrameSize * num_replies * 4;
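	/*
	 * ReplyFrameSize from IOC Facts is in units of 32-bit words, hence
	 * the multiply by 4 to get bytes; IOCRequestFrameSize in
	 * mpr_alloc_requests() below follows the same convention.
	 */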
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->reply_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate replies DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
	    BUS_DMA_NOWAIT, &sc->reply_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate replies memory\n");
		return (ENOMEM);
	}
	bzero(sc->reply_frames, rsize);
	bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
	    mpr_memaddr_cb, &sc->reply_busaddr, 0);

	return (0);
}

static int
mpr_alloc_requests(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	struct mpr_chain *chain;
	int i, rsize, nsegs;

	rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				16, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->req_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate request DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
	    BUS_DMA_NOWAIT, &sc->req_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate request memory\n");
		return (ENOMEM);
	}
	bzero(sc->req_frames, rsize);
	bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
	    mpr_memaddr_cb, &sc->req_busaddr, 0);

	/*
	 * Gen3 and beyond uses the IOCMaxChainSegmentSize from IOC Facts to
	 * get the size of a Chain Frame.  Previous versions use the size as a
	 * Request Frame for the Chain Frame size.  If IOCMaxChainSegmentSize
	 * is 0, use the default value.  The IOCMaxChainSegmentSize is the
	 * number of 16-byte elements that can fit in a Chain Frame, which is
	 * the size of an IEEE Simple SGE.
	 */
	if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) {
		sc->chain_seg_size =
		    htole16(sc->facts->IOCMaxChainSegmentSize);
		if (sc->chain_seg_size == 0) {
			sc->chain_frame_size = MPR_DEFAULT_CHAIN_SEG_SIZE *
			    MPR_MAX_CHAIN_ELEMENT_SIZE;
		} else {
			sc->chain_frame_size = sc->chain_seg_size *
			    MPR_MAX_CHAIN_ELEMENT_SIZE;
		}
	} else {
		sc->chain_frame_size = sc->facts->IOCRequestFrameSize * 4;
	}
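	/*
	 * For example (illustrative values only): an IOC reporting an
	 * IOCMaxChainSegmentSize of 8 would get a chain_frame_size of
	 * 8 * 16 = 128 bytes, room for eight IEEE Simple SGEs per chain
	 * frame.
	 */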
	rsize = sc->chain_frame_size * sc->max_chains;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				16, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->chain_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate chain DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
	    BUS_DMA_NOWAIT, &sc->chain_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate chain memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frames, rsize);
	bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize,
	    mpr_memaddr_cb, &sc->chain_busaddr, 0);

	rsize = MPR_SENSE_LEN * sc->num_reqs;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->sense_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
	    BUS_DMA_NOWAIT, &sc->sense_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bzero(sc->sense_frames, rsize);
	bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
	    mpr_memaddr_cb, &sc->sense_busaddr, 0);

	sc->chains = malloc(sizeof(struct mpr_chain) * sc->max_chains, M_MPR,
	    M_WAITOK | M_ZERO);
	if (!sc->chains) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}
	for (i = 0; i < sc->max_chains; i++) {
		chain = &sc->chains[i];
		chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames +
		    i * sc->chain_frame_size);
		chain->chain_busaddr = sc->chain_busaddr +
		    i * sc->chain_frame_size;
		mpr_free_chain(sc, chain);
		sc->chain_free_lowwater++;
	}

	/*
	 * Allocate NVMe PRP Pages for NVMe SGL support only if the FW supports
	 * these devices.
	 */
	if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) &&
	    (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) {
		if (mpr_alloc_nvme_prp_pages(sc) == ENOMEM)
			return (ENOMEM);
	}

	/* XXX Need to pick a more precise value */
	nsegs = (MAXPHYS / PAGE_SIZE) + 1;
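	/*
	 * For instance, on a configuration where MAXPHYS is 128KB and
	 * PAGE_SIZE is 4KB, this works out to 33 S/G segments; the +1
	 * covers a transfer that is not page aligned.
	 */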
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				nsegs,			/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mpr_mtx,		/* lockarg */
				&sc->buffer_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * SMID 0 cannot be used as a free command per the firmware spec.
	 * Just drop that command instead of risking accounting bugs.
	 */
	sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs,
	    M_MPR, M_WAITOK | M_ZERO);
	if (!sc->commands) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_req = sc->req_frames +
		    i * sc->facts->IOCRequestFrameSize * 4;
		cm->cm_req_busaddr = sc->req_busaddr +
		    i * sc->facts->IOCRequestFrameSize * 4;
		cm->cm_sense = &sc->sense_frames[i];
		cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN;
		cm->cm_desc.Default.SMID = i;
		cm->cm_sc = sc;
		TAILQ_INIT(&cm->cm_chain_list);
		TAILQ_INIT(&cm->cm_prp_page_list);
		callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0);

		/* XXX Is a failure here a critical problem? */
		if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap)
		    == 0) {
			if (i <= sc->facts->HighPriorityCredit)
				mpr_free_high_priority_command(sc, cm);
			else
				mpr_free_command(sc, cm);
		} else {
			panic("failed to allocate command %d\n", i);
			sc->num_reqs = i;
			break;
		}
	}

	return (0);
}

/*
 * Allocate contiguous buffers for PCIe NVMe devices for building native PRPs,
 * which are scatter/gather lists for NVMe devices.
 *
 * This buffer must be contiguous due to the nature of how NVMe PRPs are built
 * and translated by FW.
 *
 * returns ENOMEM if memory could not be allocated, otherwise returns 0.
 */
static int
mpr_alloc_nvme_prp_pages(struct mpr_softc *sc)
{
	int PRPs_per_page, PRPs_required, pages_required;
	int rsize, i;
	struct mpr_prp_page *prp_page;

	/*
	 * Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number
	 * of PRPs (NVMe's Scatter/Gather Element) needed per I/O is:
	 * MAX_IO_SIZE / PAGE_SIZE = 256
	 *
	 * 1 PRP entry in main frame for PRP list pointer still leaves 255 PRPs
	 * required for the remainder of the 1MB I/O.  512 PRPs can fit into one
	 * page (4096 / 8 = 512), so only one page is required for each I/O.
	 *
	 * Each of these buffers will need to be contiguous.  For simplicity,
	 * only one buffer is allocated here, which has all of the space
	 * required for the NVMe Queue Depth.  If there are problems allocating
	 * this one buffer, this function will need to change to allocate
	 * individual, contiguous NVME_QDEPTH buffers.
	 *
	 * The real calculation will use the real max io size.  Above is just an
	 * example.
	 */
	PRPs_required = sc->maxio / PAGE_SIZE;
	PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1;
	pages_required = (PRPs_required / PRPs_per_page) + 1;

	sc->prp_buffer_size = PAGE_SIZE * pages_required;
	rsize = sc->prp_buffer_size * NVME_QDEPTH;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->prp_page_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate NVMe PRP DMA "
		    "tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages,
	    BUS_DMA_NOWAIT, &sc->prp_page_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate NVMe PRP memory\n");
		return (ENOMEM);
	}
	bzero(sc->prp_pages, rsize);
	bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages,
	    rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0);

	sc->prps = malloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < NVME_QDEPTH; i++) {
		prp_page = &sc->prps[i];
		prp_page->prp_page = (uint64_t *)(sc->prp_pages +
		    i * sc->prp_buffer_size);
		prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr +
		    i * sc->prp_buffer_size);
		mpr_free_prp_page(sc, prp_page);
		sc->prp_pages_free_lowwater++;
	}

	return (0);
}

static int
mpr_init_queues(struct mpr_softc *sc)
{
	int i;

	memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);

	/*
	 * According to the spec, we need to use one less reply than we
	 * have space for on the queue.  So sc->num_replies (the number we
	 * use) should be less than sc->fqdepth (allocated size).
	 */
	if (sc->num_replies >= sc->fqdepth)
		return (EINVAL);

	/*
	 * Initialize all of the free queue entries.
	 */
	for (i = 0; i < sc->fqdepth; i++) {
		sc->free_queue[i] = sc->reply_busaddr +
		    (i * sc->facts->ReplyFrameSize * 4);
	}
	sc->replyfreeindex = sc->num_replies;

	return (0);
}

/* Get the driver parameter tunables.  Lowest priority are the driver defaults.
 * Next are the global settings, if they exist.  Highest are the per-unit
 * settings, if they exist.
 */
static void
mpr_get_tunables(struct mpr_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mpr_debug = MPR_INFO | MPR_FAULT;
	sc->disable_msix = 0;
	sc->disable_msi = 0;
	sc->max_chains = MPR_CHAIN_FRAMES;
	sc->max_io_pages = MPR_MAXIO_PAGES;
	sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
	sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
	sc->use_phynum = 1;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mpr.debug_level", &sc->mpr_debug);
	TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix);
	TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi);
	TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains);
	TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages);
	TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
	TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
	TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);

	/* Grab the unit-instance variables */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mpr_debug);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msix",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msi",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_io_pages",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);

	bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.use_phy_num",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
}
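
/*
 * These tunables are fetched from the kernel environment, so they can be
 * set at boot time from /boot/loader.conf, for example (values here are
 * illustrative only):
 *
 *	hw.mpr.max_chains="4096"
 *	dev.mpr.0.debug_level="3"
 *
 * A per-unit dev.mpr.<unit>.* setting overrides the hw.mpr.* global.
 */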

static void
mpr_setup_sysctl(struct mpr_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MPR controller %d",
	    device_get_unit(sc->mpr_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mpr_dev);

	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mpr_debug, 0,
	    "mpr debug level");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0,
	    "Disable the use of MSI-X interrupts");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_msi", CTLFLAG_RD, &sc->disable_msi, 0,
	    "Disable the use of MSI interrupts");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "firmware_version", CTLFLAG_RW, sc->fw_version,
	    strlen(sc->fw_version), "firmware version");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RW, MPR_DRIVER_VERSION,
	    strlen(MPR_DRIVER_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_active", CTLFLAG_RD,
	    &sc->io_cmds_active, 0, "number of currently active commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "maximum active commands seen");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "chain_free", CTLFLAG_RD,
	    &sc->chain_free, 0, "number of free chain elements");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
	    &sc->chain_free_lowwater, 0, "lowest number of free chain elements");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "max_chains", CTLFLAG_RD,
	    &sc->max_chains, 0, "maximum chain frames that will be allocated");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "max_io_pages", CTLFLAG_RD,
	    &sc->max_io_pages, 0, "maximum pages to allow per I/O (if <1 use "
	    "IOCFacts)");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0,
	    "enable SSU to SATA SSD/HDD at shutdown");

	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
	    &sc->chain_alloc_fail, "chain allocation failures");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "spinup_wait_time", CTLFLAG_RD,
	    &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for "
	    "spinup after SATA ID error");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0,
	    "Use the phy number for enumeration");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_pages_free", CTLFLAG_RD,
	    &sc->prp_pages_free, 0, "number of free PRP pages");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_pages_free_lowwater", CTLFLAG_RD,
	    &sc->prp_pages_free_lowwater, 0, "lowest number of free PRP pages");

	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD,
	    &sc->prp_page_alloc_fail, "PRP page allocation failures");
}

int
mpr_attach(struct mpr_softc *sc)
{
	int error;

	mpr_get_tunables(sc);

	MPR_FUNCTRACE(sc);

	mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF);
	callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0);
	TAILQ_INIT(&sc->event_list);
	timevalclear(&sc->lastfail);

	if ((error = mpr_transition_ready(sc)) != 0) {
		mpr_printf(sc, "%s failed to transition ready\n", __func__);
		return (error);
	}

	sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR,
	    M_ZERO|M_NOWAIT);
	if (!sc->facts) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * Get IOC Facts and allocate all structures based on this information.
	 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC
	 * Facts.  If relevant values have changed in IOC Facts, this function
	 * will free all of the memory based on IOC Facts and reallocate that
	 * memory.  If this fails, any allocated memory should already be freed.
	 */
	if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) {
		mpr_dprint(sc, MPR_FAULT, "%s IOC Facts based allocation "
		    "failed with error %d\n", __func__, error);
		return (error);
	}

	/* Start the periodic watchdog check on the IOC Doorbell */
	mpr_periodic(sc);

	/*
	 * The portenable will kick off discovery events that will drive the
	 * rest of the initialization process.  The CAM/SAS module will
	 * hold up the boot sequence until discovery is complete.
	 */
	sc->mpr_ich.ich_func = mpr_startup;
	sc->mpr_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mpr_ich) != 0) {
		mpr_dprint(sc, MPR_ERROR, "Cannot establish MPR config hook\n");
		error = EINVAL;
	}

	/*
	 * Allow IR to shutdown gracefully when shutdown occurs.
	 */
	sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
	    mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT);

	if (sc->shutdown_eh == NULL)
		mpr_dprint(sc, MPR_ERROR, "shutdown event registration "
		    "failed\n");

	mpr_setup_sysctl(sc);

	sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE;

	return (error);
}

/* Run through any late-start handlers. */
static void
mpr_startup(void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)arg;

	mpr_lock(sc);
	mpr_unmask_intr(sc);

	/* initialize device mapping tables */
	mpr_base_static_config_pages(sc);
	mpr_mapping_initialize(sc);
	mprsas_startup(sc);
	mpr_unlock(sc);
}
*/
1746 static void
1747 mpr_periodic(void *arg)
1748 {
1749 struct mpr_softc *sc;
1750 uint32_t db;
1751
1752 sc = (struct mpr_softc *)arg;
1753 if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN)
1754 return;
1755
1756 db = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
1757 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
1758 if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) ==
1759 MPI2_IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) {
1760 panic("TEMPERATURE FAULT: STOPPING.");
1761 }
1762 mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db);
1763 mpr_reinit(sc);
1764 }
1765
1766 callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc);
1767 }
1768
1769 static void
1770 mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data,
1771 MPI2_EVENT_NOTIFICATION_REPLY *event)
1772 {
1773 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;
1774
1775 mpr_print_event(sc, event);
1776
1777 switch (event->Event) {
1778 case MPI2_EVENT_LOG_DATA:
1779 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n");
1780 if (sc->mpr_debug & MPR_EVENT)
1781 hexdump(event->EventData, event->EventDataLength, NULL,
1782 0);
1783 break;
1784 case MPI2_EVENT_LOG_ENTRY_ADDED:
1785 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
1786 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event "
1787 "0x%x Sequence %d:\n", entry->LogEntryQualifier,
1788 entry->LogSequence);
1789 break;
1790 default:
1791 break;
1792 }
1793 return;
1794 }
1795
1796 static int
1797 mpr_attach_log(struct mpr_softc *sc)
1798 {
1799 uint8_t events[16];
1800
1801 bzero(events, 16);
1802 setbit(events, MPI2_EVENT_LOG_DATA);
1803 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);
1804
1805 mpr_register_events(sc, events, mpr_log_evt_handler, NULL,
1806 &sc->mpr_log_eh);
1807
1808 return (0);
1809 }
1810
1811 static int
1812 mpr_detach_log(struct mpr_softc *sc)
1813 {
1814
1815 if (sc->mpr_log_eh != NULL)
1816 mpr_deregister_events(sc, sc->mpr_log_eh);
1817 return (0);
1818 }
1819
1820 /*
1821 * Free all of the driver resources and detach submodules. Should be called
1822 * without the lock held.
1823 */
1824 int
1825 mpr_free(struct mpr_softc *sc)
1826 {
1827 int error;
1828
1829 /* Turn off the watchdog */
1830 mpr_lock(sc);
1831 sc->mpr_flags |= MPR_FLAGS_SHUTDOWN;
1832 mpr_unlock(sc);
1833 /* Lock must not be held for this */
1834 callout_drain(&sc->periodic);
1835
1836 if (((error = mpr_detach_log(sc)) != 0) ||
1837 ((error = mpr_detach_sas(sc)) != 0))
1838 return (error);
1839
1840 mpr_detach_user(sc);
1841
1842 /* Put the IOC back in the READY state. */
1843 mpr_lock(sc);
1844 if ((error = mpr_transition_ready(sc)) != 0) {
1845 mpr_unlock(sc);
1846 return (error);
1847 }
1848 mpr_unlock(sc);
1849
1850 if (sc->facts != NULL)
1851 free(sc->facts, M_MPR);
1852
1853 /*
1854 * Free all buffers that are based on IOC Facts. A Diag Reset may need
1855 * to free these buffers too.
1856 */
1857 mpr_iocfacts_free(sc);
1858
1859 if (sc->sysctl_tree != NULL)
1860 sysctl_ctx_free(&sc->sysctl_ctx);
1861
1862 /* Deregister the shutdown function */
1863 if (sc->shutdown_eh != NULL)
1864 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh);
1865
1866 mtx_destroy(&sc->mpr_mtx);
1867
1868 return (0);
1869 }
1870
1871 static __inline void
1872 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm)
1873 {
1874 MPR_FUNCTRACE(sc);
1875
1876 if (cm == NULL) {
1877 mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n");
1878 return;
1879 }
1880
1881 if (cm->cm_flags & MPR_CM_FLAGS_POLLED)
1882 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE;
1883
1884 if (cm->cm_complete != NULL) {
1885 mpr_dprint(sc, MPR_TRACE,
1886 "%s cm %p calling cm_complete %p data %p reply %p\n",
1887 __func__, cm, cm->cm_complete, cm->cm_complete_data,
1888 cm->cm_reply);
1889 cm->cm_complete(sc, cm);
1890 }
1891
1892 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) {
1893 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm);
1894 wakeup(cm);
1895 }
1896
1897 if (sc->io_cmds_active != 0) {
1898 sc->io_cmds_active--;
1899 } else {
1900 mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is "
1901 "out of sync - resyncing to 0\n");
1902 }
1903 }
1904
1905 static void
1906 mpr_sas_log_info(struct mpr_softc *sc, u32 log_info)
1907 {
1908 union loginfo_type {
1909 u32 loginfo;
1910 struct {
1911 u32 subcode:16;
1912 u32 code:8;
1913 u32 originator:4;
1914 u32 bus_type:4;
1915 } dw;
1916 };
1917 union loginfo_type sas_loginfo;
1918 char *originator_str = NULL;
1919
1920 sas_loginfo.loginfo = log_info;
1921 if (sas_loginfo.dw.bus_type != 3 /*SAS*/)
1922 return;
1923
1924 /* each nexus loss loginfo */
1925 if (log_info == 0x31170000)
1926 return;
1927
1928 /* eat the loginfos associated with task aborts */
1929 if ((log_info == 0x30050000) || (log_info == 0x31140000) ||
1930 (log_info == 0x31130000))
1931 return;
1932
1933 switch (sas_loginfo.dw.originator) {
1934 case 0:
1935 originator_str = "IOP";
1936 break;
1937 case 1:
1938 originator_str = "PL";
1939 break;
1940 case 2:
1941 originator_str = "IR";
1942 break;
1943 }
1944
1945 mpr_dprint(sc, MPR_LOG, "log_info(0x%08x): originator(%s), "
1946 "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str,
1947 sas_loginfo.dw.code, sas_loginfo.dw.subcode);
1948 }
1949
1950 static void
1951 mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply)
1952 {
1953 MPI2DefaultReply_t *mpi_reply;
1954 u16 sc_status;
1955
1956 mpi_reply = (MPI2DefaultReply_t*)reply;
1957 sc_status = le16toh(mpi_reply->IOCStatus);
1958 if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE)
1959 mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo));
1960 }
1961
1962 void
1963 mpr_intr(void *data)
1964 {
1965 struct mpr_softc *sc;
1966 uint32_t status;
1967
1968 sc = (struct mpr_softc *)data;
1969 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
1970
1971 /*
1972 * Check interrupt status register to flush the bus. This is
1973 * needed for both INTx interrupts and driver-driven polling.
1974 */
1975 status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
1976 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0)
1977 return;
1978
1979 mpr_lock(sc);
1980 mpr_intr_locked(data);
1981 mpr_unlock(sc);
1982 return;
1983 }
1984
1985 /*
1986 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the
1987 * chip. Hopefully this theory is correct.
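* (This is plausible because an MSI/MSI-X message is itself a posted
* memory write: it cannot pass the IOC's earlier DMA writes, so the
* reply descriptors should already be visible by the time the handler
* runs.  INTx has no such write, which is why mpr_intr() above reads
* the interrupt status register to flush the bus.)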
1988 */
1989 void
1990 mpr_intr_msi(void *data)
1991 {
1992 struct mpr_softc *sc;
1993
1994 sc = (struct mpr_softc *)data;
1995 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
1996 mpr_lock(sc);
1997 mpr_intr_locked(data);
1998 mpr_unlock(sc);
1999 return;
2000 }
2001
2002 /*
2003 * The locking is overly broad and simplistic, but easy to deal with for now.
2004 */
2005 void
2006 mpr_intr_locked(void *data)
2007 {
2008 MPI2_REPLY_DESCRIPTORS_UNION *desc;
2009 struct mpr_softc *sc;
2010 struct mpr_command *cm = NULL;
2011 uint8_t flags;
2012 u_int pq;
2013 MPI2_DIAG_RELEASE_REPLY *rel_rep;
2014 mpr_fw_diagnostic_buffer_t *pBuffer;
2015
2016 sc = (struct mpr_softc *)data;
2017
2018 pq = sc->replypostindex;
2019 mpr_dprint(sc, MPR_TRACE,
2020 "%s sc %p starting with replypostindex %u\n",
2021 __func__, sc, sc->replypostindex);
2022
2023 for ( ;; ) {
2024 cm = NULL;
2025 desc = &sc->post_queue[sc->replypostindex];
2026 flags = desc->Default.ReplyFlags &
2027 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
2028 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) ||
2029 (le32toh(desc->Words.High) == 0xffffffff))
2030 break;
2031
2032 /* increment the replypostindex now, so that event handlers
2033 * and cm completion handlers which decide to do a diag
2034 * reset can zero it without it getting incremented again
2035 * afterwards, and we break out of this loop on the next
2036 * iteration since the reply post queue has been cleared to
2037 * 0xFF and all descriptors look unused (which they are).
2038 */
2039 if (++sc->replypostindex >= sc->pqdepth)
2040 sc->replypostindex = 0;
2041
2042 switch (flags) {
2043 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS:
2044 case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS:
2045 case MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS:
2046 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)];
2047 cm->cm_reply = NULL;
2048 break;
2049 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY:
2050 {
2051 uint32_t baddr;
2052 uint8_t *reply;
2053
2054 /*
2055 * Re-compose the reply address from the address
2056 * sent back from the chip. The ReplyFrameAddress
2057 * is the lower 32 bits of the physical address of
2058 * a particular reply frame. Convert that address to
2059 * host format, and then use that to provide the
2060 * offset against the virtual address base
2061 * (sc->reply_frames).
2062 */
2063 baddr = le32toh(desc->AddressReply.ReplyFrameAddress);
2064 reply = sc->reply_frames +
2065 (baddr - ((uint32_t)sc->reply_busaddr));
2066 /*
2067 * Make sure the reply we got back is in a valid
2068 * range. If not, go ahead and panic here, since
2069 * we'll probably panic as soon as we dereference the
2070 * reply pointer anyway.
2071 */
2072 if ((reply < sc->reply_frames)
2073 || (reply > (sc->reply_frames +
2074 (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) {
2075 printf("%s: WARNING: reply %p out of range!\n",
2076 __func__, reply);
2077 printf("%s: reply_frames %p, fqdepth %d, "
2078 "frame size %d\n", __func__,
2079 sc->reply_frames, sc->fqdepth,
2080 sc->facts->ReplyFrameSize * 4);
2081 printf("%s: baddr %#x,\n", __func__, baddr);
2082 /* LSI-TODO. See Linux Code for Graceful exit */
2083 panic("Reply address out of range");
2084 }
2085 if (le16toh(desc->AddressReply.SMID) == 0) {
2086 if (((MPI2_DEFAULT_REPLY *)reply)->Function ==
2087 MPI2_FUNCTION_DIAG_BUFFER_POST) {
2088 /*
2089 * If SMID is 0 for Diag Buffer Post,
2090 * this implies that the reply is due to
2091 * a release function with a status that
2092 * the buffer has been released.
Set
2093 * the buffer flags accordingly.
2094 */
2095 rel_rep =
2096 (MPI2_DIAG_RELEASE_REPLY *)reply;
2097 if ((le16toh(rel_rep->IOCStatus) &
2098 MPI2_IOCSTATUS_MASK) ==
2099 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED)
2100 {
2101 pBuffer =
2102 &sc->fw_diag_buffer_list[
2103 rel_rep->BufferType];
2104 pBuffer->valid_data = TRUE;
2105 pBuffer->owned_by_firmware =
2106 FALSE;
2107 pBuffer->immediate = FALSE;
2108 }
2109 } else
2110 mpr_dispatch_event(sc, baddr,
2111 (MPI2_EVENT_NOTIFICATION_REPLY *)
2112 reply);
2113 } else {
2114 cm = &sc->commands[
2115 le16toh(desc->AddressReply.SMID)];
2116 cm->cm_reply = reply;
2117 cm->cm_reply_data =
2118 le32toh(desc->AddressReply.
2119 ReplyFrameAddress);
2120 }
2121 break;
2122 }
2123 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
2124 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
2125 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
2126 default:
2127 /* Unhandled */
2128 mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n",
2129 desc->Default.ReplyFlags);
2130 cm = NULL;
2131 break;
2132 }
2133
2134 if (cm != NULL) {
2135 /* Print error reply frame, if any */
2136 if (cm->cm_reply)
2137 mpr_display_reply_info(sc, cm->cm_reply);
2138 mpr_complete_command(sc, cm);
2139 }
2140
2141 desc->Words.Low = 0xffffffff;
2142 desc->Words.High = 0xffffffff;
2143 }
2144
2145 if (pq != sc->replypostindex) {
2146 mpr_dprint(sc, MPR_TRACE,
2147 "%s sc %p writing postindex %d\n",
2148 __func__, sc, sc->replypostindex);
2149 mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET,
2150 sc->replypostindex);
2151 }
2152
2153 return;
2154 }
2155
2156 static void
2157 mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
2158 MPI2_EVENT_NOTIFICATION_REPLY *reply)
2159 {
2160 struct mpr_event_handle *eh;
2161 int event, handled = 0;
2162
2163 event = le16toh(reply->Event);
2164 TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
2165 if (isset(eh->mask, event)) {
2166 eh->callback(sc, data, reply);
2167 handled++;
2168 }
2169 }
2170
2171 if (handled == 0)
2172 mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n",
2173 event);
2174
2175 /*
2176 * This is the only place that the event/reply should be freed.
2177 * Anything wanting to hold onto the event data should have
2178 * already copied it into their own storage.
2179 */
2180 mpr_free_reply(sc, data);
2181 }
2182
2183 static void
2184 mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm)
2185 {
2186 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);
2187
2188 if (cm->cm_reply)
2189 mpr_print_event(sc,
2190 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);
2191
2192 mpr_free_command(sc, cm);
2193
2194 /* next, send a port enable */
2195 mprsas_startup(sc);
2196 }
2197
2198 /*
2199 * For both register_events and update_events, the caller supplies a bitmap
2200 * of events that it _wants_. These functions then turn that into a bitmask
2201 * suitable for the controller.
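*
* A sketch of a consumer (hypothetical; "my_evt_cb" is not part of this
* driver) that only wants SAS topology change events:
*
*	uint8_t mask[16];
*	struct mpr_event_handle *handle;
*
*	bzero(mask, sizeof(mask));
*	setbit(mask, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
*	error = mpr_register_events(sc, mask, my_evt_cb, NULL, &handle);
*
* mpr_attach_log() above is an in-tree example of the same pattern.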
2202 */ 2203 int 2204 mpr_register_events(struct mpr_softc *sc, uint8_t *mask, 2205 mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle) 2206 { 2207 struct mpr_event_handle *eh; 2208 int error = 0; 2209 2210 eh = malloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO); 2211 if (!eh) { 2212 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n", 2213 __func__, __LINE__); 2214 return (ENOMEM); 2215 } 2216 eh->callback = cb; 2217 eh->data = data; 2218 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); 2219 if (mask != NULL) 2220 error = mpr_update_events(sc, eh, mask); 2221 *handle = eh; 2222 2223 return (error); 2224 } 2225 2226 int 2227 mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle, 2228 uint8_t *mask) 2229 { 2230 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2231 MPI2_EVENT_NOTIFICATION_REPLY *reply; 2232 struct mpr_command *cm; 2233 struct mpr_event_handle *eh; 2234 int error, i; 2235 2236 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2237 2238 if ((mask != NULL) && (handle != NULL)) 2239 bcopy(mask, &handle->mask[0], 16); 2240 memset(sc->event_mask, 0xff, 16); 2241 2242 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2243 for (i = 0; i < 16; i++) 2244 sc->event_mask[i] &= ~eh->mask[i]; 2245 } 2246 2247 if ((cm = mpr_alloc_command(sc)) == NULL) 2248 return (EBUSY); 2249 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2250 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2251 evtreq->MsgFlags = 0; 2252 evtreq->SASBroadcastPrimitiveMasks = 0; 2253 #ifdef MPR_DEBUG_ALL_EVENTS 2254 { 2255 u_char fullmask[16]; 2256 memset(fullmask, 0x00, 16); 2257 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2258 } 2259 #else 2260 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2261 #endif 2262 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2263 cm->cm_data = NULL; 2264 2265 error = mpr_request_polled(sc, cm); 2266 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; 2267 if ((reply == NULL) || 2268 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 2269 error = ENXIO; 2270 2271 if (reply) 2272 mpr_print_event(sc, reply); 2273 2274 mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error); 2275 2276 mpr_free_command(sc, cm); 2277 return (error); 2278 } 2279 2280 static int 2281 mpr_reregister_events(struct mpr_softc *sc) 2282 { 2283 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2284 struct mpr_command *cm; 2285 struct mpr_event_handle *eh; 2286 int error, i; 2287 2288 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2289 2290 /* first, reregister events */ 2291 2292 memset(sc->event_mask, 0xff, 16); 2293 2294 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2295 for (i = 0; i < 16; i++) 2296 sc->event_mask[i] &= ~eh->mask[i]; 2297 } 2298 2299 if ((cm = mpr_alloc_command(sc)) == NULL) 2300 return (EBUSY); 2301 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2302 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2303 evtreq->MsgFlags = 0; 2304 evtreq->SASBroadcastPrimitiveMasks = 0; 2305 #ifdef MPR_DEBUG_ALL_EVENTS 2306 { 2307 u_char fullmask[16]; 2308 memset(fullmask, 0x00, 16); 2309 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2310 } 2311 #else 2312 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2313 #endif 2314 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2315 cm->cm_data = NULL; 2316 cm->cm_complete = mpr_reregister_events_complete; 2317 2318 error = mpr_map_command(sc, cm); 2319 2320 mpr_dprint(sc, MPR_TRACE, "%s finished with 
error %d\n", __func__,
2321 error);
2322 return (error);
2323 }
2324
2325 int
2326 mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle)
2327 {
2328
2329 TAILQ_REMOVE(&sc->event_list, handle, eh_list);
2330 free(handle, M_MPR);
2331 return (mpr_update_events(sc, NULL, NULL));
2332 }
2333
2334 /**
2335 * mpr_build_nvme_prp - This function is called for NVMe end devices to build a
2336 * native SGL (NVMe PRP). The native SGL is built starting in the first PRP entry
2337 * of the NVMe message (PRP1). If the data buffer is small enough to be described
2338 * entirely using PRP1, then PRP2 is not used. If needed, PRP2 is used to
2339 * describe a larger data buffer. If the data buffer is too large to describe
2340 * using the two PRP entries inside the NVMe message, then PRP1 describes the
2341 * first data memory segment, and PRP2 contains a pointer to a PRP list located
2342 * elsewhere in memory to describe the remaining data memory segments. The PRP
2343 * list will be contiguous.
2344 *
2345 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP
2346 * consists of a list of PRP entries to describe a number of noncontiguous
2347 * physical memory segments as a single memory buffer, just as a SGL does. Note
2348 * however, that this function is only used by the IOCTL call, so the memory
2349 * given will be guaranteed to be contiguous. There is no need to translate
2350 * non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous
2351 * space that is one page size each.
2352 *
2353 * Each NVMe message contains two PRP entries. The first (PRP1) either contains
2354 * a PRP list pointer or a PRP element, depending upon the command. PRP2 contains
2355 * the second PRP element if the memory being described fits within 2 PRP
2356 * entries, or a PRP list pointer if the PRP spans more than two entries.
2357 *
2358 * A PRP list pointer contains the address of a PRP list, structured as a linear
2359 * array of PRP entries. Each PRP entry in this list describes a segment of
2360 * physical memory.
2361 *
2362 * Each 64-bit PRP entry comprises an address and an offset field. The address
2363 * always points to the beginning of a PAGE_SIZE physical memory page, and the
2364 * offset describes where within that page the memory segment begins. Only the
2365 * first element in a PRP list may contain a non-zero offset, implying that all
2366 * memory segments following the first begin at the start of a PAGE_SIZE page.
2367 *
2368 * Each PRP element normally describes a chunk of PAGE_SIZE physical memory,
2369 * with exceptions for the first and last elements in the list. If the memory
2370 * being described by the list begins at a non-zero offset within the first page,
2371 * then the first PRP element will contain a non-zero offset indicating where the
2372 * region begins within the page. The last memory segment may end before the end
2373 * of the PAGE_SIZE segment, depending upon the overall size of the memory being
2374 * described by the PRP list.
2375 *
2376 * Since PRP entries lack any indication of size, the overall data buffer length
2377 * is used to determine where the end of the data memory buffer is located, and
2378 * how many PRP entries are required to describe it.
2379 *
2380 * Returns nothing.
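*
* As a worked example (assuming 4KB pages): a 12KB buffer at physical
* address 0x10000200 yields PRP1 = 0x10000200, covering the 3.5KB left
* in the first page.  The remaining 8.5KB spans three more pages, which
* is more than PRP2 alone can describe, so PRP2 instead points to a PRP
* list containing the page-aligned entries 0x10001000, 0x10002000, and
* 0x10003000.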
2381 */
2382 void
2383 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm,
2384 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, void *data,
2385 uint32_t data_in_sz, uint32_t data_out_sz)
2386 {
2387 int prp_size = PRP_ENTRY_SIZE;
2388 uint64_t *prp_entry, *prp1_entry, *prp2_entry;
2389 uint64_t *prp_entry_phys, *prp_page, *prp_page_phys;
2390 uint32_t offset, entry_len, page_mask_result, page_mask;
2391 bus_addr_t paddr;
2392 size_t length;
2393 struct mpr_prp_page *prp_page_info = NULL;
2394
2395 /*
2396 * Not all commands require a data transfer. If no data, just return
2397 * without constructing any PRP.
2398 */
2399 if (!data_in_sz && !data_out_sz)
2400 return;
2401
2402 /*
2403 * Set pointers to PRP1 and PRP2, which are in the NVMe command. PRP1 is
2404 * located at a 24 byte offset from the start of the NVMe command. Then
2405 * set the current PRP entry pointer to PRP1.
2406 */
2407 prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command +
2408 NVME_CMD_PRP1_OFFSET);
2409 prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command +
2410 NVME_CMD_PRP2_OFFSET);
2411 prp_entry = prp1_entry;
2412
2413 /*
2414 * For the PRP entries, use the specially allocated buffer of
2415 * contiguous memory. PRP Page allocation failures should not happen
2416 * because there should be enough PRP page buffers to account for the
2417 * possible NVMe QDepth.
2418 */
2419 prp_page_info = mpr_alloc_prp_page(sc);
2420 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be "
2421 "used for building a native NVMe SGL.\n", __func__));
2422 prp_page = (uint64_t *)prp_page_info->prp_page;
2423 prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr;
2424
2425 /*
2426 * Insert the allocated PRP page into the command's PRP page list. This
2427 * will be freed when the command is freed.
2428 */
2429 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
2430
2431 /*
2432 * Check if we are within 1 entry of a page boundary. We don't want
2433 * our first entry to be a PRP List entry.
2434 */
2435 page_mask = PAGE_SIZE - 1;
2436 page_mask_result = (uintptr_t)((uint8_t *)prp_page + prp_size) &
2437 page_mask;
2438 if (!page_mask_result)
2439 {
2440 /* Bump up to next page boundary. */
2441 prp_page = (uint64_t *)((uint8_t *)prp_page + prp_size);
2442 prp_page_phys = (uint64_t *)((uint8_t *)prp_page_phys +
2443 prp_size);
2444 }
2445
2446 /*
2447 * Set PRP physical pointer, which initially points to the current PRP
2448 * DMA memory page.
2449 */
2450 prp_entry_phys = prp_page_phys;
2451
2452 /* Get physical address and length of the data buffer. */
2453 paddr = (bus_addr_t)data;
2454 if (data_in_sz)
2455 length = data_in_sz;
2456 else
2457 length = data_out_sz;
2458
2459 /* Loop while the length is not zero. */
2460 while (length)
2461 {
2462 /*
2463 * Check if we need to put a list pointer here if we are at page
2464 * boundary - prp_size (8 bytes).
2465 */
2466 page_mask_result = (uintptr_t)((uint8_t *)prp_entry_phys +
2467 prp_size) & page_mask;
2468 if (!page_mask_result)
2469 {
2470 /*
2471 * This is the last entry in a PRP List, so we need to
2472 * put a PRP list pointer here. What this does is:
2473 * - bump the current memory pointer to the next
2474 * address, which will be the next full page.
2475 * - set the PRP Entry to point to that page. This is
2476 * now the PRP List pointer.
2477 * - bump the PRP Entry pointer to the start of the next
2478 * page.
Since all of this PRP memory is contiguous, 2479 * no need to get a new page - it's just the next 2480 * address. 2481 */ 2482 prp_entry_phys++; 2483 *prp_entry = 2484 htole64((uint64_t)(uintptr_t)prp_entry_phys); 2485 prp_entry++; 2486 } 2487 2488 /* Need to handle if entry will be part of a page. */ 2489 offset = (uint32_t)paddr & page_mask; 2490 entry_len = PAGE_SIZE - offset; 2491 2492 if (prp_entry == prp1_entry) 2493 { 2494 /* 2495 * Must fill in the first PRP pointer (PRP1) before 2496 * moving on. 2497 */ 2498 *prp1_entry = htole64((uint64_t)paddr); 2499 2500 /* 2501 * Now point to the second PRP entry within the 2502 * command (PRP2). 2503 */ 2504 prp_entry = prp2_entry; 2505 } 2506 else if (prp_entry == prp2_entry) 2507 { 2508 /* 2509 * Should the PRP2 entry be a PRP List pointer or just a 2510 * regular PRP pointer? If there is more than one more 2511 * page of data, must use a PRP List pointer. 2512 */ 2513 if (length > PAGE_SIZE) 2514 { 2515 /* 2516 * PRP2 will contain a PRP List pointer because 2517 * more PRP's are needed with this command. The 2518 * list will start at the beginning of the 2519 * contiguous buffer. 2520 */ 2521 *prp2_entry = 2522 htole64( 2523 (uint64_t)(uintptr_t)prp_entry_phys); 2524 2525 /* 2526 * The next PRP Entry will be the start of the 2527 * first PRP List. 2528 */ 2529 prp_entry = prp_page; 2530 } 2531 else 2532 { 2533 /* 2534 * After this, the PRP Entries are complete. 2535 * This command uses 2 PRP's and no PRP list. 2536 */ 2537 *prp2_entry = htole64((uint64_t)paddr); 2538 } 2539 } 2540 else 2541 { 2542 /* 2543 * Put entry in list and bump the addresses. 2544 * 2545 * After PRP1 and PRP2 are filled in, this will fill in 2546 * all remaining PRP entries in a PRP List, one per each 2547 * time through the loop. 2548 */ 2549 *prp_entry = htole64((uint64_t)paddr); 2550 prp_entry++; 2551 prp_entry_phys++; 2552 } 2553 2554 /* 2555 * Bump the phys address of the command's data buffer by the 2556 * entry_len. 2557 */ 2558 paddr += entry_len; 2559 2560 /* Decrement length accounting for last partial page. */ 2561 if (entry_len > length) 2562 length = 0; 2563 else 2564 length -= entry_len; 2565 } 2566 } 2567 2568 /* 2569 * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to 2570 * determine if the driver needs to build a native SGL. If so, that native SGL 2571 * is built in the contiguous buffers allocated especially for PCIe SGL 2572 * creation. If the driver will not build a native SGL, return TRUE and a 2573 * normal IEEE SGL will be built. Currently this routine supports NVMe devices 2574 * only. 2575 * 2576 * Returns FALSE (0) if native SGL was built, TRUE (1) if no SGL was built. 
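*
* To illustrate the 4/5 page heuristic below (assuming 4KB pages): an
* 18KB transfer starting 1KB into a page falls in the "> 4 and <= 5
* pages" range.  first_page_data_size is 3KB and end_residual is
* 18KB % 4KB = 2KB, so that test passes; but if the first SG entry is
* shorter than 18KB - 16KB = 2KB, a native SGL is still required.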
2577 */ 2578 static int 2579 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm, 2580 bus_dma_segment_t *segs, int segs_left) 2581 { 2582 uint32_t i, sge_dwords, length, offset, entry_len; 2583 uint32_t num_entries, buff_len = 0, sges_in_segment; 2584 uint32_t page_mask, page_mask_result, *curr_buff; 2585 uint32_t *ptr_sgl, *ptr_first_sgl, first_page_offset; 2586 uint32_t first_page_data_size, end_residual; 2587 uint64_t *msg_phys; 2588 bus_addr_t paddr; 2589 int build_native_sgl = 0, first_prp_entry; 2590 int prp_size = PRP_ENTRY_SIZE; 2591 Mpi25IeeeSgeChain64_t *main_chain_element = NULL; 2592 struct mpr_prp_page *prp_page_info = NULL; 2593 2594 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2595 2596 /* 2597 * Add up the sizes of each segment length to get the total transfer 2598 * size, which will be checked against the Maximum Data Transfer Size. 2599 * If the data transfer length exceeds the MDTS for this device, just 2600 * return 1 so a normal IEEE SGL will be built. F/W will break the I/O 2601 * up into multiple I/O's. [nvme_mdts = 0 means unlimited] 2602 */ 2603 for (i = 0; i < segs_left; i++) 2604 buff_len += htole32(segs[i].ds_len); 2605 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS)) 2606 return 1; 2607 2608 /* Create page_mask (to get offset within page) */ 2609 page_mask = PAGE_SIZE - 1; 2610 2611 /* 2612 * Check if the number of elements exceeds the max number that can be 2613 * put in the main message frame (H/W can only translate an SGL that 2614 * is contained entirely in the main message frame). 2615 */ 2616 sges_in_segment = (sc->facts->IOCRequestFrameSize - 2617 offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof(MPI25_SGE_IO_UNION); 2618 if (segs_left > sges_in_segment) 2619 build_native_sgl = 1; 2620 else 2621 { 2622 /* 2623 * NVMe uses one PRP for each physical page (or part of physical 2624 * page). 2625 * if 4 pages or less then IEEE is OK 2626 * if > 5 pages then we need to build a native SGL 2627 * if > 4 and <= 5 pages, then check the physical address of 2628 * the first SG entry, then if this first size in the page 2629 * is >= the residual beyond 4 pages then use IEEE, 2630 * otherwise use native SGL 2631 */ 2632 if (buff_len > (PAGE_SIZE * 5)) 2633 build_native_sgl = 1; 2634 else if ((buff_len > (PAGE_SIZE * 4)) && 2635 (buff_len <= (PAGE_SIZE * 5)) ) 2636 { 2637 msg_phys = (uint64_t *)segs[0].ds_addr; 2638 first_page_offset = 2639 ((uint32_t)(uint64_t)(uintptr_t)msg_phys & 2640 page_mask); 2641 first_page_data_size = PAGE_SIZE - first_page_offset; 2642 end_residual = buff_len % PAGE_SIZE; 2643 2644 /* 2645 * If offset into first page pushes the end of the data 2646 * beyond end of the 5th page, we need the extra PRP 2647 * list. 2648 */ 2649 if (first_page_data_size < end_residual) 2650 build_native_sgl = 1; 2651 2652 /* 2653 * Check if first SG entry size is < residual beyond 4 2654 * pages. 2655 */ 2656 if (htole32(segs[0].ds_len) < 2657 (buff_len - (PAGE_SIZE * 4))) 2658 build_native_sgl = 1; 2659 } 2660 } 2661 2662 /* check if native SGL is needed */ 2663 if (!build_native_sgl) 2664 return 1; 2665 2666 /* 2667 * Native SGL is needed. 2668 * Put a chain element in main message frame that points to the first 2669 * chain buffer. 2670 * 2671 * NOTE: The ChainOffset field must be 0 when using a chain pointer to 2672 * a native SGL. 
2673 */
2674
2675 /* Set main message chain element pointer */
2676 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge;
2677
2678 /*
2679 * For NVMe the chain element needs to be the 2nd SGL entry in the main
2680 * message.
2681 */
2682 main_chain_element = (Mpi25IeeeSgeChain64_t *)
2683 ((uint8_t *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2684
2685 /*
2686 * For the PRP entries, use the specially allocated buffer of
2687 * contiguous memory. PRP Page allocation failures should not happen
2688 * because there should be enough PRP page buffers to account for the
2689 * possible NVMe QDepth.
2690 */
2691 prp_page_info = mpr_alloc_prp_page(sc);
2692 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be "
2693 "used for building a native NVMe SGL.\n", __func__));
2694 curr_buff = (uint32_t *)prp_page_info->prp_page;
2695 msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr;
2696
2697 /*
2698 * Insert the allocated PRP page into the command's PRP page list. This
2699 * will be freed when the command is freed.
2700 */
2701 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link);
2702
2703 /*
2704 * Check if we are within 1 entry of a page boundary. We don't want
2705 * our first entry to be a PRP List entry.
2706 */
2707 page_mask_result = (uintptr_t)((uint8_t *)curr_buff + prp_size) &
2708 page_mask;
2709 if (!page_mask_result) {
2710 /* Bump up to next page boundary. */
2711 curr_buff = (uint32_t *)((uint8_t *)curr_buff + prp_size);
2712 msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size);
2713 }
2714
2715 /* Fill in the chain element and make it an NVMe segment type. */
2716 main_chain_element->Address.High =
2717 htole32((uint32_t)((uint64_t)(uintptr_t)msg_phys >> 32));
2718 main_chain_element->Address.Low =
2719 htole32((uint32_t)(uintptr_t)msg_phys);
2720 main_chain_element->NextChainOffset = 0;
2721 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2722 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2723 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2724
2725 /* Set SGL pointer to start of contiguous PCIe buffer. */
2726 ptr_sgl = curr_buff;
2727 sge_dwords = 2;
2728 num_entries = 0;
2729
2730 /*
2731 * NVMe has a very convoluted PRP format. One PRP is required for each
2732 * page or partial page. We need to split up OS SG entries if they are
2733 * longer than one page or cross a page boundary. We also have to insert
2734 * a PRP list pointer entry as the last entry in each physical page of
2735 * the PRP list.
2736 *
2737 * NOTE: The first PRP "entry" is actually placed in the first SGL entry
2738 * in the main message in IEEE 64 format. The 2nd entry in the main
2739 * message is the chain element, and the rest of the PRP entries are
2740 * built in the contiguous PCIe buffer.
2741 */
2742 first_prp_entry = 1;
2743 ptr_first_sgl = (uint32_t *)cm->cm_sge;
2744
2745 for (i = 0; i < segs_left; i++) {
2746 /* Get physical address and length of this SG entry. */
2747 paddr = segs[i].ds_addr;
2748 length = segs[i].ds_len;
2749
2750 /*
2751 * Check that this SGE begins on a page boundary if it is not
2752 * the first SGE. An unaligned start is not expected, so fall
2753 * back to letting the FW build the SGL.
2754 */
2755 if (i) {
2756 if ((uint32_t)paddr & page_mask) {
2757 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE while "
2758 "building NVMe PRPs, low address is 0x%x\n",
2759 (uint32_t)paddr);
2760 return 1;
2761 }
2762 }
2763
2764 /* Apart from the last SGE, if any other SGE boundary is not
2765 * page aligned then a hole exists in the buffer. Such a hole
2766 * leads to data corruption, so fall back to IEEE SGEs.
2767 */
2768 if (i != (segs_left - 1)) {
2769 if (((uint32_t)paddr + length) & page_mask) {
2770 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE "
2771 "boundary while building NVMe PRPs, low "
2772 "address: 0x%x and length: %u\n",
2773 (uint32_t)paddr, length);
2774 return 1;
2775 }
2776 }
2777
2778 /* Loop while the length is not zero. */
2779 while (length) {
2780 /*
2781 * Check if we need to put a list pointer here if we are
2782 * at page boundary - prp_size.
2783 */
2784 page_mask_result = (uintptr_t)((uint8_t *)ptr_sgl +
2785 prp_size) & page_mask;
2786 if (!page_mask_result) {
2787 /*
2788 * Need to put a PRP list pointer here.
2789 */
2790 msg_phys = (uint64_t *)((uint8_t *)msg_phys +
2791 prp_size);
2792 *ptr_sgl = htole32((uintptr_t)msg_phys);
2793 *(ptr_sgl+1) = htole32((uint64_t)(uintptr_t)
2794 msg_phys >> 32);
2795 ptr_sgl += sge_dwords;
2796 num_entries++;
2797 }
2798
2799 /* Need to handle if entry will be part of a page. */
2800 offset = (uint32_t)paddr & page_mask;
2801 entry_len = PAGE_SIZE - offset;
2802 if (first_prp_entry) {
2803 /*
2804 * Put IEEE entry in first SGE in main message.
2805 * (Simple element, System addr, not end of
2806 * list.)
2807 */
2808 *ptr_first_sgl = htole32((uint32_t)paddr);
2809 *(ptr_first_sgl + 1) =
2810 htole32((uint32_t)((uint64_t)paddr >> 32));
2811 *(ptr_first_sgl + 2) = htole32(entry_len);
2812 *(ptr_first_sgl + 3) = 0;
2813
2814 /* No longer the first PRP entry. */
2815 first_prp_entry = 0;
2816 } else {
2817 /* Put entry in list. */
2818 *ptr_sgl = htole32((uint32_t)paddr);
2819 *(ptr_sgl + 1) =
2820 htole32((uint32_t)((uint64_t)paddr >> 32));
2821
2822 /* Bump ptr_sgl, msg_phys, and num_entries. */
2823 ptr_sgl += sge_dwords;
2824 msg_phys = (uint64_t *)((uint8_t *)msg_phys +
2825 prp_size);
2826 num_entries++;
2827 }
2828
2829 /* Bump the phys address by the entry_len. */
2830 paddr += entry_len;
2831
2832 /* Decrement length accounting for last partial page. */
2833 if (entry_len > length)
2834 length = 0;
2835 else
2836 length -= entry_len;
2837 }
2838 }
2839
2840 /* Set chain element Length. */
2841 main_chain_element->Length = htole32(num_entries * prp_size);
2842
2843 /* Return 0, indicating we built a native SGL. */
2844 return 0;
2845 }
2846
2847 /*
2848 * Add a chain element as the next SGE for the specified command.
2849 * Reset cm_sge and cm_sglsize to indicate all the available space. Chains are
2850 * only required for IEEE commands. Therefore there is no code for commands
2851 * that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands
2852 * shouldn't be requesting chains).
2853 */
2854 static int
2855 mpr_add_chain(struct mpr_command *cm, int segsleft)
2856 {
2857 struct mpr_softc *sc = cm->cm_sc;
2858 MPI2_REQUEST_HEADER *req;
2859 MPI25_IEEE_SGE_CHAIN64 *ieee_sgc;
2860 struct mpr_chain *chain;
2861 int sgc_size, current_segs, rem_segs, segs_per_frame;
2862 uint8_t next_chain_offset = 0;
2863
2864 /*
2865 * Fail if a command is requesting a chain for SIMPLE SGE's. For SAS3
2866 * only IEEE commands should be requesting chains. Return some error
2867 * code other than 0.
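*
* A numeric sketch of the NextChainOffset logic below (sizes are
* illustrative only): with a 256-byte chain frame and 16-byte IEEE64
* SGEs, segs_per_frame is 16.  If 128 bytes of SGL space remain in the
* current frame (7 data SGEs plus this chain element) and 40 segments
* are left, rem_segs is 33; since 33 > 16, next_chain_offset becomes
* 15 so the last slot of the next frame is reserved for yet another
* chain element.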
2868 */
2869 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) {
2870 mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to "
2871 "an MPI SGL.\n");
2872 return(ENOBUFS);
2873 }
2874
2875 sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64);
2876 if (cm->cm_sglsize < sgc_size)
2877 panic("MPR: Need SGE Error Code\n");
2878
2879 chain = mpr_alloc_chain(cm->cm_sc);
2880 if (chain == NULL)
2881 return (ENOBUFS);
2882
2883 /*
2884 * Note: a double-linked list is used to make it easier to walk for
2885 * debugging.
2886 */
2887 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link);
2888
2889 /*
2890 * Need to know if the number of frames left is more than 1 or not. If
2891 * more than 1 frame is required, NextChainOffset will need to be set,
2892 * which will just be the last segment of the frame.
2893 */
2894 rem_segs = 0;
2895 if (cm->cm_sglsize < (sgc_size * segsleft)) {
2896 /*
2897 * rem_segs is the number of segments remaining after the
2898 * segments that will go into the current frame. Since it is
2899 * known that at least one more frame is required, account for
2900 * the chain element. To know if more than one more frame is
2901 * required, just check if there will be a remainder after using
2902 * the current frame (with this chain) and the next frame. If
2903 * so the NextChainOffset must be the last element of the next
2904 * frame.
2905 */
2906 current_segs = (cm->cm_sglsize / sgc_size) - 1;
2907 rem_segs = segsleft - current_segs;
2908 segs_per_frame = sc->chain_frame_size / sgc_size;
2909 if (rem_segs > segs_per_frame) {
2910 next_chain_offset = segs_per_frame - 1;
2911 }
2912 }
2913 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain;
2914 ieee_sgc->Length = next_chain_offset ?
2915 htole32((uint32_t)sc->chain_frame_size) :
2916 htole32((uint32_t)rem_segs * (uint32_t)sgc_size);
2917 ieee_sgc->NextChainOffset = next_chain_offset;
2918 ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2919 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR);
2920 ieee_sgc->Address.Low = htole32(chain->chain_busaddr);
2921 ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32);
2922 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple;
2923 req = (MPI2_REQUEST_HEADER *)cm->cm_req;
2924 req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4;
2925
2926 cm->cm_sglsize = sc->chain_frame_size;
2927 return (0);
2928 }
2929
2930 /*
2931 * Add one scatter-gather element to the scatter-gather list for a command.
2932 * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the
2933 * next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a
2934 * chain, so don't consider any chain additions.
2935 */
2936 int
2937 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len,
2938 int segsleft)
2939 {
2940 uint32_t saved_buf_len, saved_address_low, saved_address_high;
2941 u32 sge_flags;
2942
2943 /*
2944 * case 1: >=1 more segment, no room for anything (error)
2945 * case 2: 1 more segment and enough room for it
2946 */
2947
2948 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) {
2949 mpr_dprint(cm->cm_sc, MPR_ERROR,
2950 "%s: warning: Not enough room for MPI SGL in frame.\n",
2951 __func__);
2952 return(ENOBUFS);
2953 }
2954
2955 KASSERT(segsleft == 1,
2956 ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n",
2957 segsleft));
2958
2959 /*
2960 * There is one more segment left to add for the MPI SGL and there is
2961 * enough room in the frame to add it.
This is the normal case because 2962 * MPI SGL's don't have chains, otherwise something is wrong. 2963 * 2964 * If this is a bi-directional request, need to account for that 2965 * here. Save the pre-filled sge values. These will be used 2966 * either for the 2nd SGL or for a single direction SGL. If 2967 * cm_out_len is non-zero, this is a bi-directional request, so 2968 * fill in the OUT SGL first, then the IN SGL, otherwise just 2969 * fill in the IN SGL. Note that at this time, when filling in 2970 * 2 SGL's for a bi-directional request, they both use the same 2971 * DMA buffer (same cm command). 2972 */ 2973 saved_buf_len = sge->FlagsLength & 0x00FFFFFF; 2974 saved_address_low = sge->Address.Low; 2975 saved_address_high = sge->Address.High; 2976 if (cm->cm_out_len) { 2977 sge->FlagsLength = cm->cm_out_len | 2978 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2979 MPI2_SGE_FLAGS_END_OF_BUFFER | 2980 MPI2_SGE_FLAGS_HOST_TO_IOC | 2981 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 2982 MPI2_SGE_FLAGS_SHIFT); 2983 cm->cm_sglsize -= len; 2984 /* Endian Safe code */ 2985 sge_flags = sge->FlagsLength; 2986 sge->FlagsLength = htole32(sge_flags); 2987 sge->Address.High = htole32(sge->Address.High); 2988 sge->Address.Low = htole32(sge->Address.Low); 2989 bcopy(sge, cm->cm_sge, len); 2990 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 2991 } 2992 sge->FlagsLength = saved_buf_len | 2993 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2994 MPI2_SGE_FLAGS_END_OF_BUFFER | 2995 MPI2_SGE_FLAGS_LAST_ELEMENT | 2996 MPI2_SGE_FLAGS_END_OF_LIST | 2997 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 2998 MPI2_SGE_FLAGS_SHIFT); 2999 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { 3000 sge->FlagsLength |= 3001 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << 3002 MPI2_SGE_FLAGS_SHIFT); 3003 } else { 3004 sge->FlagsLength |= 3005 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << 3006 MPI2_SGE_FLAGS_SHIFT); 3007 } 3008 sge->Address.Low = saved_address_low; 3009 sge->Address.High = saved_address_high; 3010 3011 cm->cm_sglsize -= len; 3012 /* Endian Safe code */ 3013 sge_flags = sge->FlagsLength; 3014 sge->FlagsLength = htole32(sge_flags); 3015 sge->Address.High = htole32(sge->Address.High); 3016 sge->Address.Low = htole32(sge->Address.Low); 3017 bcopy(sge, cm->cm_sge, len); 3018 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 3019 return (0); 3020 } 3021 3022 /* 3023 * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter- 3024 * gather list for a command. Maintain cm_sglsize and cm_sge as the 3025 * remaining size and pointer to the next SGE to fill in, respectively. 3026 */ 3027 int 3028 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft) 3029 { 3030 MPI2_IEEE_SGE_SIMPLE64 *sge = sgep; 3031 int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION); 3032 uint32_t saved_buf_len, saved_address_low, saved_address_high; 3033 uint32_t sge_length; 3034 3035 /* 3036 * case 1: No room for chain or segment (error). 3037 * case 2: Two or more segments left but only room for chain. 3038 * case 3: Last segment and room for it, so set flags. 3039 */ 3040 3041 /* 3042 * There should be room for at least one element, or there is a big 3043 * problem. 
3044 */ 3045 if (cm->cm_sglsize < ieee_sge_size) 3046 panic("MPR: Need SGE Error Code\n"); 3047 3048 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) { 3049 if ((error = mpr_add_chain(cm, segsleft)) != 0) 3050 return (error); 3051 } 3052 3053 if (segsleft == 1) { 3054 /* 3055 * If this is a bi-directional request, need to account for that 3056 * here. Save the pre-filled sge values. These will be used 3057 * either for the 2nd SGL or for a single direction SGL. If 3058 * cm_out_len is non-zero, this is a bi-directional request, so 3059 * fill in the OUT SGL first, then the IN SGL, otherwise just 3060 * fill in the IN SGL. Note that at this time, when filling in 3061 * 2 SGL's for a bi-directional request, they both use the same 3062 * DMA buffer (same cm command). 3063 */ 3064 saved_buf_len = sge->Length; 3065 saved_address_low = sge->Address.Low; 3066 saved_address_high = sge->Address.High; 3067 if (cm->cm_out_len) { 3068 sge->Length = cm->cm_out_len; 3069 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3070 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3071 cm->cm_sglsize -= ieee_sge_size; 3072 /* Endian Safe code */ 3073 sge_length = sge->Length; 3074 sge->Length = htole32(sge_length); 3075 sge->Address.High = htole32(sge->Address.High); 3076 sge->Address.Low = htole32(sge->Address.Low); 3077 bcopy(sgep, cm->cm_sge, ieee_sge_size); 3078 cm->cm_sge = 3079 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + 3080 ieee_sge_size); 3081 } 3082 sge->Length = saved_buf_len; 3083 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3084 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 3085 MPI25_IEEE_SGE_FLAGS_END_OF_LIST); 3086 sge->Address.Low = saved_address_low; 3087 sge->Address.High = saved_address_high; 3088 } 3089 3090 cm->cm_sglsize -= ieee_sge_size; 3091 /* Endian Safe code */ 3092 sge_length = sge->Length; 3093 sge->Length = htole32(sge_length); 3094 sge->Address.High = htole32(sge->Address.High); 3095 sge->Address.Low = htole32(sge->Address.Low); 3096 bcopy(sgep, cm->cm_sge, ieee_sge_size); 3097 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + 3098 ieee_sge_size); 3099 return (0); 3100 } 3101 3102 /* 3103 * Add one dma segment to the scatter-gather list for a command. 3104 */ 3105 int 3106 mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags, 3107 int segsleft) 3108 { 3109 MPI2_SGE_SIMPLE64 sge; 3110 MPI2_IEEE_SGE_SIMPLE64 ieee_sge; 3111 3112 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) { 3113 ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3114 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3115 ieee_sge.Length = len; 3116 mpr_from_u64(pa, &ieee_sge.Address); 3117 3118 return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft)); 3119 } else { 3120 /* 3121 * This driver always uses 64-bit address elements for 3122 * simplicity. 3123 */ 3124 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3125 MPI2_SGE_FLAGS_64_BIT_ADDRESSING; 3126 /* Set Endian safe macro in mpr_push_sge */ 3127 sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT); 3128 mpr_from_u64(pa, &sge.Address); 3129 3130 return (mpr_push_sge(cm, &sge, sizeof sge, segsleft)); 3131 } 3132 } 3133 3134 static void 3135 mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 3136 { 3137 struct mpr_softc *sc; 3138 struct mpr_command *cm; 3139 u_int i, dir, sflags; 3140 3141 cm = (struct mpr_command *)arg; 3142 sc = cm->cm_sc; 3143 3144 /* 3145 * In this case, just print out a warning and let the chip tell the 3146 * user they did the wrong thing. 
3147 */
3148 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) {
3149 mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d "
3150 "segments, more than the %d allowed\n", __func__, nsegs,
3151 cm->cm_max_segs);
3152 }
3153
3154 /*
3155 * Set up DMA direction flags. Bi-directional requests are also handled
3156 * here. In that case, both direction flags will be set.
3157 */
3158 sflags = 0;
3159 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) {
3160 /*
3161 * We have to add a special case for SMP passthrough, there
3162 * is no easy way to generically handle it. The first
3163 * S/G element is used for the command (therefore the
3164 * direction bit needs to be set). The second one is used
3165 * for the reply. We'll leave it to the caller to make
3166 * sure we only have two buffers.
3167 */
3168 /*
3169 * Even though the busdma man page says it doesn't make
3170 * sense to have both direction flags, it does in this case.
3171 * We have one s/g element being accessed in each direction.
3172 */
3173 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD;
3174
3175 /*
3176 * Set the direction flag on the first buffer in the SMP
3177 * passthrough request. We'll clear it for the second one.
3178 */
3179 sflags |= MPI2_SGE_FLAGS_DIRECTION |
3180 MPI2_SGE_FLAGS_END_OF_BUFFER;
3181 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) {
3182 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
3183 dir = BUS_DMASYNC_PREWRITE;
3184 } else
3185 dir = BUS_DMASYNC_PREREAD;
3186
3187 /* Check if a native SG list is needed for an NVMe PCIe device. */
3188 if (cm->cm_targ && cm->cm_targ->is_nvme &&
3189 mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) {
3190 /* A native SG list was built, skip to end. */
3191 goto out;
3192 }
3193
3194 for (i = 0; i < nsegs; i++) {
3195 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) {
3196 sflags &= ~MPI2_SGE_FLAGS_DIRECTION;
3197 }
3198 error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len,
3199 sflags, nsegs - i);
3200 if (error != 0) {
3201 /* Resource shortage, roll back! */
3202 if (ratecheck(&sc->lastfail, &mpr_chainfail_interval))
3203 mpr_dprint(sc, MPR_INFO, "Out of chain frames, "
3204 "consider increasing hw.mpr.max_chains.\n");
3205 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED;
3206 mpr_complete_command(sc, cm);
3207 return;
3208 }
3209 }
3210
3211 out:
3212 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir);
3213 mpr_enqueue_request(sc, cm);
3214
3215 return;
3216 }
3217
3218 static void
3219 mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize,
3220 int error)
3221 {
3222 mpr_data_cb(arg, segs, nsegs, error);
3223 }
3224
3225 /*
3226 * This is the routine to enqueue commands asynchronously.
3227 * Note that the only error path here is from bus_dmamap_load(), which can
3228 * return EINPROGRESS if it is waiting for resources. Other than this, it's
3229 * assumed that if you have a command in-hand, then you have enough credits
3230 * to use it.
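*
* A sketch of typical asynchronous use with a data-in buffer ("my_done"
* is an illustrative completion callback, not part of this driver):
*
*	cm = mpr_alloc_command(sc);
*	... build the request in cm->cm_req, point cm->cm_sge and
*	cm->cm_sglsize at its SGL space ...
*	cm->cm_data = buf;
*	cm->cm_length = len;
*	cm->cm_flags |= MPR_CM_FLAGS_DATAIN;
*	cm->cm_complete = my_done;
*	error = mpr_map_command(sc, cm);
*
* EINPROGRESS from bus_dmamap_load() is not a failure here; the busdma
* callback will enqueue the command once resources become available.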
3231 */
3232 int
3233 mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm)
3234 {
3235 int error = 0;
3236
3237 if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) {
3238 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap,
3239 &cm->cm_uio, mpr_data_cb2, cm, 0);
3240 } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) {
3241 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap,
3242 cm->cm_data, mpr_data_cb, cm, 0);
3243 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) {
3244 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap,
3245 cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0);
3246 } else {
3247 /* Add a zero-length element as needed */
3248 if (cm->cm_sge != NULL)
3249 mpr_add_dmaseg(cm, 0, 0, 0, 1);
3250 mpr_enqueue_request(sc, cm);
3251 }
3252
3253 return (error);
3254 }
3255
3256 /*
3257 * This is the routine to enqueue commands synchronously. An error of
3258 * EINPROGRESS from mpr_map_command() is ignored since the command will
3259 * be executed and enqueued automatically. Other errors come from msleep().
3260 */
3261 int
3262 mpr_wait_command(struct mpr_softc *sc, struct mpr_command *cm, int timeout,
3263 int sleep_flag)
3264 {
3265 int error, rc;
3266 struct timeval cur_time, start_time;
3267
3268 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET)
3269 return EBUSY;
3270
3271 cm->cm_complete = NULL;
3272 cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP | MPR_CM_FLAGS_POLLED);
3273 error = mpr_map_command(sc, cm);
3274 if ((error != 0) && (error != EINPROGRESS))
3275 return (error);
3276
3277 // Check for context and wait for 50 mSec at a time until time has
3278 // expired or the command has finished. If msleep can't be used, need
3279 // to poll.
3280 #if __FreeBSD_version >= 1000029
3281 if (curthread->td_no_sleeping)
3282 #else //__FreeBSD_version < 1000029
3283 if (curthread->td_pflags & TDP_NOSLEEPING)
3284 #endif //__FreeBSD_version >= 1000029
3285 sleep_flag = NO_SLEEP;
3286 getmicrotime(&start_time);
3287 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) {
3288 error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz);
3289 } else {
3290 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
3291 mpr_intr_locked(sc);
3292 if (sleep_flag == CAN_SLEEP)
3293 pause("mprwait", hz/20);
3294 else
3295 DELAY(50000);
3296
3297 getmicrotime(&cur_time);
3298 if ((cur_time.tv_sec - start_time.tv_sec) > timeout) {
3299 error = EWOULDBLOCK;
3300 break;
3301 }
3302 }
3303 }
3304
3305 if (error == EWOULDBLOCK) {
3306 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__);
3307 rc = mpr_reinit(sc);
3308 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
3309 "failed");
3310 error = ETIMEDOUT;
3311 }
3312 return (error);
3313 }
3314
3315 /*
3316 * This is the routine to enqueue a command synchronously and poll for
3317 * completion. Its use should be rare.
3318 */
3319 int
3320 mpr_request_polled(struct mpr_softc *sc, struct mpr_command *cm)
3321 {
3322 int error, timeout = 0, rc;
3323 struct timeval cur_time, start_time;
3324
3325 error = 0;
3326
3327 cm->cm_flags |= MPR_CM_FLAGS_POLLED;
3328 cm->cm_complete = NULL;
3329 mpr_map_command(sc, cm);
3330
3331 getmicrotime(&start_time);
3332 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) {
3333 mpr_intr_locked(sc);
3334
3335 if (mtx_owned(&sc->mpr_mtx))
3336 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
3337 "mprpoll", hz/20);
3338 else
3339 pause("mprpoll", hz/20);
3340
3341 /*
3342 * Check for real-time timeout and fail if more than 60 seconds.
3343 */
3344 getmicrotime(&cur_time);
3345 timeout = cur_time.tv_sec - start_time.tv_sec;
3346 if (timeout > 60) {
3347 mpr_dprint(sc, MPR_FAULT, "polling failed\n");
3348 error = ETIMEDOUT;
3349 break;
3350 }
3351 }
3352
3353 if (error) {
3354 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__);
3355 rc = mpr_reinit(sc);
3356 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" :
3357 "failed");
3358 }
3359 return (error);
3360 }
3361
3362 /*
3363 * The MPT driver had a verbose interface for config pages. This driver
3364 * reduces it to much simpler terms, similar to the Linux driver.
3365 */
3366 int
3367 mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
3368 {
3369 MPI2_CONFIG_REQUEST *req;
3370 struct mpr_command *cm;
3371 int error;
3372
3373 if (sc->mpr_flags & MPR_FLAGS_BUSY) {
3374 return (EBUSY);
3375 }
3376
3377 cm = mpr_alloc_command(sc);
3378 if (cm == NULL) {
3379 return (EBUSY);
3380 }
3381
3382 req = (MPI2_CONFIG_REQUEST *)cm->cm_req;
3383 req->Function = MPI2_FUNCTION_CONFIG;
3384 req->Action = params->action;
3385 req->SGLFlags = 0;
3386 req->ChainOffset = 0;
3387 req->PageAddress = params->page_address;
3388 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) {
3389 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr;
3390
3391 hdr = &params->hdr.Ext;
3392 req->ExtPageType = hdr->ExtPageType;
3393 req->ExtPageLength = hdr->ExtPageLength;
3394 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED;
3395 req->Header.PageLength = 0; /* Must be set to zero */
3396 req->Header.PageNumber = hdr->PageNumber;
3397 req->Header.PageVersion = hdr->PageVersion;
3398 } else {
3399 MPI2_CONFIG_PAGE_HEADER *hdr;
3400
3401 hdr = &params->hdr.Struct;
3402 req->Header.PageType = hdr->PageType;
3403 req->Header.PageNumber = hdr->PageNumber;
3404 req->Header.PageLength = hdr->PageLength;
3405 req->Header.PageVersion = hdr->PageVersion;
3406 }
3407
3408 cm->cm_data = params->buffer;
3409 cm->cm_length = params->length;
3410 if (cm->cm_data != NULL) {
3411 cm->cm_sge = &req->PageBufferSGE;
3412 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION);
3413 cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN;
3414 } else
3415 cm->cm_sge = NULL;
3416 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3417
3418 cm->cm_complete_data = params;
3419 if (params->callback != NULL) {
3420 cm->cm_complete = mpr_config_complete;
3421 return (mpr_map_command(sc, cm));
3422 } else {
3423 error = mpr_wait_command(sc, cm, 0, CAN_SLEEP);
3424 if (error) {
3425 mpr_dprint(sc, MPR_FAULT,
3426 "Error %d reading config page\n", error);
3427 mpr_free_command(sc, cm);
3428 return (error);
3429 }
3430 mpr_config_complete(sc, cm);
3431 }
3432
3433 return (0);
3434 }
3435
3436 int
3437 mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params)
3438 {
3439 return (EINVAL);
3440 }
3441
3442 static void
3443 mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm)
3444 {
3445 MPI2_CONFIG_REPLY *reply;
3446 struct mpr_config_params *params;
3447
3448 MPR_FUNCTRACE(sc);
3449 params = cm->cm_complete_data;
3450
3451 if (cm->cm_data != NULL) {
3452 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap,
3453 BUS_DMASYNC_POSTREAD);
3454 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap);
3455 }
3456
3457 /*
3458 * XXX KDM need to do more error recovery? This results in the
3459 * device in question not getting probed.
3460 */ 3461 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 3462 params->status = MPI2_IOCSTATUS_BUSY; 3463 goto done; 3464 } 3465 3466 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; 3467 if (reply == NULL) { 3468 params->status = MPI2_IOCSTATUS_BUSY; 3469 goto done; 3470 } 3471 params->status = reply->IOCStatus; 3472 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { 3473 params->hdr.Ext.ExtPageType = reply->ExtPageType; 3474 params->hdr.Ext.ExtPageLength = reply->ExtPageLength; 3475 params->hdr.Ext.PageType = reply->Header.PageType; 3476 params->hdr.Ext.PageNumber = reply->Header.PageNumber; 3477 params->hdr.Ext.PageVersion = reply->Header.PageVersion; 3478 } else { 3479 params->hdr.Struct.PageType = reply->Header.PageType; 3480 params->hdr.Struct.PageNumber = reply->Header.PageNumber; 3481 params->hdr.Struct.PageLength = reply->Header.PageLength; 3482 params->hdr.Struct.PageVersion = reply->Header.PageVersion; 3483 } 3484 3485 done: 3486 mpr_free_command(sc, cm); 3487 if (params->callback != NULL) 3488 params->callback(sc, params); 3489 3490 return; 3491 } 3492
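/*
 * Example use of the simplified config-page interface above (a sketch
 * only; "page" stands for a caller-supplied page structure, and a real
 * caller would normally fetch the page header first).  A NULL callback
 * selects the synchronous path through mpr_wait_command():
 *
 *	struct mpr_config_params params;
 *
 *	bzero(&params, sizeof(params));
 *	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
 *	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
 *	params.hdr.Struct.PageNumber = 3;
 *	params.buffer = &page;
 *	params.length = sizeof(page);
 *	params.callback = NULL;
 *	error = mpr_read_config_page(sc, &params);
 */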