/*-
 * Copyright (c) 2009 Yahoo! Inc.
 * Copyright (c) 2011-2015 LSI Corp.
 * Copyright (c) 2013-2016 Avago Technologies
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD
 *
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/* Communications core for Avago Technologies (LSI) MPT3 */

/* TODO Move headers to mprvar */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/selinfo.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/uio.h>
#include <sys/sysctl.h>
#include <sys/queue.h>
#include <sys/kthread.h>
#include <sys/taskqueue.h>
#include <sys/endian.h>
#include <sys/eventhandler.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <sys/rman.h>
#include <sys/proc.h>

#include <dev/pci/pcivar.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/scsi/scsi_all.h>

#include <dev/mpr/mpi/mpi2_type.h>
#include <dev/mpr/mpi/mpi2.h>
#include <dev/mpr/mpi/mpi2_ioc.h>
#include <dev/mpr/mpi/mpi2_sas.h>
#include <dev/mpr/mpi/mpi2_pci.h>
#include <dev/mpr/mpi/mpi2_cnfg.h>
#include <dev/mpr/mpi/mpi2_init.h>
#include <dev/mpr/mpi/mpi2_tool.h>
#include <dev/mpr/mpr_ioctl.h>
#include <dev/mpr/mprvar.h>
#include <dev/mpr/mpr_table.h>
#include <dev/mpr/mpr_sas.h>

static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_init_queues(struct mpr_softc *sc);
static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag);
static int mpr_transition_operational(struct mpr_softc *sc);
static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching);
static void mpr_iocfacts_free(struct mpr_softc *sc);
static void mpr_startup(void *arg);
static int mpr_send_iocinit(struct mpr_softc *sc);
static int mpr_alloc_queues(struct mpr_softc *sc);
static int mpr_alloc_replies(struct mpr_softc *sc);
static int mpr_alloc_requests(struct mpr_softc *sc);
static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc);
static int mpr_attach_log(struct mpr_softc *sc);
static __inline void mpr_complete_command(struct mpr_softc *sc,
    struct mpr_command *cm);
static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
    MPI2_EVENT_NOTIFICATION_REPLY *reply);
static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mpr_periodic(void *);
static int mpr_reregister_events(struct mpr_softc *sc);
static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag);

SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD, 0, "MPR Driver Parameters");

MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory");

/*
 * Do a "Diagnostic Reset" aka a hard reset.  This should get the chip out of
 * any state and back to its initialization state machine.
 */
static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };

/*
 * Added this union to smoothly convert le64toh cm->cm_desc.Words.
 * The compiler only supports a uint64_t being passed as the argument;
 * otherwise it throws this error:
 * "aggregate value used where an integer was expected"
 */
typedef union _reply_descriptor {
	u64 word;
	struct {
		u32 low;
		u32 high;
	} u;
} reply_descriptor, request_descriptor;
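/*
 * Illustrative sketch (not part of the driver): the union lets a 64-bit
 * descriptor built from two 32-bit halves be byte-swapped as one integer,
 * which is what le64toh() expects.  The variable names here are hypothetical.
 *
 *	reply_descriptor rd;
 *	rd.u.low  = desc_low;		// low 32 bits of the descriptor
 *	rd.u.high = desc_high;		// high 32 bits
 *	rd.word   = le64toh(rd.word);	// swap as a single uint64_t
 */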
/* Rate limit chain-fail messages to 1 per minute */
static struct timeval mpr_chainfail_interval = { 60, 0 };

/*
 * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
 * If this function is called from process context, it can sleep, and there is
 * no harm in sleeping.  If it is called from an interrupt handler, we cannot
 * sleep and the NO_SLEEP flag must be set.  Based on the sleep flag, the
 * driver will call either msleep, pause or DELAY.  msleep and pause behave
 * similarly, but pause is used when mpr_mtx is not held by the driver.
 */
static int
mpr_diag_reset(struct mpr_softc *sc, int sleep_flag)
{
	uint32_t reg;
	int i, error, tries = 0;
	uint8_t first_wait_done = FALSE;

	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

	/* Clear any pending interrupts */
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/*
	 * Force NO_SLEEP for threads that are prohibited from sleeping,
	 * e.g. threads running in an interrupt handler.
	 */
#if __FreeBSD_version >= 1000029
	if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
	if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
		sleep_flag = NO_SLEEP;

	/* Push the magic sequence */
	error = ETIMEDOUT;
	while (tries++ < 20) {
		for (i = 0; i < sizeof(mpt2_reset_magic); i++)
			mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET,
			    mpt2_reset_magic[i]);

		/* wait 100 msec */
		if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
			    "mprdiag", hz/10);
		else if (sleep_flag == CAN_SLEEP)
			pause("mprdiag", hz/10);
		else
			DELAY(100 * 1000);

		reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
		if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) {
			error = 0;
			break;
		}
	}
	if (error)
		return (error);

	/* Send the actual reset.  XXX need to refresh the reg? */
	mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET,
	    reg | MPI2_DIAG_RESET_ADAPTER);

	/* Wait up to 300 seconds in 50ms intervals */
	error = ETIMEDOUT;
	for (i = 0; i < 6000; i++) {
		/*
		 * Wait 50 msec.  If this is the first time through, wait 256
		 * msec to satisfy Diag Reset timing requirements.
		 */
		if (first_wait_done) {
			if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
				msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
				    "mprdiag", hz/20);
			else if (sleep_flag == CAN_SLEEP)
				pause("mprdiag", hz/20);
			else
				DELAY(50 * 1000);
		} else {
			DELAY(256 * 1000);
			first_wait_done = TRUE;
		}
		/*
		 * Check for the RESET_ADAPTER bit to be cleared first, then
		 * wait for the RESET state to be cleared, which takes a little
		 * longer.
		 */
		reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET);
		if (reg & MPI2_DIAG_RESET_ADAPTER) {
			continue;
		}
		reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) {
			error = 0;
			break;
		}
	}
	if (error)
		return (error);

	mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0);

	return (0);
}

static int
mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag)
{

	MPR_FUNCTRACE(sc);

	mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
	    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET <<
	    MPI2_DOORBELL_FUNCTION_SHIFT);

	if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed : <%s>\n",
		    __func__);
		return (ETIMEDOUT);
	}

	return (0);
}

static int
mpr_transition_ready(struct mpr_softc *sc)
{
	uint32_t reg, state;
	int error, tries = 0;
	int sleep_flags;

	MPR_FUNCTRACE(sc);
	/* If we are in attach call, do not sleep */
	sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE)
	    ? CAN_SLEEP : NO_SLEEP;

	error = 0;
	while (tries++ < 1200) {
		reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		mpr_dprint(sc, MPR_INIT, "Doorbell= 0x%x\n", reg);

		/*
		 * Ensure the IOC is ready to talk.  If it's not, try
		 * resetting it.
		 */
		if (reg & MPI2_DOORBELL_USED) {
			mpr_diag_reset(sc, sleep_flags);
			DELAY(50000);
			continue;
		}

		/* Is the adapter owned by another peer? */
		if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) ==
		    (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) {
			device_printf(sc->mpr_dev, "IOC is under the control "
			    "of another peer host, aborting initialization.\n");
			return (ENXIO);
		}

		state = reg & MPI2_IOC_STATE_MASK;
		if (state == MPI2_IOC_STATE_READY) {
			/* Ready to go! */
			error = 0;
			break;
		} else if (state == MPI2_IOC_STATE_FAULT) {
			mpr_dprint(sc, MPR_FAULT, "IOC in fault state 0x%x\n",
			    state & MPI2_DOORBELL_FAULT_CODE_MASK);
			mpr_diag_reset(sc, sleep_flags);
		} else if (state == MPI2_IOC_STATE_OPERATIONAL) {
			/* Need to take ownership */
			mpr_message_unit_reset(sc, sleep_flags);
		} else if (state == MPI2_IOC_STATE_RESET) {
			/* Wait a bit, IOC might be in transition */
			mpr_dprint(sc, MPR_FAULT,
			    "IOC in unexpected reset state\n");
		} else {
			mpr_dprint(sc, MPR_FAULT,
			    "IOC in unknown state 0x%x\n", state);
			error = EINVAL;
			break;
		}

		/* Wait 50ms for things to settle down. */
		DELAY(50000);
	}

	if (error)
		device_printf(sc->mpr_dev, "Cannot transition IOC to ready\n");
	return (error);
}

static int
mpr_transition_operational(struct mpr_softc *sc)
{
	uint32_t reg, state;
	int error;

	MPR_FUNCTRACE(sc);

	error = 0;
	reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
	mpr_dprint(sc, MPR_INIT, "Doorbell= 0x%x\n", reg);

	state = reg & MPI2_IOC_STATE_MASK;
	if (state != MPI2_IOC_STATE_READY) {
		if ((error = mpr_transition_ready(sc)) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "%s failed to transition ready\n", __func__);
			return (error);
		}
	}

	error = mpr_send_iocinit(sc);
	return (error);
}

/*
 * This is called during attach and when re-initializing due to a Diag Reset.
 * IOC Facts is used to allocate many of the structures needed by the driver.
 * If called from attach, de-allocation is not required because the driver has
 * not allocated any structures yet, but if called from a Diag Reset, previously
 * allocated structures based on IOC Facts will need to be freed and re-
 * allocated based on the latest IOC Facts.
 */
static int
mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching)
{
	int error;
	Mpi2IOCFactsReply_t saved_facts;
	uint8_t saved_mode, reallocating;

	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

	/* Save old IOC Facts and then only reallocate if Facts have changed */
	if (!attaching) {
		bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY));
	}

	/*
	 * Get IOC Facts.  In all cases throughout this function, panic if doing
	 * a re-initialization and only return the error if attaching so the OS
	 * can handle it.
	 */
	if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) {
		if (attaching) {
			mpr_dprint(sc, MPR_FAULT, "%s failed to get IOC Facts "
			    "with error %d\n", __func__, error);
			return (error);
		} else {
			panic("%s failed to get IOC Facts with error %d\n",
			    __func__, error);
		}
	}

	MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts);

	snprintf(sc->fw_version, sizeof(sc->fw_version),
	    "%02d.%02d.%02d.%02d",
	    sc->facts->FWVersion.Struct.Major,
	    sc->facts->FWVersion.Struct.Minor,
	    sc->facts->FWVersion.Struct.Unit,
	    sc->facts->FWVersion.Struct.Dev);

	mpr_printf(sc, "Firmware: %s, Driver: %s\n", sc->fw_version,
	    MPR_DRIVER_VERSION);
	mpr_printf(sc, "IOCCapabilities: %b\n", sc->facts->IOCCapabilities,
	    "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf"
	    "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR"
	    "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc"
	    "\22FastPath" "\23RDPQArray" "\24AtomicReqDesc" "\25PCIeSRIOV");
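	/*
	 * A note on the %b conversion above (kernel printf(9)): the first
	 * character of the bit string is the output base ('\20' = hex) and
	 * each subsequent <bit-number><name> pair labels a set bit.  A
	 * minimal, hypothetical example:
	 *
	 *	printf("flags: %b\n", 3, "\20\1FIRST\2SECOND");
	 *
	 * would print "flags: 3<FIRST,SECOND>".
	 */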
	/*
	 * If the chip doesn't support event replay then a hard reset will be
	 * required to trigger a full discovery.  Do the reset here then
	 * retransition to Ready.  A hard reset might have already been done,
	 * but it doesn't hurt to do it again.  Only do this if attaching, not
	 * for a Diag Reset.
	 */
	if (attaching) {
		if ((sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0) {
			mpr_diag_reset(sc, NO_SLEEP);
			if ((error = mpr_transition_ready(sc)) != 0) {
				mpr_dprint(sc, MPR_FAULT, "%s failed to "
				    "transition to ready with error %d\n",
				    __func__, error);
				return (error);
			}
		}
	}

	/*
	 * Set flag if IR Firmware is loaded.  If the RAID Capability has
	 * changed from the previous IOC Facts, log a warning, but only if
	 * checking this after a Diag Reset and not during attach.
	 */
	saved_mode = sc->ir_firmware;
	if (sc->facts->IOCCapabilities &
	    MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID)
		sc->ir_firmware = 1;
	if (!attaching) {
		if (sc->ir_firmware != saved_mode) {
			mpr_dprint(sc, MPR_FAULT, "%s new IR/IT mode in IOC "
			    "Facts does not match previous mode\n", __func__);
		}
	}

	/* Only deallocate and reallocate if relevant IOC Facts have changed */
	reallocating = FALSE;
	if ((!attaching) &&
	    ((saved_facts.MsgVersion != sc->facts->MsgVersion) ||
	    (saved_facts.HeaderVersion != sc->facts->HeaderVersion) ||
	    (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) ||
	    (saved_facts.RequestCredit != sc->facts->RequestCredit) ||
	    (saved_facts.ProductID != sc->facts->ProductID) ||
	    (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) ||
	    (saved_facts.IOCRequestFrameSize !=
	    sc->facts->IOCRequestFrameSize) ||
	    (saved_facts.IOCMaxChainSegmentSize !=
	    sc->facts->IOCMaxChainSegmentSize) ||
	    (saved_facts.MaxTargets != sc->facts->MaxTargets) ||
	    (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) ||
	    (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) ||
	    (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) ||
	    (saved_facts.MaxReplyDescriptorPostQueueDepth !=
	    sc->facts->MaxReplyDescriptorPostQueueDepth) ||
	    (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) ||
	    (saved_facts.MaxVolumes != sc->facts->MaxVolumes) ||
	    (saved_facts.MaxPersistentEntries !=
	    sc->facts->MaxPersistentEntries))) {
		reallocating = TRUE;
	}

	/*
	 * Some things should be done if attaching or re-allocating after a Diag
	 * Reset, but are not needed after a Diag Reset if the FW has not
	 * changed.
	 */
	if (attaching || reallocating) {
		/*
		 * Check if controller supports FW diag buffers and set flag to
		 * enable each type.
		 */
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE].
			    enabled = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT].
			    enabled = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER)
			sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED].
			    enabled = TRUE;

		/*
		 * Set flags for some supported items.
		 */
		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP)
			sc->eedp_enabled = TRUE;
		if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR)
			sc->control_TLR = TRUE;
		if (sc->facts->IOCCapabilities &
		    MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
			sc->atomic_desc_capable = TRUE;

		/*
		 * Size the queues.  Since the reply queues always need one
		 * free entry, we'll just deduct one reply message here.
		 */
		sc->num_reqs = MIN(MPR_REQ_FRAMES, sc->facts->RequestCredit);
		sc->num_replies = MIN(MPR_REPLY_FRAMES + MPR_EVT_REPLY_FRAMES,
		    sc->facts->MaxReplyDescriptorPostQueueDepth) - 1;

		/*
		 * Initialize all Tail Queues
		 */
		TAILQ_INIT(&sc->req_list);
		TAILQ_INIT(&sc->high_priority_req_list);
		TAILQ_INIT(&sc->chain_list);
		TAILQ_INIT(&sc->prp_page_list);
		TAILQ_INIT(&sc->tm_list);
	}

	/*
	 * If doing a Diag Reset and the FW is significantly different
	 * (reallocating will be set above in IOC Facts comparison), then all
	 * buffers based on the IOC Facts will need to be freed before they are
	 * reallocated.
	 */
	if (reallocating) {
		mpr_iocfacts_free(sc);
		mprsas_realloc_targets(sc, saved_facts.MaxTargets +
		    saved_facts.MaxVolumes);
	}

	/*
	 * Any deallocation has been completed.  Now start reallocating
	 * if needed.  Will only need to reallocate if attaching or if the new
	 * IOC Facts are different from the previous IOC Facts after a Diag
	 * Reset.  Targets have already been allocated above if needed.
	 */
	if (attaching || reallocating) {
		if (((error = mpr_alloc_queues(sc)) != 0) ||
		    ((error = mpr_alloc_replies(sc)) != 0) ||
		    ((error = mpr_alloc_requests(sc)) != 0)) {
			if (attaching) {
				mpr_dprint(sc, MPR_FAULT, "%s failed to alloc "
				    "queues with error %d\n", __func__, error);
				mpr_free(sc);
				return (error);
			} else {
				panic("%s failed to alloc queues with error "
				    "%d\n", __func__, error);
			}
		}
	}

	/* Always initialize the queues */
	bzero(sc->free_queue, sc->fqdepth * 4);
	mpr_init_queues(sc);

	/*
	 * Always get the chip out of the reset state, but only panic if not
	 * attaching.  If attaching and there is an error, that is handled by
	 * the OS.
	 */
	error = mpr_transition_operational(sc);
	if (error != 0) {
		if (attaching) {
			mpr_printf(sc, "%s failed to transition to operational "
			    "with error %d\n", __func__, error);
			mpr_free(sc);
			return (error);
		} else {
			panic("%s failed to transition to operational with "
			    "error %d\n", __func__, error);
		}
	}

	/*
	 * Finish the queue initialization.
	 * These are set here instead of in mpr_init_queues() because the
	 * IOC resets these values during the state transition in
	 * mpr_transition_operational().  The free index is set to 1
	 * because the corresponding index in the IOC is set to 0, and the
	 * IOC treats the queues as full if both are set to the same value.
	 * Hence the reason that the queue can't hold all of the possible
	 * replies.
	 */
	sc->replypostindex = 0;
	mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex);
	mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0);
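	/*
	 * Concrete illustration of the index rule above (hypothetical depths):
	 * with fqdepth = 1024 and num_replies = 1023, mpr_init_queues() sets
	 * replyfreeindex = 1023 while the IOC's internal index starts at 0.
	 * If both indexes were ever equal, the IOC would treat the queue as
	 * full, which is why one entry is always left unused.
	 */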
	/*
	 * Attach the subsystems so they can prepare their event masks.
	 */
	/* XXX Should be dynamic so that IM/IR and user modules can attach */
	if (attaching) {
		if (((error = mpr_attach_log(sc)) != 0) ||
		    ((error = mpr_attach_sas(sc)) != 0) ||
		    ((error = mpr_attach_user(sc)) != 0)) {
			mpr_printf(sc, "%s failed to attach all subsystems: "
			    "error %d\n", __func__, error);
			mpr_free(sc);
			return (error);
		}

		if ((error = mpr_pci_setup_interrupts(sc)) != 0) {
			mpr_printf(sc, "%s failed to setup interrupts\n",
			    __func__);
			mpr_free(sc);
			return (error);
		}
	}

	return (error);
}

/*
 * This is called if memory is being freed (during detach for example) and when
 * buffers need to be reallocated due to a Diag Reset.
 */
static void
mpr_iocfacts_free(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int i;

	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

	if (sc->free_busaddr != 0)
		bus_dmamap_unload(sc->queues_dmat, sc->queues_map);
	if (sc->free_queue != NULL)
		bus_dmamem_free(sc->queues_dmat, sc->free_queue,
		    sc->queues_map);
	if (sc->queues_dmat != NULL)
		bus_dma_tag_destroy(sc->queues_dmat);

	if (sc->chain_busaddr != 0)
		bus_dmamap_unload(sc->chain_dmat, sc->chain_map);
	if (sc->chain_frames != NULL)
		bus_dmamem_free(sc->chain_dmat, sc->chain_frames,
		    sc->chain_map);
	if (sc->chain_dmat != NULL)
		bus_dma_tag_destroy(sc->chain_dmat);

	if (sc->sense_busaddr != 0)
		bus_dmamap_unload(sc->sense_dmat, sc->sense_map);
	if (sc->sense_frames != NULL)
		bus_dmamem_free(sc->sense_dmat, sc->sense_frames,
		    sc->sense_map);
	if (sc->sense_dmat != NULL)
		bus_dma_tag_destroy(sc->sense_dmat);

	if (sc->prp_page_busaddr != 0)
		bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map);
	if (sc->prp_pages != NULL)
		bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages,
		    sc->prp_page_map);
	if (sc->prp_page_dmat != NULL)
		bus_dma_tag_destroy(sc->prp_page_dmat);

	if (sc->reply_busaddr != 0)
		bus_dmamap_unload(sc->reply_dmat, sc->reply_map);
	if (sc->reply_frames != NULL)
		bus_dmamem_free(sc->reply_dmat, sc->reply_frames,
		    sc->reply_map);
	if (sc->reply_dmat != NULL)
		bus_dma_tag_destroy(sc->reply_dmat);

	if (sc->req_busaddr != 0)
		bus_dmamap_unload(sc->req_dmat, sc->req_map);
	if (sc->req_frames != NULL)
		bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map);
	if (sc->req_dmat != NULL)
		bus_dma_tag_destroy(sc->req_dmat);

	if (sc->chains != NULL)
		free(sc->chains, M_MPR);
	if (sc->prps != NULL)
		free(sc->prps, M_MPR);
	if (sc->commands != NULL) {
		for (i = 1; i < sc->num_reqs; i++) {
			cm = &sc->commands[i];
			bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap);
		}
		free(sc->commands, M_MPR);
	}
	if (sc->buffer_dmat != NULL)
		bus_dma_tag_destroy(sc->buffer_dmat);
}

/*
 * The terms diag reset and hard reset are used interchangeably in the MPI
 * docs to mean resetting the controller chip.  In this code diag reset
 * cleans everything up, and the hard reset function just sends the reset
 * sequence to the chip.  This should probably be refactored so that every
 * subsystem gets a reset notification of some sort, and can clean up
 * appropriately.
 */
int
mpr_reinit(struct mpr_softc *sc)
{
	int error;
	struct mprsas_softc *sassc;

	sassc = sc->sassc;

	MPR_FUNCTRACE(sc);

	mtx_assert(&sc->mpr_mtx, MA_OWNED);

	if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) {
		mpr_dprint(sc, MPR_INIT, "%s reset already in progress\n",
		    __func__);
		return 0;
	}

	mpr_dprint(sc, MPR_INFO, "Reinitializing controller\n");
	/* make sure the completion callbacks can recognize they're getting
	 * a NULL cm_reply due to a reset.
	 */
	sc->mpr_flags |= MPR_FLAGS_DIAGRESET;

	/*
	 * Mask interrupts here.
	 */
	mpr_dprint(sc, MPR_INIT, "%s mask interrupts\n", __func__);
	mpr_mask_intr(sc);

	error = mpr_diag_reset(sc, CAN_SLEEP);
	if (error != 0) {
		panic("%s hard reset failed with error %d\n", __func__, error);
	}

	/* Restore the PCI state, including the MSI-X registers */
	mpr_pci_restore(sc);

	/* Give the I/O subsystem special priority to get itself prepared */
	mprsas_handle_reinit(sc);

	/*
	 * Get IOC Facts and allocate all structures based on this information.
	 * The attach function will also call mpr_iocfacts_allocate at startup.
	 * If relevant values have changed in IOC Facts, this function will free
	 * all of the memory based on IOC Facts and reallocate that memory.
	 */
	if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) {
		panic("%s IOC Facts based allocation failed with error %d\n",
		    __func__, error);
	}

	/*
	 * Mapping structures will be re-allocated after getting IOC Page8, so
	 * free these structures here.
	 */
	mpr_mapping_exit(sc);

	/*
	 * The only static page currently read is IOC Page8.  Others can be
	 * added in the future.  It's possible that the values in IOC Page8
	 * have changed after a Diag Reset due to user modification, so always
	 * read these.  Interrupts are masked, so unmask them before getting
	 * config pages.
	 */
	mpr_unmask_intr(sc);
	sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET;
	mpr_base_static_config_pages(sc);

	/*
	 * Some mapping info is based on IOC Page8 data, so re-initialize the
	 * mapping tables.
	 */
	mpr_mapping_initialize(sc);

	/*
	 * Restart will reload the event masks clobbered by the reset, and
	 * then enable the port.
	 */
	mpr_reregister_events(sc);

	/* the end of discovery will release the simq, so we're done. */
	mpr_dprint(sc, MPR_INFO, "%s finished sc %p post %u free %u\n",
	    __func__, sc, sc->replypostindex, sc->replyfreeindex);
	mprsas_release_simq_reinit(sassc);

	return 0;
}

/*
 * Wait for the chip to ACK a word that we've put into its FIFO.
 * Wait for <timeout> seconds.  Each loop iteration busy-waits for
 * 500 microseconds, so the total is [0.5 msec * (2000 * <timeout>)],
 * i.e. <timeout> seconds.
 */
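/*
 * Worked example of the timing (values chosen only for illustration): with
 * timeout = 5 and NO_SLEEP, cntdn = 2000 * 5 = 10000 iterations of
 * DELAY(500), i.e. roughly 5 seconds of busy-waiting.  With CAN_SLEEP,
 * cntdn = 1000 * 5 = 5000 iterations of a ~1 msec sleep, again roughly
 * 5 seconds.
 */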
static int
mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag)
{
	u32 cntdn, count;
	u32 int_status;
	u32 doorbell;

	count = 0;
	cntdn = (sleep_flag == CAN_SLEEP) ? 1000*timeout : 2000*timeout;
	do {
		int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET);
		if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
			mpr_dprint(sc, MPR_INIT, "%s: successful count(%d), "
			    "timeout(%d)\n", __func__, count, timeout);
			return 0;
		} else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
			doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
			if ((doorbell & MPI2_IOC_STATE_MASK) ==
			    MPI2_IOC_STATE_FAULT) {
				mpr_dprint(sc, MPR_FAULT,
				    "fault_state(0x%04x)!\n", doorbell);
				return (EFAULT);
			}
		} else if (int_status == 0xFFFFFFFF)
			goto out;

		/*
		 * If it can sleep, sleep for 1 millisecond, else busy loop for
		 * 0.5 millisecond
		 */
		if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba",
			    hz/1000);
		else if (sleep_flag == CAN_SLEEP)
			pause("mprdba", hz/1000);
		else
			DELAY(500);
		count++;
	} while (--cntdn);

out:
	mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), "
	    "int_status(%x)!\n", __func__, count, int_status);
	return (ETIMEDOUT);
}

/* Wait for the chip to signal that the next word in its FIFO can be fetched */
static int
mpr_wait_db_int(struct mpr_softc *sc)
{
	int retry;

	for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) {
		if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) &
		    MPI2_HIS_IOC2SYS_DB_STATUS) != 0)
			return (0);
		DELAY(2000);
	}
	return (ETIMEDOUT);
}

/* Step through the synchronous command state machine, i.e. "Doorbell mode" */
static int
mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply,
    int req_sz, int reply_sz, int timeout)
{
	uint32_t *data32;
	uint16_t *data16;
	int i, count, ioc_sz, residual;
	int sleep_flags = CAN_SLEEP;

#if __FreeBSD_version >= 1000029
	if (curthread->td_no_sleeping)
#else //__FreeBSD_version < 1000029
	if (curthread->td_pflags & TDP_NOSLEEPING)
#endif //__FreeBSD_version >= 1000029
		sleep_flags = NO_SLEEP;

	/* Step 1 */
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/* Step 2 */
	if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
		return (EBUSY);

	/*
	 * Step 3
	 * Announce that a message is coming through the doorbell.  Messages
	 * are pushed as 32-bit words, so round up if needed.
	 */
	count = (req_sz + 3) / 4;
	mpr_regwrite(sc, MPI2_DOORBELL_OFFSET,
	    (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) |
	    (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT));

	/* Step 4 */
	if (mpr_wait_db_int(sc) ||
	    (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n");
		return (ENXIO);
	}
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n");
		return (ENXIO);
	}

	/* Step 5 */
	/* Clock out the message data synchronously in 32-bit dwords */
	data32 = (uint32_t *)req;
	for (i = 0; i < count; i++) {
		mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i]));
		if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "Timeout while writing doorbell\n");
			return (ENXIO);
		}
	}
	/* Step 6 */
	/*
	 * Clock in the reply in 16-bit words.  The total length of the
	 * message is always in the 4th byte, so clock out the first 2 words
	 * manually, then loop the rest.
	 */
	data16 = (uint16_t *)reply;
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n");
		return (ENXIO);
	}
	data16[0] =
	    mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n");
		return (ENXIO);
	}
	data16[1] =
	    mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK;
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	/* Number of 32-bit words in the message */
	ioc_sz = reply->MsgLength;

	/*
	 * Figure out how many 16-bit words to clock in without overrunning.
	 * The precision loss with dividing reply_sz can safely be
	 * ignored because the messages can only be multiples of 32 bits.
	 */
	residual = 0;
	count = MIN((reply_sz / 4), ioc_sz) * 2;
	if (count < ioc_sz * 2) {
		residual = ioc_sz * 2 - count;
		mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d "
		    "residual message words\n", residual);
	}

	for (i = 2; i < count; i++) {
		if (mpr_wait_db_int(sc) != 0) {
			mpr_dprint(sc, MPR_FAULT,
			    "Timeout reading doorbell %d\n", i);
			return (ENXIO);
		}
		data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) &
		    MPI2_DOORBELL_DATA_MASK;
		mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	}

	/*
	 * Pull out residual words that won't fit into the provided buffer.
	 * This keeps the chip from hanging due to a driver programming
	 * error.
	 */
	while (residual--) {
		if (mpr_wait_db_int(sc) != 0) {
			mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n");
			return (ENXIO);
		}
		(void)mpr_regread(sc, MPI2_DOORBELL_OFFSET);
		mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);
	}

	/* Step 7 */
	if (mpr_wait_db_int(sc) != 0) {
		mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n");
		return (ENXIO);
	}
	if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED)
		mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n");
	mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

	return (0);
}

static void
mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm)
{
	request_descriptor rd;

	MPR_FUNCTRACE(sc);
	mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n",
	    cm->cm_desc.Default.SMID, cm, cm->cm_ccb);

	if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags &
	    MPR_FLAGS_SHUTDOWN))
		mtx_assert(&sc->mpr_mtx, MA_OWNED);

	if (++sc->io_cmds_active > sc->io_cmds_highwater)
		sc->io_cmds_highwater++;

	if (sc->atomic_desc_capable) {
		rd.u.low = cm->cm_desc.Words.Low;
		mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET,
		    rd.u.low);
	} else {
		rd.u.low = cm->cm_desc.Words.Low;
		rd.u.high = cm->cm_desc.Words.High;
		rd.word = htole64(rd.word);
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
		    rd.u.low);
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
		    rd.u.high);
	}
}

/*
 * Just the FACTS, ma'am.
 */
static int
mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts)
{
	MPI2_DEFAULT_REPLY *reply;
	MPI2_IOC_FACTS_REQUEST request;
	int error, req_sz, reply_sz;

	MPR_FUNCTRACE(sc);

	req_sz = sizeof(MPI2_IOC_FACTS_REQUEST);
	reply_sz = sizeof(MPI2_IOC_FACTS_REPLY);
	reply = (MPI2_DEFAULT_REPLY *)facts;

	bzero(&request, req_sz);
	request.Function = MPI2_FUNCTION_IOC_FACTS;
	error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5);

	return (error);
}

static int
mpr_send_iocinit(struct mpr_softc *sc)
{
	MPI2_IOC_INIT_REQUEST init;
	MPI2_DEFAULT_REPLY reply;
	int req_sz, reply_sz, error;
	struct timeval now;
	uint64_t time_in_msec;

	MPR_FUNCTRACE(sc);

	req_sz = sizeof(MPI2_IOC_INIT_REQUEST);
	reply_sz = sizeof(MPI2_IOC_INIT_REPLY);
	bzero(&init, req_sz);
	bzero(&reply, reply_sz);

	/*
	 * Fill in the init block.  Note that most addresses are
	 * deliberately in the lower 32 bits of memory.  This is a micro-
	 * optimization for PCI/PCIX, though it's not clear if it helps PCIe.
	 */
	init.Function = MPI2_FUNCTION_IOC_INIT;
	init.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
	init.MsgVersion = htole16(MPI2_VERSION);
	init.HeaderVersion = htole16(MPI2_HEADER_VERSION);
	init.SystemRequestFrameSize = htole16(sc->facts->IOCRequestFrameSize);
	init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth);
	init.ReplyFreeQueueDepth = htole16(sc->fqdepth);
	init.SenseBufferAddressHigh = 0;
	init.SystemReplyAddressHigh = 0;
	init.SystemRequestFrameBaseAddress.High = 0;
	init.SystemRequestFrameBaseAddress.Low =
	    htole32((uint32_t)sc->req_busaddr);
	init.ReplyDescriptorPostQueueAddress.High = 0;
	init.ReplyDescriptorPostQueueAddress.Low =
	    htole32((uint32_t)sc->post_busaddr);
	init.ReplyFreeQueueAddress.High = 0;
	init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr);
	getmicrotime(&now);
	time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000);
	init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF);
	init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF);
	init.HostPageSize = HOST_PAGE_SIZE_4K;

	error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5);
	if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
		error = ENXIO;

	mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus);
	return (error);
}

void
mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	bus_addr_t *addr;

	addr = arg;
	*addr = segs[0].ds_addr;
}

static int
mpr_alloc_queues(struct mpr_softc *sc)
{
	bus_addr_t queues_busaddr;
	uint8_t *queues;
	int qsize, fqsize, pqsize;

	/*
	 * The reply free queue contains 4 byte entries in multiples of 16 and
	 * aligned on a 16 byte boundary.  There must always be an unused entry.
	 * This queue supplies fresh reply frames for the firmware to use.
	 *
	 * The reply descriptor post queue contains 8 byte entries in
	 * multiples of 16 and aligned on a 16 byte boundary.  This queue
	 * contains filled-in reply frames sent from the firmware to the host.
	 *
	 * These two queues are allocated together for simplicity.
	 */
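	/*
	 * Worked example of the sizing below (depth chosen only for
	 * illustration): if sc->num_replies were 1023, then fqdepth =
	 * pqdepth = roundup2(1024, 16) = 1024, fqsize = 1024 * 4 = 4096
	 * bytes, pqsize = 1024 * 8 = 8192 bytes, and qsize = 12288 bytes.
	 */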
	sc->fqdepth = roundup2(sc->num_replies + 1, 16);
	sc->pqdepth = roundup2(sc->num_replies + 1, 16);
	fqsize = sc->fqdepth * 4;
	pqsize = sc->pqdepth * 8;
	qsize = fqsize + pqsize;

	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				16, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				qsize,			/* maxsize */
				1,			/* nsegments */
				qsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->queues_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate queues DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT,
	    &sc->queues_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate queues memory\n");
		return (ENOMEM);
	}
	bzero(queues, qsize);
	bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize,
	    mpr_memaddr_cb, &queues_busaddr, 0);

	sc->free_queue = (uint32_t *)queues;
	sc->free_busaddr = queues_busaddr;
	sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize);
	sc->post_busaddr = queues_busaddr + fqsize;

	return (0);
}

static int
mpr_alloc_replies(struct mpr_softc *sc)
{
	int rsize, num_replies;

	/*
	 * sc->num_replies should be one less than sc->fqdepth.  We need to
	 * allocate space for sc->fqdepth replies, but only sc->num_replies
	 * replies can be used at once.
	 */
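	/*
	 * Illustrative sizing (assumed values, continuing the example above):
	 * with ReplyFrameSize = 32 four-byte words (128-byte frames) and
	 * fqdepth = 1024, rsize = 32 * 1024 * 4 = 131072 bytes.
	 */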
	num_replies = max(sc->fqdepth, sc->num_replies);

	rsize = sc->facts->ReplyFrameSize * num_replies * 4;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->reply_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate replies DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
	    BUS_DMA_NOWAIT, &sc->reply_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate replies memory\n");
		return (ENOMEM);
	}
	bzero(sc->reply_frames, rsize);
	bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
	    mpr_memaddr_cb, &sc->reply_busaddr, 0);

	return (0);
}

static int
mpr_alloc_requests(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	struct mpr_chain *chain;
	int i, rsize, nsegs;

	rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				16, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->req_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate request DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
	    BUS_DMA_NOWAIT, &sc->req_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate request memory\n");
		return (ENOMEM);
	}
	bzero(sc->req_frames, rsize);
	bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
	    mpr_memaddr_cb, &sc->req_busaddr, 0);

	/*
	 * Gen3 and beyond uses the IOCMaxChainSegmentSize from IOC Facts to
	 * get the size of a Chain Frame.  Previous versions use the size as a
	 * Request Frame for the Chain Frame size.  If IOCMaxChainSegmentSize
	 * is 0, use the default value.  The IOCMaxChainSegmentSize is the
	 * number of 16-byte elements that can fit in a Chain Frame, which is
	 * the size of an IEEE Simple SGE.
	 */
	if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) {
		sc->chain_seg_size =
		    htole16(sc->facts->IOCMaxChainSegmentSize);
		if (sc->chain_seg_size == 0) {
			sc->chain_frame_size = MPR_DEFAULT_CHAIN_SEG_SIZE *
			    MPR_MAX_CHAIN_ELEMENT_SIZE;
		} else {
			sc->chain_frame_size = sc->chain_seg_size *
			    MPR_MAX_CHAIN_ELEMENT_SIZE;
		}
	} else {
		sc->chain_frame_size = sc->facts->IOCRequestFrameSize * 4;
	}
	rsize = sc->chain_frame_size * sc->max_chains;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				16, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->chain_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate chain DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames,
	    BUS_DMA_NOWAIT, &sc->chain_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate chain memory\n");
		return (ENOMEM);
	}
	bzero(sc->chain_frames, rsize);
	bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize,
	    mpr_memaddr_cb, &sc->chain_busaddr, 0);

	rsize = MPR_SENSE_LEN * sc->num_reqs;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->sense_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate sense DMA tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames,
	    BUS_DMA_NOWAIT, &sc->sense_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate sense memory\n");
		return (ENOMEM);
	}
	bzero(sc->sense_frames, rsize);
	bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize,
	    mpr_memaddr_cb, &sc->sense_busaddr, 0);

	sc->chains = malloc(sizeof(struct mpr_chain) * sc->max_chains, M_MPR,
	    M_WAITOK | M_ZERO);
	if (!sc->chains) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}
	for (i = 0; i < sc->max_chains; i++) {
		chain = &sc->chains[i];
		chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames +
		    i * sc->chain_frame_size);
		chain->chain_busaddr = sc->chain_busaddr +
		    i * sc->chain_frame_size;
		mpr_free_chain(sc, chain);
		sc->chain_free_lowwater++;
	}

	/*
	 * Allocate NVMe PRP Pages for NVMe SGL support only if the FW supports
	 * these devices.
	 */
	if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) &&
	    (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) {
		if (mpr_alloc_nvme_prp_pages(sc) == ENOMEM)
			return (ENOMEM);
	}

	/* XXX Need to pick a more precise value */
	nsegs = (MAXPHYS / PAGE_SIZE) + 1;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				1, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR,	/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsize */
				nsegs,			/* nsegments */
				BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */
				BUS_DMA_ALLOCNOW,	/* flags */
				busdma_lock_mutex,	/* lockfunc */
				&sc->mpr_mtx,		/* lockarg */
				&sc->buffer_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate buffer DMA tag\n");
		return (ENOMEM);
	}

	/*
	 * SMID 0 cannot be used as a free command per the firmware spec.
	 * Just drop that command instead of risking accounting bugs.
	 */
	sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs,
	    M_MPR, M_WAITOK | M_ZERO);
	if (!sc->commands) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}
	for (i = 1; i < sc->num_reqs; i++) {
		cm = &sc->commands[i];
		cm->cm_req = sc->req_frames +
		    i * sc->facts->IOCRequestFrameSize * 4;
		cm->cm_req_busaddr = sc->req_busaddr +
		    i * sc->facts->IOCRequestFrameSize * 4;
		cm->cm_sense = &sc->sense_frames[i];
		cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN;
		cm->cm_desc.Default.SMID = i;
		cm->cm_sc = sc;
		TAILQ_INIT(&cm->cm_chain_list);
		TAILQ_INIT(&cm->cm_prp_page_list);
		callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0);

		/* XXX Is a failure here a critical problem? */
		if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap)
		    == 0) {
			if (i <= sc->facts->HighPriorityCredit)
				mpr_free_high_priority_command(sc, cm);
			else
				mpr_free_command(sc, cm);
		} else {
			panic("failed to allocate command %d\n", i);
			sc->num_reqs = i;
			break;
		}
	}

	return (0);
}

/*
 * Allocate contiguous buffers for PCIe NVMe devices for building native PRPs,
 * which are scatter/gather lists for NVMe devices.
 *
 * This buffer must be contiguous due to the nature of how NVMe PRPs are built
 * and translated by FW.
 *
 * returns ENOMEM if memory could not be allocated, otherwise returns 0.
 */
static int
mpr_alloc_nvme_prp_pages(struct mpr_softc *sc)
{
	int PRPs_per_page, PRPs_required, pages_required;
	int rsize, i;
	struct mpr_prp_page *prp_page;

	/*
	 * Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number
	 * of PRPs (NVMe's Scatter/Gather Element) needed per I/O is:
	 * MAX_IO_SIZE / PAGE_SIZE = 256
	 *
	 * 1 PRP entry in main frame for PRP list pointer still leaves 255 PRPs
	 * required for the remainder of the 1MB I/O.  512 PRPs can fit into one
	 * page (4096 / 8 = 512), so only one page is required for each I/O.
	 *
	 * Each of these buffers will need to be contiguous.  For simplicity,
	 * only one buffer is allocated here, which has all of the space
	 * required for the NVMe Queue Depth.  If there are problems allocating
	 * this one buffer, this function will need to change to allocate
	 * individual, contiguous NVME_QDEPTH buffers.
	 *
	 * The real calculation will use the real max io size.  Above is just an
	 * example.
	 */
	PRPs_required = sc->maxio / PAGE_SIZE;
	PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1;
	pages_required = (PRPs_required / PRPs_per_page) + 1;
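	/*
	 * Plugging the example numbers above into these variables (assuming
	 * sc->maxio = 1MB, PAGE_SIZE = 4096 and PRP_ENTRY_SIZE = 8):
	 * PRPs_required = 256, PRPs_per_page = 512 - 1 = 511 (one entry per
	 * page is reserved to chain to the next PRP page), and
	 * pages_required = (256 / 511) + 1 = 1.
	 */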
	sc->prp_buffer_size = PAGE_SIZE * pages_required;
	rsize = sc->prp_buffer_size * NVME_QDEPTH;
	if (bus_dma_tag_create( sc->mpr_parent_dmat,	/* parent */
				4, 0,			/* algnmnt, boundary */
				BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
				BUS_SPACE_MAXADDR,	/* highaddr */
				NULL, NULL,		/* filter, filterarg */
				rsize,			/* maxsize */
				1,			/* nsegments */
				rsize,			/* maxsegsize */
				0,			/* flags */
				NULL, NULL,		/* lockfunc, lockarg */
				&sc->prp_page_dmat)) {
		device_printf(sc->mpr_dev, "Cannot allocate NVMe PRP DMA "
		    "tag\n");
		return (ENOMEM);
	}
	if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages,
	    BUS_DMA_NOWAIT, &sc->prp_page_map)) {
		device_printf(sc->mpr_dev, "Cannot allocate NVMe PRP memory\n");
		return (ENOMEM);
	}
	bzero(sc->prp_pages, rsize);
	bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages,
	    rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0);

	sc->prps = malloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR,
	    M_WAITOK | M_ZERO);
	for (i = 0; i < NVME_QDEPTH; i++) {
		prp_page = &sc->prps[i];
		prp_page->prp_page = (uint64_t *)(sc->prp_pages +
		    i * sc->prp_buffer_size);
		prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr +
		    i * sc->prp_buffer_size);
		mpr_free_prp_page(sc, prp_page);
		sc->prp_pages_free_lowwater++;
	}

	return (0);
}

static int
mpr_init_queues(struct mpr_softc *sc)
{
	int i;

	memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8);

	/*
	 * According to the spec, we need to use one less reply than we
	 * have space for on the queue.  So sc->num_replies (the number we
	 * use) should be less than sc->fqdepth (allocated size).
	 */
	if (sc->num_replies >= sc->fqdepth)
		return (EINVAL);

	/*
	 * Initialize all of the free queue entries.
	 */
	for (i = 0; i < sc->fqdepth; i++) {
		sc->free_queue[i] = sc->reply_busaddr +
		    (i * sc->facts->ReplyFrameSize * 4);
	}
	sc->replyfreeindex = sc->num_replies;

	return (0);
}

/*
 * Get the driver parameter tunables.  Lowest priority are the driver defaults.
 * Next are the global settings, if they exist.  Highest are the per-unit
 * settings, if they exist.
 */
void
mpr_get_tunables(struct mpr_softc *sc)
{
	char tmpstr[80];

	/* XXX default to some debugging for now */
	sc->mpr_debug = MPR_INFO | MPR_FAULT;
	sc->disable_msix = 0;
	sc->disable_msi = 0;
	sc->max_chains = MPR_CHAIN_FRAMES;
	sc->max_io_pages = MPR_MAXIO_PAGES;
	sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD;
	sc->spinup_wait_time = DEFAULT_SPINUP_WAIT;
	sc->use_phynum = 1;

	/*
	 * Grab the global variables.
	 */
	TUNABLE_INT_FETCH("hw.mpr.debug_level", &sc->mpr_debug);
	TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix);
	TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi);
	TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains);
	TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages);
	TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu);
	TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time);
	TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum);

	/* Grab the unit-instance variables */
	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->mpr_debug);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msix",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msi",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_chains);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_io_pages",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages);

	bzero(sc->exclude_ids, sizeof(sc->exclude_ids));
	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids));

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time);

	snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.use_phy_num",
	    device_get_unit(sc->mpr_dev));
	TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum);
}
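/*
 * Example usage (values are illustrative only): these tunables can be set
 * from loader.conf(5), globally or per unit, e.g.:
 *
 *	hw.mpr.debug_level="3"
 *	dev.mpr.0.max_chains="4096"
 *
 * The per-unit setting wins because it is fetched after the global one.
 */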
static void
mpr_setup_sysctl(struct mpr_softc *sc)
{
	struct sysctl_ctx_list *sysctl_ctx = NULL;
	struct sysctl_oid *sysctl_tree = NULL;
	char tmpstr[80], tmpstr2[80];

	/*
	 * Setup the sysctl variable so the user can change the debug level
	 * on the fly.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "MPR controller %d",
	    device_get_unit(sc->mpr_dev));
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev));

	sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev);
	if (sysctl_ctx != NULL)
		sysctl_tree = device_get_sysctl_tree(sc->mpr_dev);

	if (sysctl_tree == NULL) {
		sysctl_ctx_init(&sc->sysctl_ctx);
		sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
		    SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2,
		    CTLFLAG_RD, 0, tmpstr);
		if (sc->sysctl_tree == NULL)
			return;
		sysctl_ctx = &sc->sysctl_ctx;
		sysctl_tree = sc->sysctl_tree;
	}

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "debug_level", CTLFLAG_RW, &sc->mpr_debug, 0,
	    "mpr debug level");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0,
	    "Disable the use of MSI-X interrupts");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "disable_msi", CTLFLAG_RD, &sc->disable_msi, 0,
	    "Disable the use of MSI interrupts");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "firmware_version", CTLFLAG_RW, sc->fw_version,
	    strlen(sc->fw_version), "firmware version");

	SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "driver_version", CTLFLAG_RW, MPR_DRIVER_VERSION,
	    strlen(MPR_DRIVER_VERSION), "driver version");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_active", CTLFLAG_RD,
	    &sc->io_cmds_active, 0, "number of currently active commands");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "io_cmds_highwater", CTLFLAG_RD,
	    &sc->io_cmds_highwater, 0, "maximum active commands seen");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "chain_free", CTLFLAG_RD,
	    &sc->chain_free, 0, "number of free chain elements");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "chain_free_lowwater", CTLFLAG_RD,
	    &sc->chain_free_lowwater, 0, "lowest number of free chain elements");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "max_chains", CTLFLAG_RD,
	    &sc->max_chains, 0, "maximum chain frames that will be allocated");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "max_io_pages", CTLFLAG_RD,
	    &sc->max_io_pages, 0, "maximum pages to allow per I/O (if <1 use "
	    "IOCFacts)");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0,
	    "enable SSU to SATA SSD/HDD at shutdown");

	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "chain_alloc_fail", CTLFLAG_RD,
	    &sc->chain_alloc_fail, "chain allocation failures");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "spinup_wait_time", CTLFLAG_RD,
	    &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for "
	    "spinup after SATA ID error");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0,
	    "Use the phy number for enumeration");

	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_pages_free", CTLFLAG_RD,
	    &sc->prp_pages_free, 0, "number of free PRP pages");
	SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_pages_free_lowwater", CTLFLAG_RD,
	    &sc->prp_pages_free_lowwater, 0, "lowest number of free PRP pages");

	SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree),
	    OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD,
	    &sc->prp_page_alloc_fail, "PRP page allocation failures");
}

int
mpr_attach(struct mpr_softc *sc)
{
	int error;

	MPR_FUNCTRACE(sc);

	mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF);
	callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0);
	callout_init_mtx(&sc->device_check_callout, &sc->mpr_mtx, 0);
	TAILQ_INIT(&sc->event_list);
	timevalclear(&sc->lastfail);

	if ((error = mpr_transition_ready(sc)) != 0) {
		mpr_printf(sc, "%s failed to transition ready\n", __func__);
		return (error);
	}

	sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR,
	    M_ZERO|M_NOWAIT);
	if (!sc->facts) {
		device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n",
		    __func__, __LINE__);
		return (ENOMEM);
	}

	/*
	 * Get IOC Facts and allocate all structures based on this information.
	 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC
	 * Facts.  If relevant values have changed in IOC Facts, this function
	 * will free all of the memory based on IOC Facts and reallocate that
	 * memory.  If this fails, any allocated memory should already be freed.
	 */
	if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) {
		mpr_dprint(sc, MPR_FAULT, "%s IOC Facts based allocation "
		    "failed with error %d\n", __func__, error);
		return (error);
	}

	/* Start the periodic watchdog check on the IOC Doorbell */
	mpr_periodic(sc);

	/*
	 * The portenable will kick off discovery events that will drive the
	 * rest of the initialization process.  The CAM/SAS module will
	 * hold up the boot sequence until discovery is complete.
	 */
	sc->mpr_ich.ich_func = mpr_startup;
	sc->mpr_ich.ich_arg = sc;
	if (config_intrhook_establish(&sc->mpr_ich) != 0) {
		mpr_dprint(sc, MPR_ERROR, "Cannot establish MPR config hook\n");
		error = EINVAL;
	}

	/*
	 * Allow IR to shutdown gracefully when shutdown occurs.
	 */
	sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
	    mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT);

	if (sc->shutdown_eh == NULL)
		mpr_dprint(sc, MPR_ERROR, "shutdown event registration "
		    "failed\n");

	mpr_setup_sysctl(sc);

	sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE;

	return (error);
}

/* Run through any late-start handlers. */
static void
mpr_startup(void *arg)
{
	struct mpr_softc *sc;

	sc = (struct mpr_softc *)arg;

	mpr_lock(sc);
	mpr_unmask_intr(sc);

	/* initialize device mapping tables */
	mpr_base_static_config_pages(sc);
	mpr_mapping_initialize(sc);
	mprsas_startup(sc);
	mpr_unlock(sc);
}

/* Periodic watchdog.  Is called with the driver lock already held. */

/* Periodic watchdog.  Called with the driver lock already held. */
static void
mpr_periodic(void *arg)
{
	struct mpr_softc *sc;
	uint32_t db;

	sc = (struct mpr_softc *)arg;
	if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN)
		return;

	db = mpr_regread(sc, MPI2_DOORBELL_OFFSET);
	if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) ==
		    IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) {
			panic("TEMPERATURE FAULT: STOPPING.");
		}
		mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db);
		mpr_reinit(sc);
	}

	callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc);
}

static void
mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data,
    MPI2_EVENT_NOTIFICATION_REPLY *event)
{
	MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry;

	MPR_DPRINT_EVENT(sc, generic, event);

	switch (event->Event) {
	case MPI2_EVENT_LOG_DATA:
		mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n");
		if (sc->mpr_debug & MPR_EVENT)
			hexdump(event->EventData, event->EventDataLength, NULL,
			    0);
		break;
	case MPI2_EVENT_LOG_ENTRY_ADDED:
		entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData;
		mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event "
		    "0x%x Sequence %d:\n", entry->LogEntryQualifier,
		    entry->LogSequence);
		break;
	default:
		break;
	}
	return;
}

static int
mpr_attach_log(struct mpr_softc *sc)
{
	uint8_t events[16];

	bzero(events, 16);
	setbit(events, MPI2_EVENT_LOG_DATA);
	setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED);

	mpr_register_events(sc, events, mpr_log_evt_handler, NULL,
	    &sc->mpr_log_eh);

	return (0);
}

static int
mpr_detach_log(struct mpr_softc *sc)
{

	if (sc->mpr_log_eh != NULL)
		mpr_deregister_events(sc, sc->mpr_log_eh);
	return (0);
}

/*
 * Free all of the driver resources and detach submodules.  Should be called
 * without the lock held.
 */
int
mpr_free(struct mpr_softc *sc)
{
	int error;

	/* Turn off the watchdog */
	mpr_lock(sc);
	sc->mpr_flags |= MPR_FLAGS_SHUTDOWN;
	mpr_unlock(sc);
	/* Lock must not be held for this */
	callout_drain(&sc->periodic);
	callout_drain(&sc->device_check_callout);

	if (((error = mpr_detach_log(sc)) != 0) ||
	    ((error = mpr_detach_sas(sc)) != 0))
		return (error);

	mpr_detach_user(sc);

	/* Put the IOC back in the READY state. */
	mpr_lock(sc);
	if ((error = mpr_transition_ready(sc)) != 0) {
		mpr_unlock(sc);
		return (error);
	}
	mpr_unlock(sc);

	if (sc->facts != NULL)
		free(sc->facts, M_MPR);

	/*
	 * Free all buffers that are based on IOC Facts.  A Diag Reset may
	 * need to free these buffers too.
	 */
1857 */ 1858 mpr_iocfacts_free(sc); 1859 1860 if (sc->sysctl_tree != NULL) 1861 sysctl_ctx_free(&sc->sysctl_ctx); 1862 1863 /* Deregister the shutdown function */ 1864 if (sc->shutdown_eh != NULL) 1865 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); 1866 1867 mtx_destroy(&sc->mpr_mtx); 1868 1869 return (0); 1870 } 1871 1872 static __inline void 1873 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm) 1874 { 1875 MPR_FUNCTRACE(sc); 1876 1877 if (cm == NULL) { 1878 mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n"); 1879 return; 1880 } 1881 1882 if (cm->cm_flags & MPR_CM_FLAGS_POLLED) 1883 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; 1884 1885 if (cm->cm_complete != NULL) { 1886 mpr_dprint(sc, MPR_TRACE, 1887 "%s cm %p calling cm_complete %p data %p reply %p\n", 1888 __func__, cm, cm->cm_complete, cm->cm_complete_data, 1889 cm->cm_reply); 1890 cm->cm_complete(sc, cm); 1891 } 1892 1893 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { 1894 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm); 1895 wakeup(cm); 1896 } 1897 1898 if (sc->io_cmds_active != 0) { 1899 sc->io_cmds_active--; 1900 } else { 1901 mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is " 1902 "out of sync - resynching to 0\n"); 1903 } 1904 } 1905 1906 static void 1907 mpr_sas_log_info(struct mpr_softc *sc , u32 log_info) 1908 { 1909 union loginfo_type { 1910 u32 loginfo; 1911 struct { 1912 u32 subcode:16; 1913 u32 code:8; 1914 u32 originator:4; 1915 u32 bus_type:4; 1916 } dw; 1917 }; 1918 union loginfo_type sas_loginfo; 1919 char *originator_str = NULL; 1920 1921 sas_loginfo.loginfo = log_info; 1922 if (sas_loginfo.dw.bus_type != 3 /*SAS*/) 1923 return; 1924 1925 /* each nexus loss loginfo */ 1926 if (log_info == 0x31170000) 1927 return; 1928 1929 /* eat the loginfos associated with task aborts */ 1930 if ((log_info == 30050000) || (log_info == 0x31140000) || 1931 (log_info == 0x31130000)) 1932 return; 1933 1934 switch (sas_loginfo.dw.originator) { 1935 case 0: 1936 originator_str = "IOP"; 1937 break; 1938 case 1: 1939 originator_str = "PL"; 1940 break; 1941 case 2: 1942 originator_str = "IR"; 1943 break; 1944 } 1945 1946 mpr_dprint(sc, MPR_LOG, "log_info(0x%08x): originator(%s), " 1947 "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str, 1948 sas_loginfo.dw.code, sas_loginfo.dw.subcode); 1949 } 1950 1951 static void 1952 mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply) 1953 { 1954 MPI2DefaultReply_t *mpi_reply; 1955 u16 sc_status; 1956 1957 mpi_reply = (MPI2DefaultReply_t*)reply; 1958 sc_status = le16toh(mpi_reply->IOCStatus); 1959 if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) 1960 mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo)); 1961 } 1962 1963 void 1964 mpr_intr(void *data) 1965 { 1966 struct mpr_softc *sc; 1967 uint32_t status; 1968 1969 sc = (struct mpr_softc *)data; 1970 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 1971 1972 /* 1973 * Check interrupt status register to flush the bus. This is 1974 * needed for both INTx interrupts and driver-driven polling 1975 */ 1976 status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); 1977 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0) 1978 return; 1979 1980 mpr_lock(sc); 1981 mpr_intr_locked(data); 1982 mpr_unlock(sc); 1983 return; 1984 } 1985 1986 /* 1987 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the 1988 * chip. Hopefully this theory is correct. 
1989 */ 1990 void 1991 mpr_intr_msi(void *data) 1992 { 1993 struct mpr_softc *sc; 1994 1995 sc = (struct mpr_softc *)data; 1996 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 1997 mpr_lock(sc); 1998 mpr_intr_locked(data); 1999 mpr_unlock(sc); 2000 return; 2001 } 2002 2003 /* 2004 * The locking is overly broad and simplistic, but easy to deal with for now. 2005 */ 2006 void 2007 mpr_intr_locked(void *data) 2008 { 2009 MPI2_REPLY_DESCRIPTORS_UNION *desc; 2010 struct mpr_softc *sc; 2011 struct mpr_command *cm = NULL; 2012 uint8_t flags; 2013 u_int pq; 2014 MPI2_DIAG_RELEASE_REPLY *rel_rep; 2015 mpr_fw_diagnostic_buffer_t *pBuffer; 2016 2017 sc = (struct mpr_softc *)data; 2018 2019 pq = sc->replypostindex; 2020 mpr_dprint(sc, MPR_TRACE, 2021 "%s sc %p starting with replypostindex %u\n", 2022 __func__, sc, sc->replypostindex); 2023 2024 for ( ;; ) { 2025 cm = NULL; 2026 desc = &sc->post_queue[sc->replypostindex]; 2027 flags = desc->Default.ReplyFlags & 2028 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 2029 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) || 2030 (le32toh(desc->Words.High) == 0xffffffff)) 2031 break; 2032 2033 /* increment the replypostindex now, so that event handlers 2034 * and cm completion handlers which decide to do a diag 2035 * reset can zero it without it getting incremented again 2036 * afterwards, and we break out of this loop on the next 2037 * iteration since the reply post queue has been cleared to 2038 * 0xFF and all descriptors look unused (which they are). 2039 */ 2040 if (++sc->replypostindex >= sc->pqdepth) 2041 sc->replypostindex = 0; 2042 2043 switch (flags) { 2044 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS: 2045 case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS: 2046 case MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS: 2047 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)]; 2048 cm->cm_reply = NULL; 2049 break; 2050 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY: 2051 { 2052 uint32_t baddr; 2053 uint8_t *reply; 2054 2055 /* 2056 * Re-compose the reply address from the address 2057 * sent back from the chip. The ReplyFrameAddress 2058 * is the lower 32 bits of the physical address of 2059 * particular reply frame. Convert that address to 2060 * host format, and then use that to provide the 2061 * offset against the virtual address base 2062 * (sc->reply_frames). 2063 */ 2064 baddr = le32toh(desc->AddressReply.ReplyFrameAddress); 2065 reply = sc->reply_frames + 2066 (baddr - ((uint32_t)sc->reply_busaddr)); 2067 /* 2068 * Make sure the reply we got back is in a valid 2069 * range. If not, go ahead and panic here, since 2070 * we'll probably panic as soon as we deference the 2071 * reply pointer anyway. 2072 */ 2073 if ((reply < sc->reply_frames) 2074 || (reply > (sc->reply_frames + 2075 (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) { 2076 printf("%s: WARNING: reply %p out of range!\n", 2077 __func__, reply); 2078 printf("%s: reply_frames %p, fqdepth %d, " 2079 "frame size %d\n", __func__, 2080 sc->reply_frames, sc->fqdepth, 2081 sc->facts->ReplyFrameSize * 4); 2082 printf("%s: baddr %#x,\n", __func__, baddr); 2083 /* LSI-TODO. See Linux Code for Graceful exit */ 2084 panic("Reply address out of range"); 2085 } 2086 if (le16toh(desc->AddressReply.SMID) == 0) { 2087 if (((MPI2_DEFAULT_REPLY *)reply)->Function == 2088 MPI2_FUNCTION_DIAG_BUFFER_POST) { 2089 /* 2090 * If SMID is 0 for Diag Buffer Post, 2091 * this implies that the reply is due to 2092 * a release function with a status that 2093 * the buffer has been released. 
					 * Set the buffer flags accordingly.
					 */
					rel_rep =
					    (MPI2_DIAG_RELEASE_REPLY *)reply;
					if ((le16toh(rel_rep->IOCStatus) &
					    MPI2_IOCSTATUS_MASK) ==
					    MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED)
					{
						pBuffer =
						    &sc->fw_diag_buffer_list[
						    rel_rep->BufferType];
						pBuffer->valid_data = TRUE;
						pBuffer->owned_by_firmware =
						    FALSE;
						pBuffer->immediate = FALSE;
					}
				} else
					mpr_dispatch_event(sc, baddr,
					    (MPI2_EVENT_NOTIFICATION_REPLY *)
					    reply);
			} else {
				cm = &sc->commands[
				    le16toh(desc->AddressReply.SMID)];
				cm->cm_reply = reply;
				cm->cm_reply_data =
				    le32toh(desc->AddressReply.
				    ReplyFrameAddress);
			}
			break;
		}
		case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS:
		case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER:
		case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS:
		default:
			/* Unhandled */
			mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n",
			    desc->Default.ReplyFlags);
			cm = NULL;
			break;
		}

		if (cm != NULL) {
			/* Print error reply frame */
			if (cm->cm_reply)
				mpr_display_reply_info(sc, cm->cm_reply);
			mpr_complete_command(sc, cm);
		}

		desc->Words.Low = 0xffffffff;
		desc->Words.High = 0xffffffff;
	}

	if (pq != sc->replypostindex) {
		mpr_dprint(sc, MPR_TRACE,
		    "%s sc %p writing postindex %d\n",
		    __func__, sc, sc->replypostindex);
		mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET,
		    sc->replypostindex);
	}

	return;
}

static void
mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
    MPI2_EVENT_NOTIFICATION_REPLY *reply)
{
	struct mpr_event_handle *eh;
	int event, handled = 0;

	event = le16toh(reply->Event);
	TAILQ_FOREACH(eh, &sc->event_list, eh_list) {
		if (isset(eh->mask, event)) {
			eh->callback(sc, data, reply);
			handled++;
		}
	}

	if (handled == 0)
		mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n", event);

	/*
	 * This is the only place that the event/reply should be freed.
	 * Anything wanting to hold onto the event data should have
	 * already copied it into their own storage.
	 */
	mpr_free_reply(sc, data);
}

static void
mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm)
{
	mpr_dprint(sc, MPR_TRACE, "%s\n", __func__);

	if (cm->cm_reply)
		MPR_DPRINT_EVENT(sc, generic,
		    (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply);

	mpr_free_command(sc, cm);

	/* next, send a port enable */
	mprsas_startup(sc);
}

/*
 * For both register_events and update_events, the caller supplies a bitmap
 * of events that it _wants_.  These functions then turn that into a bitmask
 * suitable for the controller.
2203 */ 2204 int 2205 mpr_register_events(struct mpr_softc *sc, uint8_t *mask, 2206 mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle) 2207 { 2208 struct mpr_event_handle *eh; 2209 int error = 0; 2210 2211 eh = malloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO); 2212 if (!eh) { 2213 device_printf(sc->mpr_dev, "Cannot allocate memory %s %d\n", 2214 __func__, __LINE__); 2215 return (ENOMEM); 2216 } 2217 eh->callback = cb; 2218 eh->data = data; 2219 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); 2220 if (mask != NULL) 2221 error = mpr_update_events(sc, eh, mask); 2222 *handle = eh; 2223 2224 return (error); 2225 } 2226 2227 int 2228 mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle, 2229 uint8_t *mask) 2230 { 2231 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2232 MPI2_EVENT_NOTIFICATION_REPLY *reply; 2233 struct mpr_command *cm; 2234 struct mpr_event_handle *eh; 2235 int error, i; 2236 2237 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2238 2239 if ((mask != NULL) && (handle != NULL)) 2240 bcopy(mask, &handle->mask[0], 16); 2241 memset(sc->event_mask, 0xff, 16); 2242 2243 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2244 for (i = 0; i < 16; i++) 2245 sc->event_mask[i] &= ~eh->mask[i]; 2246 } 2247 2248 if ((cm = mpr_alloc_command(sc)) == NULL) 2249 return (EBUSY); 2250 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2251 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2252 evtreq->MsgFlags = 0; 2253 evtreq->SASBroadcastPrimitiveMasks = 0; 2254 #ifdef MPR_DEBUG_ALL_EVENTS 2255 { 2256 u_char fullmask[16]; 2257 memset(fullmask, 0x00, 16); 2258 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2259 } 2260 #else 2261 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2262 #endif 2263 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2264 cm->cm_data = NULL; 2265 2266 error = mpr_request_polled(sc, cm); 2267 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; 2268 if ((reply == NULL) || 2269 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 2270 error = ENXIO; 2271 2272 if (reply) 2273 MPR_DPRINT_EVENT(sc, generic, reply); 2274 2275 mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error); 2276 2277 mpr_free_command(sc, cm); 2278 return (error); 2279 } 2280 2281 static int 2282 mpr_reregister_events(struct mpr_softc *sc) 2283 { 2284 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2285 struct mpr_command *cm; 2286 struct mpr_event_handle *eh; 2287 int error, i; 2288 2289 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2290 2291 /* first, reregister events */ 2292 2293 memset(sc->event_mask, 0xff, 16); 2294 2295 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2296 for (i = 0; i < 16; i++) 2297 sc->event_mask[i] &= ~eh->mask[i]; 2298 } 2299 2300 if ((cm = mpr_alloc_command(sc)) == NULL) 2301 return (EBUSY); 2302 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2303 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2304 evtreq->MsgFlags = 0; 2305 evtreq->SASBroadcastPrimitiveMasks = 0; 2306 #ifdef MPR_DEBUG_ALL_EVENTS 2307 { 2308 u_char fullmask[16]; 2309 memset(fullmask, 0x00, 16); 2310 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2311 } 2312 #else 2313 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2314 #endif 2315 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2316 cm->cm_data = NULL; 2317 cm->cm_complete = mpr_reregister_events_complete; 2318 2319 error = mpr_map_command(sc, cm); 2320 2321 mpr_dprint(sc, MPR_TRACE, "%s 

int
mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle)
{

	TAILQ_REMOVE(&sc->event_list, handle, eh_list);
	free(handle, M_MPR);
	return (mpr_update_events(sc, NULL, NULL));
}

/**
 * mpr_build_nvme_prp - This function is called for NVMe end devices to build
 * a native SGL (NVMe PRP).  The native SGL is built starting in the first PRP
 * entry of the NVMe message (PRP1).  If the data buffer is small enough to be
 * described entirely using PRP1, then PRP2 is not used.  If needed, PRP2 is
 * used to describe a larger data buffer.  If the data buffer is too large to
 * describe using the two PRP entries inside the NVMe message, then PRP1
 * describes the first data memory segment, and PRP2 contains a pointer to a
 * PRP list located elsewhere in memory to describe the remaining data memory
 * segments.  The PRP list will be contiguous.
 *
 * The native SGL for NVMe devices is a Physical Region Page (PRP).  A PRP
 * consists of a list of PRP entries to describe a number of noncontiguous
 * physical memory segments as a single memory buffer, just as a SGL does.
 * Note, however, that this function is only used by the IOCTL call, so the
 * memory given will be guaranteed to be contiguous.  There is no need to
 * translate non-contiguous SGL into a PRP in this case.  All PRPs will
 * describe contiguous space that is one page size each.
 *
 * Each NVMe message contains two PRP entries.  The first (PRP1) either
 * contains a PRP list pointer or a PRP element, depending upon the command.
 * PRP2 contains the second PRP element if the memory being described fits
 * within 2 PRP entries, or a PRP list pointer if the PRP spans more than two
 * entries.
 *
 * A PRP list pointer contains the address of a PRP list, structured as a
 * linear array of PRP entries.  Each PRP entry in this list describes a
 * segment of physical memory.
 *
 * Each 64-bit PRP entry comprises an address and an offset field.  The address
 * always points to the beginning of a PAGE_SIZE physical memory page, and the
 * offset describes where within that page the memory segment begins.  Only the
 * first element in a PRP list may contain a non-zero offset, implying that all
 * memory segments following the first begin at the start of a PAGE_SIZE page.
 *
 * Each PRP element normally describes a chunk of PAGE_SIZE physical memory,
 * with exceptions for the first and last elements in the list.  If the memory
 * being described by the list begins at a non-zero offset within the first
 * page, then the first PRP element will contain a non-zero offset indicating
 * where the region begins within the page.  The last memory segment may end
 * before the end of the PAGE_SIZE segment, depending upon the overall size of
 * the memory being described by the PRP list.
 *
 * Since PRP entries lack any indication of size, the overall data buffer
 * length is used to determine where the end of the data memory buffer is
 * located, and how many PRP entries are required to describe it.
 *
 * Returns nothing.
 */
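
/*
 * Editorial worked example (illustrative only, assuming a 4KiB PAGE_SIZE):
 * a 12KiB buffer starting at physical address 0x10000200 covers four pages,
 * so it needs four PRP entries and therefore a PRP list:
 *
 *	PRP1 = 0x10000200	(first segment, non-zero page offset 0x200)
 *	PRP2 = physical address of the PRP list
 *	list = { 0x10001000, 0x10002000, 0x10003000 }
 *
 * The segment sizes (0xE00 + 0x1000 + 0x1000 + 0x200 = 0x3000 bytes) are
 * implied by the total transfer length, since PRP entries carry no length
 * field.
 */
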
2382 */ 2383 void 2384 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm, 2385 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, void *data, 2386 uint32_t data_in_sz, uint32_t data_out_sz) 2387 { 2388 int prp_size = PRP_ENTRY_SIZE; 2389 uint64_t *prp_entry, *prp1_entry, *prp2_entry; 2390 uint64_t *prp_entry_phys, *prp_page, *prp_page_phys; 2391 uint32_t offset, entry_len, page_mask_result, page_mask; 2392 bus_addr_t paddr; 2393 size_t length; 2394 struct mpr_prp_page *prp_page_info = NULL; 2395 2396 /* 2397 * Not all commands require a data transfer. If no data, just return 2398 * without constructing any PRP. 2399 */ 2400 if (!data_in_sz && !data_out_sz) 2401 return; 2402 2403 /* 2404 * Set pointers to PRP1 and PRP2, which are in the NVMe command. PRP1 is 2405 * located at a 24 byte offset from the start of the NVMe command. Then 2406 * set the current PRP entry pointer to PRP1. 2407 */ 2408 prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + 2409 NVME_CMD_PRP1_OFFSET); 2410 prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + 2411 NVME_CMD_PRP2_OFFSET); 2412 prp_entry = prp1_entry; 2413 2414 /* 2415 * For the PRP entries, use the specially allocated buffer of 2416 * contiguous memory. PRP Page allocation failures should not happen 2417 * because there should be enough PRP page buffers to account for the 2418 * possible NVMe QDepth. 2419 */ 2420 prp_page_info = mpr_alloc_prp_page(sc); 2421 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " 2422 "used for building a native NVMe SGL.\n", __func__)); 2423 prp_page = (uint64_t *)prp_page_info->prp_page; 2424 prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; 2425 2426 /* 2427 * Insert the allocated PRP page into the command's PRP page list. This 2428 * will be freed when the command is freed. 2429 */ 2430 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); 2431 2432 /* 2433 * Check if we are within 1 entry of a page boundary we don't want our 2434 * first entry to be a PRP List entry. 2435 */ 2436 page_mask = PAGE_SIZE - 1; 2437 page_mask_result = (uintptr_t)((uint8_t *)prp_page + prp_size) & 2438 page_mask; 2439 if (!page_mask_result) 2440 { 2441 /* Bump up to next page boundary. */ 2442 prp_page = (uint64_t *)((uint8_t *)prp_page + prp_size); 2443 prp_page_phys = (uint64_t *)((uint8_t *)prp_page_phys + 2444 prp_size); 2445 } 2446 2447 /* 2448 * Set PRP physical pointer, which initially points to the current PRP 2449 * DMA memory page. 2450 */ 2451 prp_entry_phys = prp_page_phys; 2452 2453 /* Get physical address and length of the data buffer. */ 2454 paddr = (bus_addr_t)data; 2455 if (data_in_sz) 2456 length = data_in_sz; 2457 else 2458 length = data_out_sz; 2459 2460 /* Loop while the length is not zero. */ 2461 while (length) 2462 { 2463 /* 2464 * Check if we need to put a list pointer here if we are at page 2465 * boundary - prp_size (8 bytes). 2466 */ 2467 page_mask_result = (uintptr_t)((uint8_t *)prp_entry_phys + 2468 prp_size) & page_mask; 2469 if (!page_mask_result) 2470 { 2471 /* 2472 * This is the last entry in a PRP List, so we need to 2473 * put a PRP list pointer here. What this does is: 2474 * - bump the current memory pointer to the next 2475 * address, which will be the next full page. 2476 * - set the PRP Entry to point to that page. This is 2477 * now the PRP List pointer. 2478 * - bump the PRP Entry pointer the start of the next 2479 * page. 
Since all of this PRP memory is contiguous, 2480 * no need to get a new page - it's just the next 2481 * address. 2482 */ 2483 prp_entry_phys++; 2484 *prp_entry = 2485 htole64((uint64_t)(uintptr_t)prp_entry_phys); 2486 prp_entry++; 2487 } 2488 2489 /* Need to handle if entry will be part of a page. */ 2490 offset = (uint32_t)paddr & page_mask; 2491 entry_len = PAGE_SIZE - offset; 2492 2493 if (prp_entry == prp1_entry) 2494 { 2495 /* 2496 * Must fill in the first PRP pointer (PRP1) before 2497 * moving on. 2498 */ 2499 *prp1_entry = htole64((uint64_t)paddr); 2500 2501 /* 2502 * Now point to the second PRP entry within the 2503 * command (PRP2). 2504 */ 2505 prp_entry = prp2_entry; 2506 } 2507 else if (prp_entry == prp2_entry) 2508 { 2509 /* 2510 * Should the PRP2 entry be a PRP List pointer or just a 2511 * regular PRP pointer? If there is more than one more 2512 * page of data, must use a PRP List pointer. 2513 */ 2514 if (length > PAGE_SIZE) 2515 { 2516 /* 2517 * PRP2 will contain a PRP List pointer because 2518 * more PRP's are needed with this command. The 2519 * list will start at the beginning of the 2520 * contiguous buffer. 2521 */ 2522 *prp2_entry = 2523 htole64( 2524 (uint64_t)(uintptr_t)prp_entry_phys); 2525 2526 /* 2527 * The next PRP Entry will be the start of the 2528 * first PRP List. 2529 */ 2530 prp_entry = prp_page; 2531 } 2532 else 2533 { 2534 /* 2535 * After this, the PRP Entries are complete. 2536 * This command uses 2 PRP's and no PRP list. 2537 */ 2538 *prp2_entry = htole64((uint64_t)paddr); 2539 } 2540 } 2541 else 2542 { 2543 /* 2544 * Put entry in list and bump the addresses. 2545 * 2546 * After PRP1 and PRP2 are filled in, this will fill in 2547 * all remaining PRP entries in a PRP List, one per each 2548 * time through the loop. 2549 */ 2550 *prp_entry = htole64((uint64_t)paddr); 2551 prp_entry++; 2552 prp_entry_phys++; 2553 } 2554 2555 /* 2556 * Bump the phys address of the command's data buffer by the 2557 * entry_len. 2558 */ 2559 paddr += entry_len; 2560 2561 /* Decrement length accounting for last partial page. */ 2562 if (entry_len > length) 2563 length = 0; 2564 else 2565 length -= entry_len; 2566 } 2567 } 2568 2569 /* 2570 * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to 2571 * determine if the driver needs to build a native SGL. If so, that native SGL 2572 * is built in the contiguous buffers allocated especially for PCIe SGL 2573 * creation. If the driver will not build a native SGL, return TRUE and a 2574 * normal IEEE SGL will be built. Currently this routine supports NVMe devices 2575 * only. 2576 * 2577 * Returns FALSE (0) if native SGL was built, TRUE (1) if no SGL was built. 
2578 */ 2579 static int 2580 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm, 2581 bus_dma_segment_t *segs, int segs_left) 2582 { 2583 uint32_t i, sge_dwords, length, offset, entry_len; 2584 uint32_t num_entries, buff_len = 0, sges_in_segment; 2585 uint32_t page_mask, page_mask_result, *curr_buff; 2586 uint32_t *ptr_sgl, *ptr_first_sgl, first_page_offset; 2587 uint32_t first_page_data_size, end_residual; 2588 uint64_t *msg_phys; 2589 bus_addr_t paddr; 2590 int build_native_sgl = 0, first_prp_entry; 2591 int prp_size = PRP_ENTRY_SIZE; 2592 Mpi25IeeeSgeChain64_t *main_chain_element = NULL; 2593 struct mpr_prp_page *prp_page_info = NULL; 2594 2595 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2596 2597 /* 2598 * Add up the sizes of each segment length to get the total transfer 2599 * size, which will be checked against the Maximum Data Transfer Size. 2600 * If the data transfer length exceeds the MDTS for this device, just 2601 * return 1 so a normal IEEE SGL will be built. F/W will break the I/O 2602 * up into multiple I/O's. [nvme_mdts = 0 means unlimited] 2603 */ 2604 for (i = 0; i < segs_left; i++) 2605 buff_len += htole32(segs[i].ds_len); 2606 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS)) 2607 return 1; 2608 2609 /* Create page_mask (to get offset within page) */ 2610 page_mask = PAGE_SIZE - 1; 2611 2612 /* 2613 * Check if the number of elements exceeds the max number that can be 2614 * put in the main message frame (H/W can only translate an SGL that 2615 * is contained entirely in the main message frame). 2616 */ 2617 sges_in_segment = (sc->facts->IOCRequestFrameSize - 2618 offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof(MPI25_SGE_IO_UNION); 2619 if (segs_left > sges_in_segment) 2620 build_native_sgl = 1; 2621 else 2622 { 2623 /* 2624 * NVMe uses one PRP for each physical page (or part of physical 2625 * page). 2626 * if 4 pages or less then IEEE is OK 2627 * if > 5 pages then we need to build a native SGL 2628 * if > 4 and <= 5 pages, then check the physical address of 2629 * the first SG entry, then if this first size in the page 2630 * is >= the residual beyond 4 pages then use IEEE, 2631 * otherwise use native SGL 2632 */ 2633 if (buff_len > (PAGE_SIZE * 5)) 2634 build_native_sgl = 1; 2635 else if ((buff_len > (PAGE_SIZE * 4)) && 2636 (buff_len <= (PAGE_SIZE * 5)) ) 2637 { 2638 msg_phys = (uint64_t *)segs[0].ds_addr; 2639 first_page_offset = 2640 ((uint32_t)(uint64_t)(uintptr_t)msg_phys & 2641 page_mask); 2642 first_page_data_size = PAGE_SIZE - first_page_offset; 2643 end_residual = buff_len % PAGE_SIZE; 2644 2645 /* 2646 * If offset into first page pushes the end of the data 2647 * beyond end of the 5th page, we need the extra PRP 2648 * list. 2649 */ 2650 if (first_page_data_size < end_residual) 2651 build_native_sgl = 1; 2652 2653 /* 2654 * Check if first SG entry size is < residual beyond 4 2655 * pages. 2656 */ 2657 if (htole32(segs[0].ds_len) < 2658 (buff_len - (PAGE_SIZE * 4))) 2659 build_native_sgl = 1; 2660 } 2661 } 2662 2663 /* check if native SGL is needed */ 2664 if (!build_native_sgl) 2665 return 1; 2666 2667 /* 2668 * Native SGL is needed. 2669 * Put a chain element in main message frame that points to the first 2670 * chain buffer. 2671 * 2672 * NOTE: The ChainOffset field must be 0 when using a chain pointer to 2673 * a native SGL. 
2674 */ 2675 2676 /* Set main message chain element pointer */ 2677 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge; 2678 2679 /* 2680 * For NVMe the chain element needs to be the 2nd SGL entry in the main 2681 * message. 2682 */ 2683 main_chain_element = (Mpi25IeeeSgeChain64_t *) 2684 ((uint8_t *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64)); 2685 2686 /* 2687 * For the PRP entries, use the specially allocated buffer of 2688 * contiguous memory. PRP Page allocation failures should not happen 2689 * because there should be enough PRP page buffers to account for the 2690 * possible NVMe QDepth. 2691 */ 2692 prp_page_info = mpr_alloc_prp_page(sc); 2693 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " 2694 "used for building a native NVMe SGL.\n", __func__)); 2695 curr_buff = (uint32_t *)prp_page_info->prp_page; 2696 msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; 2697 2698 /* 2699 * Insert the allocated PRP page into the command's PRP page list. This 2700 * will be freed when the command is freed. 2701 */ 2702 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); 2703 2704 /* 2705 * Check if we are within 1 entry of a page boundary we don't want our 2706 * first entry to be a PRP List entry. 2707 */ 2708 page_mask_result = (uintptr_t)((uint8_t *)curr_buff + prp_size) & 2709 page_mask; 2710 if (!page_mask_result) { 2711 /* Bump up to next page boundary. */ 2712 curr_buff = (uint32_t *)((uint8_t *)curr_buff + prp_size); 2713 msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size); 2714 } 2715 2716 /* Fill in the chain element and make it an NVMe segment type. */ 2717 main_chain_element->Address.High = 2718 htole32((uint32_t)((uint64_t)(uintptr_t)msg_phys >> 32)); 2719 main_chain_element->Address.Low = 2720 htole32((uint32_t)(uintptr_t)msg_phys); 2721 main_chain_element->NextChainOffset = 0; 2722 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2723 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 2724 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; 2725 2726 /* Set SGL pointer to start of contiguous PCIe buffer. */ 2727 ptr_sgl = curr_buff; 2728 sge_dwords = 2; 2729 num_entries = 0; 2730 2731 /* 2732 * NVMe has a very convoluted PRP format. One PRP is required for each 2733 * page or partial page. We need to split up OS SG entries if they are 2734 * longer than one page or cross a page boundary. We also have to insert 2735 * a PRP list pointer entry as the last entry in each physical page of 2736 * the PRP list. 2737 * 2738 * NOTE: The first PRP "entry" is actually placed in the first SGL entry 2739 * in the main message in IEEE 64 format. The 2nd entry in the main 2740 * message is the chain element, and the rest of the PRP entries are 2741 * built in the contiguous PCIe buffer. 2742 */ 2743 first_prp_entry = 1; 2744 ptr_first_sgl = (uint32_t *)cm->cm_sge; 2745 2746 for (i = 0; i < segs_left; i++) { 2747 /* Get physical address and length of this SG entry. */ 2748 paddr = segs[i].ds_addr; 2749 length = segs[i].ds_len; 2750 2751 /* 2752 * Check whether a given SGE buffer lies on a non-PAGED 2753 * boundary if this is not the first page. If so, this is not 2754 * expected so have FW build the SGL. 2755 */ 2756 if (i) { 2757 if ((uint32_t)paddr & page_mask) { 2758 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE while " 2759 "building NVMe PRPs, low address is 0x%x\n", 2760 (uint32_t)paddr); 2761 return 1; 2762 } 2763 } 2764 2765 /* Apart from last SGE, if any other SGE boundary is not page 2766 * aligned then it means that hole exists. 
		 * A hole leads to data corruption, so fall back to IEEE SGEs.
		 */
		if (i != (segs_left - 1)) {
			if (((uint32_t)paddr + length) & page_mask) {
				mpr_dprint(sc, MPR_ERROR, "Unaligned SGE "
				    "boundary while building NVMe PRPs, low "
				    "address: 0x%x and length: %u\n",
				    (uint32_t)paddr, length);
				return 1;
			}
		}

		/* Loop while the length is not zero. */
		while (length) {
			/*
			 * Check if we need to put a list pointer here if we
			 * are at page boundary - prp_size.
			 */
			page_mask_result = (uintptr_t)((uint8_t *)ptr_sgl +
			    prp_size) & page_mask;
			if (!page_mask_result) {
				/*
				 * Need to put a PRP list pointer here.
				 */
				msg_phys = (uint64_t *)((uint8_t *)msg_phys +
				    prp_size);
				*ptr_sgl = htole32((uintptr_t)msg_phys);
				*(ptr_sgl+1) = htole32((uint64_t)(uintptr_t)
				    msg_phys >> 32);
				ptr_sgl += sge_dwords;
				num_entries++;
			}

			/* Need to handle if entry will be part of a page. */
			offset = (uint32_t)paddr & page_mask;
			entry_len = PAGE_SIZE - offset;
			if (first_prp_entry) {
				/*
				 * Put IEEE entry in first SGE in main message.
				 * (Simple element, System addr, not end of
				 * list.)
				 */
				*ptr_first_sgl = htole32((uint32_t)paddr);
				*(ptr_first_sgl + 1) =
				    htole32((uint32_t)((uint64_t)paddr >> 32));
				*(ptr_first_sgl + 2) = htole32(entry_len);
				*(ptr_first_sgl + 3) = 0;

				/* No longer the first PRP entry. */
				first_prp_entry = 0;
			} else {
				/* Put entry in list. */
				*ptr_sgl = htole32((uint32_t)paddr);
				*(ptr_sgl + 1) =
				    htole32((uint32_t)((uint64_t)paddr >> 32));

				/* Bump ptr_sgl, msg_phys, and num_entries. */
				ptr_sgl += sge_dwords;
				msg_phys = (uint64_t *)((uint8_t *)msg_phys +
				    prp_size);
				num_entries++;
			}

			/* Bump the phys address by the entry_len. */
			paddr += entry_len;

			/* Decrement length accounting for last partial page. */
			if (entry_len > length)
				length = 0;
			else
				length -= entry_len;
		}
	}

	/* Set chain element Length. */
	main_chain_element->Length = htole32(num_entries * prp_size);

	/* Return 0, indicating we built a native SGL. */
	return 0;
}

/*
 * Add a chain element as the next SGE for the specified command.
 * Reset cm_sge and cm_sgesize to indicate all the available space.  Chains
 * are only required for IEEE commands.  Therefore there is no code for
 * commands that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those
 * commands shouldn't be requesting chains).
 */
static int
mpr_add_chain(struct mpr_command *cm, int segsleft)
{
	struct mpr_softc *sc = cm->cm_sc;
	MPI2_REQUEST_HEADER *req;
	MPI25_IEEE_SGE_CHAIN64 *ieee_sgc;
	struct mpr_chain *chain;
	int sgc_size, current_segs, rem_segs, segs_per_frame;
	uint8_t next_chain_offset = 0;

	/*
	 * Fail if a command is requesting a chain for SIMPLE SGE's.  For SAS3
	 * only IEEE commands should be requesting chains.  Return some error
	 * code other than 0.
2869 */ 2870 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) { 2871 mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to " 2872 "an MPI SGL.\n"); 2873 return(ENOBUFS); 2874 } 2875 2876 sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64); 2877 if (cm->cm_sglsize < sgc_size) 2878 panic("MPR: Need SGE Error Code\n"); 2879 2880 chain = mpr_alloc_chain(cm->cm_sc); 2881 if (chain == NULL) 2882 return (ENOBUFS); 2883 2884 /* 2885 * Note: a double-linked list is used to make it easier to walk for 2886 * debugging. 2887 */ 2888 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); 2889 2890 /* 2891 * Need to know if the number of frames left is more than 1 or not. If 2892 * more than 1 frame is required, NextChainOffset will need to be set, 2893 * which will just be the last segment of the frame. 2894 */ 2895 rem_segs = 0; 2896 if (cm->cm_sglsize < (sgc_size * segsleft)) { 2897 /* 2898 * rem_segs is the number of segements remaining after the 2899 * segments that will go into the current frame. Since it is 2900 * known that at least one more frame is required, account for 2901 * the chain element. To know if more than one more frame is 2902 * required, just check if there will be a remainder after using 2903 * the current frame (with this chain) and the next frame. If 2904 * so the NextChainOffset must be the last element of the next 2905 * frame. 2906 */ 2907 current_segs = (cm->cm_sglsize / sgc_size) - 1; 2908 rem_segs = segsleft - current_segs; 2909 segs_per_frame = sc->chain_frame_size / sgc_size; 2910 if (rem_segs > segs_per_frame) { 2911 next_chain_offset = segs_per_frame - 1; 2912 } 2913 } 2914 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain; 2915 ieee_sgc->Length = next_chain_offset ? 2916 htole32((uint32_t)sc->chain_frame_size) : 2917 htole32((uint32_t)rem_segs * (uint32_t)sgc_size); 2918 ieee_sgc->NextChainOffset = next_chain_offset; 2919 ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 2920 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 2921 ieee_sgc->Address.Low = htole32(chain->chain_busaddr); 2922 ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32); 2923 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple; 2924 req = (MPI2_REQUEST_HEADER *)cm->cm_req; 2925 req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4; 2926 2927 cm->cm_sglsize = sc->chain_frame_size; 2928 return (0); 2929 } 2930 2931 /* 2932 * Add one scatter-gather element to the scatter-gather list for a command. 2933 * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the 2934 * next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a 2935 * chain, so don't consider any chain additions. 2936 */ 2937 int 2938 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len, 2939 int segsleft) 2940 { 2941 uint32_t saved_buf_len, saved_address_low, saved_address_high; 2942 u32 sge_flags; 2943 2944 /* 2945 * case 1: >=1 more segment, no room for anything (error) 2946 * case 2: 1 more segment and enough room for it 2947 */ 2948 2949 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) { 2950 mpr_dprint(cm->cm_sc, MPR_ERROR, 2951 "%s: warning: Not enough room for MPI SGL in frame.\n", 2952 __func__); 2953 return(ENOBUFS); 2954 } 2955 2956 KASSERT(segsleft == 1, 2957 ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n", 2958 segsleft)); 2959 2960 /* 2961 * There is one more segment left to add for the MPI SGL and there is 2962 * enough room in the frame to add it. 
This is the normal case because 2963 * MPI SGL's don't have chains, otherwise something is wrong. 2964 * 2965 * If this is a bi-directional request, need to account for that 2966 * here. Save the pre-filled sge values. These will be used 2967 * either for the 2nd SGL or for a single direction SGL. If 2968 * cm_out_len is non-zero, this is a bi-directional request, so 2969 * fill in the OUT SGL first, then the IN SGL, otherwise just 2970 * fill in the IN SGL. Note that at this time, when filling in 2971 * 2 SGL's for a bi-directional request, they both use the same 2972 * DMA buffer (same cm command). 2973 */ 2974 saved_buf_len = sge->FlagsLength & 0x00FFFFFF; 2975 saved_address_low = sge->Address.Low; 2976 saved_address_high = sge->Address.High; 2977 if (cm->cm_out_len) { 2978 sge->FlagsLength = cm->cm_out_len | 2979 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2980 MPI2_SGE_FLAGS_END_OF_BUFFER | 2981 MPI2_SGE_FLAGS_HOST_TO_IOC | 2982 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 2983 MPI2_SGE_FLAGS_SHIFT); 2984 cm->cm_sglsize -= len; 2985 /* Endian Safe code */ 2986 sge_flags = sge->FlagsLength; 2987 sge->FlagsLength = htole32(sge_flags); 2988 sge->Address.High = htole32(sge->Address.High); 2989 sge->Address.Low = htole32(sge->Address.Low); 2990 bcopy(sge, cm->cm_sge, len); 2991 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 2992 } 2993 sge->FlagsLength = saved_buf_len | 2994 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2995 MPI2_SGE_FLAGS_END_OF_BUFFER | 2996 MPI2_SGE_FLAGS_LAST_ELEMENT | 2997 MPI2_SGE_FLAGS_END_OF_LIST | 2998 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 2999 MPI2_SGE_FLAGS_SHIFT); 3000 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { 3001 sge->FlagsLength |= 3002 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << 3003 MPI2_SGE_FLAGS_SHIFT); 3004 } else { 3005 sge->FlagsLength |= 3006 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << 3007 MPI2_SGE_FLAGS_SHIFT); 3008 } 3009 sge->Address.Low = saved_address_low; 3010 sge->Address.High = saved_address_high; 3011 3012 cm->cm_sglsize -= len; 3013 /* Endian Safe code */ 3014 sge_flags = sge->FlagsLength; 3015 sge->FlagsLength = htole32(sge_flags); 3016 sge->Address.High = htole32(sge->Address.High); 3017 sge->Address.Low = htole32(sge->Address.Low); 3018 bcopy(sge, cm->cm_sge, len); 3019 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 3020 return (0); 3021 } 3022 3023 /* 3024 * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter- 3025 * gather list for a command. Maintain cm_sglsize and cm_sge as the 3026 * remaining size and pointer to the next SGE to fill in, respectively. 3027 */ 3028 int 3029 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft) 3030 { 3031 MPI2_IEEE_SGE_SIMPLE64 *sge = sgep; 3032 int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION); 3033 uint32_t saved_buf_len, saved_address_low, saved_address_high; 3034 uint32_t sge_length; 3035 3036 /* 3037 * case 1: No room for chain or segment (error). 3038 * case 2: Two or more segments left but only room for chain. 3039 * case 3: Last segment and room for it, so set flags. 3040 */ 3041 3042 /* 3043 * There should be room for at least one element, or there is a big 3044 * problem. 
3045 */ 3046 if (cm->cm_sglsize < ieee_sge_size) 3047 panic("MPR: Need SGE Error Code\n"); 3048 3049 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) { 3050 if ((error = mpr_add_chain(cm, segsleft)) != 0) 3051 return (error); 3052 } 3053 3054 if (segsleft == 1) { 3055 /* 3056 * If this is a bi-directional request, need to account for that 3057 * here. Save the pre-filled sge values. These will be used 3058 * either for the 2nd SGL or for a single direction SGL. If 3059 * cm_out_len is non-zero, this is a bi-directional request, so 3060 * fill in the OUT SGL first, then the IN SGL, otherwise just 3061 * fill in the IN SGL. Note that at this time, when filling in 3062 * 2 SGL's for a bi-directional request, they both use the same 3063 * DMA buffer (same cm command). 3064 */ 3065 saved_buf_len = sge->Length; 3066 saved_address_low = sge->Address.Low; 3067 saved_address_high = sge->Address.High; 3068 if (cm->cm_out_len) { 3069 sge->Length = cm->cm_out_len; 3070 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3071 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3072 cm->cm_sglsize -= ieee_sge_size; 3073 /* Endian Safe code */ 3074 sge_length = sge->Length; 3075 sge->Length = htole32(sge_length); 3076 sge->Address.High = htole32(sge->Address.High); 3077 sge->Address.Low = htole32(sge->Address.Low); 3078 bcopy(sgep, cm->cm_sge, ieee_sge_size); 3079 cm->cm_sge = 3080 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + 3081 ieee_sge_size); 3082 } 3083 sge->Length = saved_buf_len; 3084 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3085 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 3086 MPI25_IEEE_SGE_FLAGS_END_OF_LIST); 3087 sge->Address.Low = saved_address_low; 3088 sge->Address.High = saved_address_high; 3089 } 3090 3091 cm->cm_sglsize -= ieee_sge_size; 3092 /* Endian Safe code */ 3093 sge_length = sge->Length; 3094 sge->Length = htole32(sge_length); 3095 sge->Address.High = htole32(sge->Address.High); 3096 sge->Address.Low = htole32(sge->Address.Low); 3097 bcopy(sgep, cm->cm_sge, ieee_sge_size); 3098 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + 3099 ieee_sge_size); 3100 return (0); 3101 } 3102 3103 /* 3104 * Add one dma segment to the scatter-gather list for a command. 3105 */ 3106 int 3107 mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags, 3108 int segsleft) 3109 { 3110 MPI2_SGE_SIMPLE64 sge; 3111 MPI2_IEEE_SGE_SIMPLE64 ieee_sge; 3112 3113 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) { 3114 ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3115 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3116 ieee_sge.Length = len; 3117 mpr_from_u64(pa, &ieee_sge.Address); 3118 3119 return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft)); 3120 } else { 3121 /* 3122 * This driver always uses 64-bit address elements for 3123 * simplicity. 3124 */ 3125 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3126 MPI2_SGE_FLAGS_64_BIT_ADDRESSING; 3127 /* Set Endian safe macro in mpr_push_sge */ 3128 sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT); 3129 mpr_from_u64(pa, &sge.Address); 3130 3131 return (mpr_push_sge(cm, &sge, sizeof sge, segsleft)); 3132 } 3133 } 3134 3135 static void 3136 mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 3137 { 3138 struct mpr_softc *sc; 3139 struct mpr_command *cm; 3140 u_int i, dir, sflags; 3141 3142 cm = (struct mpr_command *)arg; 3143 sc = cm->cm_sc; 3144 3145 /* 3146 * In this case, just print out a warning and let the chip tell the 3147 * user they did the wrong thing. 
3148 */ 3149 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { 3150 mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d " 3151 "segments, more than the %d allowed\n", __func__, nsegs, 3152 cm->cm_max_segs); 3153 } 3154 3155 /* 3156 * Set up DMA direction flags. Bi-directional requests are also handled 3157 * here. In that case, both direction flags will be set. 3158 */ 3159 sflags = 0; 3160 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) { 3161 /* 3162 * We have to add a special case for SMP passthrough, there 3163 * is no easy way to generically handle it. The first 3164 * S/G element is used for the command (therefore the 3165 * direction bit needs to be set). The second one is used 3166 * for the reply. We'll leave it to the caller to make 3167 * sure we only have two buffers. 3168 */ 3169 /* 3170 * Even though the busdma man page says it doesn't make 3171 * sense to have both direction flags, it does in this case. 3172 * We have one s/g element being accessed in each direction. 3173 */ 3174 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD; 3175 3176 /* 3177 * Set the direction flag on the first buffer in the SMP 3178 * passthrough request. We'll clear it for the second one. 3179 */ 3180 sflags |= MPI2_SGE_FLAGS_DIRECTION | 3181 MPI2_SGE_FLAGS_END_OF_BUFFER; 3182 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) { 3183 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC; 3184 dir = BUS_DMASYNC_PREWRITE; 3185 } else 3186 dir = BUS_DMASYNC_PREREAD; 3187 3188 /* Check if a native SG list is needed for an NVMe PCIe device. */ 3189 if (cm->cm_targ && cm->cm_targ->is_nvme && 3190 mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) { 3191 /* A native SG list was built, skip to end. */ 3192 goto out; 3193 } 3194 3195 for (i = 0; i < nsegs; i++) { 3196 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) { 3197 sflags &= ~MPI2_SGE_FLAGS_DIRECTION; 3198 } 3199 error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len, 3200 sflags, nsegs - i); 3201 if (error != 0) { 3202 /* Resource shortage, roll back! */ 3203 if (ratecheck(&sc->lastfail, &mpr_chainfail_interval)) 3204 mpr_dprint(sc, MPR_INFO, "Out of chain frames, " 3205 "consider increasing hw.mpr.max_chains.\n"); 3206 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED; 3207 mpr_complete_command(sc, cm); 3208 return; 3209 } 3210 } 3211 3212 out: 3213 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); 3214 mpr_enqueue_request(sc, cm); 3215 3216 return; 3217 } 3218 3219 static void 3220 mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, 3221 int error) 3222 { 3223 mpr_data_cb(arg, segs, nsegs, error); 3224 } 3225 3226 /* 3227 * This is the routine to enqueue commands ansynchronously. 3228 * Note that the only error path here is from bus_dmamap_load(), which can 3229 * return EINPROGRESS if it is waiting for resources. Other than this, it's 3230 * assumed that if you have a command in-hand, then you have enough credits 3231 * to use it. 
3232 */ 3233 int 3234 mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm) 3235 { 3236 int error = 0; 3237 3238 if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) { 3239 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, 3240 &cm->cm_uio, mpr_data_cb2, cm, 0); 3241 } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) { 3242 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap, 3243 cm->cm_data, mpr_data_cb, cm, 0); 3244 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { 3245 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, 3246 cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0); 3247 } else { 3248 /* Add a zero-length element as needed */ 3249 if (cm->cm_sge != NULL) 3250 mpr_add_dmaseg(cm, 0, 0, 0, 1); 3251 mpr_enqueue_request(sc, cm); 3252 } 3253 3254 return (error); 3255 } 3256 3257 /* 3258 * This is the routine to enqueue commands synchronously. An error of 3259 * EINPROGRESS from mpr_map_command() is ignored since the command will 3260 * be executed and enqueued automatically. Other errors come from msleep(). 3261 */ 3262 int 3263 mpr_wait_command(struct mpr_softc *sc, struct mpr_command *cm, int timeout, 3264 int sleep_flag) 3265 { 3266 int error, rc; 3267 struct timeval cur_time, start_time; 3268 3269 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) 3270 return EBUSY; 3271 3272 cm->cm_complete = NULL; 3273 cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP + MPR_CM_FLAGS_POLLED); 3274 error = mpr_map_command(sc, cm); 3275 if ((error != 0) && (error != EINPROGRESS)) 3276 return (error); 3277 3278 // Check for context and wait for 50 mSec at a time until time has 3279 // expired or the command has finished. If msleep can't be used, need 3280 // to poll. 3281 #if __FreeBSD_version >= 1000029 3282 if (curthread->td_no_sleeping) 3283 #else //__FreeBSD_version < 1000029 3284 if (curthread->td_pflags & TDP_NOSLEEPING) 3285 #endif //__FreeBSD_version >= 1000029 3286 sleep_flag = NO_SLEEP; 3287 getmicrouptime(&start_time); 3288 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) { 3289 error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz); 3290 if (error == EWOULDBLOCK) { 3291 /* 3292 * Record the actual elapsed time in the case of a 3293 * timeout for the message below. 3294 */ 3295 getmicrouptime(&cur_time); 3296 timevalsub(&cur_time, &start_time); 3297 } 3298 } else { 3299 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { 3300 mpr_intr_locked(sc); 3301 if (sleep_flag == CAN_SLEEP) 3302 pause("mprwait", hz/20); 3303 else 3304 DELAY(50000); 3305 3306 getmicrouptime(&cur_time); 3307 timevalsub(&cur_time, &start_time); 3308 if (cur_time.tv_sec > timeout) { 3309 error = EWOULDBLOCK; 3310 break; 3311 } 3312 } 3313 } 3314 3315 if (error == EWOULDBLOCK) { 3316 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s, timeout=%d," 3317 " elapsed=%jd\n", __func__, timeout, 3318 (intmax_t)cur_time.tv_sec); 3319 rc = mpr_reinit(sc); 3320 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" : 3321 "failed"); 3322 error = ETIMEDOUT; 3323 } 3324 return (error); 3325 } 3326 3327 /* 3328 * This is the routine to enqueue a command synchonously and poll for 3329 * completion. Its use should be rare. 
3330 */ 3331 int 3332 mpr_request_polled(struct mpr_softc *sc, struct mpr_command *cm) 3333 { 3334 int error, timeout = 0, rc; 3335 struct timeval cur_time, start_time; 3336 3337 error = 0; 3338 3339 cm->cm_flags |= MPR_CM_FLAGS_POLLED; 3340 cm->cm_complete = NULL; 3341 mpr_map_command(sc, cm); 3342 3343 getmicrotime(&start_time); 3344 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { 3345 mpr_intr_locked(sc); 3346 3347 if (mtx_owned(&sc->mpr_mtx)) 3348 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, 3349 "mprpoll", hz/20); 3350 else 3351 pause("mprpoll", hz/20); 3352 3353 /* 3354 * Check for real-time timeout and fail if more than 60 seconds. 3355 */ 3356 getmicrotime(&cur_time); 3357 timeout = cur_time.tv_sec - start_time.tv_sec; 3358 if (timeout > 60) { 3359 mpr_dprint(sc, MPR_FAULT, "polling failed\n"); 3360 error = ETIMEDOUT; 3361 break; 3362 } 3363 } 3364 3365 if (error) { 3366 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__); 3367 rc = mpr_reinit(sc); 3368 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" : 3369 "failed"); 3370 } 3371 return (error); 3372 } 3373 3374 /* 3375 * The MPT driver had a verbose interface for config pages. In this driver, 3376 * reduce it to much simpler terms, similar to the Linux driver. 3377 */ 3378 int 3379 mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params) 3380 { 3381 MPI2_CONFIG_REQUEST *req; 3382 struct mpr_command *cm; 3383 int error; 3384 3385 if (sc->mpr_flags & MPR_FLAGS_BUSY) { 3386 return (EBUSY); 3387 } 3388 3389 cm = mpr_alloc_command(sc); 3390 if (cm == NULL) { 3391 return (EBUSY); 3392 } 3393 3394 req = (MPI2_CONFIG_REQUEST *)cm->cm_req; 3395 req->Function = MPI2_FUNCTION_CONFIG; 3396 req->Action = params->action; 3397 req->SGLFlags = 0; 3398 req->ChainOffset = 0; 3399 req->PageAddress = params->page_address; 3400 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { 3401 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr; 3402 3403 hdr = ¶ms->hdr.Ext; 3404 req->ExtPageType = hdr->ExtPageType; 3405 req->ExtPageLength = hdr->ExtPageLength; 3406 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; 3407 req->Header.PageLength = 0; /* Must be set to zero */ 3408 req->Header.PageNumber = hdr->PageNumber; 3409 req->Header.PageVersion = hdr->PageVersion; 3410 } else { 3411 MPI2_CONFIG_PAGE_HEADER *hdr; 3412 3413 hdr = ¶ms->hdr.Struct; 3414 req->Header.PageType = hdr->PageType; 3415 req->Header.PageNumber = hdr->PageNumber; 3416 req->Header.PageLength = hdr->PageLength; 3417 req->Header.PageVersion = hdr->PageVersion; 3418 } 3419 3420 cm->cm_data = params->buffer; 3421 cm->cm_length = params->length; 3422 if (cm->cm_data != NULL) { 3423 cm->cm_sge = &req->PageBufferSGE; 3424 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); 3425 cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; 3426 } else 3427 cm->cm_sge = NULL; 3428 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3429 3430 cm->cm_complete_data = params; 3431 if (params->callback != NULL) { 3432 cm->cm_complete = mpr_config_complete; 3433 return (mpr_map_command(sc, cm)); 3434 } else { 3435 error = mpr_wait_command(sc, cm, 0, CAN_SLEEP); 3436 if (error) { 3437 mpr_dprint(sc, MPR_FAULT, 3438 "Error %d reading config page\n", error); 3439 mpr_free_command(sc, cm); 3440 return (error); 3441 } 3442 mpr_config_complete(sc, cm); 3443 } 3444 3445 return (0); 3446 } 3447 3448 int 3449 mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params) 3450 { 3451 return (EINVAL); 3452 } 3453 3454 static 
void 3455 mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm) 3456 { 3457 MPI2_CONFIG_REPLY *reply; 3458 struct mpr_config_params *params; 3459 3460 MPR_FUNCTRACE(sc); 3461 params = cm->cm_complete_data; 3462 3463 if (cm->cm_data != NULL) { 3464 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, 3465 BUS_DMASYNC_POSTREAD); 3466 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); 3467 } 3468 3469 /* 3470 * XXX KDM need to do more error recovery? This results in the 3471 * device in question not getting probed. 3472 */ 3473 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 3474 params->status = MPI2_IOCSTATUS_BUSY; 3475 goto done; 3476 } 3477 3478 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; 3479 if (reply == NULL) { 3480 params->status = MPI2_IOCSTATUS_BUSY; 3481 goto done; 3482 } 3483 params->status = reply->IOCStatus; 3484 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { 3485 params->hdr.Ext.ExtPageType = reply->ExtPageType; 3486 params->hdr.Ext.ExtPageLength = reply->ExtPageLength; 3487 params->hdr.Ext.PageType = reply->Header.PageType; 3488 params->hdr.Ext.PageNumber = reply->Header.PageNumber; 3489 params->hdr.Ext.PageVersion = reply->Header.PageVersion; 3490 } else { 3491 params->hdr.Struct.PageType = reply->Header.PageType; 3492 params->hdr.Struct.PageNumber = reply->Header.PageNumber; 3493 params->hdr.Struct.PageLength = reply->Header.PageLength; 3494 params->hdr.Struct.PageVersion = reply->Header.PageVersion; 3495 } 3496 3497 done: 3498 mpr_free_command(sc, cm); 3499 if (params->callback != NULL) 3500 params->callback(sc, params); 3501 3502 return; 3503 } 3504
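
/*
 * Editorial example, not part of the driver: reading a config page
 * synchronously through the simplified interface above.  Illustrative only;
 * the page selection is arbitrary, and a NULL callback selects the
 * synchronous path in mpr_read_config_page().
 */
#if 0
	struct mpr_config_params params;

	bzero(&params, sizeof(params));
	params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_BIOS;
	params.hdr.Struct.PageNumber = 3;
	params.buffer = NULL;
	params.length = 0;
	params.callback = NULL;
	if (mpr_read_config_page(sc, &params) != 0 ||
	    params.status != MPI2_IOCSTATUS_SUCCESS)
		return (ENXIO);
#endif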