1 /*- 2 * Copyright (c) 2009 Yahoo! Inc. 3 * Copyright (c) 2011-2015 LSI Corp. 4 * Copyright (c) 2013-2016 Avago Technologies 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * Avago Technologies (LSI) MPT-Fusion Host Adapter FreeBSD 29 * 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 /* Communications core for Avago Technologies (LSI) MPT3 */ 36 37 /* TODO Move headers to mprvar */ 38 #include <sys/types.h> 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/kernel.h> 42 #include <sys/selinfo.h> 43 #include <sys/lock.h> 44 #include <sys/mutex.h> 45 #include <sys/module.h> 46 #include <sys/bus.h> 47 #include <sys/conf.h> 48 #include <sys/bio.h> 49 #include <sys/malloc.h> 50 #include <sys/uio.h> 51 #include <sys/sysctl.h> 52 #include <sys/smp.h> 53 #include <sys/queue.h> 54 #include <sys/kthread.h> 55 #include <sys/taskqueue.h> 56 #include <sys/endian.h> 57 #include <sys/eventhandler.h> 58 #include <sys/sbuf.h> 59 60 #include <machine/bus.h> 61 #include <machine/resource.h> 62 #include <sys/rman.h> 63 #include <sys/proc.h> 64 65 #include <dev/pci/pcivar.h> 66 67 #include <cam/cam.h> 68 #include <cam/cam_ccb.h> 69 #include <cam/scsi/scsi_all.h> 70 71 #include <dev/mpr/mpi/mpi2_type.h> 72 #include <dev/mpr/mpi/mpi2.h> 73 #include <dev/mpr/mpi/mpi2_ioc.h> 74 #include <dev/mpr/mpi/mpi2_sas.h> 75 #include <dev/mpr/mpi/mpi2_pci.h> 76 #include <dev/mpr/mpi/mpi2_cnfg.h> 77 #include <dev/mpr/mpi/mpi2_init.h> 78 #include <dev/mpr/mpi/mpi2_tool.h> 79 #include <dev/mpr/mpr_ioctl.h> 80 #include <dev/mpr/mprvar.h> 81 #include <dev/mpr/mpr_table.h> 82 #include <dev/mpr/mpr_sas.h> 83 84 static int mpr_diag_reset(struct mpr_softc *sc, int sleep_flag); 85 static int mpr_init_queues(struct mpr_softc *sc); 86 static void mpr_resize_queues(struct mpr_softc *sc); 87 static int mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag); 88 static int mpr_transition_operational(struct mpr_softc *sc); 89 static int mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching); 90 static void mpr_iocfacts_free(struct mpr_softc *sc); 91 static void mpr_startup(void *arg); 92 static int mpr_send_iocinit(struct mpr_softc *sc); 93 static int mpr_alloc_queues(struct mpr_softc *sc); 94 static int 
mpr_alloc_hw_queues(struct mpr_softc *sc);
static int mpr_alloc_replies(struct mpr_softc *sc);
static int mpr_alloc_requests(struct mpr_softc *sc);
static int mpr_alloc_nvme_prp_pages(struct mpr_softc *sc);
static int mpr_attach_log(struct mpr_softc *sc);
static __inline void mpr_complete_command(struct mpr_softc *sc,
    struct mpr_command *cm);
static void mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data,
    MPI2_EVENT_NOTIFICATION_REPLY *reply);
static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
static void mpr_periodic(void *);
static int mpr_reregister_events(struct mpr_softc *sc);
static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
static int mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts);
static int mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag);
static int mpr_debug_sysctl(SYSCTL_HANDLER_ARGS);
static void mpr_parse_debug(struct mpr_softc *sc, char *list);

SYSCTL_NODE(_hw, OID_AUTO, mpr, CTLFLAG_RD, 0, "MPR Driver Parameters");

MALLOC_DEFINE(M_MPR, "mpr", "mpr driver memory");

/*
 * Do a "Diagnostic Reset" aka a hard reset.  This should get the chip out of
 * any state and back to its initialization state machine.
 */
static char mpt2_reset_magic[] = { 0x00, 0x0f, 0x04, 0x0b, 0x02, 0x07, 0x0d };

/*
 * This union is used to smoothly convert le64toh(cm->cm_desc.Words).
 * The compiler only accepts a uint64_t argument there; passing the
 * aggregate directly throws the error
 * "aggregate value used where an integer was expected".
 */
typedef union _reply_descriptor {
        u64 word;
        struct {
                u32 low;
                u32 high;
        } u;
} reply_descriptor, request_descriptor;

/* Rate limit chain-fail messages to 1 per minute */
static struct timeval mpr_chainfail_interval = { 60, 0 };

/*
 * sleep_flag can be either CAN_SLEEP or NO_SLEEP.
 * If this function is called from process context, it can sleep and there
 * is no harm in doing so.  If it is called from an interrupt handler, it
 * cannot sleep and the NO_SLEEP flag must be set.  Based on the sleep flag
 * the driver will call msleep, pause or DELAY.  msleep and pause are
 * variants of the same thing, but pause is used when mpr_mtx is not held
 * by the driver.
 */
static int
mpr_diag_reset(struct mpr_softc *sc, int sleep_flag)
{
        uint32_t reg;
        int i, error, tries = 0;
        uint8_t first_wait_done = FALSE;

        mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__);

        /* Clear any pending interrupts */
        mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0);

        /*
         * Force NO_SLEEP for threads that are prohibited from sleeping,
         * e.g. a thread running in an interrupt handler.
163 */ 164 #if __FreeBSD_version >= 1000029 165 if (curthread->td_no_sleeping) 166 #else //__FreeBSD_version < 1000029 167 if (curthread->td_pflags & TDP_NOSLEEPING) 168 #endif //__FreeBSD_version >= 1000029 169 sleep_flag = NO_SLEEP; 170 171 mpr_dprint(sc, MPR_INIT, "sequence start, sleep_flag=%d\n", sleep_flag); 172 /* Push the magic sequence */ 173 error = ETIMEDOUT; 174 while (tries++ < 20) { 175 for (i = 0; i < sizeof(mpt2_reset_magic); i++) 176 mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 177 mpt2_reset_magic[i]); 178 179 /* wait 100 msec */ 180 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) 181 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, 182 "mprdiag", hz/10); 183 else if (sleep_flag == CAN_SLEEP) 184 pause("mprdiag", hz/10); 185 else 186 DELAY(100 * 1000); 187 188 reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET); 189 if (reg & MPI2_DIAG_DIAG_WRITE_ENABLE) { 190 error = 0; 191 break; 192 } 193 } 194 if (error) { 195 mpr_dprint(sc, MPR_INIT, "sequence failed, error=%d, exit\n", 196 error); 197 return (error); 198 } 199 200 /* Send the actual reset. XXX need to refresh the reg? */ 201 reg |= MPI2_DIAG_RESET_ADAPTER; 202 mpr_dprint(sc, MPR_INIT, "sequence success, sending reset, reg= 0x%x\n", 203 reg); 204 mpr_regwrite(sc, MPI2_HOST_DIAGNOSTIC_OFFSET, reg); 205 206 /* Wait up to 300 seconds in 50ms intervals */ 207 error = ETIMEDOUT; 208 for (i = 0; i < 6000; i++) { 209 /* 210 * Wait 50 msec. If this is the first time through, wait 256 211 * msec to satisfy Diag Reset timing requirements. 212 */ 213 if (first_wait_done) { 214 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) 215 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, 216 "mprdiag", hz/20); 217 else if (sleep_flag == CAN_SLEEP) 218 pause("mprdiag", hz/20); 219 else 220 DELAY(50 * 1000); 221 } else { 222 DELAY(256 * 1000); 223 first_wait_done = TRUE; 224 } 225 /* 226 * Check for the RESET_ADAPTER bit to be cleared first, then 227 * wait for the RESET state to be cleared, which takes a little 228 * longer. 229 */ 230 reg = mpr_regread(sc, MPI2_HOST_DIAGNOSTIC_OFFSET); 231 if (reg & MPI2_DIAG_RESET_ADAPTER) { 232 continue; 233 } 234 reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET); 235 if ((reg & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_RESET) { 236 error = 0; 237 break; 238 } 239 } 240 if (error) { 241 mpr_dprint(sc, MPR_INIT, "reset failed, error= %d, exit\n", 242 error); 243 return (error); 244 } 245 246 mpr_regwrite(sc, MPI2_WRITE_SEQUENCE_OFFSET, 0x0); 247 mpr_dprint(sc, MPR_INIT, "diag reset success, exit\n"); 248 249 return (0); 250 } 251 252 static int 253 mpr_message_unit_reset(struct mpr_softc *sc, int sleep_flag) 254 { 255 int error; 256 257 MPR_FUNCTRACE(sc); 258 259 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 260 261 error = 0; 262 mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, 263 MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET << 264 MPI2_DOORBELL_FUNCTION_SHIFT); 265 266 if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0) { 267 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 268 "Doorbell handshake failed\n"); 269 error = ETIMEDOUT; 270 } 271 272 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 273 return (error); 274 } 275 276 static int 277 mpr_transition_ready(struct mpr_softc *sc) 278 { 279 uint32_t reg, state; 280 int error, tries = 0; 281 int sleep_flags; 282 283 MPR_FUNCTRACE(sc); 284 /* If we are in attach call, do not sleep */ 285 sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE) 286 ? 
CAN_SLEEP : NO_SLEEP; 287 288 error = 0; 289 290 mpr_dprint(sc, MPR_INIT, "%s entered, sleep_flags= %d\n", 291 __func__, sleep_flags); 292 293 while (tries++ < 1200) { 294 reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET); 295 mpr_dprint(sc, MPR_INIT, " Doorbell= 0x%x\n", reg); 296 297 /* 298 * Ensure the IOC is ready to talk. If it's not, try 299 * resetting it. 300 */ 301 if (reg & MPI2_DOORBELL_USED) { 302 mpr_dprint(sc, MPR_INIT, " Not ready, sending diag " 303 "reset\n"); 304 mpr_diag_reset(sc, sleep_flags); 305 DELAY(50000); 306 continue; 307 } 308 309 /* Is the adapter owned by another peer? */ 310 if ((reg & MPI2_DOORBELL_WHO_INIT_MASK) == 311 (MPI2_WHOINIT_PCI_PEER << MPI2_DOORBELL_WHO_INIT_SHIFT)) { 312 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC is under the " 313 "control of another peer host, aborting " 314 "initialization.\n"); 315 error = ENXIO; 316 break; 317 } 318 319 state = reg & MPI2_IOC_STATE_MASK; 320 if (state == MPI2_IOC_STATE_READY) { 321 /* Ready to go! */ 322 error = 0; 323 break; 324 } else if (state == MPI2_IOC_STATE_FAULT) { 325 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC in fault " 326 "state 0x%x, resetting\n", 327 state & MPI2_DOORBELL_FAULT_CODE_MASK); 328 mpr_diag_reset(sc, sleep_flags); 329 } else if (state == MPI2_IOC_STATE_OPERATIONAL) { 330 /* Need to take ownership */ 331 mpr_message_unit_reset(sc, sleep_flags); 332 } else if (state == MPI2_IOC_STATE_RESET) { 333 /* Wait a bit, IOC might be in transition */ 334 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 335 "IOC in unexpected reset state\n"); 336 } else { 337 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 338 "IOC in unknown state 0x%x\n", state); 339 error = EINVAL; 340 break; 341 } 342 343 /* Wait 50ms for things to settle down. */ 344 DELAY(50000); 345 } 346 347 if (error) 348 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 349 "Cannot transition IOC to ready\n"); 350 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 351 return (error); 352 } 353 354 static int 355 mpr_transition_operational(struct mpr_softc *sc) 356 { 357 uint32_t reg, state; 358 int error; 359 360 MPR_FUNCTRACE(sc); 361 362 error = 0; 363 reg = mpr_regread(sc, MPI2_DOORBELL_OFFSET); 364 mpr_dprint(sc, MPR_INIT, "%s entered, Doorbell= 0x%x\n", __func__, reg); 365 366 state = reg & MPI2_IOC_STATE_MASK; 367 if (state != MPI2_IOC_STATE_READY) { 368 mpr_dprint(sc, MPR_INIT, "IOC not ready\n"); 369 if ((error = mpr_transition_ready(sc)) != 0) { 370 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 371 "failed to transition ready, exit\n"); 372 return (error); 373 } 374 } 375 376 error = mpr_send_iocinit(sc); 377 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 378 379 return (error); 380 } 381 382 static void 383 mpr_resize_queues(struct mpr_softc *sc) 384 { 385 int reqcr, prireqcr; 386 387 /* 388 * Size the queues. Since the reply queues always need one free 389 * entry, we'll deduct one reply message here. The LSI documents 390 * suggest instead to add a count to the request queue, but I think 391 * that it's better to deduct from reply queue. 392 */ 393 prireqcr = MAX(1, sc->max_prireqframes); 394 prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit); 395 396 reqcr = MAX(2, sc->max_reqframes); 397 reqcr = MIN(reqcr, sc->facts->RequestCredit); 398 399 sc->num_reqs = prireqcr + reqcr; 400 sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes, 401 sc->facts->MaxReplyDescriptorPostQueueDepth) - 1; 402 403 /* 404 * Figure out the number of MSIx-based queues. 
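         * (Purely illustrative numbers, not driver defaults: if prireqcr
         * clamps to a HighPriorityCredit of 128 and reqcr clamps to 2048,
         * num_reqs above is 2176; if max_replyframes + max_evtframes is
         * 2080, num_replies is 2080 - 1 = 2079 after the one-entry
         * deduction.  Those request counts also feed the per-queue check
         * below.)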
If the firmware or 405 * user has done something crazy and not allowed enough credit for 406 * the queues to be useful then don't enable multi-queue. 407 */ 408 if (sc->facts->MaxMSIxVectors < 2) 409 sc->msi_msgs = 1; 410 411 if (sc->msi_msgs > 1) { 412 sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus); 413 sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors); 414 if (sc->num_reqs / sc->msi_msgs < 2) 415 sc->msi_msgs = 1; 416 } 417 418 mpr_dprint(sc, MPR_INIT, "Sized queues to q=%d reqs=%d replies=%d\n", 419 sc->msi_msgs, sc->num_reqs, sc->num_replies); 420 } 421 422 /* 423 * This is called during attach and when re-initializing due to a Diag Reset. 424 * IOC Facts is used to allocate many of the structures needed by the driver. 425 * If called from attach, de-allocation is not required because the driver has 426 * not allocated any structures yet, but if called from a Diag Reset, previously 427 * allocated structures based on IOC Facts will need to be freed and re- 428 * allocated bases on the latest IOC Facts. 429 */ 430 static int 431 mpr_iocfacts_allocate(struct mpr_softc *sc, uint8_t attaching) 432 { 433 int error; 434 Mpi2IOCFactsReply_t saved_facts; 435 uint8_t saved_mode, reallocating; 436 437 mpr_dprint(sc, MPR_INIT|MPR_TRACE, "%s entered\n", __func__); 438 439 /* Save old IOC Facts and then only reallocate if Facts have changed */ 440 if (!attaching) { 441 bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY)); 442 } 443 444 /* 445 * Get IOC Facts. In all cases throughout this function, panic if doing 446 * a re-initialization and only return the error if attaching so the OS 447 * can handle it. 448 */ 449 if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) { 450 if (attaching) { 451 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to get " 452 "IOC Facts with error %d, exit\n", error); 453 return (error); 454 } else { 455 panic("%s failed to get IOC Facts with error %d\n", 456 __func__, error); 457 } 458 } 459 460 MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts); 461 462 snprintf(sc->fw_version, sizeof(sc->fw_version), 463 "%02d.%02d.%02d.%02d", 464 sc->facts->FWVersion.Struct.Major, 465 sc->facts->FWVersion.Struct.Minor, 466 sc->facts->FWVersion.Struct.Unit, 467 sc->facts->FWVersion.Struct.Dev); 468 469 mpr_dprint(sc, MPR_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version, 470 MPR_DRIVER_VERSION); 471 mpr_dprint(sc, MPR_INFO, 472 "IOCCapabilities: %b\n", sc->facts->IOCCapabilities, 473 "\20" "\3ScsiTaskFull" "\4DiagTrace" "\5SnapBuf" "\6ExtBuf" 474 "\7EEDP" "\10BiDirTarg" "\11Multicast" "\14TransRetry" "\15IR" 475 "\16EventReplay" "\17RaidAccel" "\20MSIXIndex" "\21HostDisc" 476 "\22FastPath" "\23RDPQArray" "\24AtomicReqDesc" "\25PCIeSRIOV"); 477 478 /* 479 * If the chip doesn't support event replay then a hard reset will be 480 * required to trigger a full discovery. Do the reset here then 481 * retransition to Ready. A hard reset might have already been done, 482 * but it doesn't hurt to do it again. Only do this if attaching, not 483 * for a Diag Reset. 484 */ 485 if (attaching && ((sc->facts->IOCCapabilities & 486 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY) == 0)) { 487 mpr_dprint(sc, MPR_INIT, "No event replay, resetting\n"); 488 mpr_diag_reset(sc, NO_SLEEP); 489 if ((error = mpr_transition_ready(sc)) != 0) { 490 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to " 491 "transition to ready with error %d, exit\n", 492 error); 493 return (error); 494 } 495 } 496 497 /* 498 * Set flag if IR Firmware is loaded. 
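         * (IR here means Integrated RAID; the flag simply records whether
         * the MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID bit is advertised.)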
If the RAID Capability has 499 * changed from the previous IOC Facts, log a warning, but only if 500 * checking this after a Diag Reset and not during attach. 501 */ 502 saved_mode = sc->ir_firmware; 503 if (sc->facts->IOCCapabilities & 504 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) 505 sc->ir_firmware = 1; 506 if (!attaching) { 507 if (sc->ir_firmware != saved_mode) { 508 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "new IR/IT mode " 509 "in IOC Facts does not match previous mode\n"); 510 } 511 } 512 513 /* Only deallocate and reallocate if relevant IOC Facts have changed */ 514 reallocating = FALSE; 515 sc->mpr_flags &= ~MPR_FLAGS_REALLOCATED; 516 517 if ((!attaching) && 518 ((saved_facts.MsgVersion != sc->facts->MsgVersion) || 519 (saved_facts.HeaderVersion != sc->facts->HeaderVersion) || 520 (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) || 521 (saved_facts.RequestCredit != sc->facts->RequestCredit) || 522 (saved_facts.ProductID != sc->facts->ProductID) || 523 (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) || 524 (saved_facts.IOCRequestFrameSize != 525 sc->facts->IOCRequestFrameSize) || 526 (saved_facts.IOCMaxChainSegmentSize != 527 sc->facts->IOCMaxChainSegmentSize) || 528 (saved_facts.MaxTargets != sc->facts->MaxTargets) || 529 (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) || 530 (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) || 531 (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) || 532 (saved_facts.MaxReplyDescriptorPostQueueDepth != 533 sc->facts->MaxReplyDescriptorPostQueueDepth) || 534 (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) || 535 (saved_facts.MaxVolumes != sc->facts->MaxVolumes) || 536 (saved_facts.MaxPersistentEntries != 537 sc->facts->MaxPersistentEntries))) { 538 reallocating = TRUE; 539 540 /* Record that we reallocated everything */ 541 sc->mpr_flags |= MPR_FLAGS_REALLOCATED; 542 } 543 544 /* 545 * Some things should be done if attaching or re-allocating after a Diag 546 * Reset, but are not needed after a Diag Reset if the FW has not 547 * changed. 548 */ 549 if (attaching || reallocating) { 550 /* 551 * Check if controller supports FW diag buffers and set flag to 552 * enable each type. 553 */ 554 if (sc->facts->IOCCapabilities & 555 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) 556 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE]. 557 enabled = TRUE; 558 if (sc->facts->IOCCapabilities & 559 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) 560 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT]. 561 enabled = TRUE; 562 if (sc->facts->IOCCapabilities & 563 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) 564 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED]. 565 enabled = TRUE; 566 567 /* 568 * Set flags for some supported items. 
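                 * For example, when the IOC advertises
                 * MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ, atomic_desc_capable
                 * is set just below and mpr_enqueue_request() then posts
                 * requests with a single 32-bit atomic descriptor write
                 * instead of the low/high register pair.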
569 */ 570 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) 571 sc->eedp_enabled = TRUE; 572 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) 573 sc->control_TLR = TRUE; 574 if (sc->facts->IOCCapabilities & 575 MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ) 576 sc->atomic_desc_capable = TRUE; 577 578 mpr_resize_queues(sc); 579 580 /* 581 * Initialize all Tail Queues 582 */ 583 TAILQ_INIT(&sc->req_list); 584 TAILQ_INIT(&sc->high_priority_req_list); 585 TAILQ_INIT(&sc->chain_list); 586 TAILQ_INIT(&sc->prp_page_list); 587 TAILQ_INIT(&sc->tm_list); 588 } 589 590 /* 591 * If doing a Diag Reset and the FW is significantly different 592 * (reallocating will be set above in IOC Facts comparison), then all 593 * buffers based on the IOC Facts will need to be freed before they are 594 * reallocated. 595 */ 596 if (reallocating) { 597 mpr_iocfacts_free(sc); 598 mprsas_realloc_targets(sc, saved_facts.MaxTargets + 599 saved_facts.MaxVolumes); 600 } 601 602 /* 603 * Any deallocation has been completed. Now start reallocating 604 * if needed. Will only need to reallocate if attaching or if the new 605 * IOC Facts are different from the previous IOC Facts after a Diag 606 * Reset. Targets have already been allocated above if needed. 607 */ 608 error = 0; 609 while (attaching || reallocating) { 610 if ((error = mpr_alloc_hw_queues(sc)) != 0) 611 break; 612 if ((error = mpr_alloc_replies(sc)) != 0) 613 break; 614 if ((error = mpr_alloc_requests(sc)) != 0) 615 break; 616 if ((error = mpr_alloc_queues(sc)) != 0) 617 break; 618 break; 619 } 620 if (error) { 621 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 622 "Failed to alloc queues with error %d\n", error); 623 mpr_free(sc); 624 return (error); 625 } 626 627 /* Always initialize the queues */ 628 bzero(sc->free_queue, sc->fqdepth * 4); 629 mpr_init_queues(sc); 630 631 /* 632 * Always get the chip out of the reset state, but only panic if not 633 * attaching. If attaching and there is an error, that is handled by 634 * the OS. 635 */ 636 error = mpr_transition_operational(sc); 637 if (error != 0) { 638 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "Failed to " 639 "transition to operational with error %d\n", error); 640 mpr_free(sc); 641 return (error); 642 } 643 644 /* 645 * Finish the queue initialization. 646 * These are set here instead of in mpr_init_queues() because the 647 * IOC resets these values during the state transition in 648 * mpr_transition_operational(). The free index is set to 1 649 * because the corresponding index in the IOC is set to 0, and the 650 * IOC treats the queues as full if both are set to the same value. 651 * Hence the reason that the queue can't hold all of the possible 652 * replies. 653 */ 654 sc->replypostindex = 0; 655 mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); 656 mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 0); 657 658 /* 659 * Attach the subsystems so they can prepare their event masks. 
660 * XXX Should be dynamic so that IM/IR and user modules can attach 661 */ 662 error = 0; 663 while (attaching) { 664 mpr_dprint(sc, MPR_INIT, "Attaching subsystems\n"); 665 if ((error = mpr_attach_log(sc)) != 0) 666 break; 667 if ((error = mpr_attach_sas(sc)) != 0) 668 break; 669 if ((error = mpr_attach_user(sc)) != 0) 670 break; 671 break; 672 } 673 if (error) { 674 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 675 "Failed to attach all subsystems: error %d\n", error); 676 mpr_free(sc); 677 return (error); 678 } 679 680 /* 681 * XXX If the number of MSI-X vectors changes during re-init, this 682 * won't see it and adjust. 683 */ 684 if (attaching && (error = mpr_pci_setup_interrupts(sc)) != 0) { 685 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 686 "Failed to setup interrupts\n"); 687 mpr_free(sc); 688 return (error); 689 } 690 691 return (error); 692 } 693 694 /* 695 * This is called if memory is being free (during detach for example) and when 696 * buffers need to be reallocated due to a Diag Reset. 697 */ 698 static void 699 mpr_iocfacts_free(struct mpr_softc *sc) 700 { 701 struct mpr_command *cm; 702 int i; 703 704 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 705 706 if (sc->free_busaddr != 0) 707 bus_dmamap_unload(sc->queues_dmat, sc->queues_map); 708 if (sc->free_queue != NULL) 709 bus_dmamem_free(sc->queues_dmat, sc->free_queue, 710 sc->queues_map); 711 if (sc->queues_dmat != NULL) 712 bus_dma_tag_destroy(sc->queues_dmat); 713 714 if (sc->chain_busaddr != 0) 715 bus_dmamap_unload(sc->chain_dmat, sc->chain_map); 716 if (sc->chain_frames != NULL) 717 bus_dmamem_free(sc->chain_dmat, sc->chain_frames, 718 sc->chain_map); 719 if (sc->chain_dmat != NULL) 720 bus_dma_tag_destroy(sc->chain_dmat); 721 722 if (sc->sense_busaddr != 0) 723 bus_dmamap_unload(sc->sense_dmat, sc->sense_map); 724 if (sc->sense_frames != NULL) 725 bus_dmamem_free(sc->sense_dmat, sc->sense_frames, 726 sc->sense_map); 727 if (sc->sense_dmat != NULL) 728 bus_dma_tag_destroy(sc->sense_dmat); 729 730 if (sc->prp_page_busaddr != 0) 731 bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map); 732 if (sc->prp_pages != NULL) 733 bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages, 734 sc->prp_page_map); 735 if (sc->prp_page_dmat != NULL) 736 bus_dma_tag_destroy(sc->prp_page_dmat); 737 738 if (sc->reply_busaddr != 0) 739 bus_dmamap_unload(sc->reply_dmat, sc->reply_map); 740 if (sc->reply_frames != NULL) 741 bus_dmamem_free(sc->reply_dmat, sc->reply_frames, 742 sc->reply_map); 743 if (sc->reply_dmat != NULL) 744 bus_dma_tag_destroy(sc->reply_dmat); 745 746 if (sc->req_busaddr != 0) 747 bus_dmamap_unload(sc->req_dmat, sc->req_map); 748 if (sc->req_frames != NULL) 749 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map); 750 if (sc->req_dmat != NULL) 751 bus_dma_tag_destroy(sc->req_dmat); 752 753 if (sc->chains != NULL) 754 free(sc->chains, M_MPR); 755 if (sc->prps != NULL) 756 free(sc->prps, M_MPR); 757 if (sc->commands != NULL) { 758 for (i = 1; i < sc->num_reqs; i++) { 759 cm = &sc->commands[i]; 760 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap); 761 } 762 free(sc->commands, M_MPR); 763 } 764 if (sc->buffer_dmat != NULL) 765 bus_dma_tag_destroy(sc->buffer_dmat); 766 767 mpr_pci_free_interrupts(sc); 768 free(sc->queues, M_MPR); 769 sc->queues = NULL; 770 } 771 772 /* 773 * The terms diag reset and hard reset are used interchangeably in the MPI 774 * docs to mean resetting the controller chip. In this code diag reset 775 * cleans everything up, and the hard reset function just sends the reset 776 * sequence to the chip. 
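 * As a concrete illustration of the difference, mpr_reinit() below is the
 * full reinitialization path: it masks interrupts, calls mpr_diag_reset()
 * to send the reset sequence, restores the PCI state, and then reallocates
 * anything whose IOC Facts based sizing changed.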
 * This should probably be refactored so that every subsystem gets a reset
 * notification of some sort, and can clean up appropriately.
 */
int
mpr_reinit(struct mpr_softc *sc)
{
        int error;
        struct mprsas_softc *sassc;

        sassc = sc->sassc;

        MPR_FUNCTRACE(sc);

        mtx_assert(&sc->mpr_mtx, MA_OWNED);

        mpr_dprint(sc, MPR_INIT|MPR_INFO, "Reinitializing controller\n");
        if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) {
                mpr_dprint(sc, MPR_INIT, "Reset already in progress\n");
                return 0;
        }

        /*
         * Make sure the completion callbacks can recognize they're getting
         * a NULL cm_reply due to a reset.
         */
        sc->mpr_flags |= MPR_FLAGS_DIAGRESET;

        /*
         * Mask interrupts here.
         */
        mpr_dprint(sc, MPR_INIT, "Masking interrupts and resetting\n");
        mpr_mask_intr(sc);

        error = mpr_diag_reset(sc, CAN_SLEEP);
        if (error != 0) {
                panic("%s hard reset failed with error %d\n", __func__, error);
        }

        /* Restore the PCI state, including the MSI-X registers */
        mpr_pci_restore(sc);

        /* Give the I/O subsystem special priority to get itself prepared */
        mprsas_handle_reinit(sc);

        /*
         * Get IOC Facts and allocate all structures based on this information.
         * The attach function will also call mpr_iocfacts_allocate at startup.
         * If relevant values have changed in IOC Facts, this function will free
         * all of the memory based on IOC Facts and reallocate that memory.
         */
        if ((error = mpr_iocfacts_allocate(sc, FALSE)) != 0) {
                panic("%s IOC Facts based allocation failed with error %d\n",
                    __func__, error);
        }

        /*
         * Mapping structures will be re-allocated after getting IOC Page8, so
         * free these structures here.
         */
        mpr_mapping_exit(sc);

        /*
         * The only static page currently read is IOC Page8.  Others can be
         * added in the future.  It's possible that the values in IOC Page8
         * have changed after a Diag Reset due to user modification, so always
         * read these.  Interrupts are masked, so unmask them before getting
         * config pages.
         */
        mpr_unmask_intr(sc);
        sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET;
        mpr_base_static_config_pages(sc);

        /*
         * Some mapping info is based on IOC Page8 data, so re-initialize the
         * mapping tables.
         */
        mpr_mapping_initialize(sc);

        /*
         * Restart will reload the event masks clobbered by the reset, and
         * then enable the port.
         */
        mpr_reregister_events(sc);

        /* the end of discovery will release the simq, so we're done. */
        mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Finished sc %p post %u free %u\n",
            sc, sc->replypostindex, sc->replyfreeindex);
        mprsas_release_simq_reinit(sassc);
        mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error);

        return 0;
}

/*
 * Wait for the chip to ACK a word that we've put into its FIFO.
 * Wait up to <timeout> seconds.  Each pass through the loop either sleeps
 * for 1 millisecond (CAN_SLEEP) or busy-waits for 500 microseconds
 * (NO_SLEEP), so the total wait works out to roughly <timeout> seconds
 * either way.
 */
static int
mpr_wait_db_ack(struct mpr_softc *sc, int timeout, int sleep_flag)
{
        u32 cntdn, count;
        u32 int_status;
        u32 doorbell;

        count = 0;
        cntdn = (sleep_flag == CAN_SLEEP) ?
1000*timeout : 2000*timeout; 884 do { 885 int_status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); 886 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) { 887 mpr_dprint(sc, MPR_TRACE, "%s: successful count(%d), " 888 "timeout(%d)\n", __func__, count, timeout); 889 return 0; 890 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) { 891 doorbell = mpr_regread(sc, MPI2_DOORBELL_OFFSET); 892 if ((doorbell & MPI2_IOC_STATE_MASK) == 893 MPI2_IOC_STATE_FAULT) { 894 mpr_dprint(sc, MPR_FAULT, 895 "fault_state(0x%04x)!\n", doorbell); 896 return (EFAULT); 897 } 898 } else if (int_status == 0xFFFFFFFF) 899 goto out; 900 901 /* 902 * If it can sleep, sleep for 1 milisecond, else busy loop for 903 * 0.5 milisecond 904 */ 905 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) 906 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba", 907 hz/1000); 908 else if (sleep_flag == CAN_SLEEP) 909 pause("mprdba", hz/1000); 910 else 911 DELAY(500); 912 count++; 913 } while (--cntdn); 914 915 out: 916 mpr_dprint(sc, MPR_FAULT, "%s: failed due to timeout count(%d), " 917 "int_status(%x)!\n", __func__, count, int_status); 918 return (ETIMEDOUT); 919 } 920 921 /* Wait for the chip to signal that the next word in its FIFO can be fetched */ 922 static int 923 mpr_wait_db_int(struct mpr_softc *sc) 924 { 925 int retry; 926 927 for (retry = 0; retry < MPR_DB_MAX_WAIT; retry++) { 928 if ((mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET) & 929 MPI2_HIS_IOC2SYS_DB_STATUS) != 0) 930 return (0); 931 DELAY(2000); 932 } 933 return (ETIMEDOUT); 934 } 935 936 /* Step through the synchronous command state machine, i.e. "Doorbell mode" */ 937 static int 938 mpr_request_sync(struct mpr_softc *sc, void *req, MPI2_DEFAULT_REPLY *reply, 939 int req_sz, int reply_sz, int timeout) 940 { 941 uint32_t *data32; 942 uint16_t *data16; 943 int i, count, ioc_sz, residual; 944 int sleep_flags = CAN_SLEEP; 945 946 #if __FreeBSD_version >= 1000029 947 if (curthread->td_no_sleeping) 948 #else //__FreeBSD_version < 1000029 949 if (curthread->td_pflags & TDP_NOSLEEPING) 950 #endif //__FreeBSD_version >= 1000029 951 sleep_flags = NO_SLEEP; 952 953 /* Step 1 */ 954 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 955 956 /* Step 2 */ 957 if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) 958 return (EBUSY); 959 960 /* Step 3 961 * Announce that a message is coming through the doorbell. Messages 962 * are pushed at 32bit words, so round up if needed. 963 */ 964 count = (req_sz + 3) / 4; 965 mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, 966 (MPI2_FUNCTION_HANDSHAKE << MPI2_DOORBELL_FUNCTION_SHIFT) | 967 (count << MPI2_DOORBELL_ADD_DWORDS_SHIFT)); 968 969 /* Step 4 */ 970 if (mpr_wait_db_int(sc) || 971 (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) == 0) { 972 mpr_dprint(sc, MPR_FAULT, "Doorbell failed to activate\n"); 973 return (ENXIO); 974 } 975 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 976 if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) { 977 mpr_dprint(sc, MPR_FAULT, "Doorbell handshake failed\n"); 978 return (ENXIO); 979 } 980 981 /* Step 5 */ 982 /* Clock out the message data synchronously in 32-bit dwords*/ 983 data32 = (uint32_t *)req; 984 for (i = 0; i < count; i++) { 985 mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(data32[i])); 986 if (mpr_wait_db_ack(sc, 5, sleep_flags) != 0) { 987 mpr_dprint(sc, MPR_FAULT, 988 "Timeout while writing doorbell\n"); 989 return (ENXIO); 990 } 991 } 992 993 /* Step 6 */ 994 /* Clock in the reply in 16-bit words. 
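         * (Each 16-bit read below is paced by mpr_wait_db_int() and
         * acknowledged by clearing MPI2_HOST_INTERRUPT_STATUS_OFFSET, just
         * as the request words in Step 5 were paced by mpr_wait_db_ack().)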
The total length of the 995 * message is always in the 4th byte, so clock out the first 2 words 996 * manually, then loop the rest. 997 */ 998 data16 = (uint16_t *)reply; 999 if (mpr_wait_db_int(sc) != 0) { 1000 mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 0\n"); 1001 return (ENXIO); 1002 } 1003 data16[0] = 1004 mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; 1005 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 1006 if (mpr_wait_db_int(sc) != 0) { 1007 mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell 1\n"); 1008 return (ENXIO); 1009 } 1010 data16[1] = 1011 mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_DATA_MASK; 1012 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 1013 1014 /* Number of 32bit words in the message */ 1015 ioc_sz = reply->MsgLength; 1016 1017 /* 1018 * Figure out how many 16bit words to clock in without overrunning. 1019 * The precision loss with dividing reply_sz can safely be 1020 * ignored because the messages can only be multiples of 32bits. 1021 */ 1022 residual = 0; 1023 count = MIN((reply_sz / 4), ioc_sz) * 2; 1024 if (count < ioc_sz * 2) { 1025 residual = ioc_sz * 2 - count; 1026 mpr_dprint(sc, MPR_ERROR, "Driver error, throwing away %d " 1027 "residual message words\n", residual); 1028 } 1029 1030 for (i = 2; i < count; i++) { 1031 if (mpr_wait_db_int(sc) != 0) { 1032 mpr_dprint(sc, MPR_FAULT, 1033 "Timeout reading doorbell %d\n", i); 1034 return (ENXIO); 1035 } 1036 data16[i] = mpr_regread(sc, MPI2_DOORBELL_OFFSET) & 1037 MPI2_DOORBELL_DATA_MASK; 1038 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 1039 } 1040 1041 /* 1042 * Pull out residual words that won't fit into the provided buffer. 1043 * This keeps the chip from hanging due to a driver programming 1044 * error. 1045 */ 1046 while (residual--) { 1047 if (mpr_wait_db_int(sc) != 0) { 1048 mpr_dprint(sc, MPR_FAULT, "Timeout reading doorbell\n"); 1049 return (ENXIO); 1050 } 1051 (void)mpr_regread(sc, MPI2_DOORBELL_OFFSET); 1052 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 1053 } 1054 1055 /* Step 7 */ 1056 if (mpr_wait_db_int(sc) != 0) { 1057 mpr_dprint(sc, MPR_FAULT, "Timeout waiting to exit doorbell\n"); 1058 return (ENXIO); 1059 } 1060 if (mpr_regread(sc, MPI2_DOORBELL_OFFSET) & MPI2_DOORBELL_USED) 1061 mpr_dprint(sc, MPR_FAULT, "Warning, doorbell still active\n"); 1062 mpr_regwrite(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET, 0x0); 1063 1064 return (0); 1065 } 1066 1067 static void 1068 mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm) 1069 { 1070 request_descriptor rd; 1071 1072 MPR_FUNCTRACE(sc); 1073 mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n", 1074 cm->cm_desc.Default.SMID, cm, cm->cm_ccb); 1075 1076 if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags & 1077 MPR_FLAGS_SHUTDOWN)) 1078 mtx_assert(&sc->mpr_mtx, MA_OWNED); 1079 1080 if (++sc->io_cmds_active > sc->io_cmds_highwater) 1081 sc->io_cmds_highwater++; 1082 1083 if (sc->atomic_desc_capable) { 1084 rd.u.low = cm->cm_desc.Words.Low; 1085 mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET, 1086 rd.u.low); 1087 } else { 1088 rd.u.low = cm->cm_desc.Words.Low; 1089 rd.u.high = cm->cm_desc.Words.High; 1090 rd.word = htole64(rd.word); 1091 mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET, 1092 rd.u.low); 1093 mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET, 1094 rd.u.high); 1095 } 1096 } 1097 1098 /* 1099 * Just the FACTS, ma'am. 
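 * Roughly speaking, the fields returned here (RequestCredit,
 * HighPriorityCredit, MaxReplyDescriptorPostQueueDepth, ReplyFrameSize,
 * IOCRequestFrameSize, IOCMaxChainSegmentSize, and friends) drive all of
 * the sizing done later in mpr_resize_queues() and mpr_iocfacts_allocate().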
1100 */ 1101 static int 1102 mpr_get_iocfacts(struct mpr_softc *sc, MPI2_IOC_FACTS_REPLY *facts) 1103 { 1104 MPI2_DEFAULT_REPLY *reply; 1105 MPI2_IOC_FACTS_REQUEST request; 1106 int error, req_sz, reply_sz; 1107 1108 MPR_FUNCTRACE(sc); 1109 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 1110 1111 req_sz = sizeof(MPI2_IOC_FACTS_REQUEST); 1112 reply_sz = sizeof(MPI2_IOC_FACTS_REPLY); 1113 reply = (MPI2_DEFAULT_REPLY *)facts; 1114 1115 bzero(&request, req_sz); 1116 request.Function = MPI2_FUNCTION_IOC_FACTS; 1117 error = mpr_request_sync(sc, &request, reply, req_sz, reply_sz, 5); 1118 1119 mpr_dprint(sc, MPR_INIT, "%s exit, error= %d\n", __func__, error); 1120 return (error); 1121 } 1122 1123 static int 1124 mpr_send_iocinit(struct mpr_softc *sc) 1125 { 1126 MPI2_IOC_INIT_REQUEST init; 1127 MPI2_DEFAULT_REPLY reply; 1128 int req_sz, reply_sz, error; 1129 struct timeval now; 1130 uint64_t time_in_msec; 1131 1132 MPR_FUNCTRACE(sc); 1133 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 1134 1135 req_sz = sizeof(MPI2_IOC_INIT_REQUEST); 1136 reply_sz = sizeof(MPI2_IOC_INIT_REPLY); 1137 bzero(&init, req_sz); 1138 bzero(&reply, reply_sz); 1139 1140 /* 1141 * Fill in the init block. Note that most addresses are 1142 * deliberately in the lower 32bits of memory. This is a micro- 1143 * optimzation for PCI/PCIX, though it's not clear if it helps PCIe. 1144 */ 1145 init.Function = MPI2_FUNCTION_IOC_INIT; 1146 init.WhoInit = MPI2_WHOINIT_HOST_DRIVER; 1147 init.MsgVersion = htole16(MPI2_VERSION); 1148 init.HeaderVersion = htole16(MPI2_HEADER_VERSION); 1149 init.SystemRequestFrameSize = htole16(sc->facts->IOCRequestFrameSize); 1150 init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth); 1151 init.ReplyFreeQueueDepth = htole16(sc->fqdepth); 1152 init.SenseBufferAddressHigh = 0; 1153 init.SystemReplyAddressHigh = 0; 1154 init.SystemRequestFrameBaseAddress.High = 0; 1155 init.SystemRequestFrameBaseAddress.Low = 1156 htole32((uint32_t)sc->req_busaddr); 1157 init.ReplyDescriptorPostQueueAddress.High = 0; 1158 init.ReplyDescriptorPostQueueAddress.Low = 1159 htole32((uint32_t)sc->post_busaddr); 1160 init.ReplyFreeQueueAddress.High = 0; 1161 init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr); 1162 getmicrotime(&now); 1163 time_in_msec = (now.tv_sec * 1000 + now.tv_usec/1000); 1164 init.TimeStamp.High = htole32((time_in_msec >> 32) & 0xFFFFFFFF); 1165 init.TimeStamp.Low = htole32(time_in_msec & 0xFFFFFFFF); 1166 init.HostPageSize = HOST_PAGE_SIZE_4K; 1167 1168 error = mpr_request_sync(sc, &init, &reply, req_sz, reply_sz, 5); 1169 if ((reply.IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 1170 error = ENXIO; 1171 1172 mpr_dprint(sc, MPR_INIT, "IOCInit status= 0x%x\n", reply.IOCStatus); 1173 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 1174 return (error); 1175 } 1176 1177 void 1178 mpr_memaddr_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1179 { 1180 bus_addr_t *addr; 1181 1182 addr = arg; 1183 *addr = segs[0].ds_addr; 1184 } 1185 1186 static int 1187 mpr_alloc_queues(struct mpr_softc *sc) 1188 { 1189 struct mpr_queue *q; 1190 int nq, i; 1191 1192 nq = sc->msi_msgs; 1193 mpr_dprint(sc, MPR_INIT|MPR_XINFO, "Allocating %d I/O queues\n", nq); 1194 1195 sc->queues = mallocarray(nq, sizeof(struct mpr_queue), M_MPR, 1196 M_NOWAIT|M_ZERO); 1197 if (sc->queues == NULL) 1198 return (ENOMEM); 1199 1200 for (i = 0; i < nq; i++) { 1201 q = &sc->queues[i]; 1202 mpr_dprint(sc, MPR_INIT, "Configuring queue %d %p\n", i, q); 1203 q->sc = sc; 1204 q->qnum = i; 1205 } 1206 
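
        /*
         * Note: interrupt setup for these queues happens later via
         * mpr_pci_setup_interrupts(), called from mpr_iocfacts_allocate()
         * during attach; nothing is armed here.
         */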
return (0); 1207 } 1208 1209 static int 1210 mpr_alloc_hw_queues(struct mpr_softc *sc) 1211 { 1212 bus_addr_t queues_busaddr; 1213 uint8_t *queues; 1214 int qsize, fqsize, pqsize; 1215 1216 /* 1217 * The reply free queue contains 4 byte entries in multiples of 16 and 1218 * aligned on a 16 byte boundary. There must always be an unused entry. 1219 * This queue supplies fresh reply frames for the firmware to use. 1220 * 1221 * The reply descriptor post queue contains 8 byte entries in 1222 * multiples of 16 and aligned on a 16 byte boundary. This queue 1223 * contains filled-in reply frames sent from the firmware to the host. 1224 * 1225 * These two queues are allocated together for simplicity. 1226 */ 1227 sc->fqdepth = roundup2(sc->num_replies + 1, 16); 1228 sc->pqdepth = roundup2(sc->num_replies + 1, 16); 1229 fqsize= sc->fqdepth * 4; 1230 pqsize = sc->pqdepth * 8; 1231 qsize = fqsize + pqsize; 1232 1233 if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1234 16, 0, /* algnmnt, boundary */ 1235 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1236 BUS_SPACE_MAXADDR, /* highaddr */ 1237 NULL, NULL, /* filter, filterarg */ 1238 qsize, /* maxsize */ 1239 1, /* nsegments */ 1240 qsize, /* maxsegsize */ 1241 0, /* flags */ 1242 NULL, NULL, /* lockfunc, lockarg */ 1243 &sc->queues_dmat)) { 1244 mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues DMA tag\n"); 1245 return (ENOMEM); 1246 } 1247 if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT, 1248 &sc->queues_map)) { 1249 mpr_dprint(sc, MPR_ERROR, "Cannot allocate queues memory\n"); 1250 return (ENOMEM); 1251 } 1252 bzero(queues, qsize); 1253 bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize, 1254 mpr_memaddr_cb, &queues_busaddr, 0); 1255 1256 sc->free_queue = (uint32_t *)queues; 1257 sc->free_busaddr = queues_busaddr; 1258 sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize); 1259 sc->post_busaddr = queues_busaddr + fqsize; 1260 1261 return (0); 1262 } 1263 1264 static int 1265 mpr_alloc_replies(struct mpr_softc *sc) 1266 { 1267 int rsize, num_replies; 1268 1269 /* 1270 * sc->num_replies should be one less than sc->fqdepth. We need to 1271 * allocate space for sc->fqdepth replies, but only sc->num_replies 1272 * replies can be used at once. 
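         * As a purely illustrative example, a num_replies of 2079 would have
         * been rounded up to an fqdepth of 2080 in mpr_alloc_hw_queues(), so
         * 2080 reply frames are allocated here even though only 2079 can
         * ever be outstanding at once.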
 */
        num_replies = max(sc->fqdepth, sc->num_replies);

        rsize = sc->facts->ReplyFrameSize * num_replies * 4;
        if (bus_dma_tag_create( sc->mpr_parent_dmat,    /* parent */
                                4, 0,                   /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                NULL, NULL,             /* filter, filterarg */
                                rsize,                  /* maxsize */
                                1,                      /* nsegments */
                                rsize,                  /* maxsegsize */
                                0,                      /* flags */
                                NULL, NULL,             /* lockfunc, lockarg */
                                &sc->reply_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames,
            BUS_DMA_NOWAIT, &sc->reply_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate replies memory\n");
                return (ENOMEM);
        }
        bzero(sc->reply_frames, rsize);
        bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize,
            mpr_memaddr_cb, &sc->reply_busaddr, 0);

        return (0);
}

static int
mpr_alloc_requests(struct mpr_softc *sc)
{
        struct mpr_command *cm;
        struct mpr_chain *chain;
        int i, rsize, nsegs;

        rsize = sc->facts->IOCRequestFrameSize * sc->num_reqs * 4;
        if (bus_dma_tag_create( sc->mpr_parent_dmat,    /* parent */
                                16, 0,                  /* algnmnt, boundary */
                                BUS_SPACE_MAXADDR_32BIT,/* lowaddr */
                                BUS_SPACE_MAXADDR,      /* highaddr */
                                NULL, NULL,             /* filter, filterarg */
                                rsize,                  /* maxsize */
                                1,                      /* nsegments */
                                rsize,                  /* maxsegsize */
                                0,                      /* flags */
                                NULL, NULL,             /* lockfunc, lockarg */
                                &sc->req_dmat)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate request DMA tag\n");
                return (ENOMEM);
        }
        if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames,
            BUS_DMA_NOWAIT, &sc->req_map)) {
                mpr_dprint(sc, MPR_ERROR, "Cannot allocate request memory\n");
                return (ENOMEM);
        }
        bzero(sc->req_frames, rsize);
        bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize,
            mpr_memaddr_cb, &sc->req_busaddr, 0);

        /*
         * Gen3 and beyond use the IOCMaxChainSegmentSize from IOC Facts to
         * get the size of a Chain Frame.  Previous versions use the Request
         * Frame size as the Chain Frame size.  If IOCMaxChainSegmentSize is
         * 0, use the default value.  IOCMaxChainSegmentSize is the number of
         * 16-byte elements that can fit in a Chain Frame, 16 bytes being the
         * size of an IEEE Simple SGE.
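         * As a purely illustrative example, an IOCMaxChainSegmentSize of 8
         * would give a chain frame of 8 * 16 = 128 bytes, while a value of 0
         * falls back to MPR_DEFAULT_CHAIN_SEG_SIZE *
         * MPR_MAX_CHAIN_ELEMENT_SIZE.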
1341 */ 1342 if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) { 1343 sc->chain_seg_size = 1344 htole16(sc->facts->IOCMaxChainSegmentSize); 1345 if (sc->chain_seg_size == 0) { 1346 sc->chain_frame_size = MPR_DEFAULT_CHAIN_SEG_SIZE * 1347 MPR_MAX_CHAIN_ELEMENT_SIZE; 1348 } else { 1349 sc->chain_frame_size = sc->chain_seg_size * 1350 MPR_MAX_CHAIN_ELEMENT_SIZE; 1351 } 1352 } else { 1353 sc->chain_frame_size = sc->facts->IOCRequestFrameSize * 4; 1354 } 1355 rsize = sc->chain_frame_size * sc->max_chains; 1356 if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1357 16, 0, /* algnmnt, boundary */ 1358 BUS_SPACE_MAXADDR, /* lowaddr */ 1359 BUS_SPACE_MAXADDR, /* highaddr */ 1360 NULL, NULL, /* filter, filterarg */ 1361 rsize, /* maxsize */ 1362 1, /* nsegments */ 1363 rsize, /* maxsegsize */ 1364 0, /* flags */ 1365 NULL, NULL, /* lockfunc, lockarg */ 1366 &sc->chain_dmat)) { 1367 mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain DMA tag\n"); 1368 return (ENOMEM); 1369 } 1370 if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames, 1371 BUS_DMA_NOWAIT, &sc->chain_map)) { 1372 mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n"); 1373 return (ENOMEM); 1374 } 1375 bzero(sc->chain_frames, rsize); 1376 bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, rsize, 1377 mpr_memaddr_cb, &sc->chain_busaddr, 0); 1378 1379 rsize = MPR_SENSE_LEN * sc->num_reqs; 1380 if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1381 1, 0, /* algnmnt, boundary */ 1382 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1383 BUS_SPACE_MAXADDR, /* highaddr */ 1384 NULL, NULL, /* filter, filterarg */ 1385 rsize, /* maxsize */ 1386 1, /* nsegments */ 1387 rsize, /* maxsegsize */ 1388 0, /* flags */ 1389 NULL, NULL, /* lockfunc, lockarg */ 1390 &sc->sense_dmat)) { 1391 mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense DMA tag\n"); 1392 return (ENOMEM); 1393 } 1394 if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames, 1395 BUS_DMA_NOWAIT, &sc->sense_map)) { 1396 mpr_dprint(sc, MPR_ERROR, "Cannot allocate sense memory\n"); 1397 return (ENOMEM); 1398 } 1399 bzero(sc->sense_frames, rsize); 1400 bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize, 1401 mpr_memaddr_cb, &sc->sense_busaddr, 0); 1402 1403 sc->chains = malloc(sizeof(struct mpr_chain) * sc->max_chains, M_MPR, 1404 M_WAITOK | M_ZERO); 1405 if (!sc->chains) { 1406 mpr_dprint(sc, MPR_ERROR, "Cannot allocate chain memory\n"); 1407 return (ENOMEM); 1408 } 1409 for (i = 0; i < sc->max_chains; i++) { 1410 chain = &sc->chains[i]; 1411 chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + 1412 i * sc->chain_frame_size); 1413 chain->chain_busaddr = sc->chain_busaddr + 1414 i * sc->chain_frame_size; 1415 mpr_free_chain(sc, chain); 1416 sc->chain_free_lowwater++; 1417 } 1418 1419 /* 1420 * Allocate NVMe PRP Pages for NVMe SGL support only if the FW supports 1421 * these devices. 
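         * (PRPs are NVMe-style scatter/gather page lists that the firmware
         * translates on behalf of PCIe-attached NVMe devices; see
         * mpr_alloc_nvme_prp_pages() below for how they are sized.)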
1422 */ 1423 if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) && 1424 (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) { 1425 if (mpr_alloc_nvme_prp_pages(sc) == ENOMEM) 1426 return (ENOMEM); 1427 } 1428 1429 /* XXX Need to pick a more precise value */ 1430 nsegs = (MAXPHYS / PAGE_SIZE) + 1; 1431 if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1432 1, 0, /* algnmnt, boundary */ 1433 BUS_SPACE_MAXADDR, /* lowaddr */ 1434 BUS_SPACE_MAXADDR, /* highaddr */ 1435 NULL, NULL, /* filter, filterarg */ 1436 BUS_SPACE_MAXSIZE_32BIT,/* maxsize */ 1437 nsegs, /* nsegments */ 1438 BUS_SPACE_MAXSIZE_32BIT,/* maxsegsize */ 1439 BUS_DMA_ALLOCNOW, /* flags */ 1440 busdma_lock_mutex, /* lockfunc */ 1441 &sc->mpr_mtx, /* lockarg */ 1442 &sc->buffer_dmat)) { 1443 mpr_dprint(sc, MPR_ERROR, "Cannot allocate buffer DMA tag\n"); 1444 return (ENOMEM); 1445 } 1446 1447 /* 1448 * SMID 0 cannot be used as a free command per the firmware spec. 1449 * Just drop that command instead of risking accounting bugs. 1450 */ 1451 sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs, 1452 M_MPR, M_WAITOK | M_ZERO); 1453 if (!sc->commands) { 1454 mpr_dprint(sc, MPR_ERROR, "Cannot allocate command memory\n"); 1455 return (ENOMEM); 1456 } 1457 for (i = 1; i < sc->num_reqs; i++) { 1458 cm = &sc->commands[i]; 1459 cm->cm_req = sc->req_frames + 1460 i * sc->facts->IOCRequestFrameSize * 4; 1461 cm->cm_req_busaddr = sc->req_busaddr + 1462 i * sc->facts->IOCRequestFrameSize * 4; 1463 cm->cm_sense = &sc->sense_frames[i]; 1464 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN; 1465 cm->cm_desc.Default.SMID = i; 1466 cm->cm_sc = sc; 1467 TAILQ_INIT(&cm->cm_chain_list); 1468 TAILQ_INIT(&cm->cm_prp_page_list); 1469 callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0); 1470 1471 /* XXX Is a failure here a critical problem? */ 1472 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) 1473 == 0) { 1474 if (i <= sc->facts->HighPriorityCredit) 1475 mpr_free_high_priority_command(sc, cm); 1476 else 1477 mpr_free_command(sc, cm); 1478 } else { 1479 panic("failed to allocate command %d\n", i); 1480 sc->num_reqs = i; 1481 break; 1482 } 1483 } 1484 1485 return (0); 1486 } 1487 1488 /* 1489 * Allocate contiguous buffers for PCIe NVMe devices for building native PRPs, 1490 * which are scatter/gather lists for NVMe devices. 1491 * 1492 * This buffer must be contiguous due to the nature of how NVMe PRPs are built 1493 * and translated by FW. 1494 * 1495 * returns ENOMEM if memory could not be allocated, otherwise returns 0. 1496 */ 1497 static int 1498 mpr_alloc_nvme_prp_pages(struct mpr_softc *sc) 1499 { 1500 int PRPs_per_page, PRPs_required, pages_required; 1501 int rsize, i; 1502 struct mpr_prp_page *prp_page; 1503 1504 /* 1505 * Assuming a MAX_IO_SIZE of 1MB and a PAGE_SIZE of 4k, the max number 1506 * of PRPs (NVMe's Scatter/Gather Element) needed per I/O is: 1507 * MAX_IO_SIZE / PAGE_SIZE = 256 1508 * 1509 * 1 PRP entry in main frame for PRP list pointer still leaves 255 PRPs 1510 * required for the remainder of the 1MB I/O. 512 PRPs can fit into one 1511 * page (4096 / 8 = 512), so only one page is required for each I/O. 1512 * 1513 * Each of these buffers will need to be contiguous. For simplicity, 1514 * only one buffer is allocated here, which has all of the space 1515 * required for the NVMe Queue Depth. If there are problems allocating 1516 * this one buffer, this function will need to change to allocate 1517 * individual, contiguous NVME_QDEPTH buffers. 
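         * In code terms the sizing below is:
         *
         *   PRPs_required  = maxio / PAGE_SIZE
         *   PRPs_per_page  = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1  (511 with 4k
         *                    pages and 8-byte PRP entries)
         *   pages_required = (PRPs_required / PRPs_per_page) + 1
         *
         * which, with the illustrative 1MB / 4k numbers above and integer
         * division, works out to 256 / 511 + 1 = 1 page per I/O.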
1518 * 1519 * The real calculation will use the real max io size. Above is just an 1520 * example. 1521 * 1522 */ 1523 PRPs_required = sc->maxio / PAGE_SIZE; 1524 PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1; 1525 pages_required = (PRPs_required / PRPs_per_page) + 1; 1526 1527 sc->prp_buffer_size = PAGE_SIZE * pages_required; 1528 rsize = sc->prp_buffer_size * NVME_QDEPTH; 1529 if (bus_dma_tag_create( sc->mpr_parent_dmat, /* parent */ 1530 4, 0, /* algnmnt, boundary */ 1531 BUS_SPACE_MAXADDR_32BIT,/* lowaddr */ 1532 BUS_SPACE_MAXADDR, /* highaddr */ 1533 NULL, NULL, /* filter, filterarg */ 1534 rsize, /* maxsize */ 1535 1, /* nsegments */ 1536 rsize, /* maxsegsize */ 1537 0, /* flags */ 1538 NULL, NULL, /* lockfunc, lockarg */ 1539 &sc->prp_page_dmat)) { 1540 mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP DMA " 1541 "tag\n"); 1542 return (ENOMEM); 1543 } 1544 if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages, 1545 BUS_DMA_NOWAIT, &sc->prp_page_map)) { 1546 mpr_dprint(sc, MPR_ERROR, "Cannot allocate NVMe PRP memory\n"); 1547 return (ENOMEM); 1548 } 1549 bzero(sc->prp_pages, rsize); 1550 bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages, 1551 rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0); 1552 1553 sc->prps = malloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR, 1554 M_WAITOK | M_ZERO); 1555 for (i = 0; i < NVME_QDEPTH; i++) { 1556 prp_page = &sc->prps[i]; 1557 prp_page->prp_page = (uint64_t *)(sc->prp_pages + 1558 i * sc->prp_buffer_size); 1559 prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr + 1560 i * sc->prp_buffer_size); 1561 mpr_free_prp_page(sc, prp_page); 1562 sc->prp_pages_free_lowwater++; 1563 } 1564 1565 return (0); 1566 } 1567 1568 static int 1569 mpr_init_queues(struct mpr_softc *sc) 1570 { 1571 int i; 1572 1573 memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8); 1574 1575 /* 1576 * According to the spec, we need to use one less reply than we 1577 * have space for on the queue. So sc->num_replies (the number we 1578 * use) should be less than sc->fqdepth (allocated size). 1579 */ 1580 if (sc->num_replies >= sc->fqdepth) 1581 return (EINVAL); 1582 1583 /* 1584 * Initialize all of the free queue entries. 1585 */ 1586 for (i = 0; i < sc->fqdepth; i++) { 1587 sc->free_queue[i] = sc->reply_busaddr + 1588 (i * sc->facts->ReplyFrameSize * 4); 1589 } 1590 sc->replyfreeindex = sc->num_replies; 1591 1592 return (0); 1593 } 1594 1595 /* Get the driver parameter tunables. Lowest priority are the driver defaults. 1596 * Next are the global settings, if they exist. Highest are the per-unit 1597 * settings, if they exist. 1598 */ 1599 void 1600 mpr_get_tunables(struct mpr_softc *sc) 1601 { 1602 char tmpstr[80], mpr_debug[80]; 1603 1604 /* XXX default to some debugging for now */ 1605 sc->mpr_debug = MPR_INFO | MPR_FAULT; 1606 sc->disable_msix = 0; 1607 sc->disable_msi = 0; 1608 sc->max_msix = MPR_MSIX_MAX; 1609 sc->max_chains = MPR_CHAIN_FRAMES; 1610 sc->max_io_pages = MPR_MAXIO_PAGES; 1611 sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD; 1612 sc->spinup_wait_time = DEFAULT_SPINUP_WAIT; 1613 sc->use_phynum = 1; 1614 sc->max_reqframes = MPR_REQ_FRAMES; 1615 sc->max_prireqframes = MPR_PRI_REQ_FRAMES; 1616 sc->max_replyframes = MPR_REPLY_FRAMES; 1617 sc->max_evtframes = MPR_EVT_REPLY_FRAMES; 1618 1619 /* 1620 * Grab the global variables. 
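         * For example (illustrative values, not defaults), a system-wide
         * override can be set from loader.conf(5):
         *
         *   hw.mpr.debug_level="info,fault"
         *   hw.mpr.max_chains=4096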
1621 */ 1622 bzero(mpr_debug, 80); 1623 if (TUNABLE_STR_FETCH("hw.mpr.debug_level", mpr_debug, 80) != 0) 1624 mpr_parse_debug(sc, mpr_debug); 1625 TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix); 1626 TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi); 1627 TUNABLE_INT_FETCH("hw.mpr.max_msix", &sc->max_msix); 1628 TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains); 1629 TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages); 1630 TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu); 1631 TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time); 1632 TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum); 1633 TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes); 1634 TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes); 1635 TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes); 1636 TUNABLE_INT_FETCH("hw.mpr.max_evtframes", &sc->max_evtframes); 1637 1638 /* Grab the unit-instance variables */ 1639 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.debug_level", 1640 device_get_unit(sc->mpr_dev)); 1641 bzero(mpr_debug, 80); 1642 if (TUNABLE_STR_FETCH(tmpstr, mpr_debug, 80) != 0) 1643 mpr_parse_debug(sc, mpr_debug); 1644 1645 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msix", 1646 device_get_unit(sc->mpr_dev)); 1647 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix); 1648 1649 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.disable_msi", 1650 device_get_unit(sc->mpr_dev)); 1651 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi); 1652 1653 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_msix", 1654 device_get_unit(sc->mpr_dev)); 1655 TUNABLE_INT_FETCH(tmpstr, &sc->max_msix); 1656 1657 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_chains", 1658 device_get_unit(sc->mpr_dev)); 1659 TUNABLE_INT_FETCH(tmpstr, &sc->max_chains); 1660 1661 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_io_pages", 1662 device_get_unit(sc->mpr_dev)); 1663 TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages); 1664 1665 bzero(sc->exclude_ids, sizeof(sc->exclude_ids)); 1666 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.exclude_ids", 1667 device_get_unit(sc->mpr_dev)); 1668 TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids)); 1669 1670 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.enable_ssu", 1671 device_get_unit(sc->mpr_dev)); 1672 TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu); 1673 1674 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.spinup_wait_time", 1675 device_get_unit(sc->mpr_dev)); 1676 TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time); 1677 1678 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.use_phy_num", 1679 device_get_unit(sc->mpr_dev)); 1680 TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum); 1681 1682 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_reqframes", 1683 device_get_unit(sc->mpr_dev)); 1684 TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes); 1685 1686 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_prireqframes", 1687 device_get_unit(sc->mpr_dev)); 1688 TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes); 1689 1690 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_replyframes", 1691 device_get_unit(sc->mpr_dev)); 1692 TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes); 1693 1694 snprintf(tmpstr, sizeof(tmpstr), "dev.mpr.%d.max_evtframes", 1695 device_get_unit(sc->mpr_dev)); 1696 TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes); 1697 } 1698 1699 static void 1700 mpr_setup_sysctl(struct mpr_softc *sc) 1701 { 1702 struct sysctl_ctx_list *sysctl_ctx = NULL; 1703 struct sysctl_oid *sysctl_tree = NULL; 1704 char tmpstr[80], tmpstr2[80]; 1705 1706 /* 1707 
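         * Example runtime usage (unit number illustrative): once attached,
         * "sysctl dev.mpr.0.debug_level=+trace" adds trace output through
         * the handler registered below, and "-trace" removes it again.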
* Setup the sysctl variable so the user can change the debug level 1708 * on the fly. 1709 */ 1710 snprintf(tmpstr, sizeof(tmpstr), "MPR controller %d", 1711 device_get_unit(sc->mpr_dev)); 1712 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev)); 1713 1714 sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev); 1715 if (sysctl_ctx != NULL) 1716 sysctl_tree = device_get_sysctl_tree(sc->mpr_dev); 1717 1718 if (sysctl_tree == NULL) { 1719 sysctl_ctx_init(&sc->sysctl_ctx); 1720 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, 1721 SYSCTL_STATIC_CHILDREN(_hw_mpr), OID_AUTO, tmpstr2, 1722 CTLFLAG_RD, 0, tmpstr); 1723 if (sc->sysctl_tree == NULL) 1724 return; 1725 sysctl_ctx = &sc->sysctl_ctx; 1726 sysctl_tree = sc->sysctl_tree; 1727 } 1728 1729 SYSCTL_ADD_PROC(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1730 OID_AUTO, "debug_level", CTLTYPE_STRING | CTLFLAG_RW | CTLFLAG_MPSAFE, 1731 sc, 0, mpr_debug_sysctl, "A", "mpr debug level"); 1732 1733 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1734 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0, 1735 "Disable the use of MSI-X interrupts"); 1736 1737 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1738 OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0, 1739 "User-defined maximum number of MSIX queues"); 1740 1741 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1742 OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0, 1743 "Negotiated number of MSIX queues"); 1744 1745 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1746 OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0, 1747 "Total number of allocated request frames"); 1748 1749 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1750 OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0, 1751 "Total number of allocated high priority request frames"); 1752 1753 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1754 OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0, 1755 "Total number of allocated reply frames"); 1756 1757 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1758 OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0, 1759 "Total number of event frames allocated"); 1760 1761 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1762 OID_AUTO, "firmware_version", CTLFLAG_RW, sc->fw_version, 1763 strlen(sc->fw_version), "firmware version"); 1764 1765 SYSCTL_ADD_STRING(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1766 OID_AUTO, "driver_version", CTLFLAG_RW, MPR_DRIVER_VERSION, 1767 strlen(MPR_DRIVER_VERSION), "driver version"); 1768 1769 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1770 OID_AUTO, "io_cmds_active", CTLFLAG_RD, 1771 &sc->io_cmds_active, 0, "number of currently active commands"); 1772 1773 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1774 OID_AUTO, "io_cmds_highwater", CTLFLAG_RD, 1775 &sc->io_cmds_highwater, 0, "maximum active commands seen"); 1776 1777 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1778 OID_AUTO, "chain_free", CTLFLAG_RD, 1779 &sc->chain_free, 0, "number of free chain elements"); 1780 1781 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1782 OID_AUTO, "chain_free_lowwater", CTLFLAG_RD, 1783 &sc->chain_free_lowwater, 0,"lowest number of free chain elements"); 1784 1785 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1786 OID_AUTO, "max_chains", CTLFLAG_RD, 1787 &sc->max_chains, 0,"maximum chain frames that will be allocated"); 1788 1789 SYSCTL_ADD_INT(sysctl_ctx, 
SYSCTL_CHILDREN(sysctl_tree), 1790 OID_AUTO, "max_io_pages", CTLFLAG_RD, 1791 &sc->max_io_pages, 0,"maximum pages to allow per I/O (if <1 use " 1792 "IOCFacts)"); 1793 1794 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1795 OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0, 1796 "enable SSU to SATA SSD/HDD at shutdown"); 1797 1798 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1799 OID_AUTO, "chain_alloc_fail", CTLFLAG_RD, 1800 &sc->chain_alloc_fail, "chain allocation failures"); 1801 1802 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1803 OID_AUTO, "spinup_wait_time", CTLFLAG_RD, 1804 &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for " 1805 "spinup after SATA ID error"); 1806 1807 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1808 OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0, 1809 "Use the phy number for enumeration"); 1810 1811 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1812 OID_AUTO, "prp_pages_free", CTLFLAG_RD, 1813 &sc->prp_pages_free, 0, "number of free PRP pages"); 1814 1815 SYSCTL_ADD_INT(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1816 OID_AUTO, "prp_pages_free_lowwater", CTLFLAG_RD, 1817 &sc->prp_pages_free_lowwater, 0,"lowest number of free PRP pages"); 1818 1819 SYSCTL_ADD_UQUAD(sysctl_ctx, SYSCTL_CHILDREN(sysctl_tree), 1820 OID_AUTO, "prp_page_alloc_fail", CTLFLAG_RD, 1821 &sc->prp_page_alloc_fail, "PRP page allocation failures"); 1822 } 1823 1824 static struct mpr_debug_string { 1825 char *name; 1826 int flag; 1827 } mpr_debug_strings[] = { 1828 {"info", MPR_INFO}, 1829 {"fault", MPR_FAULT}, 1830 {"event", MPR_EVENT}, 1831 {"log", MPR_LOG}, 1832 {"recovery", MPR_RECOVERY}, 1833 {"error", MPR_ERROR}, 1834 {"init", MPR_INIT}, 1835 {"xinfo", MPR_XINFO}, 1836 {"user", MPR_USER}, 1837 {"mapping", MPR_MAPPING}, 1838 {"trace", MPR_TRACE} 1839 }; 1840 1841 enum mpr_debug_level_combiner { 1842 COMB_NONE, 1843 COMB_ADD, 1844 COMB_SUB 1845 }; 1846 1847 static int 1848 mpr_debug_sysctl(SYSCTL_HANDLER_ARGS) 1849 { 1850 struct mpr_softc *sc; 1851 struct mpr_debug_string *string; 1852 struct sbuf *sbuf; 1853 char *buffer; 1854 size_t sz; 1855 int i, len, debug, error; 1856 1857 sc = (struct mpr_softc *)arg1; 1858 1859 error = sysctl_wire_old_buffer(req, 0); 1860 if (error != 0) 1861 return (error); 1862 1863 sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req); 1864 debug = sc->mpr_debug; 1865 1866 sbuf_printf(sbuf, "%#x", debug); 1867 1868 sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]); 1869 for (i = 0; i < sz; i++) { 1870 string = &mpr_debug_strings[i]; 1871 if (debug & string->flag) 1872 sbuf_printf(sbuf, ",%s", string->name); 1873 } 1874 1875 error = sbuf_finish(sbuf); 1876 sbuf_delete(sbuf); 1877 1878 if (error || req->newptr == NULL) 1879 return (error); 1880 1881 len = req->newlen - req->newidx; 1882 if (len == 0) 1883 return (0); 1884 1885 buffer = malloc(len, M_MPR, M_ZERO|M_WAITOK); 1886 error = SYSCTL_IN(req, buffer, len); 1887 1888 mpr_parse_debug(sc, buffer); 1889 1890 free(buffer, M_MPR); 1891 return (error); 1892 } 1893 1894 static void 1895 mpr_parse_debug(struct mpr_softc *sc, char *list) 1896 { 1897 struct mpr_debug_string *string; 1898 enum mpr_debug_level_combiner op; 1899 char *token, *endtoken; 1900 size_t sz; 1901 int flags, i; 1902 1903 if (list == NULL || *list == '\0') 1904 return; 1905 1906 if (*list == '+') { 1907 op = COMB_ADD; 1908 list++; 1909 } else if (*list == '-') { 1910 op = COMB_SUB; 1911 list++; 1912 } else 1913 op = COMB_NONE; 1914 if (*list == '\0') 
1915 return; 1916 1917 flags = 0; 1918 sz = sizeof(mpr_debug_strings) / sizeof(mpr_debug_strings[0]); 1919 while ((token = strsep(&list, ":,")) != NULL) { 1920 1921 /* Handle integer flags */ 1922 flags |= strtol(token, &endtoken, 0); 1923 if (token != endtoken) 1924 continue; 1925 1926 /* Handle text flags */ 1927 for (i = 0; i < sz; i++) { 1928 string = &mpr_debug_strings[i]; 1929 if (strcasecmp(token, string->name) == 0) { 1930 flags |= string->flag; 1931 break; 1932 } 1933 } 1934 } 1935 1936 switch (op) { 1937 case COMB_NONE: 1938 sc->mpr_debug = flags; 1939 break; 1940 case COMB_ADD: 1941 sc->mpr_debug |= flags; 1942 break; 1943 case COMB_SUB: 1944 sc->mpr_debug &= (~flags); 1945 break; 1946 } 1947 return; 1948 } 1949 1950 int 1951 mpr_attach(struct mpr_softc *sc) 1952 { 1953 int error; 1954 1955 MPR_FUNCTRACE(sc); 1956 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 1957 1958 mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF); 1959 callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0); 1960 callout_init_mtx(&sc->device_check_callout, &sc->mpr_mtx, 0); 1961 TAILQ_INIT(&sc->event_list); 1962 timevalclear(&sc->lastfail); 1963 1964 if ((error = mpr_transition_ready(sc)) != 0) { 1965 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 1966 "Failed to transition ready\n"); 1967 return (error); 1968 } 1969 1970 sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR, 1971 M_ZERO|M_NOWAIT); 1972 if (!sc->facts) { 1973 mpr_dprint(sc, MPR_INIT|MPR_FAULT, 1974 "Cannot allocate memory, exit\n"); 1975 return (ENOMEM); 1976 } 1977 1978 /* 1979 * Get IOC Facts and allocate all structures based on this information. 1980 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC 1981 * Facts. If relevant values have changed in IOC Facts, this function 1982 * will free all of the memory based on IOC Facts and reallocate that 1983 * memory. If this fails, any allocated memory should already be freed. 1984 */ 1985 if ((error = mpr_iocfacts_allocate(sc, TRUE)) != 0) { 1986 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "IOC Facts allocation " 1987 "failed with error %d\n", error); 1988 return (error); 1989 } 1990 1991 /* Start the periodic watchdog check on the IOC Doorbell */ 1992 mpr_periodic(sc); 1993 1994 /* 1995 * The portenable will kick off discovery events that will drive the 1996 * rest of the initialization process. The CAM/SAS module will 1997 * hold up the boot sequence until discovery is complete. 1998 */ 1999 sc->mpr_ich.ich_func = mpr_startup; 2000 sc->mpr_ich.ich_arg = sc; 2001 if (config_intrhook_establish(&sc->mpr_ich) != 0) { 2002 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 2003 "Cannot establish MPR config hook\n"); 2004 error = EINVAL; 2005 } 2006 2007 /* 2008 * Allow IR to shutdown gracefully when shutdown occurs. 2009 */ 2010 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, 2011 mprsas_ir_shutdown, sc, SHUTDOWN_PRI_DEFAULT); 2012 2013 if (sc->shutdown_eh == NULL) 2014 mpr_dprint(sc, MPR_INIT|MPR_ERROR, 2015 "shutdown event registration failed\n"); 2016 2017 mpr_setup_sysctl(sc); 2018 2019 sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE; 2020 mpr_dprint(sc, MPR_INIT, "%s exit error= %d\n", __func__, error); 2021 2022 return (error); 2023 } 2024 2025 /* Run through any late-start handlers. 
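It runs once from the config intrhook: it unmasks interrupts, initializes the device mapping tables, and starts SAS discovery before the hook is disestablished.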
*/ 2026 static void 2027 mpr_startup(void *arg) 2028 { 2029 struct mpr_softc *sc; 2030 2031 sc = (struct mpr_softc *)arg; 2032 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 2033 2034 mpr_lock(sc); 2035 mpr_unmask_intr(sc); 2036 2037 /* initialize device mapping tables */ 2038 mpr_base_static_config_pages(sc); 2039 mpr_mapping_initialize(sc); 2040 mprsas_startup(sc); 2041 mpr_unlock(sc); 2042 2043 mpr_dprint(sc, MPR_INIT, "disestablish config intrhook\n"); 2044 config_intrhook_disestablish(&sc->mpr_ich); 2045 sc->mpr_ich.ich_arg = NULL; 2046 2047 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 2048 } 2049 2050 /* Periodic watchdog. Is called with the driver lock already held. */ 2051 static void 2052 mpr_periodic(void *arg) 2053 { 2054 struct mpr_softc *sc; 2055 uint32_t db; 2056 2057 sc = (struct mpr_softc *)arg; 2058 if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN) 2059 return; 2060 2061 db = mpr_regread(sc, MPI2_DOORBELL_OFFSET); 2062 if ((db & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) { 2063 if ((db & MPI2_DOORBELL_FAULT_CODE_MASK) == 2064 IFAULT_IOP_OVER_TEMP_THRESHOLD_EXCEEDED) { 2065 panic("TEMPERATURE FAULT: STOPPING."); 2066 } 2067 mpr_dprint(sc, MPR_FAULT, "IOC Fault 0x%08x, Resetting\n", db); 2068 mpr_reinit(sc); 2069 } 2070 2071 callout_reset(&sc->periodic, MPR_PERIODIC_DELAY * hz, mpr_periodic, sc); 2072 } 2073 2074 static void 2075 mpr_log_evt_handler(struct mpr_softc *sc, uintptr_t data, 2076 MPI2_EVENT_NOTIFICATION_REPLY *event) 2077 { 2078 MPI2_EVENT_DATA_LOG_ENTRY_ADDED *entry; 2079 2080 MPR_DPRINT_EVENT(sc, generic, event); 2081 2082 switch (event->Event) { 2083 case MPI2_EVENT_LOG_DATA: 2084 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_DATA:\n"); 2085 if (sc->mpr_debug & MPR_EVENT) 2086 hexdump(event->EventData, event->EventDataLength, NULL, 2087 0); 2088 break; 2089 case MPI2_EVENT_LOG_ENTRY_ADDED: 2090 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; 2091 mpr_dprint(sc, MPR_EVENT, "MPI2_EVENT_LOG_ENTRY_ADDED event " 2092 "0x%x Sequence %d:\n", entry->LogEntryQualifier, 2093 entry->LogSequence); 2094 break; 2095 default: 2096 break; 2097 } 2098 return; 2099 } 2100 2101 static int 2102 mpr_attach_log(struct mpr_softc *sc) 2103 { 2104 uint8_t events[16]; 2105 2106 bzero(events, 16); 2107 setbit(events, MPI2_EVENT_LOG_DATA); 2108 setbit(events, MPI2_EVENT_LOG_ENTRY_ADDED); 2109 2110 mpr_register_events(sc, events, mpr_log_evt_handler, NULL, 2111 &sc->mpr_log_eh); 2112 2113 return (0); 2114 } 2115 2116 static int 2117 mpr_detach_log(struct mpr_softc *sc) 2118 { 2119 2120 if (sc->mpr_log_eh != NULL) 2121 mpr_deregister_events(sc, sc->mpr_log_eh); 2122 return (0); 2123 } 2124 2125 /* 2126 * Free all of the driver resources and detach submodules. Should be called 2127 * without the lock held. 2128 */ 2129 int 2130 mpr_free(struct mpr_softc *sc) 2131 { 2132 int error; 2133 2134 mpr_dprint(sc, MPR_INIT, "%s entered\n", __func__); 2135 /* Turn off the watchdog */ 2136 mpr_lock(sc); 2137 sc->mpr_flags |= MPR_FLAGS_SHUTDOWN; 2138 mpr_unlock(sc); 2139 /* Lock must not be held for this */ 2140 callout_drain(&sc->periodic); 2141 callout_drain(&sc->device_check_callout); 2142 2143 if (((error = mpr_detach_log(sc)) != 0) || 2144 ((error = mpr_detach_sas(sc)) != 0)) { 2145 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "failed to detach " 2146 "subsystems, error= %d, exit\n", error); 2147 return (error); 2148 } 2149 2150 mpr_detach_user(sc); 2151 2152 /* Put the IOC back in the READY state. 
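If the transition fails the teardown stops here; otherwise the IOC Facts based buffers are freed below.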
*/ 2153 mpr_lock(sc); 2154 if ((error = mpr_transition_ready(sc)) != 0) { 2155 mpr_unlock(sc); 2156 return (error); 2157 } 2158 mpr_unlock(sc); 2159 2160 if (sc->facts != NULL) 2161 free(sc->facts, M_MPR); 2162 2163 /* 2164 * Free all buffers that are based on IOC Facts. A Diag Reset may need 2165 * to free these buffers too. 2166 */ 2167 mpr_iocfacts_free(sc); 2168 2169 if (sc->sysctl_tree != NULL) 2170 sysctl_ctx_free(&sc->sysctl_ctx); 2171 2172 /* Deregister the shutdown function */ 2173 if (sc->shutdown_eh != NULL) 2174 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); 2175 2176 mtx_destroy(&sc->mpr_mtx); 2177 mpr_dprint(sc, MPR_INIT, "%s exit\n", __func__); 2178 2179 return (0); 2180 } 2181 2182 static __inline void 2183 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm) 2184 { 2185 MPR_FUNCTRACE(sc); 2186 2187 if (cm == NULL) { 2188 mpr_dprint(sc, MPR_ERROR, "Completing NULL command\n"); 2189 return; 2190 } 2191 2192 if (cm->cm_flags & MPR_CM_FLAGS_POLLED) 2193 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; 2194 2195 if (cm->cm_complete != NULL) { 2196 mpr_dprint(sc, MPR_TRACE, 2197 "%s cm %p calling cm_complete %p data %p reply %p\n", 2198 __func__, cm, cm->cm_complete, cm->cm_complete_data, 2199 cm->cm_reply); 2200 cm->cm_complete(sc, cm); 2201 } 2202 2203 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { 2204 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm); 2205 wakeup(cm); 2206 } 2207 2208 if (sc->io_cmds_active != 0) { 2209 sc->io_cmds_active--; 2210 } else { 2211 mpr_dprint(sc, MPR_ERROR, "Warning: io_cmds_active is " 2212 "out of sync - resynching to 0\n"); 2213 } 2214 } 2215 2216 static void 2217 mpr_sas_log_info(struct mpr_softc *sc, u32 log_info) 2218 { 2219 union loginfo_type { 2220 u32 loginfo; 2221 struct { 2222 u32 subcode:16; 2223 u32 code:8; 2224 u32 originator:4; 2225 u32 bus_type:4; 2226 } dw; 2227 }; 2228 union loginfo_type sas_loginfo; 2229 char *originator_str = NULL; 2230 2231 sas_loginfo.loginfo = log_info; 2232 if (sas_loginfo.dw.bus_type != 3 /*SAS*/) 2233 return; 2234 2235 /* each nexus loss loginfo */ 2236 if (log_info == 0x31170000) 2237 return; 2238 2239 /* eat the loginfos associated with task aborts */ 2240 if ((log_info == 0x30050000) || (log_info == 0x31140000) || 2241 (log_info == 0x31130000)) 2242 return; 2243 2244 switch (sas_loginfo.dw.originator) { 2245 case 0: 2246 originator_str = "IOP"; 2247 break; 2248 case 1: 2249 originator_str = "PL"; 2250 break; 2251 case 2: 2252 originator_str = "IR"; 2253 break; 2254 } 2255 2256 mpr_dprint(sc, MPR_LOG, "log_info(0x%08x): originator(%s), " 2257 "code(0x%02x), sub_code(0x%04x)\n", log_info, originator_str, 2258 sas_loginfo.dw.code, sas_loginfo.dw.subcode); 2259 } 2260 2261 static void 2262 mpr_display_reply_info(struct mpr_softc *sc, uint8_t *reply) 2263 { 2264 MPI2DefaultReply_t *mpi_reply; 2265 u16 sc_status; 2266 2267 mpi_reply = (MPI2DefaultReply_t*)reply; 2268 sc_status = le16toh(mpi_reply->IOCStatus); 2269 if (sc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) 2270 mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo)); 2271 } 2272 2273 void 2274 mpr_intr(void *data) 2275 { 2276 struct mpr_softc *sc; 2277 uint32_t status; 2278 2279 sc = (struct mpr_softc *)data; 2280 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2281 2282 /* 2283 * Check interrupt status register to flush the bus.
This is 2284 * needed for both INTx interrupts and driver-driven polling 2285 */ 2286 status = mpr_regread(sc, MPI2_HOST_INTERRUPT_STATUS_OFFSET); 2287 if ((status & MPI2_HIS_REPLY_DESCRIPTOR_INTERRUPT) == 0) 2288 return; 2289 2290 mpr_lock(sc); 2291 mpr_intr_locked(data); 2292 mpr_unlock(sc); 2293 return; 2294 } 2295 2296 /* 2297 * In theory, MSI/MSIX interrupts shouldn't need to read any registers on the 2298 * chip. Hopefully this theory is correct. 2299 */ 2300 void 2301 mpr_intr_msi(void *data) 2302 { 2303 struct mpr_softc *sc; 2304 2305 sc = (struct mpr_softc *)data; 2306 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2307 mpr_lock(sc); 2308 mpr_intr_locked(data); 2309 mpr_unlock(sc); 2310 return; 2311 } 2312 2313 /* 2314 * The locking is overly broad and simplistic, but easy to deal with for now. 2315 */ 2316 void 2317 mpr_intr_locked(void *data) 2318 { 2319 MPI2_REPLY_DESCRIPTORS_UNION *desc; 2320 struct mpr_softc *sc; 2321 struct mpr_command *cm = NULL; 2322 uint8_t flags; 2323 u_int pq; 2324 MPI2_DIAG_RELEASE_REPLY *rel_rep; 2325 mpr_fw_diagnostic_buffer_t *pBuffer; 2326 2327 sc = (struct mpr_softc *)data; 2328 2329 pq = sc->replypostindex; 2330 mpr_dprint(sc, MPR_TRACE, 2331 "%s sc %p starting with replypostindex %u\n", 2332 __func__, sc, sc->replypostindex); 2333 2334 for ( ;; ) { 2335 cm = NULL; 2336 desc = &sc->post_queue[sc->replypostindex]; 2337 flags = desc->Default.ReplyFlags & 2338 MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK; 2339 if ((flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) || 2340 (le32toh(desc->Words.High) == 0xffffffff)) 2341 break; 2342 2343 /* increment the replypostindex now, so that event handlers 2344 * and cm completion handlers which decide to do a diag 2345 * reset can zero it without it getting incremented again 2346 * afterwards, and we break out of this loop on the next 2347 * iteration since the reply post queue has been cleared to 2348 * 0xFF and all descriptors look unused (which they are). 2349 */ 2350 if (++sc->replypostindex >= sc->pqdepth) 2351 sc->replypostindex = 0; 2352 2353 switch (flags) { 2354 case MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS: 2355 case MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS: 2356 case MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS: 2357 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)]; 2358 cm->cm_reply = NULL; 2359 break; 2360 case MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY: 2361 { 2362 uint32_t baddr; 2363 uint8_t *reply; 2364 2365 /* 2366 * Re-compose the reply address from the address 2367 * sent back from the chip. The ReplyFrameAddress 2368 * is the lower 32 bits of the physical address of 2369 * particular reply frame. Convert that address to 2370 * host format, and then use that to provide the 2371 * offset against the virtual address base 2372 * (sc->reply_frames). 2373 */ 2374 baddr = le32toh(desc->AddressReply.ReplyFrameAddress); 2375 reply = sc->reply_frames + 2376 (baddr - ((uint32_t)sc->reply_busaddr)); 2377 /* 2378 * Make sure the reply we got back is in a valid 2379 * range. If not, go ahead and panic here, since 2380 * we'll probably panic as soon as we deference the 2381 * reply pointer anyway. 
2382 */ 2383 if ((reply < sc->reply_frames) 2384 || (reply > (sc->reply_frames + 2385 (sc->fqdepth * sc->facts->ReplyFrameSize * 4)))) { 2386 printf("%s: WARNING: reply %p out of range!\n", 2387 __func__, reply); 2388 printf("%s: reply_frames %p, fqdepth %d, " 2389 "frame size %d\n", __func__, 2390 sc->reply_frames, sc->fqdepth, 2391 sc->facts->ReplyFrameSize * 4); 2392 printf("%s: baddr %#x,\n", __func__, baddr); 2393 /* LSI-TODO. See Linux Code for Graceful exit */ 2394 panic("Reply address out of range"); 2395 } 2396 if (le16toh(desc->AddressReply.SMID) == 0) { 2397 if (((MPI2_DEFAULT_REPLY *)reply)->Function == 2398 MPI2_FUNCTION_DIAG_BUFFER_POST) { 2399 /* 2400 * If SMID is 0 for Diag Buffer Post, 2401 * this implies that the reply is due to 2402 * a release function with a status that 2403 * the buffer has been released. Set 2404 * the buffer flags accordingly. 2405 */ 2406 rel_rep = 2407 (MPI2_DIAG_RELEASE_REPLY *)reply; 2408 if ((le16toh(rel_rep->IOCStatus) & 2409 MPI2_IOCSTATUS_MASK) == 2410 MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED) 2411 { 2412 pBuffer = 2413 &sc->fw_diag_buffer_list[ 2414 rel_rep->BufferType]; 2415 pBuffer->valid_data = TRUE; 2416 pBuffer->owned_by_firmware = 2417 FALSE; 2418 pBuffer->immediate = FALSE; 2419 } 2420 } else 2421 mpr_dispatch_event(sc, baddr, 2422 (MPI2_EVENT_NOTIFICATION_REPLY *) 2423 reply); 2424 } else { 2425 cm = &sc->commands[ 2426 le16toh(desc->AddressReply.SMID)]; 2427 cm->cm_reply = reply; 2428 cm->cm_reply_data = 2429 le32toh(desc->AddressReply. 2430 ReplyFrameAddress); 2431 } 2432 break; 2433 } 2434 case MPI2_RPY_DESCRIPT_FLAGS_TARGETASSIST_SUCCESS: 2435 case MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER: 2436 case MPI2_RPY_DESCRIPT_FLAGS_RAID_ACCELERATOR_SUCCESS: 2437 default: 2438 /* Unhandled */ 2439 mpr_dprint(sc, MPR_ERROR, "Unhandled reply 0x%x\n", 2440 desc->Default.ReplyFlags); 2441 cm = NULL; 2442 break; 2443 } 2444 2445 if (cm != NULL) { 2446 // Print Error reply frame 2447 if (cm->cm_reply) 2448 mpr_display_reply_info(sc,cm->cm_reply); 2449 mpr_complete_command(sc, cm); 2450 } 2451 2452 desc->Words.Low = 0xffffffff; 2453 desc->Words.High = 0xffffffff; 2454 } 2455 2456 if (pq != sc->replypostindex) { 2457 mpr_dprint(sc, MPR_TRACE, 2458 "%s sc %p writing postindex %d\n", 2459 __func__, sc, sc->replypostindex); 2460 mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET, 2461 sc->replypostindex); 2462 } 2463 2464 return; 2465 } 2466 2467 static void 2468 mpr_dispatch_event(struct mpr_softc *sc, uintptr_t data, 2469 MPI2_EVENT_NOTIFICATION_REPLY *reply) 2470 { 2471 struct mpr_event_handle *eh; 2472 int event, handled = 0; 2473 2474 event = le16toh(reply->Event); 2475 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2476 if (isset(eh->mask, event)) { 2477 eh->callback(sc, data, reply); 2478 handled++; 2479 } 2480 } 2481 2482 if (handled == 0) 2483 mpr_dprint(sc, MPR_EVENT, "Unhandled event 0x%x\n", 2484 le16toh(event)); 2485 2486 /* 2487 * This is the only place that the event/reply should be freed. 2488 * Anything wanting to hold onto the event data should have 2489 * already copied it into their own storage. 
2490 */ 2491 mpr_free_reply(sc, data); 2492 } 2493 2494 static void 2495 mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm) 2496 { 2497 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2498 2499 if (cm->cm_reply) 2500 MPR_DPRINT_EVENT(sc, generic, 2501 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); 2502 2503 mpr_free_command(sc, cm); 2504 2505 /* next, send a port enable */ 2506 mprsas_startup(sc); 2507 } 2508 2509 /* 2510 * For both register_events and update_events, the caller supplies a bitmap 2511 * of events that it _wants_. These functions then turn that into a bitmask 2512 * suitable for the controller. 2513 */ 2514 int 2515 mpr_register_events(struct mpr_softc *sc, uint8_t *mask, 2516 mpr_evt_callback_t *cb, void *data, struct mpr_event_handle **handle) 2517 { 2518 struct mpr_event_handle *eh; 2519 int error = 0; 2520 2521 eh = malloc(sizeof(struct mpr_event_handle), M_MPR, M_WAITOK|M_ZERO); 2522 if (!eh) { 2523 mpr_dprint(sc, MPR_EVENT|MPR_ERROR, 2524 "Cannot allocate event memory\n"); 2525 return (ENOMEM); 2526 } 2527 eh->callback = cb; 2528 eh->data = data; 2529 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); 2530 if (mask != NULL) 2531 error = mpr_update_events(sc, eh, mask); 2532 *handle = eh; 2533 2534 return (error); 2535 } 2536 2537 int 2538 mpr_update_events(struct mpr_softc *sc, struct mpr_event_handle *handle, 2539 uint8_t *mask) 2540 { 2541 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2542 MPI2_EVENT_NOTIFICATION_REPLY *reply = NULL; 2543 struct mpr_command *cm = NULL; 2544 struct mpr_event_handle *eh; 2545 int error, i; 2546 2547 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2548 2549 if ((mask != NULL) && (handle != NULL)) 2550 bcopy(mask, &handle->mask[0], 16); 2551 memset(sc->event_mask, 0xff, 16); 2552 2553 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2554 for (i = 0; i < 16; i++) 2555 sc->event_mask[i] &= ~eh->mask[i]; 2556 } 2557 2558 if ((cm = mpr_alloc_command(sc)) == NULL) 2559 return (EBUSY); 2560 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2561 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2562 evtreq->MsgFlags = 0; 2563 evtreq->SASBroadcastPrimitiveMasks = 0; 2564 #ifdef MPR_DEBUG_ALL_EVENTS 2565 { 2566 u_char fullmask[16]; 2567 memset(fullmask, 0x00, 16); 2568 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2569 } 2570 #else 2571 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2572 #endif 2573 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2574 cm->cm_data = NULL; 2575 2576 error = mpr_request_polled(sc, &cm); 2577 if (cm != NULL) 2578 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; 2579 if ((reply == NULL) || 2580 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) 2581 error = ENXIO; 2582 2583 if (reply) 2584 MPR_DPRINT_EVENT(sc, generic, reply); 2585 2586 mpr_dprint(sc, MPR_TRACE, "%s finished error %d\n", __func__, error); 2587 2588 if (cm != NULL) 2589 mpr_free_command(sc, cm); 2590 return (error); 2591 } 2592 2593 static int 2594 mpr_reregister_events(struct mpr_softc *sc) 2595 { 2596 MPI2_EVENT_NOTIFICATION_REQUEST *evtreq; 2597 struct mpr_command *cm; 2598 struct mpr_event_handle *eh; 2599 int error, i; 2600 2601 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2602 2603 /* first, reregister events */ 2604 2605 memset(sc->event_mask, 0xff, 16); 2606 2607 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { 2608 for (i = 0; i < 16; i++) 2609 sc->event_mask[i] &= ~eh->mask[i]; 2610 } 2611 2612 if ((cm = mpr_alloc_command(sc)) == NULL) 2613 return (EBUSY); 
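/* Build the event notification request from the merged mask; completion is handled by mpr_reregister_events_complete(). */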
2614 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; 2615 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; 2616 evtreq->MsgFlags = 0; 2617 evtreq->SASBroadcastPrimitiveMasks = 0; 2618 #ifdef MPR_DEBUG_ALL_EVENTS 2619 { 2620 u_char fullmask[16]; 2621 memset(fullmask, 0x00, 16); 2622 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, 16); 2623 } 2624 #else 2625 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, 16); 2626 #endif 2627 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 2628 cm->cm_data = NULL; 2629 cm->cm_complete = mpr_reregister_events_complete; 2630 2631 error = mpr_map_command(sc, cm); 2632 2633 mpr_dprint(sc, MPR_TRACE, "%s finished with error %d\n", __func__, 2634 error); 2635 return (error); 2636 } 2637 2638 int 2639 mpr_deregister_events(struct mpr_softc *sc, struct mpr_event_handle *handle) 2640 { 2641 2642 TAILQ_REMOVE(&sc->event_list, handle, eh_list); 2643 free(handle, M_MPR); 2644 return (mpr_update_events(sc, NULL, NULL)); 2645 } 2646 2647 /** 2648 * mpr_build_nvme_prp - This function is called for NVMe end devices to build a 2649 * native SGL (NVMe PRP). The native SGL is built starting in the first PRP entry 2650 * of the NVMe message (PRP1). If the data buffer is small enough to be described 2651 * entirely using PRP1, then PRP2 is not used. If needed, PRP2 is used to 2652 * describe a larger data buffer. If the data buffer is too large to describe 2653 * using the two PRP entries inside the NVMe message, then PRP1 describes the 2654 * first data memory segment, and PRP2 contains a pointer to a PRP list located 2655 * elsewhere in memory to describe the remaining data memory segments. The PRP 2656 * list will be contiguous. 2657 * 2658 * The native SGL for NVMe devices is a Physical Region Page (PRP). A PRP 2659 * consists of a list of PRP entries to describe a number of noncontiguous 2660 * physical memory segments as a single memory buffer, just as a SGL does. Note 2661 * however, that this function is only used by the IOCTL call, so the memory 2662 * given will be guaranteed to be contiguous. There is no need to translate 2663 * non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous 2664 * space that is one page size each. 2665 * 2666 * Each NVMe message contains two PRP entries. The first (PRP1) either contains 2667 * a PRP list pointer or a PRP element, depending upon the command. PRP2 contains 2668 * the second PRP element if the memory being described fits within 2 PRP 2669 * entries, or a PRP list pointer if the PRP spans more than two entries. 2670 * 2671 * A PRP list pointer contains the address of a PRP list, structured as a linear 2672 * array of PRP entries. Each PRP entry in this list describes a segment of 2673 * physical memory. 2674 * 2675 * Each 64-bit PRP entry comprises an address and an offset field. The address 2676 * always points to the beginning of a PAGE_SIZE physical memory page, and the 2677 * offset describes where within that page the memory segment begins. Only the 2678 * first element in a PRP list may contain a non-zero offset, implying that all 2679 * memory segments following the first begin at the start of a PAGE_SIZE page. 2680 * 2681 * Each PRP element normally describes a chunk of PAGE_SIZE physical memory, 2682 * with exceptions for the first and last elements in the list.
If the memory 2683 * being described by the list begins at a non-zero offset within the first page, 2684 * then the first PRP element will contain a non-zero offset indicating where the 2685 * region begins within the page. The last memory segment may end before the end 2686 * of the PAGE_SIZE segment, depending upon the overall size of the memory being 2687 * described by the PRP list. 2688 * 2689 * Since PRP entries lack any indication of size, the overall data buffer length 2690 * is used to determine where the end of the data memory buffer is located, and 2691 * how many PRP entries are required to describe it. 2692 * 2693 * Returns nothing. 2694 */ 2695 void 2696 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm, 2697 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request, void *data, 2698 uint32_t data_in_sz, uint32_t data_out_sz) 2699 { 2700 int prp_size = PRP_ENTRY_SIZE; 2701 uint64_t *prp_entry, *prp1_entry, *prp2_entry; 2702 uint64_t *prp_entry_phys, *prp_page, *prp_page_phys; 2703 uint32_t offset, entry_len, page_mask_result, page_mask; 2704 bus_addr_t paddr; 2705 size_t length; 2706 struct mpr_prp_page *prp_page_info = NULL; 2707 2708 /* 2709 * Not all commands require a data transfer. If no data, just return 2710 * without constructing any PRP. 2711 */ 2712 if (!data_in_sz && !data_out_sz) 2713 return; 2714 2715 /* 2716 * Set pointers to PRP1 and PRP2, which are in the NVMe command. PRP1 is 2717 * located at a 24 byte offset from the start of the NVMe command. Then 2718 * set the current PRP entry pointer to PRP1. 2719 */ 2720 prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + 2721 NVME_CMD_PRP1_OFFSET); 2722 prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + 2723 NVME_CMD_PRP2_OFFSET); 2724 prp_entry = prp1_entry; 2725 2726 /* 2727 * For the PRP entries, use the specially allocated buffer of 2728 * contiguous memory. PRP Page allocation failures should not happen 2729 * because there should be enough PRP page buffers to account for the 2730 * possible NVMe QDepth. 2731 */ 2732 prp_page_info = mpr_alloc_prp_page(sc); 2733 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " 2734 "used for building a native NVMe SGL.\n", __func__)); 2735 prp_page = (uint64_t *)prp_page_info->prp_page; 2736 prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; 2737 2738 /* 2739 * Insert the allocated PRP page into the command's PRP page list. This 2740 * will be freed when the command is freed. 2741 */ 2742 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); 2743 2744 /* 2745 * Check if we are within 1 entry of a page boundary we don't want our 2746 * first entry to be a PRP List entry. 2747 */ 2748 page_mask = PAGE_SIZE - 1; 2749 page_mask_result = (uintptr_t)((uint8_t *)prp_page + prp_size) & 2750 page_mask; 2751 if (!page_mask_result) 2752 { 2753 /* Bump up to next page boundary. */ 2754 prp_page = (uint64_t *)((uint8_t *)prp_page + prp_size); 2755 prp_page_phys = (uint64_t *)((uint8_t *)prp_page_phys + 2756 prp_size); 2757 } 2758 2759 /* 2760 * Set PRP physical pointer, which initially points to the current PRP 2761 * DMA memory page. 2762 */ 2763 prp_entry_phys = prp_page_phys; 2764 2765 /* Get physical address and length of the data buffer. */ 2766 paddr = (bus_addr_t)data; 2767 if (data_in_sz) 2768 length = data_in_sz; 2769 else 2770 length = data_out_sz; 2771 2772 /* Loop while the length is not zero. 
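Each pass emits one PRP entry covering at most one page of the data buffer, and writes a PRP list pointer whenever the entry being filled is the last slot in the current PRP page.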
*/ 2773 while (length) 2774 { 2775 /* 2776 * Check if we need to put a list pointer here if we are at page 2777 * boundary - prp_size (8 bytes). 2778 */ 2779 page_mask_result = (uintptr_t)((uint8_t *)prp_entry_phys + 2780 prp_size) & page_mask; 2781 if (!page_mask_result) 2782 { 2783 /* 2784 * This is the last entry in a PRP List, so we need to 2785 * put a PRP list pointer here. What this does is: 2786 * - bump the current memory pointer to the next 2787 * address, which will be the next full page. 2788 * - set the PRP Entry to point to that page. This is 2789 * now the PRP List pointer. 2790 * - bump the PRP Entry pointer the start of the next 2791 * page. Since all of this PRP memory is contiguous, 2792 * no need to get a new page - it's just the next 2793 * address. 2794 */ 2795 prp_entry_phys++; 2796 *prp_entry = 2797 htole64((uint64_t)(uintptr_t)prp_entry_phys); 2798 prp_entry++; 2799 } 2800 2801 /* Need to handle if entry will be part of a page. */ 2802 offset = (uint32_t)paddr & page_mask; 2803 entry_len = PAGE_SIZE - offset; 2804 2805 if (prp_entry == prp1_entry) 2806 { 2807 /* 2808 * Must fill in the first PRP pointer (PRP1) before 2809 * moving on. 2810 */ 2811 *prp1_entry = htole64((uint64_t)paddr); 2812 2813 /* 2814 * Now point to the second PRP entry within the 2815 * command (PRP2). 2816 */ 2817 prp_entry = prp2_entry; 2818 } 2819 else if (prp_entry == prp2_entry) 2820 { 2821 /* 2822 * Should the PRP2 entry be a PRP List pointer or just a 2823 * regular PRP pointer? If there is more than one more 2824 * page of data, must use a PRP List pointer. 2825 */ 2826 if (length > PAGE_SIZE) 2827 { 2828 /* 2829 * PRP2 will contain a PRP List pointer because 2830 * more PRP's are needed with this command. The 2831 * list will start at the beginning of the 2832 * contiguous buffer. 2833 */ 2834 *prp2_entry = 2835 htole64( 2836 (uint64_t)(uintptr_t)prp_entry_phys); 2837 2838 /* 2839 * The next PRP Entry will be the start of the 2840 * first PRP List. 2841 */ 2842 prp_entry = prp_page; 2843 } 2844 else 2845 { 2846 /* 2847 * After this, the PRP Entries are complete. 2848 * This command uses 2 PRP's and no PRP list. 2849 */ 2850 *prp2_entry = htole64((uint64_t)paddr); 2851 } 2852 } 2853 else 2854 { 2855 /* 2856 * Put entry in list and bump the addresses. 2857 * 2858 * After PRP1 and PRP2 are filled in, this will fill in 2859 * all remaining PRP entries in a PRP List, one per each 2860 * time through the loop. 2861 */ 2862 *prp_entry = htole64((uint64_t)paddr); 2863 prp_entry++; 2864 prp_entry_phys++; 2865 } 2866 2867 /* 2868 * Bump the phys address of the command's data buffer by the 2869 * entry_len. 2870 */ 2871 paddr += entry_len; 2872 2873 /* Decrement length accounting for last partial page. */ 2874 if (entry_len > length) 2875 length = 0; 2876 else 2877 length -= entry_len; 2878 } 2879 } 2880 2881 /* 2882 * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to 2883 * determine if the driver needs to build a native SGL. If so, that native SGL 2884 * is built in the contiguous buffers allocated especially for PCIe SGL 2885 * creation. If the driver will not build a native SGL, return TRUE and a 2886 * normal IEEE SGL will be built. Currently this routine supports NVMe devices 2887 * only. 2888 * 2889 * Returns FALSE (0) if native SGL was built, TRUE (1) if no SGL was built. 
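 *
 * In practice this means an IEEE SGL is used whenever the transfer exceeds the
 * device's MDTS, fits within the main message frame, or turns out not to be
 * page-aligned in the way PRPs require.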
2890 */ 2891 static int 2892 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm, 2893 bus_dma_segment_t *segs, int segs_left) 2894 { 2895 uint32_t i, sge_dwords, length, offset, entry_len; 2896 uint32_t num_entries, buff_len = 0, sges_in_segment; 2897 uint32_t page_mask, page_mask_result, *curr_buff; 2898 uint32_t *ptr_sgl, *ptr_first_sgl, first_page_offset; 2899 uint32_t first_page_data_size, end_residual; 2900 uint64_t *msg_phys; 2901 bus_addr_t paddr; 2902 int build_native_sgl = 0, first_prp_entry; 2903 int prp_size = PRP_ENTRY_SIZE; 2904 Mpi25IeeeSgeChain64_t *main_chain_element = NULL; 2905 struct mpr_prp_page *prp_page_info = NULL; 2906 2907 mpr_dprint(sc, MPR_TRACE, "%s\n", __func__); 2908 2909 /* 2910 * Add up the sizes of each segment length to get the total transfer 2911 * size, which will be checked against the Maximum Data Transfer Size. 2912 * If the data transfer length exceeds the MDTS for this device, just 2913 * return 1 so a normal IEEE SGL will be built. F/W will break the I/O 2914 * up into multiple I/O's. [nvme_mdts = 0 means unlimited] 2915 */ 2916 for (i = 0; i < segs_left; i++) 2917 buff_len += htole32(segs[i].ds_len); 2918 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS)) 2919 return 1; 2920 2921 /* Create page_mask (to get offset within page) */ 2922 page_mask = PAGE_SIZE - 1; 2923 2924 /* 2925 * Check if the number of elements exceeds the max number that can be 2926 * put in the main message frame (H/W can only translate an SGL that 2927 * is contained entirely in the main message frame). 2928 */ 2929 sges_in_segment = (sc->facts->IOCRequestFrameSize - 2930 offsetof(Mpi25SCSIIORequest_t, SGL)) / sizeof(MPI25_SGE_IO_UNION); 2931 if (segs_left > sges_in_segment) 2932 build_native_sgl = 1; 2933 else 2934 { 2935 /* 2936 * NVMe uses one PRP for each physical page (or part of physical 2937 * page). 2938 * if 4 pages or less then IEEE is OK 2939 * if > 5 pages then we need to build a native SGL 2940 * if > 4 and <= 5 pages, then check the physical address of 2941 * the first SG entry, then if this first size in the page 2942 * is >= the residual beyond 4 pages then use IEEE, 2943 * otherwise use native SGL 2944 */ 2945 if (buff_len > (PAGE_SIZE * 5)) 2946 build_native_sgl = 1; 2947 else if ((buff_len > (PAGE_SIZE * 4)) && 2948 (buff_len <= (PAGE_SIZE * 5)) ) 2949 { 2950 msg_phys = (uint64_t *)segs[0].ds_addr; 2951 first_page_offset = 2952 ((uint32_t)(uint64_t)(uintptr_t)msg_phys & 2953 page_mask); 2954 first_page_data_size = PAGE_SIZE - first_page_offset; 2955 end_residual = buff_len % PAGE_SIZE; 2956 2957 /* 2958 * If offset into first page pushes the end of the data 2959 * beyond end of the 5th page, we need the extra PRP 2960 * list. 2961 */ 2962 if (first_page_data_size < end_residual) 2963 build_native_sgl = 1; 2964 2965 /* 2966 * Check if first SG entry size is < residual beyond 4 2967 * pages. 2968 */ 2969 if (htole32(segs[0].ds_len) < 2970 (buff_len - (PAGE_SIZE * 4))) 2971 build_native_sgl = 1; 2972 } 2973 } 2974 2975 /* check if native SGL is needed */ 2976 if (!build_native_sgl) 2977 return 1; 2978 2979 /* 2980 * Native SGL is needed. 2981 * Put a chain element in main message frame that points to the first 2982 * chain buffer. 2983 * 2984 * NOTE: The ChainOffset field must be 0 when using a chain pointer to 2985 * a native SGL. 
2986 */ 2987 2988 /* Set main message chain element pointer */ 2989 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge; 2990 2991 /* 2992 * For NVMe the chain element needs to be the 2nd SGL entry in the main 2993 * message. 2994 */ 2995 main_chain_element = (Mpi25IeeeSgeChain64_t *) 2996 ((uint8_t *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64)); 2997 2998 /* 2999 * For the PRP entries, use the specially allocated buffer of 3000 * contiguous memory. PRP Page allocation failures should not happen 3001 * because there should be enough PRP page buffers to account for the 3002 * possible NVMe QDepth. 3003 */ 3004 prp_page_info = mpr_alloc_prp_page(sc); 3005 KASSERT(prp_page_info != NULL, ("%s: There are no PRP Pages left to be " 3006 "used for building a native NVMe SGL.\n", __func__)); 3007 curr_buff = (uint32_t *)prp_page_info->prp_page; 3008 msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; 3009 3010 /* 3011 * Insert the allocated PRP page into the command's PRP page list. This 3012 * will be freed when the command is freed. 3013 */ 3014 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); 3015 3016 /* 3017 * Check if we are within 1 entry of a page boundary we don't want our 3018 * first entry to be a PRP List entry. 3019 */ 3020 page_mask_result = (uintptr_t)((uint8_t *)curr_buff + prp_size) & 3021 page_mask; 3022 if (!page_mask_result) { 3023 /* Bump up to next page boundary. */ 3024 curr_buff = (uint32_t *)((uint8_t *)curr_buff + prp_size); 3025 msg_phys = (uint64_t *)((uint8_t *)msg_phys + prp_size); 3026 } 3027 3028 /* Fill in the chain element and make it an NVMe segment type. */ 3029 main_chain_element->Address.High = 3030 htole32((uint32_t)((uint64_t)(uintptr_t)msg_phys >> 32)); 3031 main_chain_element->Address.Low = 3032 htole32((uint32_t)(uintptr_t)msg_phys); 3033 main_chain_element->NextChainOffset = 0; 3034 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3035 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 3036 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP; 3037 3038 /* Set SGL pointer to start of contiguous PCIe buffer. */ 3039 ptr_sgl = curr_buff; 3040 sge_dwords = 2; 3041 num_entries = 0; 3042 3043 /* 3044 * NVMe has a very convoluted PRP format. One PRP is required for each 3045 * page or partial page. We need to split up OS SG entries if they are 3046 * longer than one page or cross a page boundary. We also have to insert 3047 * a PRP list pointer entry as the last entry in each physical page of 3048 * the PRP list. 3049 * 3050 * NOTE: The first PRP "entry" is actually placed in the first SGL entry 3051 * in the main message in IEEE 64 format. The 2nd entry in the main 3052 * message is the chain element, and the rest of the PRP entries are 3053 * built in the contiguous PCIe buffer. 3054 */ 3055 first_prp_entry = 1; 3056 ptr_first_sgl = (uint32_t *)cm->cm_sge; 3057 3058 for (i = 0; i < segs_left; i++) { 3059 /* Get physical address and length of this SG entry. */ 3060 paddr = segs[i].ds_addr; 3061 length = segs[i].ds_len; 3062 3063 /* 3064 * Check whether a given SGE buffer lies on a non-PAGED 3065 * boundary if this is not the first page. If so, this is not 3066 * expected so have FW build the SGL. 3067 */ 3068 if ((i != 0) && (((uint32_t)paddr & page_mask) != 0)) { 3069 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE while " 3070 "building NVMe PRPs, low address is 0x%x\n", 3071 (uint32_t)paddr); 3072 return 1; 3073 } 3074 3075 /* Apart from last SGE, if any other SGE boundary is not page 3076 * aligned then it means that hole exists. 
Existence of hole 3077 * leads to data corruption. So fallback to IEEE SGEs. 3078 */ 3079 if (i != (segs_left - 1)) { 3080 if (((uint32_t)paddr + length) & page_mask) { 3081 mpr_dprint(sc, MPR_ERROR, "Unaligned SGE " 3082 "boundary while building NVMe PRPs, low " 3083 "address: 0x%x and length: %u\n", 3084 (uint32_t)paddr, length); 3085 return 1; 3086 } 3087 } 3088 3089 /* Loop while the length is not zero. */ 3090 while (length) { 3091 /* 3092 * Check if we need to put a list pointer here if we are 3093 * at page boundary - prp_size. 3094 */ 3095 page_mask_result = (uintptr_t)((uint8_t *)ptr_sgl + 3096 prp_size) & page_mask; 3097 if (!page_mask_result) { 3098 /* 3099 * Need to put a PRP list pointer here. 3100 */ 3101 msg_phys = (uint64_t *)((uint8_t *)msg_phys + 3102 prp_size); 3103 *ptr_sgl = htole32((uintptr_t)msg_phys); 3104 *(ptr_sgl+1) = htole32((uint64_t)(uintptr_t) 3105 msg_phys >> 32); 3106 ptr_sgl += sge_dwords; 3107 num_entries++; 3108 } 3109 3110 /* Need to handle if entry will be part of a page. */ 3111 offset = (uint32_t)paddr & page_mask; 3112 entry_len = PAGE_SIZE - offset; 3113 if (first_prp_entry) { 3114 /* 3115 * Put IEEE entry in first SGE in main message. 3116 * (Simple element, System addr, not end of 3117 * list.) 3118 */ 3119 *ptr_first_sgl = htole32((uint32_t)paddr); 3120 *(ptr_first_sgl + 1) = 3121 htole32((uint32_t)((uint64_t)paddr >> 32)); 3122 *(ptr_first_sgl + 2) = htole32(entry_len); 3123 *(ptr_first_sgl + 3) = 0; 3124 3125 /* No longer the first PRP entry. */ 3126 first_prp_entry = 0; 3127 } else { 3128 /* Put entry in list. */ 3129 *ptr_sgl = htole32((uint32_t)paddr); 3130 *(ptr_sgl + 1) = 3131 htole32((uint32_t)((uint64_t)paddr >> 32)); 3132 3133 /* Bump ptr_sgl, msg_phys, and num_entries. */ 3134 ptr_sgl += sge_dwords; 3135 msg_phys = (uint64_t *)((uint8_t *)msg_phys + 3136 prp_size); 3137 num_entries++; 3138 } 3139 3140 /* Bump the phys address by the entry_len. */ 3141 paddr += entry_len; 3142 3143 /* Decrement length accounting for last partial page. */ 3144 if (entry_len > length) 3145 length = 0; 3146 else 3147 length -= entry_len; 3148 } 3149 } 3150 3151 /* Set chain element Length. */ 3152 main_chain_element->Length = htole32(num_entries * prp_size); 3153 3154 /* Return 0, indicating we built a native SGL. */ 3155 return 0; 3156 } 3157 3158 /* 3159 * Add a chain element as the next SGE for the specified command. 3160 * Reset cm_sge and cm_sgesize to indicate all the available space. Chains are 3161 * only required for IEEE commands. Therefore there is no code for commands 3162 * that have the MPR_CM_FLAGS_SGE_SIMPLE flag set (and those commands 3163 * shouldn't be requesting chains). 3164 */ 3165 static int 3166 mpr_add_chain(struct mpr_command *cm, int segsleft) 3167 { 3168 struct mpr_softc *sc = cm->cm_sc; 3169 MPI2_REQUEST_HEADER *req; 3170 MPI25_IEEE_SGE_CHAIN64 *ieee_sgc; 3171 struct mpr_chain *chain; 3172 int sgc_size, current_segs, rem_segs, segs_per_frame; 3173 uint8_t next_chain_offset = 0; 3174 3175 /* 3176 * Fail if a command is requesting a chain for SIMPLE SGE's. For SAS3 3177 * only IEEE commands should be requesting chains. Return some error 3178 * code other than 0. 
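 * Commands built with MPI (non-IEEE) simple SGEs are handled by mpr_push_sge()
 * and must fit entirely within the request frame.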
3179 */ 3180 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) { 3181 mpr_dprint(sc, MPR_ERROR, "A chain element cannot be added to " 3182 "an MPI SGL.\n"); 3183 return(ENOBUFS); 3184 } 3185 3186 sgc_size = sizeof(MPI25_IEEE_SGE_CHAIN64); 3187 if (cm->cm_sglsize < sgc_size) 3188 panic("MPR: Need SGE Error Code\n"); 3189 3190 chain = mpr_alloc_chain(cm->cm_sc); 3191 if (chain == NULL) 3192 return (ENOBUFS); 3193 3194 /* 3195 * Note: a double-linked list is used to make it easier to walk for 3196 * debugging. 3197 */ 3198 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); 3199 3200 /* 3201 * Need to know if the number of frames left is more than 1 or not. If 3202 * more than 1 frame is required, NextChainOffset will need to be set, 3203 * which will just be the last segment of the frame. 3204 */ 3205 rem_segs = 0; 3206 if (cm->cm_sglsize < (sgc_size * segsleft)) { 3207 /* 3208 * rem_segs is the number of segements remaining after the 3209 * segments that will go into the current frame. Since it is 3210 * known that at least one more frame is required, account for 3211 * the chain element. To know if more than one more frame is 3212 * required, just check if there will be a remainder after using 3213 * the current frame (with this chain) and the next frame. If 3214 * so the NextChainOffset must be the last element of the next 3215 * frame. 3216 */ 3217 current_segs = (cm->cm_sglsize / sgc_size) - 1; 3218 rem_segs = segsleft - current_segs; 3219 segs_per_frame = sc->chain_frame_size / sgc_size; 3220 if (rem_segs > segs_per_frame) { 3221 next_chain_offset = segs_per_frame - 1; 3222 } 3223 } 3224 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain; 3225 ieee_sgc->Length = next_chain_offset ? 3226 htole32((uint32_t)sc->chain_frame_size) : 3227 htole32((uint32_t)rem_segs * (uint32_t)sgc_size); 3228 ieee_sgc->NextChainOffset = next_chain_offset; 3229 ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | 3230 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3231 ieee_sgc->Address.Low = htole32(chain->chain_busaddr); 3232 ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32); 3233 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple; 3234 req = (MPI2_REQUEST_HEADER *)cm->cm_req; 3235 req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4; 3236 3237 cm->cm_sglsize = sc->chain_frame_size; 3238 return (0); 3239 } 3240 3241 /* 3242 * Add one scatter-gather element to the scatter-gather list for a command. 3243 * Maintain cm_sglsize and cm_sge as the remaining size and pointer to the 3244 * next SGE to fill in, respectively. In Gen3, the MPI SGL does not have a 3245 * chain, so don't consider any chain additions. 3246 */ 3247 int 3248 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len, 3249 int segsleft) 3250 { 3251 uint32_t saved_buf_len, saved_address_low, saved_address_high; 3252 u32 sge_flags; 3253 3254 /* 3255 * case 1: >=1 more segment, no room for anything (error) 3256 * case 2: 1 more segment and enough room for it 3257 */ 3258 3259 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) { 3260 mpr_dprint(cm->cm_sc, MPR_ERROR, 3261 "%s: warning: Not enough room for MPI SGL in frame.\n", 3262 __func__); 3263 return(ENOBUFS); 3264 } 3265 3266 KASSERT(segsleft == 1, 3267 ("segsleft cannot be more than 1 for an MPI SGL; segsleft = %d\n", 3268 segsleft)); 3269 3270 /* 3271 * There is one more segment left to add for the MPI SGL and there is 3272 * enough room in the frame to add it. 
This is the normal case because 3273 * MPI SGL's don't have chains, otherwise something is wrong. 3274 * 3275 * If this is a bi-directional request, need to account for that 3276 * here. Save the pre-filled sge values. These will be used 3277 * either for the 2nd SGL or for a single direction SGL. If 3278 * cm_out_len is non-zero, this is a bi-directional request, so 3279 * fill in the OUT SGL first, then the IN SGL, otherwise just 3280 * fill in the IN SGL. Note that at this time, when filling in 3281 * 2 SGL's for a bi-directional request, they both use the same 3282 * DMA buffer (same cm command). 3283 */ 3284 saved_buf_len = sge->FlagsLength & 0x00FFFFFF; 3285 saved_address_low = sge->Address.Low; 3286 saved_address_high = sge->Address.High; 3287 if (cm->cm_out_len) { 3288 sge->FlagsLength = cm->cm_out_len | 3289 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3290 MPI2_SGE_FLAGS_END_OF_BUFFER | 3291 MPI2_SGE_FLAGS_HOST_TO_IOC | 3292 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 3293 MPI2_SGE_FLAGS_SHIFT); 3294 cm->cm_sglsize -= len; 3295 /* Endian Safe code */ 3296 sge_flags = sge->FlagsLength; 3297 sge->FlagsLength = htole32(sge_flags); 3298 sge->Address.High = htole32(sge->Address.High); 3299 sge->Address.Low = htole32(sge->Address.Low); 3300 bcopy(sge, cm->cm_sge, len); 3301 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 3302 } 3303 sge->FlagsLength = saved_buf_len | 3304 ((uint32_t)(MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3305 MPI2_SGE_FLAGS_END_OF_BUFFER | 3306 MPI2_SGE_FLAGS_LAST_ELEMENT | 3307 MPI2_SGE_FLAGS_END_OF_LIST | 3308 MPI2_SGE_FLAGS_64_BIT_ADDRESSING) << 3309 MPI2_SGE_FLAGS_SHIFT); 3310 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { 3311 sge->FlagsLength |= 3312 ((uint32_t)(MPI2_SGE_FLAGS_IOC_TO_HOST) << 3313 MPI2_SGE_FLAGS_SHIFT); 3314 } else { 3315 sge->FlagsLength |= 3316 ((uint32_t)(MPI2_SGE_FLAGS_HOST_TO_IOC) << 3317 MPI2_SGE_FLAGS_SHIFT); 3318 } 3319 sge->Address.Low = saved_address_low; 3320 sge->Address.High = saved_address_high; 3321 3322 cm->cm_sglsize -= len; 3323 /* Endian Safe code */ 3324 sge_flags = sge->FlagsLength; 3325 sge->FlagsLength = htole32(sge_flags); 3326 sge->Address.High = htole32(sge->Address.High); 3327 sge->Address.Low = htole32(sge->Address.Low); 3328 bcopy(sge, cm->cm_sge, len); 3329 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); 3330 return (0); 3331 } 3332 3333 /* 3334 * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter- 3335 * gather list for a command. Maintain cm_sglsize and cm_sge as the 3336 * remaining size and pointer to the next SGE to fill in, respectively. 3337 */ 3338 int 3339 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft) 3340 { 3341 MPI2_IEEE_SGE_SIMPLE64 *sge = sgep; 3342 int error, ieee_sge_size = sizeof(MPI25_SGE_IO_UNION); 3343 uint32_t saved_buf_len, saved_address_low, saved_address_high; 3344 uint32_t sge_length; 3345 3346 /* 3347 * case 1: No room for chain or segment (error). 3348 * case 2: Two or more segments left but only room for chain. 3349 * case 3: Last segment and room for it, so set flags. 3350 */ 3351 3352 /* 3353 * There should be room for at least one element, or there is a big 3354 * problem. 
3355 */ 3356 if (cm->cm_sglsize < ieee_sge_size) 3357 panic("MPR: Need SGE Error Code\n"); 3358 3359 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) { 3360 if ((error = mpr_add_chain(cm, segsleft)) != 0) 3361 return (error); 3362 } 3363 3364 if (segsleft == 1) { 3365 /* 3366 * If this is a bi-directional request, need to account for that 3367 * here. Save the pre-filled sge values. These will be used 3368 * either for the 2nd SGL or for a single direction SGL. If 3369 * cm_out_len is non-zero, this is a bi-directional request, so 3370 * fill in the OUT SGL first, then the IN SGL, otherwise just 3371 * fill in the IN SGL. Note that at this time, when filling in 3372 * 2 SGL's for a bi-directional request, they both use the same 3373 * DMA buffer (same cm command). 3374 */ 3375 saved_buf_len = sge->Length; 3376 saved_address_low = sge->Address.Low; 3377 saved_address_high = sge->Address.High; 3378 if (cm->cm_out_len) { 3379 sge->Length = cm->cm_out_len; 3380 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3381 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3382 cm->cm_sglsize -= ieee_sge_size; 3383 /* Endian Safe code */ 3384 sge_length = sge->Length; 3385 sge->Length = htole32(sge_length); 3386 sge->Address.High = htole32(sge->Address.High); 3387 sge->Address.Low = htole32(sge->Address.Low); 3388 bcopy(sgep, cm->cm_sge, ieee_sge_size); 3389 cm->cm_sge = 3390 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + 3391 ieee_sge_size); 3392 } 3393 sge->Length = saved_buf_len; 3394 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3395 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR | 3396 MPI25_IEEE_SGE_FLAGS_END_OF_LIST); 3397 sge->Address.Low = saved_address_low; 3398 sge->Address.High = saved_address_high; 3399 } 3400 3401 cm->cm_sglsize -= ieee_sge_size; 3402 /* Endian Safe code */ 3403 sge_length = sge->Length; 3404 sge->Length = htole32(sge_length); 3405 sge->Address.High = htole32(sge->Address.High); 3406 sge->Address.Low = htole32(sge->Address.Low); 3407 bcopy(sgep, cm->cm_sge, ieee_sge_size); 3408 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + 3409 ieee_sge_size); 3410 return (0); 3411 } 3412 3413 /* 3414 * Add one dma segment to the scatter-gather list for a command. 3415 */ 3416 int 3417 mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags, 3418 int segsleft) 3419 { 3420 MPI2_SGE_SIMPLE64 sge; 3421 MPI2_IEEE_SGE_SIMPLE64 ieee_sge; 3422 3423 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) { 3424 ieee_sge.Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | 3425 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR); 3426 ieee_sge.Length = len; 3427 mpr_from_u64(pa, &ieee_sge.Address); 3428 3429 return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft)); 3430 } else { 3431 /* 3432 * This driver always uses 64-bit address elements for 3433 * simplicity. 3434 */ 3435 flags |= MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 3436 MPI2_SGE_FLAGS_64_BIT_ADDRESSING; 3437 /* Set Endian safe macro in mpr_push_sge */ 3438 sge.FlagsLength = len | (flags << MPI2_SGE_FLAGS_SHIFT); 3439 mpr_from_u64(pa, &sge.Address); 3440 3441 return (mpr_push_sge(cm, &sge, sizeof sge, segsleft)); 3442 } 3443 } 3444 3445 static void 3446 mpr_data_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 3447 { 3448 struct mpr_softc *sc; 3449 struct mpr_command *cm; 3450 u_int i, dir, sflags; 3451 3452 cm = (struct mpr_command *)arg; 3453 sc = cm->cm_sc; 3454 3455 /* 3456 * In this case, just print out a warning and let the chip tell the 3457 * user they did the wrong thing. 
3458 */ 3459 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { 3460 mpr_dprint(sc, MPR_ERROR, "%s: warning: busdma returned %d " 3461 "segments, more than the %d allowed\n", __func__, nsegs, 3462 cm->cm_max_segs); 3463 } 3464 3465 /* 3466 * Set up DMA direction flags. Bi-directional requests are also handled 3467 * here. In that case, both direction flags will be set. 3468 */ 3469 sflags = 0; 3470 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) { 3471 /* 3472 * We have to add a special case for SMP passthrough, there 3473 * is no easy way to generically handle it. The first 3474 * S/G element is used for the command (therefore the 3475 * direction bit needs to be set). The second one is used 3476 * for the reply. We'll leave it to the caller to make 3477 * sure we only have two buffers. 3478 */ 3479 /* 3480 * Even though the busdma man page says it doesn't make 3481 * sense to have both direction flags, it does in this case. 3482 * We have one s/g element being accessed in each direction. 3483 */ 3484 dir = BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD; 3485 3486 /* 3487 * Set the direction flag on the first buffer in the SMP 3488 * passthrough request. We'll clear it for the second one. 3489 */ 3490 sflags |= MPI2_SGE_FLAGS_DIRECTION | 3491 MPI2_SGE_FLAGS_END_OF_BUFFER; 3492 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) { 3493 sflags |= MPI2_SGE_FLAGS_HOST_TO_IOC; 3494 dir = BUS_DMASYNC_PREWRITE; 3495 } else 3496 dir = BUS_DMASYNC_PREREAD; 3497 3498 /* Check if a native SG list is needed for an NVMe PCIe device. */ 3499 if (cm->cm_targ && cm->cm_targ->is_nvme && 3500 mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) { 3501 /* A native SG list was built, skip to end. */ 3502 goto out; 3503 } 3504 3505 for (i = 0; i < nsegs; i++) { 3506 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) { 3507 sflags &= ~MPI2_SGE_FLAGS_DIRECTION; 3508 } 3509 error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len, 3510 sflags, nsegs - i); 3511 if (error != 0) { 3512 /* Resource shortage, roll back! */ 3513 if (ratecheck(&sc->lastfail, &mpr_chainfail_interval)) 3514 mpr_dprint(sc, MPR_INFO, "Out of chain frames, " 3515 "consider increasing hw.mpr.max_chains.\n"); 3516 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED; 3517 mpr_complete_command(sc, cm); 3518 return; 3519 } 3520 } 3521 3522 out: 3523 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); 3524 mpr_enqueue_request(sc, cm); 3525 3526 return; 3527 } 3528 3529 static void 3530 mpr_data_cb2(void *arg, bus_dma_segment_t *segs, int nsegs, bus_size_t mapsize, 3531 int error) 3532 { 3533 mpr_data_cb(arg, segs, nsegs, error); 3534 } 3535 3536 /* 3537 * This is the routine to enqueue commands asynchronously. 3538 * Note that the only error path here is from bus_dmamap_load(), which can 3539 * return EINPROGRESS if it is waiting for resources. Other than this, it's 3540 * assumed that if you have a command in-hand, then you have enough credits 3541 * to use it. An illustrative caller sketch for this asynchronous path follows mpr_config_complete() below.
3542 */ 3543 int 3544 mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm) 3545 { 3546 int error = 0; 3547 3548 if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) { 3549 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, 3550 &cm->cm_uio, mpr_data_cb2, cm, 0); 3551 } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) { 3552 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap, 3553 cm->cm_data, mpr_data_cb, cm, 0); 3554 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { 3555 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, 3556 cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0); 3557 } else { 3558 /* Add a zero-length element as needed */ 3559 if (cm->cm_sge != NULL) 3560 mpr_add_dmaseg(cm, 0, 0, 0, 1); 3561 mpr_enqueue_request(sc, cm); 3562 } 3563 3564 return (error); 3565 } 3566 3567 /* 3568 * This is the routine to enqueue commands synchronously. An error of 3569 * EINPROGRESS from mpr_map_command() is ignored since the command will 3570 * be executed and enqueued automatically. Other errors come from msleep(). A caller sketch for this synchronous path follows mpr_config_complete() below. 3571 */ 3572 int 3573 mpr_wait_command(struct mpr_softc *sc, struct mpr_command **cmp, int timeout, 3574 int sleep_flag) 3575 { 3576 int error, rc; 3577 struct timeval cur_time, start_time; 3578 struct mpr_command *cm = *cmp; 3579 3580 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) 3581 return EBUSY; 3582 3583 cm->cm_complete = NULL; 3584 cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP + MPR_CM_FLAGS_POLLED); 3585 error = mpr_map_command(sc, cm); 3586 if ((error != 0) && (error != EINPROGRESS)) 3587 return (error); 3588 3589 // Check for context and wait for 50 mSec at a time until time has 3590 // expired or the command has finished. If msleep can't be used, need 3591 // to poll. 3592 #if __FreeBSD_version >= 1000029 3593 if (curthread->td_no_sleeping) 3594 #else //__FreeBSD_version < 1000029 3595 if (curthread->td_pflags & TDP_NOSLEEPING) 3596 #endif //__FreeBSD_version >= 1000029 3597 sleep_flag = NO_SLEEP; 3598 getmicrouptime(&start_time); 3599 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) { 3600 error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz); 3601 if (error == EWOULDBLOCK) { 3602 /* 3603 * Record the actual elapsed time in the case of a 3604 * timeout for the message below. 3605 */ 3606 getmicrouptime(&cur_time); 3607 timevalsub(&cur_time, &start_time); 3608 } 3609 } else { 3610 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { 3611 mpr_intr_locked(sc); 3612 if (sleep_flag == CAN_SLEEP) 3613 pause("mprwait", hz/20); 3614 else 3615 DELAY(50000); 3616 3617 getmicrouptime(&cur_time); 3618 timevalsub(&cur_time, &start_time); 3619 if (cur_time.tv_sec > timeout) { 3620 error = EWOULDBLOCK; 3621 break; 3622 } 3623 } 3624 } 3625 3626 if (error == EWOULDBLOCK) { 3627 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s, timeout=%d," 3628 " elapsed=%jd\n", __func__, timeout, 3629 (intmax_t)cur_time.tv_sec); 3630 rc = mpr_reinit(sc); 3631 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" : 3632 "failed"); 3633 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) { 3634 /* 3635 * Tell the caller that we freed the command in a 3636 * reinit. 3637 */ 3638 *cmp = NULL; 3639 } 3640 error = ETIMEDOUT; 3641 } 3642 return (error); 3643 } 3644 3645 /* 3646 * This is the routine to enqueue a command synchronously and poll for 3647 * completion. Its use should be rare.
3648 */ 3649 int 3650 mpr_request_polled(struct mpr_softc *sc, struct mpr_command **cmp) 3651 { 3652 int error, rc; 3653 struct timeval cur_time, start_time; 3654 struct mpr_command *cm = *cmp; 3655 3656 error = 0; 3657 3658 cm->cm_flags |= MPR_CM_FLAGS_POLLED; 3659 cm->cm_complete = NULL; 3660 mpr_map_command(sc, cm); 3661 3662 getmicrouptime(&start_time); 3663 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { 3664 mpr_intr_locked(sc); 3665 3666 if (mtx_owned(&sc->mpr_mtx)) 3667 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, 3668 "mprpoll", hz/20); 3669 else 3670 pause("mprpoll", hz/20); 3671 3672 /* 3673 * Check for real-time timeout and fail if more than 60 seconds. 3674 */ 3675 getmicrouptime(&cur_time); 3676 timevalsub(&cur_time, &start_time); 3677 if (cur_time.tv_sec > 60) { 3678 mpr_dprint(sc, MPR_FAULT, "polling failed\n"); 3679 error = ETIMEDOUT; 3680 break; 3681 } 3682 } 3683 3684 if (error) { 3685 mpr_dprint(sc, MPR_FAULT, "Calling Reinit from %s\n", __func__); 3686 rc = mpr_reinit(sc); 3687 mpr_dprint(sc, MPR_FAULT, "Reinit %s\n", (rc == 0) ? "success" : 3688 "failed"); 3689 3690 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) { 3691 /* 3692 * Tell the caller that we freed the command in a 3693 * reinit. 3694 */ 3695 *cmp = NULL; 3696 } 3697 } 3698 return (error); 3699 } 3700 3701 /* 3702 * The MPT driver had a verbose interface for config pages. In this driver, 3703 * it is reduced to much simpler terms, similar to the Linux driver. A caller sketch follows mpr_config_complete() below. 3704 */ 3705 int 3706 mpr_read_config_page(struct mpr_softc *sc, struct mpr_config_params *params) 3707 { 3708 MPI2_CONFIG_REQUEST *req; 3709 struct mpr_command *cm; 3710 int error; 3711 3712 if (sc->mpr_flags & MPR_FLAGS_BUSY) { 3713 return (EBUSY); 3714 } 3715 3716 cm = mpr_alloc_command(sc); 3717 if (cm == NULL) { 3718 return (EBUSY); 3719 } 3720 3721 req = (MPI2_CONFIG_REQUEST *)cm->cm_req; 3722 req->Function = MPI2_FUNCTION_CONFIG; 3723 req->Action = params->action; 3724 req->SGLFlags = 0; 3725 req->ChainOffset = 0; 3726 req->PageAddress = params->page_address; 3727 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { 3728 MPI2_CONFIG_EXTENDED_PAGE_HEADER *hdr; 3729 3730 hdr = &params->hdr.Ext; 3731 req->ExtPageType = hdr->ExtPageType; 3732 req->ExtPageLength = hdr->ExtPageLength; 3733 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; 3734 req->Header.PageLength = 0; /* Must be set to zero */ 3735 req->Header.PageNumber = hdr->PageNumber; 3736 req->Header.PageVersion = hdr->PageVersion; 3737 } else { 3738 MPI2_CONFIG_PAGE_HEADER *hdr; 3739 3740 hdr = &params->hdr.Struct; 3741 req->Header.PageType = hdr->PageType; 3742 req->Header.PageNumber = hdr->PageNumber; 3743 req->Header.PageLength = hdr->PageLength; 3744 req->Header.PageVersion = hdr->PageVersion; 3745 } 3746 3747 cm->cm_data = params->buffer; 3748 cm->cm_length = params->length; 3749 if (cm->cm_data != NULL) { 3750 cm->cm_sge = &req->PageBufferSGE; 3751 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); 3752 cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; 3753 } else 3754 cm->cm_sge = NULL; 3755 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; 3756 3757 cm->cm_complete_data = params; 3758 if (params->callback != NULL) { 3759 cm->cm_complete = mpr_config_complete; 3760 return (mpr_map_command(sc, cm)); 3761 } else { 3762 error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP); 3763 if (error) { 3764 mpr_dprint(sc, MPR_FAULT, 3765 "Error %d reading config page\n", error); 3766 if (cm != NULL) 3767 mpr_free_command(sc, cm); 3768 return (error); 3769 } 3770
mpr_config_complete(sc, cm); 3771 } 3772 3773 return (0); 3774 } 3775 3776 int 3777 mpr_write_config_page(struct mpr_softc *sc, struct mpr_config_params *params) 3778 { 3779 return (EINVAL); 3780 } 3781 3782 static void 3783 mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm) 3784 { 3785 MPI2_CONFIG_REPLY *reply; 3786 struct mpr_config_params *params; 3787 3788 MPR_FUNCTRACE(sc); 3789 params = cm->cm_complete_data; 3790 3791 if (cm->cm_data != NULL) { 3792 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, 3793 BUS_DMASYNC_POSTREAD); 3794 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); 3795 } 3796 3797 /* 3798 * XXX KDM need to do more error recovery? This results in the 3799 * device in question not getting probed. 3800 */ 3801 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { 3802 params->status = MPI2_IOCSTATUS_BUSY; 3803 goto done; 3804 } 3805 3806 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; 3807 if (reply == NULL) { 3808 params->status = MPI2_IOCSTATUS_BUSY; 3809 goto done; 3810 } 3811 params->status = reply->IOCStatus; 3812 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { 3813 params->hdr.Ext.ExtPageType = reply->ExtPageType; 3814 params->hdr.Ext.ExtPageLength = reply->ExtPageLength; 3815 params->hdr.Ext.PageType = reply->Header.PageType; 3816 params->hdr.Ext.PageNumber = reply->Header.PageNumber; 3817 params->hdr.Ext.PageVersion = reply->Header.PageVersion; 3818 } else { 3819 params->hdr.Struct.PageType = reply->Header.PageType; 3820 params->hdr.Struct.PageNumber = reply->Header.PageNumber; 3821 params->hdr.Struct.PageLength = reply->Header.PageLength; 3822 params->hdr.Struct.PageVersion = reply->Header.PageVersion; 3823 } 3824 3825 done: 3826 mpr_free_command(sc, cm); 3827 if (params->callback != NULL) 3828 params->callback(sc, params); 3829 3830 return; 3831 } 3832
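/*
 * Illustrative sketch (editorial addition, not part of the driver): a
 * typical caller of the synchronous path via mpr_wait_command() above.
 * It allocates a command, fills in the request frame, submits it, and
 * copes with the case where a timeout forced a controller reinit, in
 * which case the command pointer comes back NULL and must not be freed.
 * The request contents, the 60-second timeout, and the function name
 * example_sync_request() are placeholders.
 */
#if 0	/* example only, not compiled */
static int
example_sync_request(struct mpr_softc *sc)
{
	struct mpr_command *cm;
	int error;

	if ((cm = mpr_alloc_command(sc)) == NULL)
		return (EBUSY);

	/*
	 * Fill in the MPI request frame in cm->cm_req for the desired
	 * function here; this sketch carries no data, so no SGL is needed.
	 */
	cm->cm_data = NULL;
	cm->cm_sge = NULL;
	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;

	/* Sleep up to 60 seconds; EINPROGRESS from busdma is handled inside. */
	error = mpr_wait_command(sc, &cm, 60, CAN_SLEEP);

	/* cm is NULL here if a timeout-triggered reinit already freed it. */
	if (cm != NULL) {
		/* On success, cm->cm_reply (if non-NULL) holds the reply. */
		mpr_free_command(sc, cm);
	}
	return (error);
}
#endif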
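/*
 * Illustrative sketch (editorial addition): the asynchronous path via
 * mpr_map_command() above.  The caller sets cm_complete to a completion
 * routine before submitting; that routine later runs from the driver's
 * completion handling and is responsible for freeing the command, as
 * mpr_config_complete() does above.  The names example_done() and
 * example_async_request() are hypothetical.
 */
#if 0	/* example only, not compiled */
static void
example_done(struct mpr_softc *sc, struct mpr_command *cm)
{

	/* Transport-level problems are flagged in cm_flags. */
	if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0)
		mpr_dprint(sc, MPR_ERROR, "example command failed\n");
	/* Otherwise cm->cm_reply, when non-NULL, holds the IOC's reply. */

	mpr_free_command(sc, cm);
}

static int
example_async_request(struct mpr_softc *sc)
{
	struct mpr_command *cm;

	if ((cm = mpr_alloc_command(sc)) == NULL)
		return (EBUSY);

	/* Fill in cm->cm_req as in the synchronous sketch above. */
	cm->cm_data = NULL;
	cm->cm_sge = NULL;
	cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
	cm->cm_complete = example_done;
	cm->cm_complete_data = NULL;

	/*
	 * For commands that carry data, bus_dmamap_load() may return
	 * EINPROGRESS here; that is not a failure, the load completes later.
	 */
	return (mpr_map_command(sc, cm));
}
#endif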
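/*
 * Illustrative sketch (editorial addition): reading a configuration page
 * through the simplified interface above.  The caller fills in struct
 * mpr_config_params with a page header, a buffer, and a NULL callback, so
 * mpr_read_config_page() completes synchronously.  The page type, page
 * number, and action value shown here are placeholders (the constants are
 * assumed to come from the MPI2 headers); real callers typically issue a
 * PAGE_HEADER action first to obtain the header values.
 */
#if 0	/* example only, not compiled */
static int
example_read_page(struct mpr_softc *sc, void *buf, size_t buflen)
{
	struct mpr_config_params params;
	int error;

	bzero(&params, sizeof(params));
	params.action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
	params.page_address = 0;
	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_IOC;	/* placeholder */
	params.hdr.Struct.PageNumber = 1;			/* placeholder */
	params.hdr.Struct.PageLength = buflen / 4;	/* length in 32-bit words */
	params.buffer = buf;
	params.length = buflen;
	params.callback = NULL;			/* NULL callback == synchronous */

	error = mpr_read_config_page(sc, &params);
	if (error != 0)
		return (error);
	/* The reply's IOCStatus is returned in params.status. */
	if ((params.status & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS)
		return (EIO);
	return (0);
}
#endif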