Lines Matching +full:cm +full:- +full:poll +full:- +full:mode
1 /*-
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * Copyright 2000-2020 Broadcom Inc.
29 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
100 struct mpr_command *cm);
103 static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
106 static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
126 * Added this union to smoothly convert cm->cm_desc.Words via le64toh.
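/*
 * A minimal sketch of such a union (names are illustrative, not the
 * driver's): the 64-bit descriptor word overlays two 32-bit halves so it
 * can be byte-swapped as one quantity yet still be posted as two 32-bit
 * register writes on hardware without atomic descriptor support.
 */
union reply_descriptor_sketch {
	u_int64_t word;
	struct {
		u_int32_t low;
		u_int32_t high;
	} u;
};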
139 /* Rate limit chain-fail messages to 1 per minute */
167 if (curthread->td_no_sleeping) in mpr_diag_reset()
179 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) in mpr_diag_reset()
180 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, in mpr_diag_reset()
213 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) in mpr_diag_reset()
214 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, in mpr_diag_reset()
284 sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE) in mpr_transition_ready()
392 prireqcr = MAX(1, sc->max_prireqframes); in mpr_resize_queues()
393 prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit); in mpr_resize_queues()
395 reqcr = MAX(2, sc->max_reqframes); in mpr_resize_queues()
396 reqcr = MIN(reqcr, sc->facts->RequestCredit); in mpr_resize_queues()
398 sc->num_reqs = prireqcr + reqcr; in mpr_resize_queues()
399 sc->num_prireqs = prireqcr; in mpr_resize_queues()
400 sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes, in mpr_resize_queues()
401 sc->facts->MaxReplyDescriptorPostQueueDepth) - 1; in mpr_resize_queues()
404 sc->reqframesz = sc->facts->IOCRequestFrameSize * 4; in mpr_resize_queues()
411 * number of 16-byte elements that can fit in a Chain Frame, which is in mpr_resize_queues()
414 if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) { in mpr_resize_queues()
415 chain_seg_size = sc->facts->IOCMaxChainSegmentSize; in mpr_resize_queues()
418 sc->chain_frame_size = chain_seg_size * in mpr_resize_queues()
421 sc->chain_frame_size = sc->reqframesz; in mpr_resize_queues()
426 * ((SGEs per frame - 1 for chain element) * Max Chain Depth) in mpr_resize_queues()
433 sges_per_frame = sc->chain_frame_size / sizeof(MPI2_IEEE_SGE_SIMPLE64) - 1; in mpr_resize_queues()
434 maxio = (sges_per_frame * sc->facts->MaxChainDepth + 1) * PAGE_SIZE; in mpr_resize_queues()
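/*
 * Worked example with illustrative values: a 128-byte chain frame holds
 * 128 / 16 = 8 IEEE simple SGEs; one is reserved for the chain element,
 * leaving 7.  With MaxChainDepth = 128 and 4 KiB pages,
 * maxio = (7 * 128 + 1) * 4096 bytes, roughly 3.5 MiB, before any
 * max_io_pages clamping below.
 */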
440 if (sc->max_io_pages > 0) { in mpr_resize_queues()
441 maxio = min(maxio, sc->max_io_pages * PAGE_SIZE); in mpr_resize_queues()
442 sc->maxio = maxio; in mpr_resize_queues()
444 sc->maxio = maxio; in mpr_resize_queues()
448 sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) / in mpr_resize_queues()
450 if (sc->max_chains > 0 && sc->max_chains < sc->num_chains) in mpr_resize_queues()
451 sc->num_chains = sc->max_chains; in mpr_resize_queues()
454 * Figure out the number of MSIx-based queues. If the firmware or in mpr_resize_queues()
456 * the queues to be useful then don't enable multi-queue. in mpr_resize_queues()
458 if (sc->facts->MaxMSIxVectors < 2) in mpr_resize_queues()
459 sc->msi_msgs = 1; in mpr_resize_queues()
461 if (sc->msi_msgs > 1) { in mpr_resize_queues()
462 sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus); in mpr_resize_queues()
463 sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors); in mpr_resize_queues()
464 if (sc->num_reqs / sc->msi_msgs < 2) in mpr_resize_queues()
465 sc->msi_msgs = 1; in mpr_resize_queues()
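/*
 * Illustrative example: with mp_ncpus = 8 and MaxMSIxVectors = 96,
 * msi_msgs is clamped to 8.  If num_reqs were below 16, fewer than two
 * requests per queue would remain, so msi_msgs falls back to 1.
 */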
469 sc->msi_msgs, sc->num_reqs, sc->num_replies); in mpr_resize_queues()
473 * This is called during attach and when re-initializing due to a Diag Reset.
475 * If called from attach, de-allocation is not required because the driver has
477 * allocated structures based on IOC Facts will need to be freed and re-
491 bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY)); in mpr_iocfacts_allocate()
496 * a re-initialization and only return the error if attaching so the OS in mpr_iocfacts_allocate()
499 if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) { in mpr_iocfacts_allocate()
510 MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts); in mpr_iocfacts_allocate()
512 snprintf(sc->fw_version, sizeof(sc->fw_version), in mpr_iocfacts_allocate()
514 sc->facts->FWVersion.Struct.Major, in mpr_iocfacts_allocate()
515 sc->facts->FWVersion.Struct.Minor, in mpr_iocfacts_allocate()
516 sc->facts->FWVersion.Struct.Unit, in mpr_iocfacts_allocate()
517 sc->facts->FWVersion.Struct.Dev); in mpr_iocfacts_allocate()
519 snprintf(sc->msg_version, sizeof(sc->msg_version), "%d.%d", in mpr_iocfacts_allocate()
520 (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK) >> in mpr_iocfacts_allocate()
522 (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MINOR_MASK) >> in mpr_iocfacts_allocate()
525 mpr_dprint(sc, MPR_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version, in mpr_iocfacts_allocate()
528 "IOCCapabilities: %b\n", sc->facts->IOCCapabilities, in mpr_iocfacts_allocate()
541 if (attaching && ((sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
558 saved_mode = sc->ir_firmware; in mpr_iocfacts_allocate()
559 if (sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
561 sc->ir_firmware = 1; in mpr_iocfacts_allocate()
563 if (sc->ir_firmware != saved_mode) { in mpr_iocfacts_allocate()
564 mpr_dprint(sc, MPR_INIT|MPR_FAULT, "new IR/IT mode " in mpr_iocfacts_allocate()
565 "in IOC Facts does not match previous mode\n"); in mpr_iocfacts_allocate()
571 sc->mpr_flags &= ~MPR_FLAGS_REALLOCATED; in mpr_iocfacts_allocate()
574 ((saved_facts.MsgVersion != sc->facts->MsgVersion) || in mpr_iocfacts_allocate()
575 (saved_facts.HeaderVersion != sc->facts->HeaderVersion) || in mpr_iocfacts_allocate()
576 (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) || in mpr_iocfacts_allocate()
577 (saved_facts.RequestCredit != sc->facts->RequestCredit) || in mpr_iocfacts_allocate()
578 (saved_facts.ProductID != sc->facts->ProductID) || in mpr_iocfacts_allocate()
579 (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) || in mpr_iocfacts_allocate()
581 sc->facts->IOCRequestFrameSize) || in mpr_iocfacts_allocate()
583 sc->facts->IOCMaxChainSegmentSize) || in mpr_iocfacts_allocate()
584 (saved_facts.MaxTargets != sc->facts->MaxTargets) || in mpr_iocfacts_allocate()
585 (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) || in mpr_iocfacts_allocate()
586 (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) || in mpr_iocfacts_allocate()
587 (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) || in mpr_iocfacts_allocate()
589 sc->facts->MaxReplyDescriptorPostQueueDepth) || in mpr_iocfacts_allocate()
590 (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) || in mpr_iocfacts_allocate()
591 (saved_facts.MaxVolumes != sc->facts->MaxVolumes) || in mpr_iocfacts_allocate()
593 sc->facts->MaxPersistentEntries))) { in mpr_iocfacts_allocate()
597 sc->mpr_flags |= MPR_FLAGS_REALLOCATED; in mpr_iocfacts_allocate()
601 * Some things should be done if attaching or re-allocating after a Diag in mpr_iocfacts_allocate()
610 if (sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
612 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE]. in mpr_iocfacts_allocate()
614 if (sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
616 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT]. in mpr_iocfacts_allocate()
618 if (sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
620 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED]. in mpr_iocfacts_allocate()
626 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) in mpr_iocfacts_allocate()
627 sc->eedp_enabled = TRUE; in mpr_iocfacts_allocate()
628 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) in mpr_iocfacts_allocate()
629 sc->control_TLR = TRUE; in mpr_iocfacts_allocate()
630 if ((sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
632 (sc->mpr_flags & MPR_FLAGS_SEA_IOC)) in mpr_iocfacts_allocate()
633 sc->atomic_desc_capable = TRUE; in mpr_iocfacts_allocate()
640 TAILQ_INIT(&sc->req_list); in mpr_iocfacts_allocate()
641 TAILQ_INIT(&sc->high_priority_req_list); in mpr_iocfacts_allocate()
642 TAILQ_INIT(&sc->chain_list); in mpr_iocfacts_allocate()
643 TAILQ_INIT(&sc->prp_page_list); in mpr_iocfacts_allocate()
644 TAILQ_INIT(&sc->tm_list); in mpr_iocfacts_allocate()
685 bzero(sc->free_queue, sc->fqdepth * 4); in mpr_iocfacts_allocate()
711 sc->replypostindex = 0; in mpr_iocfacts_allocate()
712 mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); in mpr_iocfacts_allocate()
738 * XXX If the number of MSI-X vectors changes during re-init, this in mpr_iocfacts_allocate()
758 struct mpr_command *cm; in mpr_iocfacts_free() local
763 if (sc->free_busaddr != 0) in mpr_iocfacts_free()
764 bus_dmamap_unload(sc->queues_dmat, sc->queues_map); in mpr_iocfacts_free()
765 if (sc->free_queue != NULL) in mpr_iocfacts_free()
766 bus_dmamem_free(sc->queues_dmat, sc->free_queue, in mpr_iocfacts_free()
767 sc->queues_map); in mpr_iocfacts_free()
768 if (sc->queues_dmat != NULL) in mpr_iocfacts_free()
769 bus_dma_tag_destroy(sc->queues_dmat); in mpr_iocfacts_free()
771 if (sc->chain_frames != NULL) { in mpr_iocfacts_free()
772 bus_dmamap_unload(sc->chain_dmat, sc->chain_map); in mpr_iocfacts_free()
773 bus_dmamem_free(sc->chain_dmat, sc->chain_frames, in mpr_iocfacts_free()
774 sc->chain_map); in mpr_iocfacts_free()
776 if (sc->chain_dmat != NULL) in mpr_iocfacts_free()
777 bus_dma_tag_destroy(sc->chain_dmat); in mpr_iocfacts_free()
779 if (sc->sense_busaddr != 0) in mpr_iocfacts_free()
780 bus_dmamap_unload(sc->sense_dmat, sc->sense_map); in mpr_iocfacts_free()
781 if (sc->sense_frames != NULL) in mpr_iocfacts_free()
782 bus_dmamem_free(sc->sense_dmat, sc->sense_frames, in mpr_iocfacts_free()
783 sc->sense_map); in mpr_iocfacts_free()
784 if (sc->sense_dmat != NULL) in mpr_iocfacts_free()
785 bus_dma_tag_destroy(sc->sense_dmat); in mpr_iocfacts_free()
787 if (sc->prp_page_busaddr != 0) in mpr_iocfacts_free()
788 bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map); in mpr_iocfacts_free()
789 if (sc->prp_pages != NULL) in mpr_iocfacts_free()
790 bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages, in mpr_iocfacts_free()
791 sc->prp_page_map); in mpr_iocfacts_free()
792 if (sc->prp_page_dmat != NULL) in mpr_iocfacts_free()
793 bus_dma_tag_destroy(sc->prp_page_dmat); in mpr_iocfacts_free()
795 if (sc->reply_busaddr != 0) in mpr_iocfacts_free()
796 bus_dmamap_unload(sc->reply_dmat, sc->reply_map); in mpr_iocfacts_free()
797 if (sc->reply_frames != NULL) in mpr_iocfacts_free()
798 bus_dmamem_free(sc->reply_dmat, sc->reply_frames, in mpr_iocfacts_free()
799 sc->reply_map); in mpr_iocfacts_free()
800 if (sc->reply_dmat != NULL) in mpr_iocfacts_free()
801 bus_dma_tag_destroy(sc->reply_dmat); in mpr_iocfacts_free()
803 if (sc->req_busaddr != 0) in mpr_iocfacts_free()
804 bus_dmamap_unload(sc->req_dmat, sc->req_map); in mpr_iocfacts_free()
805 if (sc->req_frames != NULL) in mpr_iocfacts_free()
806 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map); in mpr_iocfacts_free()
807 if (sc->req_dmat != NULL) in mpr_iocfacts_free()
808 bus_dma_tag_destroy(sc->req_dmat); in mpr_iocfacts_free()
810 if (sc->chains != NULL) in mpr_iocfacts_free()
811 free(sc->chains, M_MPR); in mpr_iocfacts_free()
812 if (sc->prps != NULL) in mpr_iocfacts_free()
813 free(sc->prps, M_MPR); in mpr_iocfacts_free()
814 if (sc->commands != NULL) { in mpr_iocfacts_free()
815 for (i = 1; i < sc->num_reqs; i++) { in mpr_iocfacts_free()
816 cm = &sc->commands[i]; in mpr_iocfacts_free()
817 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap); in mpr_iocfacts_free()
819 free(sc->commands, M_MPR); in mpr_iocfacts_free()
821 if (sc->buffer_dmat != NULL) in mpr_iocfacts_free()
822 bus_dma_tag_destroy(sc->buffer_dmat); in mpr_iocfacts_free()
825 free(sc->queues, M_MPR); in mpr_iocfacts_free()
826 sc->queues = NULL; in mpr_iocfacts_free()
843 sassc = sc->sassc; in mpr_reinit()
847 mtx_assert(&sc->mpr_mtx, MA_OWNED); in mpr_reinit()
850 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) { in mpr_reinit()
859 sc->mpr_flags |= MPR_FLAGS_DIAGRESET; in mpr_reinit()
872 /* Restore the PCI state, including the MSI-X registers */ in mpr_reinit()
890 * Mapping structures will be re-allocated after getting IOC Page8, so in mpr_reinit()
903 sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET; in mpr_reinit()
907 * Some mapping info is based in IOC Page8 data, so re-initialize the in mpr_reinit()
920 sc, sc->replypostindex, sc->replyfreeindex); in mpr_reinit()
962 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) in mpr_wait_db_ack()
963 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba", in mpr_wait_db_ack()
970 } while (--cntdn); in mpr_wait_db_ack()
993 /* Step through the synchronous command state machine, i.e. "Doorbell mode" */
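/*
 * A minimal sketch of that handshake, assuming the mpr_regwrite() and
 * mpr_wait_db_ack() helpers seen elsewhere in this file (the loop bound
 * and buffer names are hypothetical): each 32-bit dword of the request
 * is clocked out through the doorbell and acknowledged in turn.
 */
for (i = 0; i < req_dwords; i++) {
	mpr_regwrite(sc, MPI2_DOORBELL_OFFSET, htole32(req32[i]));
	if (mpr_wait_db_ack(sc, 5, sleep_flag) != 0)
		return (ENXIO);	/* IOC never acknowledged the dword */
}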
1003 if (curthread->td_no_sleeping) in mpr_request_sync()
1035 /* Clock out the message data synchronously in 32-bit dwords */ in mpr_request_sync()
1047 /* Clock in the reply in 16-bit words. The total length of the in mpr_request_sync()
1074 ioc_sz = reply->MsgLength; in mpr_request_sync()
1084 residual = ioc_sz * 2 - count; in mpr_request_sync()
1105 while (residual--) { in mpr_request_sync()
1127 mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm) in mpr_enqueue_request() argument
1132 mpr_dprint(sc, MPR_TRACE, "SMID %u cm %p ccb %p\n", in mpr_enqueue_request()
1133 cm->cm_desc.Default.SMID, cm, cm->cm_ccb); in mpr_enqueue_request()
1135 if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags & in mpr_enqueue_request()
1137 mtx_assert(&sc->mpr_mtx, MA_OWNED); in mpr_enqueue_request()
1139 if (++sc->io_cmds_active > sc->io_cmds_highwater) in mpr_enqueue_request()
1140 sc->io_cmds_highwater++; in mpr_enqueue_request()
1142 KASSERT(cm->cm_state == MPR_CM_STATE_BUSY, in mpr_enqueue_request()
1143 ("command not busy, state = %u\n", cm->cm_state)); in mpr_enqueue_request()
1144 cm->cm_state = MPR_CM_STATE_INQUEUE; in mpr_enqueue_request()
1146 if (sc->atomic_desc_capable) { in mpr_enqueue_request()
1147 rd.u.low = cm->cm_desc.Words.Low; in mpr_enqueue_request()
1151 rd.u.low = htole32(cm->cm_desc.Words.Low); in mpr_enqueue_request()
1152 rd.u.high = htole32(cm->cm_desc.Words.High); in mpr_enqueue_request()
1168 facts->HeaderVersion = le16toh(facts->HeaderVersion); in adjust_iocfacts_endianness()
1169 facts->Reserved1 = le16toh(facts->Reserved1); in adjust_iocfacts_endianness()
1170 facts->IOCExceptions = le16toh(facts->IOCExceptions); in adjust_iocfacts_endianness()
1171 facts->IOCStatus = le16toh(facts->IOCStatus); in adjust_iocfacts_endianness()
1172 facts->IOCLogInfo = le32toh(facts->IOCLogInfo); in adjust_iocfacts_endianness()
1173 facts->RequestCredit = le16toh(facts->RequestCredit); in adjust_iocfacts_endianness()
1174 facts->ProductID = le16toh(facts->ProductID); in adjust_iocfacts_endianness()
1175 facts->IOCCapabilities = le32toh(facts->IOCCapabilities); in adjust_iocfacts_endianness()
1176 facts->IOCRequestFrameSize = le16toh(facts->IOCRequestFrameSize); in adjust_iocfacts_endianness()
1177 facts->IOCMaxChainSegmentSize = le16toh(facts->IOCMaxChainSegmentSize); in adjust_iocfacts_endianness()
1178 facts->MaxInitiators = le16toh(facts->MaxInitiators); in adjust_iocfacts_endianness()
1179 facts->MaxTargets = le16toh(facts->MaxTargets); in adjust_iocfacts_endianness()
1180 facts->MaxSasExpanders = le16toh(facts->MaxSasExpanders); in adjust_iocfacts_endianness()
1181 facts->MaxEnclosures = le16toh(facts->MaxEnclosures); in adjust_iocfacts_endianness()
1182 facts->ProtocolFlags = le16toh(facts->ProtocolFlags); in adjust_iocfacts_endianness()
1183 facts->HighPriorityCredit = le16toh(facts->HighPriorityCredit); in adjust_iocfacts_endianness()
1184 facts->MaxReplyDescriptorPostQueueDepth = le16toh(facts->MaxReplyDescriptorPostQueueDepth); in adjust_iocfacts_endianness()
1185 facts->MaxDevHandle = le16toh(facts->MaxDevHandle); in adjust_iocfacts_endianness()
1186 facts->MaxPersistentEntries = le16toh(facts->MaxPersistentEntries); in adjust_iocfacts_endianness()
1187 facts->MinDevHandle = le16toh(facts->MinDevHandle); in adjust_iocfacts_endianness()
1225 mpr_dprint(sc, MPR_TRACE, "facts->IOCCapabilities 0x%x\n", facts->IOCCapabilities); in mpr_get_iocfacts()
1244 if ((sc->pqdepth == 0) || (sc->fqdepth == 0) || (sc->reqframesz == 0) in mpr_send_iocinit()
1245 || (sc->replyframesz == 0)) { in mpr_send_iocinit()
1258 * deliberately in the lower 32 bits of memory. This is a micro- in mpr_send_iocinit()
1265 init.SystemRequestFrameSize = htole16((uint16_t)(sc->reqframesz / 4)); in mpr_send_iocinit()
1266 init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth); in mpr_send_iocinit()
1267 init.ReplyFreeQueueDepth = htole16(sc->fqdepth); in mpr_send_iocinit()
1272 htole32((uint32_t)sc->req_busaddr); in mpr_send_iocinit()
1275 htole32((uint32_t)sc->post_busaddr); in mpr_send_iocinit()
1277 init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr); in mpr_send_iocinit()
1312 mpr_lock(ctx->softc); in mpr_memaddr_wait_cb()
1313 ctx->error = error; in mpr_memaddr_wait_cb()
1314 ctx->completed = 1; in mpr_memaddr_wait_cb()
1315 if ((error == 0) && (ctx->abandoned == 0)) { in mpr_memaddr_wait_cb()
1316 *ctx->addr = segs[0].ds_addr; in mpr_memaddr_wait_cb()
1320 if (ctx->abandoned != 0) in mpr_memaddr_wait_cb()
1326 mpr_unlock(ctx->softc); in mpr_memaddr_wait_cb()
1329 bus_dmamap_unload(ctx->buffer_dmat, in mpr_memaddr_wait_cb()
1330 ctx->buffer_dmamap); in mpr_memaddr_wait_cb()
1331 *ctx->addr = 0; in mpr_memaddr_wait_cb()
1344 nq = sc->msi_msgs; in mpr_alloc_queues()
1347 sc->queues = malloc(sizeof(struct mpr_queue) * nq, M_MPR, in mpr_alloc_queues()
1349 if (sc->queues == NULL) in mpr_alloc_queues()
1353 q = &sc->queues[i]; in mpr_alloc_queues()
1355 q->sc = sc; in mpr_alloc_queues()
1356 q->qnum = i; in mpr_alloc_queues()
1376 * contains filled-in reply frames sent from the firmware to the host. in mpr_alloc_hw_queues()
1380 sc->fqdepth = roundup2(sc->num_replies + 1, 16); in mpr_alloc_hw_queues()
1381 sc->pqdepth = roundup2(sc->num_replies + 1, 16); in mpr_alloc_hw_queues()
1382 fqsize = sc->fqdepth * 4; in mpr_alloc_hw_queues()
1383 pqsize = sc->pqdepth * 8; in mpr_alloc_hw_queues()
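/*
 * Illustrative example: with num_replies = 1023, both depths round up
 * to 1024, so the free queue needs 1024 * 4 = 4096 bytes and the post
 * queue 1024 * 8 = 8192 bytes of DMA-able memory.
 */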
1386 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_hw_queues()
1390 if (bus_dma_template_tag(&t, &sc->queues_dmat)) { in mpr_alloc_hw_queues()
1394 if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT, in mpr_alloc_hw_queues()
1395 &sc->queues_map)) { in mpr_alloc_hw_queues()
1400 bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize, in mpr_alloc_hw_queues()
1403 sc->free_queue = (uint32_t *)queues; in mpr_alloc_hw_queues()
1404 sc->free_busaddr = queues_busaddr; in mpr_alloc_hw_queues()
1405 sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize); in mpr_alloc_hw_queues()
1406 sc->post_busaddr = queues_busaddr + fqsize; in mpr_alloc_hw_queues()
1408 (uintmax_t)sc->free_busaddr, fqsize); in mpr_alloc_hw_queues()
1410 (uintmax_t)sc->post_busaddr, pqsize); in mpr_alloc_hw_queues()
1422 sc->replyframesz = sc->facts->ReplyFrameSize * 4; in mpr_alloc_replies()
1425 * sc->num_replies should be one less than sc->fqdepth. We need to in mpr_alloc_replies()
1426 * allocate space for sc->fqdepth replies, but only sc->num_replies in mpr_alloc_replies()
1429 num_replies = max(sc->fqdepth, sc->num_replies); in mpr_alloc_replies()
1431 rsize = sc->replyframesz * num_replies; in mpr_alloc_replies()
1432 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_replies()
1436 if (bus_dma_template_tag(&t, &sc->reply_dmat)) { in mpr_alloc_replies()
1440 if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames, in mpr_alloc_replies()
1441 BUS_DMA_NOWAIT, &sc->reply_map)) { in mpr_alloc_replies()
1445 bzero(sc->reply_frames, rsize); in mpr_alloc_replies()
1446 bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize, in mpr_alloc_replies()
1447 mpr_memaddr_cb, &sc->reply_busaddr, 0); in mpr_alloc_replies()
1449 (uintmax_t)sc->reply_busaddr, rsize); in mpr_alloc_replies()
1466 for (bo = 0; bo + sc->chain_frame_size <= segs[s].ds_len; in mpr_load_chains_cb()
1467 bo += sc->chain_frame_size) { in mpr_load_chains_cb()
1468 chain = &sc->chains[i++]; in mpr_load_chains_cb()
1469 chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + o); in mpr_load_chains_cb()
1470 chain->chain_busaddr = segs[s].ds_addr + bo; in mpr_load_chains_cb()
1471 o += sc->chain_frame_size; in mpr_load_chains_cb()
1475 o += segs[s].ds_len - bo; in mpr_load_chains_cb()
1477 sc->chain_free_lowwater = i; in mpr_load_chains_cb()
1484 struct mpr_command *cm; in mpr_alloc_requests() local
1487 rsize = sc->reqframesz * sc->num_reqs; in mpr_alloc_requests()
1488 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_requests()
1492 if (bus_dma_template_tag(&t, &sc->req_dmat)) { in mpr_alloc_requests()
1496 if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames, in mpr_alloc_requests()
1497 BUS_DMA_NOWAIT, &sc->req_map)) { in mpr_alloc_requests()
1501 bzero(sc->req_frames, rsize); in mpr_alloc_requests()
1502 bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize, in mpr_alloc_requests()
1503 mpr_memaddr_cb, &sc->req_busaddr, 0); in mpr_alloc_requests()
1505 (uintmax_t)sc->req_busaddr, rsize); in mpr_alloc_requests()
1507 sc->chains = malloc(sizeof(struct mpr_chain) * sc->num_chains, M_MPR, in mpr_alloc_requests()
1509 if (!sc->chains) { in mpr_alloc_requests()
1513 rsize = sc->chain_frame_size * sc->num_chains; in mpr_alloc_requests()
1514 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_requests()
1518 if (bus_dma_template_tag(&t, &sc->chain_dmat)) { in mpr_alloc_requests()
1522 if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames, in mpr_alloc_requests()
1523 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->chain_map)) { in mpr_alloc_requests()
1527 if (bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, in mpr_alloc_requests()
1530 bus_dmamem_free(sc->chain_dmat, sc->chain_frames, in mpr_alloc_requests()
1531 sc->chain_map); in mpr_alloc_requests()
1535 rsize = MPR_SENSE_LEN * sc->num_reqs; in mpr_alloc_requests()
1536 bus_dma_template_clone(&t, sc->req_dmat); in mpr_alloc_requests()
1539 if (bus_dma_template_tag(&t, &sc->sense_dmat)) { in mpr_alloc_requests()
1543 if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames, in mpr_alloc_requests()
1544 BUS_DMA_NOWAIT, &sc->sense_map)) { in mpr_alloc_requests()
1548 bzero(sc->sense_frames, rsize); in mpr_alloc_requests()
1549 bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize, in mpr_alloc_requests()
1550 mpr_memaddr_cb, &sc->sense_busaddr, 0); in mpr_alloc_requests()
1552 (uintmax_t)sc->sense_busaddr, rsize); in mpr_alloc_requests()
1558 if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) && in mpr_alloc_requests()
1559 (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) { in mpr_alloc_requests()
1564 nsegs = (sc->maxio / PAGE_SIZE) + 1; in mpr_alloc_requests()
1565 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_requests()
1569 BD_LOCKFUNCARG(&sc->mpr_mtx), in mpr_alloc_requests()
1571 if (bus_dma_template_tag(&t, &sc->buffer_dmat)) { in mpr_alloc_requests()
1580 sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs, in mpr_alloc_requests()
1582 for (i = 1; i < sc->num_reqs; i++) { in mpr_alloc_requests()
1583 cm = &sc->commands[i]; in mpr_alloc_requests()
1584 cm->cm_req = sc->req_frames + i * sc->reqframesz; in mpr_alloc_requests()
1585 cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz; in mpr_alloc_requests()
1586 cm->cm_sense = &sc->sense_frames[i]; in mpr_alloc_requests()
1587 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN; in mpr_alloc_requests()
1588 cm->cm_desc.Default.SMID = htole16(i); in mpr_alloc_requests()
1589 cm->cm_sc = sc; in mpr_alloc_requests()
1590 cm->cm_state = MPR_CM_STATE_BUSY; in mpr_alloc_requests()
1591 TAILQ_INIT(&cm->cm_chain_list); in mpr_alloc_requests()
1592 TAILQ_INIT(&cm->cm_prp_page_list); in mpr_alloc_requests()
1593 callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0); in mpr_alloc_requests()
1596 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) in mpr_alloc_requests()
1598 if (i <= sc->num_prireqs) in mpr_alloc_requests()
1599 mpr_free_high_priority_command(sc, cm); in mpr_alloc_requests()
1601 mpr_free_command(sc, cm); in mpr_alloc_requests()
1604 sc->num_reqs = i; in mpr_alloc_requests()
1648 PRPs_required = sc->maxio / PAGE_SIZE; in mpr_alloc_nvme_prp_pages()
1649 PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1; in mpr_alloc_nvme_prp_pages()
1652 sc->prp_buffer_size = PAGE_SIZE * pages_required; in mpr_alloc_nvme_prp_pages()
1653 rsize = sc->prp_buffer_size * NVME_QDEPTH; in mpr_alloc_nvme_prp_pages()
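/*
 * Illustrative example: with maxio = 1 MiB and 4 KiB pages,
 * PRPs_required = 256.  With 8-byte PRP entries, PRPs_per_page =
 * 512 - 1 = 511 (one entry per page is reserved to chain to the next
 * PRP page), so a single PRP page per command suffices here.
 */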
1654 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_nvme_prp_pages()
1658 if (bus_dma_template_tag(&t, &sc->prp_page_dmat)) { in mpr_alloc_nvme_prp_pages()
1663 if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages, in mpr_alloc_nvme_prp_pages()
1664 BUS_DMA_NOWAIT, &sc->prp_page_map)) { in mpr_alloc_nvme_prp_pages()
1668 bzero(sc->prp_pages, rsize); in mpr_alloc_nvme_prp_pages()
1669 bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages, in mpr_alloc_nvme_prp_pages()
1670 rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0); in mpr_alloc_nvme_prp_pages()
1672 sc->prps = malloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR, in mpr_alloc_nvme_prp_pages()
1675 prp_page = &sc->prps[i]; in mpr_alloc_nvme_prp_pages()
1676 prp_page->prp_page = (uint64_t *)(sc->prp_pages + in mpr_alloc_nvme_prp_pages()
1677 i * sc->prp_buffer_size); in mpr_alloc_nvme_prp_pages()
1678 prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr + in mpr_alloc_nvme_prp_pages()
1679 i * sc->prp_buffer_size); in mpr_alloc_nvme_prp_pages()
1681 sc->prp_pages_free_lowwater++; in mpr_alloc_nvme_prp_pages()
1692 memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8); in mpr_init_queues()
1696 * have space for on the queue. So sc->num_replies (the number we in mpr_init_queues()
1697 * use) should be less than sc->fqdepth (allocated size). in mpr_init_queues()
1699 if (sc->num_replies >= sc->fqdepth) in mpr_init_queues()
1705 for (i = 0; i < sc->fqdepth; i++) { in mpr_init_queues()
1706 sc->free_queue[i] = htole32(sc->reply_busaddr + (i * sc->replyframesz)); in mpr_init_queues()
1708 sc->replyfreeindex = sc->num_replies; in mpr_init_queues()
1714 * Next are the global settings, if they exist. Highest are the per-unit
1723 sc->mpr_debug = MPR_INFO | MPR_FAULT; in mpr_get_tunables()
1724 sc->disable_msix = 0; in mpr_get_tunables()
1725 sc->disable_msi = 0; in mpr_get_tunables()
1726 sc->max_msix = MPR_MSIX_MAX; in mpr_get_tunables()
1727 sc->max_chains = MPR_CHAIN_FRAMES; in mpr_get_tunables()
1728 sc->max_io_pages = MPR_MAXIO_PAGES; in mpr_get_tunables()
1729 sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD; in mpr_get_tunables()
1730 sc->spinup_wait_time = DEFAULT_SPINUP_WAIT; in mpr_get_tunables()
1731 sc->use_phynum = 1; in mpr_get_tunables()
1732 sc->encl_min_slots = 0; in mpr_get_tunables()
1733 sc->max_reqframes = MPR_REQ_FRAMES; in mpr_get_tunables()
1734 sc->max_prireqframes = MPR_PRI_REQ_FRAMES; in mpr_get_tunables()
1735 sc->max_replyframes = MPR_REPLY_FRAMES; in mpr_get_tunables()
1736 sc->max_evtframes = MPR_EVT_REPLY_FRAMES; in mpr_get_tunables()
1744 TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix); in mpr_get_tunables()
1745 TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi); in mpr_get_tunables()
1746 TUNABLE_INT_FETCH("hw.mpr.max_msix", &sc->max_msix); in mpr_get_tunables()
1747 TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains); in mpr_get_tunables()
1748 TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages); in mpr_get_tunables()
1749 TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu); in mpr_get_tunables()
1750 TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time); in mpr_get_tunables()
1751 TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum); in mpr_get_tunables()
1752 TUNABLE_INT_FETCH("hw.mpr.encl_min_slots", &sc->encl_min_slots); in mpr_get_tunables()
1753 TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes); in mpr_get_tunables()
1754 TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes); in mpr_get_tunables()
1755 TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes); in mpr_get_tunables()
1756 TUNABLE_INT_FETCH("hw.mpr.max_evtframes", &sc->max_evtframes); in mpr_get_tunables()
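/*
 * Usage example (illustrative values): these fetch loader tunables, so
 * they can be set at boot time from /boot/loader.conf, e.g.:
 *
 *	hw.mpr.max_chains="4096"
 *	hw.mpr.disable_msix="1"
 */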
1758 /* Grab the unit-instance variables */ in mpr_get_tunables()
1760 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1766 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1767 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix); in mpr_get_tunables()
1770 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1771 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi); in mpr_get_tunables()
1774 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1775 TUNABLE_INT_FETCH(tmpstr, &sc->max_msix); in mpr_get_tunables()
1778 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1779 TUNABLE_INT_FETCH(tmpstr, &sc->max_chains); in mpr_get_tunables()
1782 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1783 TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages); in mpr_get_tunables()
1785 bzero(sc->exclude_ids, sizeof(sc->exclude_ids)); in mpr_get_tunables()
1787 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1788 TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids)); in mpr_get_tunables()
1791 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1792 TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu); in mpr_get_tunables()
1795 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1796 TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time); in mpr_get_tunables()
1799 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1800 TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum); in mpr_get_tunables()
1803 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1804 TUNABLE_INT_FETCH(tmpstr, &sc->encl_min_slots); in mpr_get_tunables()
1807 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1808 TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes); in mpr_get_tunables()
1811 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1812 TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes); in mpr_get_tunables()
1815 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1816 TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes); in mpr_get_tunables()
1819 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1820 TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes); in mpr_get_tunables()
1835 device_get_unit(sc->mpr_dev)); in mpr_setup_sysctl()
1836 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev)); in mpr_setup_sysctl()
1838 sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev); in mpr_setup_sysctl()
1840 sysctl_tree = device_get_sysctl_tree(sc->mpr_dev); in mpr_setup_sysctl()
1843 sysctl_ctx_init(&sc->sysctl_ctx); in mpr_setup_sysctl()
1844 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, in mpr_setup_sysctl()
1847 if (sc->sysctl_tree == NULL) in mpr_setup_sysctl()
1849 sysctl_ctx = &sc->sysctl_ctx; in mpr_setup_sysctl()
1850 sysctl_tree = sc->sysctl_tree; in mpr_setup_sysctl()
1858 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0, in mpr_setup_sysctl()
1859 "Disable the use of MSI-X interrupts"); in mpr_setup_sysctl()
1862 OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0, in mpr_setup_sysctl()
1863 "User-defined maximum number of MSIX queues"); in mpr_setup_sysctl()
1866 OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0, in mpr_setup_sysctl()
1870 OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0, in mpr_setup_sysctl()
1874 OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0, in mpr_setup_sysctl()
1878 OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0, in mpr_setup_sysctl()
1882 OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0, in mpr_setup_sysctl()
1886 OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version, in mpr_setup_sysctl()
1887 strlen(sc->fw_version), "firmware version"); in mpr_setup_sysctl()
1894 OID_AUTO, "msg_version", CTLFLAG_RD, sc->msg_version, in mpr_setup_sysctl()
1895 strlen(sc->msg_version), "message interface version (deprecated)"); in mpr_setup_sysctl()
1899 &sc->io_cmds_active, 0, "number of currently active commands"); in mpr_setup_sysctl()
1903 &sc->io_cmds_highwater, 0, "maximum active commands seen"); in mpr_setup_sysctl()
1907 &sc->chain_free, 0, "number of free chain elements"); in mpr_setup_sysctl()
1911 &sc->chain_free_lowwater, 0, "lowest number of free chain elements"); in mpr_setup_sysctl()
1915 &sc->max_chains, 0, "maximum chain frames that will be allocated"); in mpr_setup_sysctl()
1919 &sc->max_io_pages, 0, "maximum pages to allow per I/O (if <1 use " in mpr_setup_sysctl()
1923 OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0, in mpr_setup_sysctl()
1928 &sc->chain_alloc_fail, "chain allocation failures"); in mpr_setup_sysctl()
1932 &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for " in mpr_setup_sysctl()
1942 &sc->dump_reqs_alltypes, 0, in mpr_setup_sysctl()
1946 OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0, in mpr_setup_sysctl()
1951 &sc->prp_pages_free, 0, "number of free PRP pages"); in mpr_setup_sysctl()
1955 &sc->prp_pages_free_lowwater, 0, "lowest number of free PRP pages"); in mpr_setup_sysctl()
1959 &sc->prp_page_alloc_fail, "PRP page allocation failures"); in mpr_setup_sysctl()
1962 OID_AUTO, "encl_min_slots", CTLFLAG_RW, &sc->encl_min_slots, 0, in mpr_setup_sysctl()
2006 debug = sc->mpr_debug; in mpr_debug_sysctl()
2013 if (debug & string->flag) in mpr_debug_sysctl()
2014 sbuf_printf(sbuf, ",%s", string->name); in mpr_debug_sysctl()
2020 if (error || req->newptr == NULL) in mpr_debug_sysctl()
2023 len = req->newlen - req->newidx; in mpr_debug_sysctl()
2051 } else if (*list == '-') { in mpr_parse_debug()
2070 if (strcasecmp(token, string->name) == 0) { in mpr_parse_debug()
2071 flags |= string->flag; in mpr_parse_debug()
2079 sc->mpr_debug = flags; in mpr_parse_debug()
2082 sc->mpr_debug |= flags; in mpr_parse_debug()
2085 sc->mpr_debug &= (~flags); in mpr_parse_debug()
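/*
 * Usage example (illustrative): the parsed string arrives through the
 * driver's debug sysctl (dev.mpr.X.debug_level per mpr(4)), with '+'
 * and '-' selecting whether the named flags are set or cleared, e.g.:
 *
 *	sysctl dev.mpr.0.debug_level=+trace,info
 */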
2104 struct mpr_command *cm; in mpr_dump_reqs() local
2119 numreqs = sc->num_reqs; in mpr_dump_reqs()
2121 if (req->newptr != NULL) in mpr_dump_reqs()
2124 if (smid == 0 || smid > sc->num_reqs) in mpr_dump_reqs()
2126 if (numreqs <= 0 || (numreqs + smid > sc->num_reqs)) in mpr_dump_reqs()
2127 numreqs = sc->num_reqs; in mpr_dump_reqs()
2132 cm = &sc->commands[i]; in mpr_dump_reqs()
2133 if ((sc->dump_reqs_alltypes == 0) && (cm->cm_state != state)) in mpr_dump_reqs()
2136 hdr.state = cm->cm_state; in mpr_dump_reqs()
2138 hdr.deschi = cm->cm_desc.Words.High; in mpr_dump_reqs()
2139 hdr.desclo = cm->cm_desc.Words.Low; in mpr_dump_reqs()
2140 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, in mpr_dump_reqs()
2144 sbuf_bcat(sb, cm->cm_req, 128); in mpr_dump_reqs()
2145 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, in mpr_dump_reqs()
2147 sbuf_bcat(sb, chain->chain, 128); in mpr_dump_reqs()
2163 mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF); in mpr_attach()
2164 callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0); in mpr_attach()
2165 callout_init_mtx(&sc->device_check_callout, &sc->mpr_mtx, 0); in mpr_attach()
2166 TAILQ_INIT(&sc->event_list); in mpr_attach()
2167 timevalclear(&sc->lastfail); in mpr_attach()
2175 sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR, in mpr_attach()
2177 if (!sc->facts) { in mpr_attach()
2185 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC in mpr_attach()
2204 sc->mpr_ich.ich_func = mpr_startup; in mpr_attach()
2205 sc->mpr_ich.ich_arg = sc; in mpr_attach()
2206 if (config_intrhook_establish(&sc->mpr_ich) != 0) { in mpr_attach()
2215 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, in mpr_attach()
2218 if (sc->shutdown_eh == NULL) in mpr_attach()
2224 sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE; in mpr_attach()
2230 /* Run through any late-start handlers. */
2249 config_intrhook_disestablish(&sc->mpr_ich); in mpr_startup()
2250 sc->mpr_ich.ich_arg = NULL; in mpr_startup()
2263 if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN) in mpr_periodic()
2276 callout_reset_sbt(&sc->periodic, MPR_PERIODIC_DELAY * SBT_1S, 0, in mpr_periodic()
2288 switch (event->Event) { in mpr_log_evt_handler()
2291 if (sc->mpr_debug & MPR_EVENT) in mpr_log_evt_handler()
2292 hexdump(event->EventData, event->EventDataLength, NULL, in mpr_log_evt_handler()
2296 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; in mpr_log_evt_handler()
2298 "0x%x Sequence %d:\n", entry->LogEntryQualifier, in mpr_log_evt_handler()
2299 entry->LogSequence); in mpr_log_evt_handler()
2317 &sc->mpr_log_eh); in mpr_attach_log()
2326 if (sc->mpr_log_eh != NULL) in mpr_detach_log()
2327 mpr_deregister_events(sc, sc->mpr_log_eh); in mpr_detach_log()
2343 sc->mpr_flags |= MPR_FLAGS_SHUTDOWN; in mpr_free()
2346 callout_drain(&sc->periodic); in mpr_free()
2347 callout_drain(&sc->device_check_callout); in mpr_free()
2366 if (sc->facts != NULL) in mpr_free()
2367 free(sc->facts, M_MPR); in mpr_free()
2375 if (sc->sysctl_tree != NULL) in mpr_free()
2376 sysctl_ctx_free(&sc->sysctl_ctx); in mpr_free()
2379 if (sc->shutdown_eh != NULL) in mpr_free()
2380 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); in mpr_free()
2382 mtx_destroy(&sc->mpr_mtx); in mpr_free()
2389 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm) in mpr_complete_command() argument
2393 if (cm == NULL) { in mpr_complete_command()
2398 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE, in mpr_complete_command()
2399 ("command not inqueue, state = %u\n", cm->cm_state)); in mpr_complete_command()
2400 cm->cm_state = MPR_CM_STATE_BUSY; in mpr_complete_command()
2401 if (cm->cm_flags & MPR_CM_FLAGS_POLLED) in mpr_complete_command()
2402 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; in mpr_complete_command()
2404 if (cm->cm_complete != NULL) { in mpr_complete_command()
2406 "%s cm %p calling cm_complete %p data %p reply %p\n", in mpr_complete_command()
2407 __func__, cm, cm->cm_complete, cm->cm_complete_data, in mpr_complete_command()
2408 cm->cm_reply); in mpr_complete_command()
2409 cm->cm_complete(sc, cm); in mpr_complete_command()
2412 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { in mpr_complete_command()
2413 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm); in mpr_complete_command()
2414 wakeup(cm); in mpr_complete_command()
2417 if (sc->io_cmds_active != 0) { in mpr_complete_command()
2418 sc->io_cmds_active--; in mpr_complete_command()
2421 "out of sync - resynching to 0\n"); in mpr_complete_command()
2477 sc_status = le16toh(mpi_reply->IOCStatus); in mpr_display_reply_info()
2479 mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo)); in mpr_display_reply_info()
2493 * needed for both INTx interrupts and driver-driven polling in mpr_intr()
2533 struct mpr_command *cm = NULL; in mpr_intr_locked() local
2539 pq = sc->replypostindex; in mpr_intr_locked()
2542 __func__, sc, sc->replypostindex); in mpr_intr_locked()
2545 cm = NULL; in mpr_intr_locked()
2546 desc = &sc->post_queue[sc->replypostindex]; in mpr_intr_locked()
2558 flags = desc->Default.ReplyFlags & in mpr_intr_locked()
2561 (le32toh(desc->Words.High) == 0xffffffff)) in mpr_intr_locked()
2565 * and cm completion handlers which decide to do a diag in mpr_intr_locked()
2571 if (++sc->replypostindex >= sc->pqdepth) in mpr_intr_locked()
2572 sc->replypostindex = 0; in mpr_intr_locked()
2578 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)]; in mpr_intr_locked()
2579 cm->cm_reply = NULL; in mpr_intr_locked()
2587 * Re-compose the reply address from the address in mpr_intr_locked()
2593 * (sc->reply_frames). in mpr_intr_locked()
2595 baddr = le32toh(desc->AddressReply.ReplyFrameAddress); in mpr_intr_locked()
2596 reply = sc->reply_frames + in mpr_intr_locked()
2597 (baddr - ((uint32_t)sc->reply_busaddr)); in mpr_intr_locked()
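/*
 * Illustrative example: if the low 32 bits of sc->reply_busaddr are
 * 0x00010000 and the descriptor carries 0x00010200, the offset is
 * 0x200; with a 128-byte reply frame that is frame index 4 within
 * sc->reply_frames.
 */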
2604 if ((reply < sc->reply_frames) in mpr_intr_locked()
2605 || (reply > (sc->reply_frames + in mpr_intr_locked()
2606 (sc->fqdepth * sc->replyframesz)))) { in mpr_intr_locked()
2611 sc->reply_frames, sc->fqdepth, in mpr_intr_locked()
2612 sc->replyframesz); in mpr_intr_locked()
2614 /* LSI-TODO. See Linux Code for Graceful exit */ in mpr_intr_locked()
2617 if (le16toh(desc->AddressReply.SMID) == 0) { in mpr_intr_locked()
2618 if (((MPI2_DEFAULT_REPLY *)reply)->Function == in mpr_intr_locked()
2629 if ((le16toh(rel_rep->IOCStatus) & in mpr_intr_locked()
2634 &sc->fw_diag_buffer_list[ in mpr_intr_locked()
2635 rel_rep->BufferType]; in mpr_intr_locked()
2636 pBuffer->valid_data = TRUE; in mpr_intr_locked()
2637 pBuffer->owned_by_firmware = in mpr_intr_locked()
2639 pBuffer->immediate = FALSE; in mpr_intr_locked()
2646 cm = &sc->commands[ in mpr_intr_locked()
2647 le16toh(desc->AddressReply.SMID)]; in mpr_intr_locked()
2648 if (cm->cm_state == MPR_CM_STATE_INQUEUE) { in mpr_intr_locked()
2649 cm->cm_reply = reply; in mpr_intr_locked()
2650 cm->cm_reply_data = in mpr_intr_locked()
2651 le32toh(desc->AddressReply. in mpr_intr_locked()
2656 " ignoring state %d cm %p\n", in mpr_intr_locked()
2657 cm->cm_state, cm); in mpr_intr_locked()
2668 desc->Default.ReplyFlags); in mpr_intr_locked()
2669 cm = NULL; in mpr_intr_locked()
2673 if (cm != NULL) { in mpr_intr_locked()
2675 if (cm->cm_reply) in mpr_intr_locked()
2676 mpr_display_reply_info(sc,cm->cm_reply); in mpr_intr_locked()
2677 mpr_complete_command(sc, cm); in mpr_intr_locked()
2681 if (pq != sc->replypostindex) { in mpr_intr_locked()
2683 __func__, sc, sc->replypostindex); in mpr_intr_locked()
2685 sc->replypostindex); in mpr_intr_locked()
2698 event = le16toh(reply->Event); in mpr_dispatch_event()
2699 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { in mpr_dispatch_event()
2700 if (isset(eh->mask, event)) { in mpr_dispatch_event()
2701 eh->callback(sc, data, reply); in mpr_dispatch_event()
2719 mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm) in mpr_reregister_events_complete() argument
2723 if (cm->cm_reply) in mpr_reregister_events_complete()
2725 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); in mpr_reregister_events_complete()
2727 mpr_free_command(sc, cm); in mpr_reregister_events_complete()
2746 eh->callback = cb; in mpr_register_events()
2747 eh->data = data; in mpr_register_events()
2748 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); in mpr_register_events()
2762 struct mpr_command *cm = NULL; in mpr_update_events() local
2769 bcopy(mask, &handle->mask[0], 16); in mpr_update_events()
2770 memset(sc->event_mask, 0xff, 16); in mpr_update_events()
2772 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { in mpr_update_events()
2774 sc->event_mask[i] &= ~eh->mask[i]; in mpr_update_events()
2777 if ((cm = mpr_alloc_command(sc)) == NULL) in mpr_update_events()
2779 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; in mpr_update_events()
2780 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; in mpr_update_events()
2781 evtreq->MsgFlags = 0; in mpr_update_events()
2782 evtreq->SASBroadcastPrimitiveMasks = 0; in mpr_update_events()
2785 u_char fullmask[sizeof(evtreq->EventMasks)]; in mpr_update_events()
2787 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, sizeof(fullmask)); in mpr_update_events()
2790 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, sizeof(sc->event_mask)); in mpr_update_events()
2792 evtreq->EventMasks[i] = htole32(evtreq->EventMasks[i]); in mpr_update_events()
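/*
 * Illustrative note: a set bit in EventMasks suppresses that event, so
 * each registered handler clears the bits it wants delivered.  Event
 * number N maps to bit (N % 32) of EventMasks[N / 32]; event 0x1C, for
 * example, lands in word 0, bit 28.
 */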
2794 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; in mpr_update_events()
2795 cm->cm_data = NULL; in mpr_update_events()
2797 error = mpr_request_polled(sc, &cm); in mpr_update_events()
2798 if (cm != NULL) in mpr_update_events()
2799 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; in mpr_update_events()
2801 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) in mpr_update_events()
2809 if (cm != NULL) in mpr_update_events()
2810 mpr_free_command(sc, cm); in mpr_update_events()
2818 struct mpr_command *cm; in mpr_reregister_events() local
2826 memset(sc->event_mask, 0xff, 16); in mpr_reregister_events()
2828 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { in mpr_reregister_events()
2830 sc->event_mask[i] &= ~eh->mask[i]; in mpr_reregister_events()
2833 if ((cm = mpr_alloc_command(sc)) == NULL) in mpr_reregister_events()
2835 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; in mpr_reregister_events()
2836 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; in mpr_reregister_events()
2837 evtreq->MsgFlags = 0; in mpr_reregister_events()
2838 evtreq->SASBroadcastPrimitiveMasks = 0; in mpr_reregister_events()
2841 u_char fullmask[sizeof(evtreq->EventMasks)]; in mpr_reregister_events()
2843 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, sizeof(fullmask)); in mpr_reregister_events()
2846 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, sizeof(sc->event_mask)); in mpr_reregister_events()
2848 evtreq->EventMasks[i] = htole32(evtreq->EventMasks[i]); in mpr_reregister_events()
2850 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; in mpr_reregister_events()
2851 cm->cm_data = NULL; in mpr_reregister_events()
2852 cm->cm_complete = mpr_reregister_events_complete; in mpr_reregister_events()
2854 error = mpr_map_command(sc, cm); in mpr_reregister_events()
2865 TAILQ_REMOVE(&sc->event_list, handle, eh_list); in mpr_deregister_events()
2871 * mpr_build_nvme_prp - This function is called for NVMe end devices to build a
2886 * non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous
2898 * Each 64-bit PRP entry comprises an address and an offset field. The address
2901 * first element in a PRP list may contain a non-zero offset, implying that all
2906 * being described by the list begins at a non-zero offset within the first page,
2907 * then the first PRP element will contain a non-zero offset indicating where the
2919 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm, in mpr_build_nvme_prp() argument
2943 prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + in mpr_build_nvme_prp()
2945 prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + in mpr_build_nvme_prp()
2958 prp_page = (uint64_t *)prp_page_info->prp_page; in mpr_build_nvme_prp()
2959 prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; in mpr_build_nvme_prp()
2965 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); in mpr_build_nvme_prp()
2971 page_mask = PAGE_SIZE - 1; in mpr_build_nvme_prp()
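/*
 * Illustrative example: with 4 KiB pages, page_mask is 0xFFF, so a
 * buffer starting at physical address 0x12345678 begins at offset
 * 0x678 within its page, and the first PRP entry carries that offset.
 */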
3000 * boundary - prp_size (8 bytes). in mpr_build_nvme_prp()
3009 * - bump the current memory pointer to the next in mpr_build_nvme_prp()
3011 * - set the PRP Entry to point to that page. This is in mpr_build_nvme_prp()
3013 * - bump the PRP Entry pointer the start of the next in mpr_build_nvme_prp()
3015 * no need to get a new page - it's just the next in mpr_build_nvme_prp()
3026 entry_len = PAGE_SIZE - offset; in mpr_build_nvme_prp()
3100 length -= entry_len; in mpr_build_nvme_prp()
3105 * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to
3115 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm, in mpr_check_pcie_native_sgl() argument
3141 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS)) in mpr_check_pcie_native_sgl()
3145 page_mask = PAGE_SIZE - 1; in mpr_check_pcie_native_sgl()
3152 sges_in_segment = (sc->reqframesz - in mpr_check_pcie_native_sgl()
3177 first_page_data_size = PAGE_SIZE - first_page_offset; in mpr_check_pcie_native_sgl()
3193 (buff_len - (PAGE_SIZE * 4))) in mpr_check_pcie_native_sgl()
3212 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge; in mpr_check_pcie_native_sgl()
3230 curr_buff = (uint32_t *)prp_page_info->prp_page; in mpr_check_pcie_native_sgl()
3231 msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; in mpr_check_pcie_native_sgl()
3237 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); in mpr_check_pcie_native_sgl()
3252 main_chain_element->Address.High = in mpr_check_pcie_native_sgl()
3254 main_chain_element->Address.Low = in mpr_check_pcie_native_sgl()
3256 main_chain_element->NextChainOffset = 0; in mpr_check_pcie_native_sgl()
3257 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | in mpr_check_pcie_native_sgl()
3279 ptr_first_sgl = (uint32_t *)cm->cm_sge; in mpr_check_pcie_native_sgl()
3287 * Check whether a given SGE buffer lies on a non-PAGED in mpr_check_pcie_native_sgl()
3302 if (i != (segs_left - 1)) { in mpr_check_pcie_native_sgl()
3316 * at page boundary - prp_size. in mpr_check_pcie_native_sgl()
3335 entry_len = PAGE_SIZE - offset; in mpr_check_pcie_native_sgl()
3370 length -= entry_len; in mpr_check_pcie_native_sgl()
3375 main_chain_element->Length = htole32(num_entries * prp_size); in mpr_check_pcie_native_sgl()
3389 mpr_add_chain(struct mpr_command *cm, int segsleft) in mpr_add_chain() argument
3391 struct mpr_softc *sc = cm->cm_sc; in mpr_add_chain()
3403 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) { in mpr_add_chain()
3410 if (cm->cm_sglsize < sgc_size) in mpr_add_chain()
3413 chain = mpr_alloc_chain(cm->cm_sc); in mpr_add_chain()
3418 * Note: a double-linked list is used to make it easier to walk for in mpr_add_chain()
3421 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); in mpr_add_chain()
3429 if (cm->cm_sglsize < (sgc_size * segsleft)) { in mpr_add_chain()
3440 current_segs = (cm->cm_sglsize / sgc_size) - 1; in mpr_add_chain()
3441 rem_segs = segsleft - current_segs; in mpr_add_chain()
3442 segs_per_frame = sc->chain_frame_size / sgc_size; in mpr_add_chain()
3444 next_chain_offset = segs_per_frame - 1; in mpr_add_chain()
3447 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain; in mpr_add_chain()
3448 ieee_sgc->Length = next_chain_offset ? in mpr_add_chain()
3449 htole32((uint32_t)sc->chain_frame_size) : in mpr_add_chain()
3451 ieee_sgc->NextChainOffset = next_chain_offset; in mpr_add_chain()
3452 ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | in mpr_add_chain()
3454 ieee_sgc->Address.Low = htole32(chain->chain_busaddr); in mpr_add_chain()
3455 ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32); in mpr_add_chain()
3456 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple; in mpr_add_chain()
3457 req = (MPI2_REQUEST_HEADER *)cm->cm_req; in mpr_add_chain()
3458 req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4; in mpr_add_chain()
3460 cm->cm_sglsize = sc->chain_frame_size; in mpr_add_chain()
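/*
 * Illustrative example: with a 128-byte chain frame and 16-byte IEEE
 * SGEs, segs_per_frame = 8, and a frame that must chain again
 * advertises next_chain_offset = 7; ChainOffset is expressed in
 * 16-byte units, so (128 - 16) >> 4 = 7 as well.
 */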
3465 * Add one scatter-gather element to the scatter-gather list for a command.
3471 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len, in mpr_push_sge() argument
3482 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) { in mpr_push_sge()
3483 mpr_dprint(cm->cm_sc, MPR_ERROR, in mpr_push_sge()
3498 * If this is a bi-directional request, need to account for that in mpr_push_sge()
3499 * here. Save the pre-filled sge values. These will be used in mpr_push_sge()
3501 * cm_out_len is non-zero, this is a bi-directional request, so in mpr_push_sge()
3504 * 2 SGL's for a bi-directional request, they both use the same in mpr_push_sge()
3505 * DMA buffer (same cm command). in mpr_push_sge()
3507 saved_buf_len = sge->FlagsLength & 0x00FFFFFF; in mpr_push_sge()
3508 saved_address_low = sge->Address.Low; in mpr_push_sge()
3509 saved_address_high = sge->Address.High; in mpr_push_sge()
3510 if (cm->cm_out_len) { in mpr_push_sge()
3511 sge->FlagsLength = cm->cm_out_len | in mpr_push_sge()
3517 cm->cm_sglsize -= len; in mpr_push_sge()
3519 sge_flags = sge->FlagsLength; in mpr_push_sge()
3520 sge->FlagsLength = htole32(sge_flags); in mpr_push_sge()
3521 bcopy(sge, cm->cm_sge, len); in mpr_push_sge()
3522 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); in mpr_push_sge()
3524 sge->FlagsLength = saved_buf_len | in mpr_push_sge()
3531 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { in mpr_push_sge()
3532 sge->FlagsLength |= in mpr_push_sge()
3536 sge->FlagsLength |= in mpr_push_sge()
3540 sge->Address.Low = saved_address_low; in mpr_push_sge()
3541 sge->Address.High = saved_address_high; in mpr_push_sge()
3543 cm->cm_sglsize -= len; in mpr_push_sge()
3545 sge_flags = sge->FlagsLength; in mpr_push_sge()
3546 sge->FlagsLength = htole32(sge_flags); in mpr_push_sge()
3547 bcopy(sge, cm->cm_sge, len); in mpr_push_sge()
3548 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); in mpr_push_sge()
3553 * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter-
3558 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft) in mpr_push_ieee_sge() argument
3575 if (cm->cm_sglsize < ieee_sge_size) in mpr_push_ieee_sge()
3578 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) { in mpr_push_ieee_sge()
3579 if ((error = mpr_add_chain(cm, segsleft)) != 0) in mpr_push_ieee_sge()
3585 * If this is a bi-directional request, need to account for that in mpr_push_ieee_sge()
3586 * here. Save the pre-filled sge values. These will be used in mpr_push_ieee_sge()
3588 * cm_out_len is non-zero, this is a bi-directional request, so in mpr_push_ieee_sge()
3591 * 2 SGL's for a bi-directional request, they both use the same in mpr_push_ieee_sge()
3592 * DMA buffer (same cm command). in mpr_push_ieee_sge()
3594 saved_buf_len = sge->Length; in mpr_push_ieee_sge()
3595 saved_address_low = sge->Address.Low; in mpr_push_ieee_sge()
3596 saved_address_high = sge->Address.High; in mpr_push_ieee_sge()
3597 if (cm->cm_out_len) { in mpr_push_ieee_sge()
3598 sge->Length = cm->cm_out_len; in mpr_push_ieee_sge()
3599 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | in mpr_push_ieee_sge()
3601 cm->cm_sglsize -= ieee_sge_size; in mpr_push_ieee_sge()
3603 sge_length = sge->Length; in mpr_push_ieee_sge()
3604 sge->Length = htole32(sge_length); in mpr_push_ieee_sge()
3605 bcopy(sgep, cm->cm_sge, ieee_sge_size); in mpr_push_ieee_sge()
3606 cm->cm_sge = in mpr_push_ieee_sge()
3607 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + in mpr_push_ieee_sge()
3610 sge->Length = saved_buf_len; in mpr_push_ieee_sge()
3611 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | in mpr_push_ieee_sge()
3614 sge->Address.Low = saved_address_low; in mpr_push_ieee_sge()
3615 sge->Address.High = saved_address_high; in mpr_push_ieee_sge()
3618 cm->cm_sglsize -= ieee_sge_size; in mpr_push_ieee_sge()
3620 sge_length = sge->Length; in mpr_push_ieee_sge()
3621 sge->Length = htole32(sge_length); in mpr_push_ieee_sge()
3622 bcopy(sgep, cm->cm_sge, ieee_sge_size); in mpr_push_ieee_sge()
3623 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + in mpr_push_ieee_sge()
3629 * Add one dma segment to the scatter-gather list for a command.
3632 mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags, in mpr_add_dmaseg() argument
3638 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) { in mpr_add_dmaseg()
3644 return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft)); in mpr_add_dmaseg()
3647 * This driver always uses 64-bit address elements for in mpr_add_dmaseg()
3656 return (mpr_push_sge(cm, &sge, sizeof sge, segsleft)); in mpr_add_dmaseg()
3664 struct mpr_command *cm; in mpr_data_cb() local
3667 cm = (struct mpr_command *)arg; in mpr_data_cb()
3668 sc = cm->cm_sc; in mpr_data_cb()
3674 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { in mpr_data_cb()
3677 cm->cm_max_segs); in mpr_data_cb()
3681 * Set up DMA direction flags. Bi-directional requests are also handled in mpr_data_cb()
3685 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) { in mpr_data_cb()
3707 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) { in mpr_data_cb()
3714 if (cm->cm_targ && cm->cm_targ->is_nvme && in mpr_data_cb()
3715 mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) { in mpr_data_cb()
3721 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) { in mpr_data_cb()
3724 error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len, in mpr_data_cb()
3725 sflags, nsegs - i); in mpr_data_cb()
3728 if (ratecheck(&sc->lastfail, &mpr_chainfail_interval)) in mpr_data_cb()
3731 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED; in mpr_data_cb()
3738 cm->cm_state = MPR_CM_STATE_INQUEUE; in mpr_data_cb()
3739 mpr_complete_command(sc, cm); in mpr_data_cb()
3745 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); in mpr_data_cb()
3746 mpr_enqueue_request(sc, cm); in mpr_data_cb()
3762 * assumed that if you have a command in-hand, then you have enough credits
3766 mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm) in mpr_map_command() argument
3770 if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) { in mpr_map_command()
3771 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, in mpr_map_command()
3772 &cm->cm_uio, mpr_data_cb2, cm, 0); in mpr_map_command()
3773 } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) { in mpr_map_command()
3774 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap, in mpr_map_command()
3775 cm->cm_data, mpr_data_cb, cm, 0); in mpr_map_command()
3776 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { in mpr_map_command()
3777 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, in mpr_map_command()
3778 cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0); in mpr_map_command()
3780 /* Add a zero-length element as needed */ in mpr_map_command()
3781 if (cm->cm_sge != NULL) in mpr_map_command()
3782 mpr_add_dmaseg(cm, 0, 0, 0, 1); in mpr_map_command()
3783 mpr_enqueue_request(sc, cm); in mpr_map_command()
3800 struct mpr_command *cm = *cmp; in mpr_wait_command() local
3802 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) in mpr_wait_command()
3805 cm->cm_complete = NULL; in mpr_wait_command()
3806 cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP + MPR_CM_FLAGS_POLLED); in mpr_wait_command()
3807 error = mpr_map_command(sc, cm); in mpr_wait_command()
3813 // to poll. in mpr_wait_command()
3814 if (curthread->td_no_sleeping) in mpr_wait_command()
3817 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) { in mpr_wait_command()
3818 error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz); in mpr_wait_command()
3828 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { in mpr_wait_command()
3845 if (cm->cm_timeout_handler == NULL) { in mpr_wait_command()
3853 cm->cm_timeout_handler(sc, cm); in mpr_wait_command()
3854 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) { in mpr_wait_command()
3867 * This is the routine to enqueue a command synchronously and poll for
3875 struct mpr_command *cm = *cmp; in mpr_request_polled() local
3879 cm->cm_flags |= MPR_CM_FLAGS_POLLED; in mpr_request_polled()
3880 cm->cm_complete = NULL; in mpr_request_polled()
3881 mpr_map_command(sc, cm); in mpr_request_polled()
3884 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { in mpr_request_polled()
3887 if (mtx_owned(&sc->mpr_mtx)) in mpr_request_polled()
3888 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, in mpr_request_polled()
3894 * Check for real-time timeout and fail if more than 60 seconds. in mpr_request_polled()
3904 cm->cm_state = MPR_CM_STATE_BUSY; in mpr_request_polled()
3911 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) { in mpr_request_polled()
3930 struct mpr_command *cm; in mpr_read_config_page() local
3933 if (sc->mpr_flags & MPR_FLAGS_BUSY) { in mpr_read_config_page()
3937 cm = mpr_alloc_command(sc); in mpr_read_config_page()
3938 if (cm == NULL) { in mpr_read_config_page()
3942 req = (MPI2_CONFIG_REQUEST *)cm->cm_req; in mpr_read_config_page()
3943 req->Function = MPI2_FUNCTION_CONFIG; in mpr_read_config_page()
3944 req->Action = params->action; in mpr_read_config_page()
3945 req->SGLFlags = 0; in mpr_read_config_page()
3946 req->ChainOffset = 0; in mpr_read_config_page()
3947 req->PageAddress = params->page_address; in mpr_read_config_page()
3948 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { in mpr_read_config_page()
3951 hdr = ¶ms->hdr.Ext; in mpr_read_config_page()
3952 req->ExtPageType = hdr->ExtPageType; in mpr_read_config_page()
3953 req->ExtPageLength = hdr->ExtPageLength; in mpr_read_config_page()
3954 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; in mpr_read_config_page()
3955 req->Header.PageLength = 0; /* Must be set to zero */ in mpr_read_config_page()
3956 req->Header.PageNumber = hdr->PageNumber; in mpr_read_config_page()
3957 req->Header.PageVersion = hdr->PageVersion; in mpr_read_config_page()
3961 hdr = ¶ms->hdr.Struct; in mpr_read_config_page()
3962 req->Header.PageType = hdr->PageType; in mpr_read_config_page()
3963 req->Header.PageNumber = hdr->PageNumber; in mpr_read_config_page()
3964 req->Header.PageLength = hdr->PageLength; in mpr_read_config_page()
3965 req->Header.PageVersion = hdr->PageVersion; in mpr_read_config_page()
3968 cm->cm_data = params->buffer; in mpr_read_config_page()
3969 cm->cm_length = params->length; in mpr_read_config_page()
3970 if (cm->cm_data != NULL) { in mpr_read_config_page()
3971 cm->cm_sge = &req->PageBufferSGE; in mpr_read_config_page()
3972 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); in mpr_read_config_page()
3973 cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; in mpr_read_config_page()
3975 cm->cm_sge = NULL; in mpr_read_config_page()
3976 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; in mpr_read_config_page()
3978 cm->cm_complete_data = params; in mpr_read_config_page()
3979 if (params->callback != NULL) { in mpr_read_config_page()
3980 cm->cm_complete = mpr_config_complete; in mpr_read_config_page()
3981 return (mpr_map_command(sc, cm)); in mpr_read_config_page()
3983 error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP); in mpr_read_config_page()
3987 if (cm != NULL) in mpr_read_config_page()
3988 mpr_free_command(sc, cm); in mpr_read_config_page()
3991 mpr_config_complete(sc, cm); in mpr_read_config_page()
4004 mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm) in mpr_config_complete() argument
4010 params = cm->cm_complete_data; in mpr_config_complete()
4012 if (cm->cm_data != NULL) { in mpr_config_complete()
4013 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, in mpr_config_complete()
4015 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); in mpr_config_complete()
4022 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { in mpr_config_complete()
4023 params->status = MPI2_IOCSTATUS_BUSY; in mpr_config_complete()
4027 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; in mpr_config_complete()
4029 params->status = MPI2_IOCSTATUS_BUSY; in mpr_config_complete()
4032 params->status = reply->IOCStatus; in mpr_config_complete()
4033 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { in mpr_config_complete()
4034 params->hdr.Ext.ExtPageType = reply->ExtPageType; in mpr_config_complete()
4035 params->hdr.Ext.ExtPageLength = reply->ExtPageLength; in mpr_config_complete()
4036 params->hdr.Ext.PageType = reply->Header.PageType; in mpr_config_complete()
4037 params->hdr.Ext.PageNumber = reply->Header.PageNumber; in mpr_config_complete()
4038 params->hdr.Ext.PageVersion = reply->Header.PageVersion; in mpr_config_complete()
4040 params->hdr.Struct.PageType = reply->Header.PageType; in mpr_config_complete()
4041 params->hdr.Struct.PageNumber = reply->Header.PageNumber; in mpr_config_complete()
4042 params->hdr.Struct.PageLength = reply->Header.PageLength; in mpr_config_complete()
4043 params->hdr.Struct.PageVersion = reply->Header.PageVersion; in mpr_config_complete()
4047 mpr_free_command(sc, cm); in mpr_config_complete()
4048 if (params->callback != NULL) in mpr_config_complete()
4049 params->callback(sc, params); in mpr_config_complete()