Lines Matching +full:sg +full:- +full:micro
1 /*-
3 * Copyright (c) 2011-2015 LSI Corp.
4 * Copyright (c) 2013-2016 Avago Technologies
5 * Copyright 2000-2020 Broadcom Inc.
29 * Broadcom Inc. (LSI) MPT-Fusion Host Adapter FreeBSD
126 * Added this union to smoothly convert le64toh(cm->cm_desc.Words).
139 /* Rate limit chain-fail messages to 1 per minute */
167 if (curthread->td_no_sleeping) in mpr_diag_reset()
179 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) in mpr_diag_reset()
180 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, in mpr_diag_reset()
213 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) in mpr_diag_reset()
214 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, in mpr_diag_reset()
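
The two fragments above show the driver's standard "sleep if we safely can, otherwise spin" pause while polling hardware state during a diag reset. A minimal sketch of that pattern, assuming a softc with the same mutex and fake-channel fields; pause_one_tick() is a hypothetical helper, and the channel is never signalled (msleep() here is only a timed pause):

	static void
	pause_one_tick(struct mpr_softc *sc, int sleep_flag)
	{
		if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP)
			msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0,
			    "mprpause", 1);	/* sleep for ~1 tick */
		else
			DELAY(1000);		/* 1 ms busy-wait when sleeping is unsafe */
	}
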
284 sleep_flags = (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE) in mpr_transition_ready()
392 prireqcr = MAX(1, sc->max_prireqframes); in mpr_resize_queues()
393 prireqcr = MIN(prireqcr, sc->facts->HighPriorityCredit); in mpr_resize_queues()
395 reqcr = MAX(2, sc->max_reqframes); in mpr_resize_queues()
396 reqcr = MIN(reqcr, sc->facts->RequestCredit); in mpr_resize_queues()
398 sc->num_reqs = prireqcr + reqcr; in mpr_resize_queues()
399 sc->num_prireqs = prireqcr; in mpr_resize_queues()
400 sc->num_replies = MIN(sc->max_replyframes + sc->max_evtframes, in mpr_resize_queues()
401 sc->facts->MaxReplyDescriptorPostQueueDepth) - 1; in mpr_resize_queues()
404 sc->reqframesz = sc->facts->IOCRequestFrameSize * 4; in mpr_resize_queues()
411 * number of 16-byte elements that can fit in a Chain Frame, which is in mpr_resize_queues()
414 if (sc->facts->MsgVersion >= MPI2_VERSION_02_05) { in mpr_resize_queues()
415 chain_seg_size = sc->facts->IOCMaxChainSegmentSize; in mpr_resize_queues()
418 sc->chain_frame_size = chain_seg_size * in mpr_resize_queues()
421 sc->chain_frame_size = sc->reqframesz; in mpr_resize_queues()
426 * ((SGEs per frame - 1 for chain element) * Max Chain Depth) in mpr_resize_queues()
433 sges_per_frame = sc->chain_frame_size/sizeof(MPI2_IEEE_SGE_SIMPLE64)-1; in mpr_resize_queues()
434 maxio = (sges_per_frame * sc->facts->MaxChainDepth + 1) * PAGE_SIZE; in mpr_resize_queues()
440 if (sc->max_io_pages > 0) { in mpr_resize_queues()
441 maxio = min(maxio, sc->max_io_pages * PAGE_SIZE); in mpr_resize_queues()
442 sc->maxio = maxio; in mpr_resize_queues()
444 sc->maxio = maxio; in mpr_resize_queues()
448 sc->num_chains = (maxio / PAGE_SIZE + sges_per_frame - 2) / in mpr_resize_queues()
450 if (sc->max_chains > 0 && sc->max_chains < sc->num_chains) in mpr_resize_queues()
451 sc->num_chains = sc->max_chains; in mpr_resize_queues()
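
A worked instance of the sizing arithmetic above, with illustrative values in place of the IOC Facts fields (an IOCRequestFrameSize of 32 gives 128-byte chain frames; MaxChainDepth 128; 4 KiB pages); the real values come from the IOC:

	#include <stdio.h>

	int
	main(void)
	{
		const int page_size = 4096;
		const int sge64 = 16;			/* sizeof(MPI2_IEEE_SGE_SIMPLE64) */
		int chain_frame_size = 32 * 4;		/* IOCRequestFrameSize * 4 = 128 */
		int max_chain_depth = 128;

		/* One SGE per frame is consumed by the chain element itself. */
		int sges_per_frame = chain_frame_size / sge64 - 1;	/* 7 */
		int maxio = (sges_per_frame * max_chain_depth + 1) * page_size;
		int num_chains = (maxio / page_size + sges_per_frame - 2) /
		    sges_per_frame;

		/* prints: sges/frame 7, maxio 3674112, chains 128 */
		printf("sges/frame %d, maxio %d, chains %d\n",
		    sges_per_frame, maxio, num_chains);
		return (0);
	}
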
454 * Figure out the number of MSIx-based queues. If the firmware or in mpr_resize_queues()
456 * the queues to be useful, then don't enable multi-queue. in mpr_resize_queues()
458 if (sc->facts->MaxMSIxVectors < 2) in mpr_resize_queues()
459 sc->msi_msgs = 1; in mpr_resize_queues()
461 if (sc->msi_msgs > 1) { in mpr_resize_queues()
462 sc->msi_msgs = MIN(sc->msi_msgs, mp_ncpus); in mpr_resize_queues()
463 sc->msi_msgs = MIN(sc->msi_msgs, sc->facts->MaxMSIxVectors); in mpr_resize_queues()
464 if (sc->num_reqs / sc->msi_msgs < 2) in mpr_resize_queues()
465 sc->msi_msgs = 1; in mpr_resize_queues()
469 sc->msi_msgs, sc->num_reqs, sc->num_replies); in mpr_resize_queues()
473 * This is called during attach and when re-initializing due to a Diag Reset.
475 * If called from attach, de-allocation is not required because the driver has
477 * allocated structures based on IOC Facts will need to be freed and re-
491 bcopy(sc->facts, &saved_facts, sizeof(MPI2_IOC_FACTS_REPLY)); in mpr_iocfacts_allocate()
496 * a re-initialization and only return the error if attaching so the OS in mpr_iocfacts_allocate()
499 if ((error = mpr_get_iocfacts(sc, sc->facts)) != 0) { in mpr_iocfacts_allocate()
510 MPR_DPRINT_PAGE(sc, MPR_XINFO, iocfacts, sc->facts); in mpr_iocfacts_allocate()
512 snprintf(sc->fw_version, sizeof(sc->fw_version), in mpr_iocfacts_allocate()
514 sc->facts->FWVersion.Struct.Major, in mpr_iocfacts_allocate()
515 sc->facts->FWVersion.Struct.Minor, in mpr_iocfacts_allocate()
516 sc->facts->FWVersion.Struct.Unit, in mpr_iocfacts_allocate()
517 sc->facts->FWVersion.Struct.Dev); in mpr_iocfacts_allocate()
519 snprintf(sc->msg_version, sizeof(sc->msg_version), "%d.%d", in mpr_iocfacts_allocate()
520 (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MAJOR_MASK) >> in mpr_iocfacts_allocate()
522 (sc->facts->MsgVersion & MPI2_IOCFACTS_MSGVERSION_MINOR_MASK) >> in mpr_iocfacts_allocate()
525 mpr_dprint(sc, MPR_INFO, "Firmware: %s, Driver: %s\n", sc->fw_version, in mpr_iocfacts_allocate()
528 "IOCCapabilities: %b\n", sc->facts->IOCCapabilities, in mpr_iocfacts_allocate()
541 if (attaching && ((sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
558 saved_mode = sc->ir_firmware; in mpr_iocfacts_allocate()
559 if (sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
561 sc->ir_firmware = 1; in mpr_iocfacts_allocate()
563 if (sc->ir_firmware != saved_mode) { in mpr_iocfacts_allocate()
571 sc->mpr_flags &= ~MPR_FLAGS_REALLOCATED; in mpr_iocfacts_allocate()
574 ((saved_facts.MsgVersion != sc->facts->MsgVersion) || in mpr_iocfacts_allocate()
575 (saved_facts.HeaderVersion != sc->facts->HeaderVersion) || in mpr_iocfacts_allocate()
576 (saved_facts.MaxChainDepth != sc->facts->MaxChainDepth) || in mpr_iocfacts_allocate()
577 (saved_facts.RequestCredit != sc->facts->RequestCredit) || in mpr_iocfacts_allocate()
578 (saved_facts.ProductID != sc->facts->ProductID) || in mpr_iocfacts_allocate()
579 (saved_facts.IOCCapabilities != sc->facts->IOCCapabilities) || in mpr_iocfacts_allocate()
581 sc->facts->IOCRequestFrameSize) || in mpr_iocfacts_allocate()
583 sc->facts->IOCMaxChainSegmentSize) || in mpr_iocfacts_allocate()
584 (saved_facts.MaxTargets != sc->facts->MaxTargets) || in mpr_iocfacts_allocate()
585 (saved_facts.MaxSasExpanders != sc->facts->MaxSasExpanders) || in mpr_iocfacts_allocate()
586 (saved_facts.MaxEnclosures != sc->facts->MaxEnclosures) || in mpr_iocfacts_allocate()
587 (saved_facts.HighPriorityCredit != sc->facts->HighPriorityCredit) || in mpr_iocfacts_allocate()
589 sc->facts->MaxReplyDescriptorPostQueueDepth) || in mpr_iocfacts_allocate()
590 (saved_facts.ReplyFrameSize != sc->facts->ReplyFrameSize) || in mpr_iocfacts_allocate()
591 (saved_facts.MaxVolumes != sc->facts->MaxVolumes) || in mpr_iocfacts_allocate()
593 sc->facts->MaxPersistentEntries))) { in mpr_iocfacts_allocate()
597 sc->mpr_flags |= MPR_FLAGS_REALLOCATED; in mpr_iocfacts_allocate()
601 * Some things should be done if attaching or re-allocating after a Diag in mpr_iocfacts_allocate()
610 if (sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
612 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_TRACE]. in mpr_iocfacts_allocate()
614 if (sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
616 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_SNAPSHOT]. in mpr_iocfacts_allocate()
618 if (sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
620 sc->fw_diag_buffer_list[MPI2_DIAG_BUF_TYPE_EXTENDED]. in mpr_iocfacts_allocate()
626 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) in mpr_iocfacts_allocate()
627 sc->eedp_enabled = TRUE; in mpr_iocfacts_allocate()
628 if (sc->facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) in mpr_iocfacts_allocate()
629 sc->control_TLR = TRUE; in mpr_iocfacts_allocate()
630 if ((sc->facts->IOCCapabilities & in mpr_iocfacts_allocate()
632 (sc->mpr_flags & MPR_FLAGS_SEA_IOC)) in mpr_iocfacts_allocate()
633 sc->atomic_desc_capable = TRUE; in mpr_iocfacts_allocate()
640 TAILQ_INIT(&sc->req_list); in mpr_iocfacts_allocate()
641 TAILQ_INIT(&sc->high_priority_req_list); in mpr_iocfacts_allocate()
642 TAILQ_INIT(&sc->chain_list); in mpr_iocfacts_allocate()
643 TAILQ_INIT(&sc->prp_page_list); in mpr_iocfacts_allocate()
644 TAILQ_INIT(&sc->tm_list); in mpr_iocfacts_allocate()
685 bzero(sc->free_queue, sc->fqdepth * 4); in mpr_iocfacts_allocate()
711 sc->replypostindex = 0; in mpr_iocfacts_allocate()
712 mpr_regwrite(sc, MPI2_REPLY_FREE_HOST_INDEX_OFFSET, sc->replyfreeindex); in mpr_iocfacts_allocate()
738 * XXX If the number of MSI-X vectors changes during re-init, this in mpr_iocfacts_allocate()
763 if (sc->free_busaddr != 0) in mpr_iocfacts_free()
764 bus_dmamap_unload(sc->queues_dmat, sc->queues_map); in mpr_iocfacts_free()
765 if (sc->free_queue != NULL) in mpr_iocfacts_free()
766 bus_dmamem_free(sc->queues_dmat, sc->free_queue, in mpr_iocfacts_free()
767 sc->queues_map); in mpr_iocfacts_free()
768 if (sc->queues_dmat != NULL) in mpr_iocfacts_free()
769 bus_dma_tag_destroy(sc->queues_dmat); in mpr_iocfacts_free()
771 if (sc->chain_frames != NULL) { in mpr_iocfacts_free()
772 bus_dmamap_unload(sc->chain_dmat, sc->chain_map); in mpr_iocfacts_free()
773 bus_dmamem_free(sc->chain_dmat, sc->chain_frames, in mpr_iocfacts_free()
774 sc->chain_map); in mpr_iocfacts_free()
776 if (sc->chain_dmat != NULL) in mpr_iocfacts_free()
777 bus_dma_tag_destroy(sc->chain_dmat); in mpr_iocfacts_free()
779 if (sc->sense_busaddr != 0) in mpr_iocfacts_free()
780 bus_dmamap_unload(sc->sense_dmat, sc->sense_map); in mpr_iocfacts_free()
781 if (sc->sense_frames != NULL) in mpr_iocfacts_free()
782 bus_dmamem_free(sc->sense_dmat, sc->sense_frames, in mpr_iocfacts_free()
783 sc->sense_map); in mpr_iocfacts_free()
784 if (sc->sense_dmat != NULL) in mpr_iocfacts_free()
785 bus_dma_tag_destroy(sc->sense_dmat); in mpr_iocfacts_free()
787 if (sc->prp_page_busaddr != 0) in mpr_iocfacts_free()
788 bus_dmamap_unload(sc->prp_page_dmat, sc->prp_page_map); in mpr_iocfacts_free()
789 if (sc->prp_pages != NULL) in mpr_iocfacts_free()
790 bus_dmamem_free(sc->prp_page_dmat, sc->prp_pages, in mpr_iocfacts_free()
791 sc->prp_page_map); in mpr_iocfacts_free()
792 if (sc->prp_page_dmat != NULL) in mpr_iocfacts_free()
793 bus_dma_tag_destroy(sc->prp_page_dmat); in mpr_iocfacts_free()
795 if (sc->reply_busaddr != 0) in mpr_iocfacts_free()
796 bus_dmamap_unload(sc->reply_dmat, sc->reply_map); in mpr_iocfacts_free()
797 if (sc->reply_frames != NULL) in mpr_iocfacts_free()
798 bus_dmamem_free(sc->reply_dmat, sc->reply_frames, in mpr_iocfacts_free()
799 sc->reply_map); in mpr_iocfacts_free()
800 if (sc->reply_dmat != NULL) in mpr_iocfacts_free()
801 bus_dma_tag_destroy(sc->reply_dmat); in mpr_iocfacts_free()
803 if (sc->req_busaddr != 0) in mpr_iocfacts_free()
804 bus_dmamap_unload(sc->req_dmat, sc->req_map); in mpr_iocfacts_free()
805 if (sc->req_frames != NULL) in mpr_iocfacts_free()
806 bus_dmamem_free(sc->req_dmat, sc->req_frames, sc->req_map); in mpr_iocfacts_free()
807 if (sc->req_dmat != NULL) in mpr_iocfacts_free()
808 bus_dma_tag_destroy(sc->req_dmat); in mpr_iocfacts_free()
810 if (sc->chains != NULL) in mpr_iocfacts_free()
811 free(sc->chains, M_MPR); in mpr_iocfacts_free()
812 if (sc->prps != NULL) in mpr_iocfacts_free()
813 free(sc->prps, M_MPR); in mpr_iocfacts_free()
814 if (sc->commands != NULL) { in mpr_iocfacts_free()
815 for (i = 1; i < sc->num_reqs; i++) { in mpr_iocfacts_free()
816 cm = &sc->commands[i]; in mpr_iocfacts_free()
817 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap); in mpr_iocfacts_free()
819 free(sc->commands, M_MPR); in mpr_iocfacts_free()
821 if (sc->buffer_dmat != NULL) in mpr_iocfacts_free()
822 bus_dma_tag_destroy(sc->buffer_dmat); in mpr_iocfacts_free()
825 free(sc->queues, M_MPR); in mpr_iocfacts_free()
826 sc->queues = NULL; in mpr_iocfacts_free()
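
Note the fixed per-resource pattern throughout mpr_iocfacts_free() above: everything is released in the reverse of allocation order, and each step is guarded so a partially completed attach tears down cleanly. Condensed into a hypothetical helper (the driver itself open-codes this for each resource):

	static void
	free_one_dma(bus_dma_tag_t tag, bus_dmamap_t map, void *vaddr,
	    bus_addr_t busaddr)
	{
		if (busaddr != 0)		/* map was loaded? */
			bus_dmamap_unload(tag, map);
		if (vaddr != NULL)		/* memory was allocated? */
			bus_dmamem_free(tag, vaddr, map);
		if (tag != NULL)		/* tag was created? */
			bus_dma_tag_destroy(tag);
	}
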
843 sassc = sc->sassc; in mpr_reinit()
847 mtx_assert(&sc->mpr_mtx, MA_OWNED); in mpr_reinit()
850 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) { in mpr_reinit()
859 sc->mpr_flags |= MPR_FLAGS_DIAGRESET; in mpr_reinit()
872 /* Restore the PCI state, including the MSI-X registers */ in mpr_reinit()
890 * Mapping structures will be re-allocated after getting IOC Page8, so in mpr_reinit()
903 sc->mpr_flags &= ~MPR_FLAGS_DIAGRESET; in mpr_reinit()
907 * Some mapping info is based in IOC Page8 data, so re-initialize the in mpr_reinit()
920 sc, sc->replypostindex, sc->replyfreeindex); in mpr_reinit()
962 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) in mpr_wait_db_ack()
963 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, "mprdba", in mpr_wait_db_ack()
970 } while (--cntdn); in mpr_wait_db_ack()
1003 if (curthread->td_no_sleeping) in mpr_request_sync()
1035 /* Clock out the message data synchronously in 32-bit dwords */ in mpr_request_sync()
1047 /* Clock in the reply in 16-bit words. The total length of the in mpr_request_sync()
1074 ioc_sz = reply->MsgLength; in mpr_request_sync()
1084 residual = ioc_sz * 2 - count; in mpr_request_sync()
1105 while (residual--) { in mpr_request_sync()
1133 cm->cm_desc.Default.SMID, cm, cm->cm_ccb); in mpr_enqueue_request()
1135 if (sc->mpr_flags & MPR_FLAGS_ATTACH_DONE && !(sc->mpr_flags & in mpr_enqueue_request()
1137 mtx_assert(&sc->mpr_mtx, MA_OWNED); in mpr_enqueue_request()
1139 if (++sc->io_cmds_active > sc->io_cmds_highwater) in mpr_enqueue_request()
1140 sc->io_cmds_highwater++; in mpr_enqueue_request()
1142 KASSERT(cm->cm_state == MPR_CM_STATE_BUSY, in mpr_enqueue_request()
1143 ("command not busy, state = %u\n", cm->cm_state)); in mpr_enqueue_request()
1144 cm->cm_state = MPR_CM_STATE_INQUEUE; in mpr_enqueue_request()
1146 if (sc->atomic_desc_capable) { in mpr_enqueue_request()
1147 rd.u.low = cm->cm_desc.Words.Low; in mpr_enqueue_request()
1151 rd.u.low = htole32(cm->cm_desc.Words.Low); in mpr_enqueue_request()
1152 rd.u.high = htole32(cm->cm_desc.Words.High); in mpr_enqueue_request()
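
Once the descriptor union rd is filled in, posting differs by IOC generation: atomic-capable parts take a single 32-bit write, while older parts take a low/high pair of writes. A hedged sketch of the tail of mpr_enqueue_request(), assuming the standard MPI 2.x system-interface register offsets and the driver's mpr_regwrite() accessor:

	if (sc->atomic_desc_capable) {
		/* One 32-bit write; the IOC latches the full descriptor. */
		mpr_regwrite(sc, MPI26_ATOMIC_REQUEST_DESCRIPTOR_POST_OFFSET,
		    rd.u.low);
	} else {
		/* Two 32-bit writes, low word first. */
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_LOW_OFFSET,
		    rd.u.low);
		mpr_regwrite(sc, MPI2_REQUEST_DESCRIPTOR_POST_HIGH_OFFSET,
		    rd.u.high);
	}
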
1168 facts->HeaderVersion = le16toh(facts->HeaderVersion); in adjust_iocfacts_endianness()
1169 facts->Reserved1 = le16toh(facts->Reserved1); in adjust_iocfacts_endianness()
1170 facts->IOCExceptions = le16toh(facts->IOCExceptions); in adjust_iocfacts_endianness()
1171 facts->IOCStatus = le16toh(facts->IOCStatus); in adjust_iocfacts_endianness()
1172 facts->IOCLogInfo = le32toh(facts->IOCLogInfo); in adjust_iocfacts_endianness()
1173 facts->RequestCredit = le16toh(facts->RequestCredit); in adjust_iocfacts_endianness()
1174 facts->ProductID = le16toh(facts->ProductID); in adjust_iocfacts_endianness()
1175 facts->IOCCapabilities = le32toh(facts->IOCCapabilities); in adjust_iocfacts_endianness()
1176 facts->IOCRequestFrameSize = le16toh(facts->IOCRequestFrameSize); in adjust_iocfacts_endianness()
1177 facts->IOCMaxChainSegmentSize = le16toh(facts->IOCMaxChainSegmentSize); in adjust_iocfacts_endianness()
1178 facts->MaxInitiators = le16toh(facts->MaxInitiators); in adjust_iocfacts_endianness()
1179 facts->MaxTargets = le16toh(facts->MaxTargets); in adjust_iocfacts_endianness()
1180 facts->MaxSasExpanders = le16toh(facts->MaxSasExpanders); in adjust_iocfacts_endianness()
1181 facts->MaxEnclosures = le16toh(facts->MaxEnclosures); in adjust_iocfacts_endianness()
1182 facts->ProtocolFlags = le16toh(facts->ProtocolFlags); in adjust_iocfacts_endianness()
1183 facts->HighPriorityCredit = le16toh(facts->HighPriorityCredit); in adjust_iocfacts_endianness()
1184 facts->MaxReplyDescriptorPostQueueDepth = le16toh(facts->MaxReplyDescriptorPostQueueDepth); in adjust_iocfacts_endianness()
1185 facts->MaxDevHandle = le16toh(facts->MaxDevHandle); in adjust_iocfacts_endianness()
1186 facts->MaxPersistentEntries = le16toh(facts->MaxPersistentEntries); in adjust_iocfacts_endianness()
1187 facts->MinDevHandle = le16toh(facts->MinDevHandle); in adjust_iocfacts_endianness()
1225 mpr_dprint(sc, MPR_TRACE, "facts->IOCCapabilities 0x%x\n", facts->IOCCapabilities); in mpr_get_iocfacts()
1244 if ((sc->pqdepth == 0) || (sc->fqdepth == 0) || (sc->reqframesz == 0) in mpr_send_iocinit()
1245 || (sc->replyframesz == 0)) { in mpr_send_iocinit()
1258 * deliberately in the lower 32 bits of memory. This is a micro- in mpr_send_iocinit()
1265 init.SystemRequestFrameSize = htole16((uint16_t)(sc->reqframesz / 4)); in mpr_send_iocinit()
1266 init.ReplyDescriptorPostQueueDepth = htole16(sc->pqdepth); in mpr_send_iocinit()
1267 init.ReplyFreeQueueDepth = htole16(sc->fqdepth); in mpr_send_iocinit()
1272 htole32((uint32_t)sc->req_busaddr); in mpr_send_iocinit()
1275 htole32((uint32_t)sc->post_busaddr); in mpr_send_iocinit()
1277 init.ReplyFreeQueueAddress.Low = htole32((uint32_t)sc->free_busaddr); in mpr_send_iocinit()
1312 mpr_lock(ctx->softc); in mpr_memaddr_wait_cb()
1313 ctx->error = error; in mpr_memaddr_wait_cb()
1314 ctx->completed = 1; in mpr_memaddr_wait_cb()
1315 if ((error == 0) && (ctx->abandoned == 0)) { in mpr_memaddr_wait_cb()
1316 *ctx->addr = segs[0].ds_addr; in mpr_memaddr_wait_cb()
1320 if (ctx->abandoned != 0) in mpr_memaddr_wait_cb()
1326 mpr_unlock(ctx->softc); in mpr_memaddr_wait_cb()
1329 bus_dmamap_unload(ctx->buffer_dmat, in mpr_memaddr_wait_cb()
1330 ctx->buffer_dmamap); in mpr_memaddr_wait_cb()
1331 *ctx->addr = 0; in mpr_memaddr_wait_cb()
1344 nq = sc->msi_msgs; in mpr_alloc_queues()
1347 sc->queues = malloc(sizeof(struct mpr_queue) * nq, M_MPR, in mpr_alloc_queues()
1349 if (sc->queues == NULL) in mpr_alloc_queues()
1353 q = &sc->queues[i]; in mpr_alloc_queues()
1355 q->sc = sc; in mpr_alloc_queues()
1356 q->qnum = i; in mpr_alloc_queues()
1376 * contains filled-in reply frames sent from the firmware to the host. in mpr_alloc_hw_queues()
1380 sc->fqdepth = roundup2(sc->num_replies + 1, 16); in mpr_alloc_hw_queues()
1381 sc->pqdepth = roundup2(sc->num_replies + 1, 16); in mpr_alloc_hw_queues()
1382 fqsize = sc->fqdepth * 4; in mpr_alloc_hw_queues()
1383 pqsize = sc->pqdepth * 8; in mpr_alloc_hw_queues()
1386 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_hw_queues()
1390 if (bus_dma_template_tag(&t, &sc->queues_dmat)) { in mpr_alloc_hw_queues()
1394 if (bus_dmamem_alloc(sc->queues_dmat, (void **)&queues, BUS_DMA_NOWAIT, in mpr_alloc_hw_queues()
1395 &sc->queues_map)) { in mpr_alloc_hw_queues()
1400 bus_dmamap_load(sc->queues_dmat, sc->queues_map, queues, qsize, in mpr_alloc_hw_queues()
1403 sc->free_queue = (uint32_t *)queues; in mpr_alloc_hw_queues()
1404 sc->free_busaddr = queues_busaddr; in mpr_alloc_hw_queues()
1405 sc->post_queue = (MPI2_REPLY_DESCRIPTORS_UNION *)(queues + fqsize); in mpr_alloc_hw_queues()
1406 sc->post_busaddr = queues_busaddr + fqsize; in mpr_alloc_hw_queues()
1408 (uintmax_t)sc->free_busaddr, fqsize); in mpr_alloc_hw_queues()
1410 (uintmax_t)sc->post_busaddr, pqsize); in mpr_alloc_hw_queues()
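
The depths at lines 1380-1383 are rounded up to a multiple of 16 (the depth granularity the IOC expects), and the two rings use different entry sizes. Illustrative arithmetic, assuming num_replies == 1023:

	fqdepth = roundup2(1023 + 1, 16);	/* 1024 entries */
	pqdepth = roundup2(1023 + 1, 16);	/* 1024 entries */
	fqsize  = fqdepth * 4;	/* 4096 bytes: 32-bit reply-free entries */
	pqsize  = pqdepth * 8;	/* 8192 bytes: 64-bit post descriptors */
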
1422 sc->replyframesz = sc->facts->ReplyFrameSize * 4; in mpr_alloc_replies()
1425 * sc->num_replies should be one less than sc->fqdepth. We need to in mpr_alloc_replies()
1426 * allocate space for sc->fqdepth replies, but only sc->num_replies in mpr_alloc_replies()
1429 num_replies = max(sc->fqdepth, sc->num_replies); in mpr_alloc_replies()
1431 rsize = sc->replyframesz * num_replies; in mpr_alloc_replies()
1432 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_replies()
1436 if (bus_dma_template_tag(&t, &sc->reply_dmat)) { in mpr_alloc_replies()
1440 if (bus_dmamem_alloc(sc->reply_dmat, (void **)&sc->reply_frames, in mpr_alloc_replies()
1441 BUS_DMA_NOWAIT, &sc->reply_map)) { in mpr_alloc_replies()
1445 bzero(sc->reply_frames, rsize); in mpr_alloc_replies()
1446 bus_dmamap_load(sc->reply_dmat, sc->reply_map, sc->reply_frames, rsize, in mpr_alloc_replies()
1447 mpr_memaddr_cb, &sc->reply_busaddr, 0); in mpr_alloc_replies()
1449 (uintmax_t)sc->reply_busaddr, rsize); in mpr_alloc_replies()
1466 for (bo = 0; bo + sc->chain_frame_size <= segs[s].ds_len; in mpr_load_chains_cb()
1467 bo += sc->chain_frame_size) { in mpr_load_chains_cb()
1468 chain = &sc->chains[i++]; in mpr_load_chains_cb()
1469 chain->chain = (MPI2_SGE_IO_UNION *)(sc->chain_frames + o); in mpr_load_chains_cb()
1470 chain->chain_busaddr = segs[s].ds_addr + bo; in mpr_load_chains_cb()
1471 o += sc->chain_frame_size; in mpr_load_chains_cb()
1475 o += segs[s].ds_len - bo; in mpr_load_chains_cb()
1477 sc->chain_free_lowwater = i; in mpr_load_chains_cb()
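
The callback above carves whole chain frames out of each DMA segment and skips any per-segment tail smaller than one frame, so a frame never straddles a segment boundary. A self-contained model of that loop with illustrative sizes:

	#include <stdio.h>

	int
	main(void)
	{
		unsigned long seg_len = 10000, frame = 128, bo, n = 0;

		for (bo = 0; bo + frame <= seg_len; bo += frame)
			n++;
		/* prints: 78 frames, 16 tail bytes skipped */
		printf("%lu frames, %lu tail bytes skipped\n",
		    n, seg_len - bo);
		return (0);
	}
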
1487 rsize = sc->reqframesz * sc->num_reqs; in mpr_alloc_requests()
1488 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_requests()
1492 if (bus_dma_template_tag(&t, &sc->req_dmat)) { in mpr_alloc_requests()
1496 if (bus_dmamem_alloc(sc->req_dmat, (void **)&sc->req_frames, in mpr_alloc_requests()
1497 BUS_DMA_NOWAIT, &sc->req_map)) { in mpr_alloc_requests()
1501 bzero(sc->req_frames, rsize); in mpr_alloc_requests()
1502 bus_dmamap_load(sc->req_dmat, sc->req_map, sc->req_frames, rsize, in mpr_alloc_requests()
1503 mpr_memaddr_cb, &sc->req_busaddr, 0); in mpr_alloc_requests()
1505 (uintmax_t)sc->req_busaddr, rsize); in mpr_alloc_requests()
1507 sc->chains = malloc(sizeof(struct mpr_chain) * sc->num_chains, M_MPR, in mpr_alloc_requests()
1509 if (!sc->chains) { in mpr_alloc_requests()
1513 rsize = sc->chain_frame_size * sc->num_chains; in mpr_alloc_requests()
1514 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_requests()
1518 if (bus_dma_template_tag(&t, &sc->chain_dmat)) { in mpr_alloc_requests()
1522 if (bus_dmamem_alloc(sc->chain_dmat, (void **)&sc->chain_frames, in mpr_alloc_requests()
1523 BUS_DMA_NOWAIT | BUS_DMA_ZERO, &sc->chain_map)) { in mpr_alloc_requests()
1527 if (bus_dmamap_load(sc->chain_dmat, sc->chain_map, sc->chain_frames, in mpr_alloc_requests()
1530 bus_dmamem_free(sc->chain_dmat, sc->chain_frames, in mpr_alloc_requests()
1531 sc->chain_map); in mpr_alloc_requests()
1535 rsize = MPR_SENSE_LEN * sc->num_reqs; in mpr_alloc_requests()
1536 bus_dma_template_clone(&t, sc->req_dmat); in mpr_alloc_requests()
1539 if (bus_dma_template_tag(&t, &sc->sense_dmat)) { in mpr_alloc_requests()
1543 if (bus_dmamem_alloc(sc->sense_dmat, (void **)&sc->sense_frames, in mpr_alloc_requests()
1544 BUS_DMA_NOWAIT, &sc->sense_map)) { in mpr_alloc_requests()
1548 bzero(sc->sense_frames, rsize); in mpr_alloc_requests()
1549 bus_dmamap_load(sc->sense_dmat, sc->sense_map, sc->sense_frames, rsize, in mpr_alloc_requests()
1550 mpr_memaddr_cb, &sc->sense_busaddr, 0); in mpr_alloc_requests()
1552 (uintmax_t)sc->sense_busaddr, rsize); in mpr_alloc_requests()
1558 if ((sc->facts->MsgVersion >= MPI2_VERSION_02_06) && in mpr_alloc_requests()
1559 (sc->facts->ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES)) { in mpr_alloc_requests()
1564 nsegs = (sc->maxio / PAGE_SIZE) + 1; in mpr_alloc_requests()
1565 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_requests()
1569 BD_LOCKFUNCARG(&sc->mpr_mtx), in mpr_alloc_requests()
1571 if (bus_dma_template_tag(&t, &sc->buffer_dmat)) { in mpr_alloc_requests()
1580 sc->commands = malloc(sizeof(struct mpr_command) * sc->num_reqs, in mpr_alloc_requests()
1582 for (i = 1; i < sc->num_reqs; i++) { in mpr_alloc_requests()
1583 cm = &sc->commands[i]; in mpr_alloc_requests()
1584 cm->cm_req = sc->req_frames + i * sc->reqframesz; in mpr_alloc_requests()
1585 cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz; in mpr_alloc_requests()
1586 cm->cm_sense = &sc->sense_frames[i]; in mpr_alloc_requests()
1587 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN; in mpr_alloc_requests()
1588 cm->cm_desc.Default.SMID = htole16(i); in mpr_alloc_requests()
1589 cm->cm_sc = sc; in mpr_alloc_requests()
1590 cm->cm_state = MPR_CM_STATE_BUSY; in mpr_alloc_requests()
1591 TAILQ_INIT(&cm->cm_chain_list); in mpr_alloc_requests()
1592 TAILQ_INIT(&cm->cm_prp_page_list); in mpr_alloc_requests()
1593 callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0); in mpr_alloc_requests()
1596 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) in mpr_alloc_requests()
1598 if (i <= sc->num_prireqs) in mpr_alloc_requests()
1604 sc->num_reqs = i; in mpr_alloc_requests()
1648 PRPs_required = sc->maxio / PAGE_SIZE; in mpr_alloc_nvme_prp_pages()
1649 PRPs_per_page = (PAGE_SIZE / PRP_ENTRY_SIZE) - 1; in mpr_alloc_nvme_prp_pages()
1652 sc->prp_buffer_size = PAGE_SIZE * pages_required; in mpr_alloc_nvme_prp_pages()
1653 rsize = sc->prp_buffer_size * NVME_QDEPTH; in mpr_alloc_nvme_prp_pages()
1654 bus_dma_template_init(&t, sc->mpr_parent_dmat); in mpr_alloc_nvme_prp_pages()
1658 if (bus_dma_template_tag(&t, &sc->prp_page_dmat)) { in mpr_alloc_nvme_prp_pages()
1663 if (bus_dmamem_alloc(sc->prp_page_dmat, (void **)&sc->prp_pages, in mpr_alloc_nvme_prp_pages()
1664 BUS_DMA_NOWAIT, &sc->prp_page_map)) { in mpr_alloc_nvme_prp_pages()
1668 bzero(sc->prp_pages, rsize); in mpr_alloc_nvme_prp_pages()
1669 bus_dmamap_load(sc->prp_page_dmat, sc->prp_page_map, sc->prp_pages, in mpr_alloc_nvme_prp_pages()
1670 rsize, mpr_memaddr_cb, &sc->prp_page_busaddr, 0); in mpr_alloc_nvme_prp_pages()
1672 sc->prps = malloc(sizeof(struct mpr_prp_page) * NVME_QDEPTH, M_MPR, in mpr_alloc_nvme_prp_pages()
1675 prp_page = &sc->prps[i]; in mpr_alloc_nvme_prp_pages()
1676 prp_page->prp_page = (uint64_t *)(sc->prp_pages + in mpr_alloc_nvme_prp_pages()
1677 i * sc->prp_buffer_size); in mpr_alloc_nvme_prp_pages()
1678 prp_page->prp_page_busaddr = (uint64_t)(sc->prp_page_busaddr + in mpr_alloc_nvme_prp_pages()
1679 i * sc->prp_buffer_size); in mpr_alloc_nvme_prp_pages()
1681 sc->prp_pages_free_lowwater++; in mpr_alloc_nvme_prp_pages()
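
The PRP sizing above reserves one slot per PRP page: the driver counts PAGE_SIZE / PRP_ENTRY_SIZE - 1 usable entries, leaving the last slot available to chain to a further page. Illustrative numbers, assuming 4 KiB pages, 8-byte entries, and a 1 MiB maxio:

	PRPs_required  = 1048576 / 4096;	/* 256 entries needed */
	PRPs_per_page  = (4096 / 8) - 1;	/* 511 usable per page */
	pages_required = (256 + 511 - 1) / 511;	/* 1 PRP page per command */
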
1692 memset((uint8_t *)sc->post_queue, 0xff, sc->pqdepth * 8); in mpr_init_queues()
1696 * have space for on the queue. So sc->num_replies (the number we in mpr_init_queues()
1697 * use) should be less than sc->fqdepth (allocated size). in mpr_init_queues()
1699 if (sc->num_replies >= sc->fqdepth) in mpr_init_queues()
1705 for (i = 0; i < sc->fqdepth; i++) { in mpr_init_queues()
1706 sc->free_queue[i] = htole32(sc->reply_busaddr + (i * sc->replyframesz)); in mpr_init_queues()
1708 sc->replyfreeindex = sc->num_replies; in mpr_init_queues()
1714 * Next are the global settings, if they exist. Highest are the per-unit
1723 sc->mpr_debug = MPR_INFO | MPR_FAULT; in mpr_get_tunables()
1724 sc->disable_msix = 0; in mpr_get_tunables()
1725 sc->disable_msi = 0; in mpr_get_tunables()
1726 sc->max_msix = MPR_MSIX_MAX; in mpr_get_tunables()
1727 sc->max_chains = MPR_CHAIN_FRAMES; in mpr_get_tunables()
1728 sc->max_io_pages = MPR_MAXIO_PAGES; in mpr_get_tunables()
1729 sc->enable_ssu = MPR_SSU_ENABLE_SSD_DISABLE_HDD; in mpr_get_tunables()
1730 sc->spinup_wait_time = DEFAULT_SPINUP_WAIT; in mpr_get_tunables()
1731 sc->use_phynum = 1; in mpr_get_tunables()
1732 sc->max_reqframes = MPR_REQ_FRAMES; in mpr_get_tunables()
1733 sc->max_prireqframes = MPR_PRI_REQ_FRAMES; in mpr_get_tunables()
1734 sc->max_replyframes = MPR_REPLY_FRAMES; in mpr_get_tunables()
1735 sc->max_evtframes = MPR_EVT_REPLY_FRAMES; in mpr_get_tunables()
1743 TUNABLE_INT_FETCH("hw.mpr.disable_msix", &sc->disable_msix); in mpr_get_tunables()
1744 TUNABLE_INT_FETCH("hw.mpr.disable_msi", &sc->disable_msi); in mpr_get_tunables()
1745 TUNABLE_INT_FETCH("hw.mpr.max_msix", &sc->max_msix); in mpr_get_tunables()
1746 TUNABLE_INT_FETCH("hw.mpr.max_chains", &sc->max_chains); in mpr_get_tunables()
1747 TUNABLE_INT_FETCH("hw.mpr.max_io_pages", &sc->max_io_pages); in mpr_get_tunables()
1748 TUNABLE_INT_FETCH("hw.mpr.enable_ssu", &sc->enable_ssu); in mpr_get_tunables()
1749 TUNABLE_INT_FETCH("hw.mpr.spinup_wait_time", &sc->spinup_wait_time); in mpr_get_tunables()
1750 TUNABLE_INT_FETCH("hw.mpr.use_phy_num", &sc->use_phynum); in mpr_get_tunables()
1751 TUNABLE_INT_FETCH("hw.mpr.max_reqframes", &sc->max_reqframes); in mpr_get_tunables()
1752 TUNABLE_INT_FETCH("hw.mpr.max_prireqframes", &sc->max_prireqframes); in mpr_get_tunables()
1753 TUNABLE_INT_FETCH("hw.mpr.max_replyframes", &sc->max_replyframes); in mpr_get_tunables()
1754 TUNABLE_INT_FETCH("hw.mpr.max_evtframes", &sc->max_evtframes); in mpr_get_tunables()
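
These are ordinary loader tunables, so the global defaults fetched above can be overridden from /boot/loader.conf before the driver attaches; the values below are illustrative only:

	# /boot/loader.conf
	hw.mpr.max_chains="4096"
	hw.mpr.max_io_pages="32"
	hw.mpr.disable_msix="0"
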
1756 /* Grab the unit-instance variables */ in mpr_get_tunables()
1758 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1764 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1765 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msix); in mpr_get_tunables()
1768 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1769 TUNABLE_INT_FETCH(tmpstr, &sc->disable_msi); in mpr_get_tunables()
1772 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1773 TUNABLE_INT_FETCH(tmpstr, &sc->max_msix); in mpr_get_tunables()
1776 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1777 TUNABLE_INT_FETCH(tmpstr, &sc->max_chains); in mpr_get_tunables()
1780 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1781 TUNABLE_INT_FETCH(tmpstr, &sc->max_io_pages); in mpr_get_tunables()
1783 bzero(sc->exclude_ids, sizeof(sc->exclude_ids)); in mpr_get_tunables()
1785 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1786 TUNABLE_STR_FETCH(tmpstr, sc->exclude_ids, sizeof(sc->exclude_ids)); in mpr_get_tunables()
1789 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1790 TUNABLE_INT_FETCH(tmpstr, &sc->enable_ssu); in mpr_get_tunables()
1793 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1794 TUNABLE_INT_FETCH(tmpstr, &sc->spinup_wait_time); in mpr_get_tunables()
1797 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1798 TUNABLE_INT_FETCH(tmpstr, &sc->use_phynum); in mpr_get_tunables()
1801 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1802 TUNABLE_INT_FETCH(tmpstr, &sc->max_reqframes); in mpr_get_tunables()
1805 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1806 TUNABLE_INT_FETCH(tmpstr, &sc->max_prireqframes); in mpr_get_tunables()
1809 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1810 TUNABLE_INT_FETCH(tmpstr, &sc->max_replyframes); in mpr_get_tunables()
1813 device_get_unit(sc->mpr_dev)); in mpr_get_tunables()
1814 TUNABLE_INT_FETCH(tmpstr, &sc->max_evtframes); in mpr_get_tunables()
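
Per-unit settings embed the device instance number in the name and override the globals fetched earlier; again with illustrative values for unit 0:

	# /boot/loader.conf
	dev.mpr.0.max_chains="8192"
	dev.mpr.0.spinup_wait_time="10"
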
1829 device_get_unit(sc->mpr_dev)); in mpr_setup_sysctl()
1830 snprintf(tmpstr2, sizeof(tmpstr2), "%d", device_get_unit(sc->mpr_dev)); in mpr_setup_sysctl()
1832 sysctl_ctx = device_get_sysctl_ctx(sc->mpr_dev); in mpr_setup_sysctl()
1834 sysctl_tree = device_get_sysctl_tree(sc->mpr_dev); in mpr_setup_sysctl()
1837 sysctl_ctx_init(&sc->sysctl_ctx); in mpr_setup_sysctl()
1838 sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx, in mpr_setup_sysctl()
1841 if (sc->sysctl_tree == NULL) in mpr_setup_sysctl()
1843 sysctl_ctx = &sc->sysctl_ctx; in mpr_setup_sysctl()
1844 sysctl_tree = sc->sysctl_tree; in mpr_setup_sysctl()
1852 OID_AUTO, "disable_msix", CTLFLAG_RD, &sc->disable_msix, 0, in mpr_setup_sysctl()
1853 "Disable the use of MSI-X interrupts"); in mpr_setup_sysctl()
1856 OID_AUTO, "max_msix", CTLFLAG_RD, &sc->max_msix, 0, in mpr_setup_sysctl()
1857 "User-defined maximum number of MSIX queues"); in mpr_setup_sysctl()
1860 OID_AUTO, "msix_msgs", CTLFLAG_RD, &sc->msi_msgs, 0, in mpr_setup_sysctl()
1864 OID_AUTO, "max_reqframes", CTLFLAG_RD, &sc->max_reqframes, 0, in mpr_setup_sysctl()
1868 OID_AUTO, "max_prireqframes", CTLFLAG_RD, &sc->max_prireqframes, 0, in mpr_setup_sysctl()
1872 OID_AUTO, "max_replyframes", CTLFLAG_RD, &sc->max_replyframes, 0, in mpr_setup_sysctl()
1876 OID_AUTO, "max_evtframes", CTLFLAG_RD, &sc->max_evtframes, 0, in mpr_setup_sysctl()
1880 OID_AUTO, "firmware_version", CTLFLAG_RD, sc->fw_version, in mpr_setup_sysctl()
1881 strlen(sc->fw_version), "firmware version"); in mpr_setup_sysctl()
1888 OID_AUTO, "msg_version", CTLFLAG_RD, sc->msg_version, in mpr_setup_sysctl()
1889 strlen(sc->msg_version), "message interface version (deprecated)"); in mpr_setup_sysctl()
1893 &sc->io_cmds_active, 0, "number of currently active commands"); in mpr_setup_sysctl()
1897 &sc->io_cmds_highwater, 0, "maximum active commands seen"); in mpr_setup_sysctl()
1901 &sc->chain_free, 0, "number of free chain elements"); in mpr_setup_sysctl()
1905 &sc->chain_free_lowwater, 0, "lowest number of free chain elements"); in mpr_setup_sysctl()
1909 &sc->max_chains, 0, "maximum chain frames that will be allocated"); in mpr_setup_sysctl()
1913 &sc->max_io_pages, 0, "maximum pages to allow per I/O (if <1 use " in mpr_setup_sysctl()
1917 OID_AUTO, "enable_ssu", CTLFLAG_RW, &sc->enable_ssu, 0, in mpr_setup_sysctl()
1922 &sc->chain_alloc_fail, "chain allocation failures"); in mpr_setup_sysctl()
1926 &sc->spinup_wait_time, DEFAULT_SPINUP_WAIT, "seconds to wait for " in mpr_setup_sysctl()
1936 &sc->dump_reqs_alltypes, 0, in mpr_setup_sysctl()
1940 OID_AUTO, "use_phy_num", CTLFLAG_RD, &sc->use_phynum, 0, in mpr_setup_sysctl()
1945 &sc->prp_pages_free, 0, "number of free PRP pages"); in mpr_setup_sysctl()
1949 &sc->prp_pages_free_lowwater, 0, "lowest number of free PRP pages"); in mpr_setup_sysctl()
1953 &sc->prp_page_alloc_fail, "PRP page allocation failures"); in mpr_setup_sysctl()
1996 debug = sc->mpr_debug; in mpr_debug_sysctl()
2003 if (debug & string->flag) in mpr_debug_sysctl()
2004 sbuf_printf(sbuf, ",%s", string->name); in mpr_debug_sysctl()
2010 if (error || req->newptr == NULL) in mpr_debug_sysctl()
2013 len = req->newlen - req->newidx; in mpr_debug_sysctl()
2041 } else if (*list == '-') { in mpr_parse_debug()
2060 if (strcasecmp(token, string->name) == 0) { in mpr_parse_debug()
2061 flags |= string->flag; in mpr_parse_debug()
2069 sc->mpr_debug = flags; in mpr_parse_debug()
2072 sc->mpr_debug |= flags; in mpr_parse_debug()
2075 sc->mpr_debug &= (~flags); in mpr_parse_debug()
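
The parser above treats a leading '+' as "set these flags", a leading '-' as "clear these flags", and no prefix as "replace the whole mask". Assuming unit 0 and the driver's debug_level sysctl, usage would look like:

	# sysctl dev.mpr.0.debug_level="+trace"      # add a flag
	# sysctl dev.mpr.0.debug_level="-trace"      # clear a flag
	# sysctl dev.mpr.0.debug_level="info,fault"  # replace the set
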
2109 numreqs = sc->num_reqs; in mpr_dump_reqs()
2111 if (req->newptr != NULL) in mpr_dump_reqs()
2114 if (smid == 0 || smid > sc->num_reqs) in mpr_dump_reqs()
2116 if (numreqs <= 0 || (numreqs + smid > sc->num_reqs)) in mpr_dump_reqs()
2117 numreqs = sc->num_reqs; in mpr_dump_reqs()
2122 cm = &sc->commands[i]; in mpr_dump_reqs()
2123 if ((sc->dump_reqs_alltypes == 0) && (cm->cm_state != state)) in mpr_dump_reqs()
2126 hdr.state = cm->cm_state; in mpr_dump_reqs()
2128 hdr.deschi = cm->cm_desc.Words.High; in mpr_dump_reqs()
2129 hdr.desclo = cm->cm_desc.Words.Low; in mpr_dump_reqs()
2130 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, in mpr_dump_reqs()
2134 sbuf_bcat(sb, cm->cm_req, 128); in mpr_dump_reqs()
2135 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, in mpr_dump_reqs()
2137 sbuf_bcat(sb, chain->chain, 128); in mpr_dump_reqs()
2153 mtx_init(&sc->mpr_mtx, "MPR lock", NULL, MTX_DEF); in mpr_attach()
2154 callout_init_mtx(&sc->periodic, &sc->mpr_mtx, 0); in mpr_attach()
2155 callout_init_mtx(&sc->device_check_callout, &sc->mpr_mtx, 0); in mpr_attach()
2156 TAILQ_INIT(&sc->event_list); in mpr_attach()
2157 timevalclear(&sc->lastfail); in mpr_attach()
2165 sc->facts = malloc(sizeof(MPI2_IOC_FACTS_REPLY), M_MPR, in mpr_attach()
2167 if (!sc->facts) { in mpr_attach()
2175 * A Diag Reset will also call mpr_iocfacts_allocate and re-read the IOC in mpr_attach()
2194 sc->mpr_ich.ich_func = mpr_startup; in mpr_attach()
2195 sc->mpr_ich.ich_arg = sc; in mpr_attach()
2196 if (config_intrhook_establish(&sc->mpr_ich) != 0) { in mpr_attach()
2205 sc->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final, in mpr_attach()
2208 if (sc->shutdown_eh == NULL) in mpr_attach()
2214 sc->mpr_flags |= MPR_FLAGS_ATTACH_DONE; in mpr_attach()
2220 /* Run through any late-start handlers. */
2239 config_intrhook_disestablish(&sc->mpr_ich); in mpr_startup()
2240 sc->mpr_ich.ich_arg = NULL; in mpr_startup()
2253 if (sc->mpr_flags & MPR_FLAGS_SHUTDOWN) in mpr_periodic()
2266 callout_reset_sbt(&sc->periodic, MPR_PERIODIC_DELAY * SBT_1S, 0, in mpr_periodic()
2278 switch (event->Event) { in mpr_log_evt_handler()
2281 if (sc->mpr_debug & MPR_EVENT) in mpr_log_evt_handler()
2282 hexdump(event->EventData, event->EventDataLength, NULL, in mpr_log_evt_handler()
2286 entry = (MPI2_EVENT_DATA_LOG_ENTRY_ADDED *)event->EventData; in mpr_log_evt_handler()
2288 "0x%x Sequence %d:\n", entry->LogEntryQualifier, in mpr_log_evt_handler()
2289 entry->LogSequence); in mpr_log_evt_handler()
2307 &sc->mpr_log_eh); in mpr_attach_log()
2316 if (sc->mpr_log_eh != NULL) in mpr_detach_log()
2317 mpr_deregister_events(sc, sc->mpr_log_eh); in mpr_detach_log()
2333 sc->mpr_flags |= MPR_FLAGS_SHUTDOWN; in mpr_free()
2336 callout_drain(&sc->periodic); in mpr_free()
2337 callout_drain(&sc->device_check_callout); in mpr_free()
2356 if (sc->facts != NULL) in mpr_free()
2357 free(sc->facts, M_MPR); in mpr_free()
2365 if (sc->sysctl_tree != NULL) in mpr_free()
2366 sysctl_ctx_free(&sc->sysctl_ctx); in mpr_free()
2369 if (sc->shutdown_eh != NULL) in mpr_free()
2370 EVENTHANDLER_DEREGISTER(shutdown_final, sc->shutdown_eh); in mpr_free()
2372 mtx_destroy(&sc->mpr_mtx); in mpr_free()
2388 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE, in mpr_complete_command()
2389 ("command not inqueue, state = %u\n", cm->cm_state)); in mpr_complete_command()
2390 cm->cm_state = MPR_CM_STATE_BUSY; in mpr_complete_command()
2391 if (cm->cm_flags & MPR_CM_FLAGS_POLLED) in mpr_complete_command()
2392 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; in mpr_complete_command()
2394 if (cm->cm_complete != NULL) { in mpr_complete_command()
2397 __func__, cm, cm->cm_complete, cm->cm_complete_data, in mpr_complete_command()
2398 cm->cm_reply); in mpr_complete_command()
2399 cm->cm_complete(sc, cm); in mpr_complete_command()
2402 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { in mpr_complete_command()
2407 if (sc->io_cmds_active != 0) { in mpr_complete_command()
2408 sc->io_cmds_active--; in mpr_complete_command()
2411 "out of sync - resynching to 0\n"); in mpr_complete_command()
2467 sc_status = le16toh(mpi_reply->IOCStatus); in mpr_display_reply_info()
2469 mpr_sas_log_info(sc, le32toh(mpi_reply->IOCLogInfo)); in mpr_display_reply_info()
2483 * needed for both INTx interrupts and driver-driven polling in mpr_intr()
2529 pq = sc->replypostindex; in mpr_intr_locked()
2532 __func__, sc, sc->replypostindex); in mpr_intr_locked()
2536 desc = &sc->post_queue[sc->replypostindex]; in mpr_intr_locked()
2548 flags = desc->Default.ReplyFlags & in mpr_intr_locked()
2551 (le32toh(desc->Words.High) == 0xffffffff)) in mpr_intr_locked()
2561 if (++sc->replypostindex >= sc->pqdepth) in mpr_intr_locked()
2562 sc->replypostindex = 0; in mpr_intr_locked()
2568 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)]; in mpr_intr_locked()
2569 cm->cm_reply = NULL; in mpr_intr_locked()
2577 * Re-compose the reply address from the address in mpr_intr_locked()
2583 * (sc->reply_frames). in mpr_intr_locked()
2585 baddr = le32toh(desc->AddressReply.ReplyFrameAddress); in mpr_intr_locked()
2586 reply = sc->reply_frames + in mpr_intr_locked()
2587 (baddr - ((uint32_t)sc->reply_busaddr)); in mpr_intr_locked()
2594 if ((reply < sc->reply_frames) in mpr_intr_locked()
2595 || (reply > (sc->reply_frames + in mpr_intr_locked()
2596 (sc->fqdepth * sc->replyframesz)))) { in mpr_intr_locked()
2601 sc->reply_frames, sc->fqdepth, in mpr_intr_locked()
2602 sc->replyframesz); in mpr_intr_locked()
2604 /* LSI-TODO. See Linux Code for Graceful exit */ in mpr_intr_locked()
2607 if (le16toh(desc->AddressReply.SMID) == 0) { in mpr_intr_locked()
2608 if (((MPI2_DEFAULT_REPLY *)reply)->Function == in mpr_intr_locked()
2619 if ((le16toh(rel_rep->IOCStatus) & in mpr_intr_locked()
2624 &sc->fw_diag_buffer_list[ in mpr_intr_locked()
2625 rel_rep->BufferType]; in mpr_intr_locked()
2626 pBuffer->valid_data = TRUE; in mpr_intr_locked()
2627 pBuffer->owned_by_firmware = in mpr_intr_locked()
2629 pBuffer->immediate = FALSE; in mpr_intr_locked()
2636 cm = &sc->commands[ in mpr_intr_locked()
2637 le16toh(desc->AddressReply.SMID)]; in mpr_intr_locked()
2638 if (cm->cm_state == MPR_CM_STATE_INQUEUE) { in mpr_intr_locked()
2639 cm->cm_reply = reply; in mpr_intr_locked()
2640 cm->cm_reply_data = in mpr_intr_locked()
2641 le32toh(desc->AddressReply. in mpr_intr_locked()
2647 cm->cm_state, cm); in mpr_intr_locked()
2658 desc->Default.ReplyFlags); in mpr_intr_locked()
2665 if (cm->cm_reply) in mpr_intr_locked()
2666 mpr_display_reply_info(sc, cm->cm_reply); in mpr_intr_locked()
2671 if (pq != sc->replypostindex) { in mpr_intr_locked()
2673 __func__, sc, sc->replypostindex); in mpr_intr_locked()
2675 sc->replypostindex); in mpr_intr_locked()
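
Pulling the fragments of mpr_intr_locked() together: the handler consumes post-queue slots until it reaches an unused descriptor (type UNUSED, or a slot still holding the 0xffffffff fill pattern), wraps the index at pqdepth, and publishes the new index to the IOC only once at the end. A hedged sketch of that loop shape, using the field names and MPI2_ constants seen above:

	for (;;) {
		desc = &sc->post_queue[sc->replypostindex];
		flags = desc->Default.ReplyFlags &
		    MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
		if (flags == MPI2_RPY_DESCRIPT_FLAGS_UNUSED ||
		    le32toh(desc->Words.High) == 0xffffffff)
			break;				/* ring drained */
		/* ... dispatch on flags, then retire the slot ... */
		desc->Words.Low = 0xffffffff;
		desc->Words.High = 0xffffffff;
		if (++sc->replypostindex >= sc->pqdepth)
			sc->replypostindex = 0;
	}
	if (pq != sc->replypostindex)
		mpr_regwrite(sc, MPI2_REPLY_POST_HOST_INDEX_OFFSET,
		    sc->replypostindex);
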
2688 event = le16toh(reply->Event); in mpr_dispatch_event()
2689 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { in mpr_dispatch_event()
2690 if (isset(eh->mask, event)) { in mpr_dispatch_event()
2691 eh->callback(sc, data, reply); in mpr_dispatch_event()
2713 if (cm->cm_reply) in mpr_reregister_events_complete()
2715 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); in mpr_reregister_events_complete()
2736 eh->callback = cb; in mpr_register_events()
2737 eh->data = data; in mpr_register_events()
2738 TAILQ_INSERT_TAIL(&sc->event_list, eh, eh_list); in mpr_register_events()
2759 bcopy(mask, &handle->mask[0], 16); in mpr_update_events()
2760 memset(sc->event_mask, 0xff, 16); in mpr_update_events()
2762 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { in mpr_update_events()
2764 sc->event_mask[i] &= ~eh->mask[i]; in mpr_update_events()
2769 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; in mpr_update_events()
2770 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; in mpr_update_events()
2771 evtreq->MsgFlags = 0; in mpr_update_events()
2772 evtreq->SASBroadcastPrimitiveMasks = 0; in mpr_update_events()
2775 u_char fullmask[sizeof(evtreq->EventMasks)]; in mpr_update_events()
2777 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, sizeof(fullmask)); in mpr_update_events()
2780 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, sizeof(sc->event_mask)); in mpr_update_events()
2782 evtreq->EventMasks[i] = htole32(evtreq->EventMasks[i]); in mpr_update_events()
2784 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; in mpr_update_events()
2785 cm->cm_data = NULL; in mpr_update_events()
2789 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; in mpr_update_events()
2791 (reply->IOCStatus & MPI2_IOCSTATUS_MASK) != MPI2_IOCSTATUS_SUCCESS) in mpr_update_events()
2816 memset(sc->event_mask, 0xff, 16); in mpr_reregister_events()
2818 TAILQ_FOREACH(eh, &sc->event_list, eh_list) { in mpr_reregister_events()
2820 sc->event_mask[i] &= ~eh->mask[i]; in mpr_reregister_events()
2825 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; in mpr_reregister_events()
2826 evtreq->Function = MPI2_FUNCTION_EVENT_NOTIFICATION; in mpr_reregister_events()
2827 evtreq->MsgFlags = 0; in mpr_reregister_events()
2828 evtreq->SASBroadcastPrimitiveMasks = 0; in mpr_reregister_events()
2831 u_char fullmask[sizeof(evtreq->EventMasks)]; in mpr_reregister_events()
2833 bcopy(fullmask, (uint8_t *)&evtreq->EventMasks, sizeof(fullmask)); in mpr_reregister_events()
2836 bcopy(sc->event_mask, (uint8_t *)&evtreq->EventMasks, sizeof(sc->event_mask)); in mpr_reregister_events()
2838 evtreq->EventMasks[i] = htole32(evtreq->EventMasks[i]); in mpr_reregister_events()
2840 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; in mpr_reregister_events()
2841 cm->cm_data = NULL; in mpr_reregister_events()
2842 cm->cm_complete = mpr_reregister_events_complete; in mpr_reregister_events()
2855 TAILQ_REMOVE(&sc->event_list, handle, eh_list); in mpr_deregister_events()
2861 * mpr_build_nvme_prp - This function is called for NVMe end devices to build a
2876 * non-contiguous SGL into a PRP in this case. All PRPs will describe contiguous
2888 * Each 64-bit PRP entry comprises an address and an offset field. The address
2891 * first element in a PRP list may contain a non-zero offset, implying that all
2896 * being described by the list begins at a non-zero offset within the first page,
2897 * then the first PRP element will contain a non-zero offset indicating where the
2933 prp1_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + in mpr_build_nvme_prp()
2935 prp2_entry = (uint64_t *)(nvme_encap_request->NVMe_Command + in mpr_build_nvme_prp()
2948 prp_page = (uint64_t *)prp_page_info->prp_page; in mpr_build_nvme_prp()
2949 prp_page_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; in mpr_build_nvme_prp()
2955 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); in mpr_build_nvme_prp()
2961 page_mask = PAGE_SIZE - 1; in mpr_build_nvme_prp()
2990 * boundary - prp_size (8 bytes). in mpr_build_nvme_prp()
2999 * - bump the current memory pointer to the next in mpr_build_nvme_prp()
3001 * - set the PRP Entry to point to that page. This is in mpr_build_nvme_prp()
3003 * - bump the PRP Entry pointer the start of the next in mpr_build_nvme_prp()
3005 * no need to get a new page - it's just the next in mpr_build_nvme_prp()
3016 entry_len = PAGE_SIZE - offset; in mpr_build_nvme_prp()
3090 length -= entry_len; in mpr_build_nvme_prp()
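
Each iteration above emits an entry covering either the rest of the current page or the rest of the transfer, whichever is smaller. For a buffer starting part-way into a page, e.g. at offset 0x300 of a 4 KiB page (dma_addr is an illustrative name for the segment's bus address):

	offset    = dma_addr & page_mask;	/* 0x300 */
	entry_len = PAGE_SIZE - offset;		/* 0xd00 = 3328 bytes */
	/* every later entry starts page-aligned, so entry_len == PAGE_SIZE */
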
3095 * mpr_check_pcie_native_sgl - This function is called for PCIe end devices to
3131 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS)) in mpr_check_pcie_native_sgl()
3135 page_mask = PAGE_SIZE - 1; in mpr_check_pcie_native_sgl()
3142 sges_in_segment = (sc->reqframesz - in mpr_check_pcie_native_sgl()
3154 * the first SG entry, then if this first size in the page in mpr_check_pcie_native_sgl()
3167 first_page_data_size = PAGE_SIZE - first_page_offset; in mpr_check_pcie_native_sgl()
3179 * Check if first SG entry size is < residual beyond 4 in mpr_check_pcie_native_sgl()
3183 (buff_len - (PAGE_SIZE * 4))) in mpr_check_pcie_native_sgl()
3202 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge; in mpr_check_pcie_native_sgl()
3220 curr_buff = (uint32_t *)prp_page_info->prp_page; in mpr_check_pcie_native_sgl()
3221 msg_phys = (uint64_t *)(uintptr_t)prp_page_info->prp_page_busaddr; in mpr_check_pcie_native_sgl()
3227 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); in mpr_check_pcie_native_sgl()
3242 main_chain_element->Address.High = in mpr_check_pcie_native_sgl()
3244 main_chain_element->Address.Low = in mpr_check_pcie_native_sgl()
3246 main_chain_element->NextChainOffset = 0; in mpr_check_pcie_native_sgl()
3247 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | in mpr_check_pcie_native_sgl()
3258 * page or partial page. We need to split up OS SG entries if they are in mpr_check_pcie_native_sgl()
3269 ptr_first_sgl = (uint32_t *)cm->cm_sge; in mpr_check_pcie_native_sgl()
3272 /* Get physical address and length of this SG entry. */ in mpr_check_pcie_native_sgl()
3277 * Check whether a given SGE buffer lies on a non-PAGED in mpr_check_pcie_native_sgl()
3292 if (i != (segs_left - 1)) { in mpr_check_pcie_native_sgl()
3306 * at page boundary - prp_size. in mpr_check_pcie_native_sgl()
3325 entry_len = PAGE_SIZE - offset; in mpr_check_pcie_native_sgl()
3360 length -= entry_len; in mpr_check_pcie_native_sgl()
3365 main_chain_element->Length = htole32(num_entries * prp_size); in mpr_check_pcie_native_sgl()
3381 struct mpr_softc *sc = cm->cm_sc; in mpr_add_chain()
3393 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) { in mpr_add_chain()
3400 if (cm->cm_sglsize < sgc_size) in mpr_add_chain()
3403 chain = mpr_alloc_chain(cm->cm_sc); in mpr_add_chain()
3408 * Note: a doubly-linked list is used to make it easier to walk for in mpr_add_chain()
3411 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); in mpr_add_chain()
3419 if (cm->cm_sglsize < (sgc_size * segsleft)) { in mpr_add_chain()
3430 current_segs = (cm->cm_sglsize / sgc_size) - 1; in mpr_add_chain()
3431 rem_segs = segsleft - current_segs; in mpr_add_chain()
3432 segs_per_frame = sc->chain_frame_size / sgc_size; in mpr_add_chain()
3434 next_chain_offset = segs_per_frame - 1; in mpr_add_chain()
3437 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain; in mpr_add_chain()
3438 ieee_sgc->Length = next_chain_offset ? in mpr_add_chain()
3439 htole32((uint32_t)sc->chain_frame_size) : in mpr_add_chain()
3441 ieee_sgc->NextChainOffset = next_chain_offset; in mpr_add_chain()
3442 ieee_sgc->Flags = (MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT | in mpr_add_chain()
3444 ieee_sgc->Address.Low = htole32(chain->chain_busaddr); in mpr_add_chain()
3445 ieee_sgc->Address.High = htole32(chain->chain_busaddr >> 32); in mpr_add_chain()
3446 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple; in mpr_add_chain()
3447 req = (MPI2_REQUEST_HEADER *)cm->cm_req; in mpr_add_chain()
3448 req->ChainOffset = (sc->chain_frame_size - sgc_size) >> 4; in mpr_add_chain()
3450 cm->cm_sglsize = sc->chain_frame_size; in mpr_add_chain()
3455 * Add one scatter-gather element to the scatter-gather list for a command.
3472 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) { in mpr_push_sge()
3473 mpr_dprint(cm->cm_sc, MPR_ERROR, in mpr_push_sge()
3488 * If this is a bi-directional request, need to account for that in mpr_push_sge()
3489 * here. Save the pre-filled sge values. These will be used in mpr_push_sge()
3491 * cm_out_len is non-zero, this is a bi-directional request, so in mpr_push_sge()
3494 * 2 SGL's for a bi-directional request, they both use the same in mpr_push_sge()
3497 saved_buf_len = sge->FlagsLength & 0x00FFFFFF; in mpr_push_sge()
3498 saved_address_low = sge->Address.Low; in mpr_push_sge()
3499 saved_address_high = sge->Address.High; in mpr_push_sge()
3500 if (cm->cm_out_len) { in mpr_push_sge()
3501 sge->FlagsLength = cm->cm_out_len | in mpr_push_sge()
3507 cm->cm_sglsize -= len; in mpr_push_sge()
3509 sge_flags = sge->FlagsLength; in mpr_push_sge()
3510 sge->FlagsLength = htole32(sge_flags); in mpr_push_sge()
3511 bcopy(sge, cm->cm_sge, len); in mpr_push_sge()
3512 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); in mpr_push_sge()
3514 sge->FlagsLength = saved_buf_len | in mpr_push_sge()
3521 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { in mpr_push_sge()
3522 sge->FlagsLength |= in mpr_push_sge()
3526 sge->FlagsLength |= in mpr_push_sge()
3530 sge->Address.Low = saved_address_low; in mpr_push_sge()
3531 sge->Address.High = saved_address_high; in mpr_push_sge()
3533 cm->cm_sglsize -= len; in mpr_push_sge()
3535 sge_flags = sge->FlagsLength; in mpr_push_sge()
3536 sge->FlagsLength = htole32(sge_flags); in mpr_push_sge()
3537 bcopy(sge, cm->cm_sge, len); in mpr_push_sge()
3538 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); in mpr_push_sge()
3543 * Add one IEEE scatter-gather element (chain or simple) to the IEEE scatter-
3565 if (cm->cm_sglsize < ieee_sge_size) in mpr_push_ieee_sge()
3568 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) { in mpr_push_ieee_sge()
3575 * If this is a bi-directional request, need to account for that in mpr_push_ieee_sge()
3576 * here. Save the pre-filled sge values. These will be used in mpr_push_ieee_sge()
3578 * cm_out_len is non-zero, this is a bi-directional request, so in mpr_push_ieee_sge()
3581 * 2 SGL's for a bi-directional request, they both use the same in mpr_push_ieee_sge()
3584 saved_buf_len = sge->Length; in mpr_push_ieee_sge()
3585 saved_address_low = sge->Address.Low; in mpr_push_ieee_sge()
3586 saved_address_high = sge->Address.High; in mpr_push_ieee_sge()
3587 if (cm->cm_out_len) { in mpr_push_ieee_sge()
3588 sge->Length = cm->cm_out_len; in mpr_push_ieee_sge()
3589 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | in mpr_push_ieee_sge()
3591 cm->cm_sglsize -= ieee_sge_size; in mpr_push_ieee_sge()
3593 sge_length = sge->Length; in mpr_push_ieee_sge()
3594 sge->Length = htole32(sge_length); in mpr_push_ieee_sge()
3595 bcopy(sgep, cm->cm_sge, ieee_sge_size); in mpr_push_ieee_sge()
3596 cm->cm_sge = in mpr_push_ieee_sge()
3597 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + in mpr_push_ieee_sge()
3600 sge->Length = saved_buf_len; in mpr_push_ieee_sge()
3601 sge->Flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT | in mpr_push_ieee_sge()
3604 sge->Address.Low = saved_address_low; in mpr_push_ieee_sge()
3605 sge->Address.High = saved_address_high; in mpr_push_ieee_sge()
3608 cm->cm_sglsize -= ieee_sge_size; in mpr_push_ieee_sge()
3610 sge_length = sge->Length; in mpr_push_ieee_sge()
3611 sge->Length = htole32(sge_length); in mpr_push_ieee_sge()
3612 bcopy(sgep, cm->cm_sge, ieee_sge_size); in mpr_push_ieee_sge()
3613 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + in mpr_push_ieee_sge()
3619 * Add one dma segment to the scatter-gather list for a command.
3628 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) { in mpr_add_dmaseg()
3637 * This driver always uses 64-bit address elements for in mpr_add_dmaseg()
3658 sc = cm->cm_sc; in mpr_data_cb()
3664 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { in mpr_data_cb()
3667 cm->cm_max_segs); in mpr_data_cb()
3671 * Set up DMA direction flags. Bi-directional requests are also handled in mpr_data_cb()
3675 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) { in mpr_data_cb()
3697 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) { in mpr_data_cb()
3703 /* Check if a native SG list is needed for an NVMe PCIe device. */ in mpr_data_cb()
3704 if (cm->cm_targ && cm->cm_targ->is_nvme && in mpr_data_cb()
3706 /* A native SG list was built, skip to end. */ in mpr_data_cb()
3711 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) { in mpr_data_cb()
3715 sflags, nsegs - i); in mpr_data_cb()
3718 if (ratecheck(&sc->lastfail, &mpr_chainfail_interval)) in mpr_data_cb()
3721 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED; in mpr_data_cb()
3728 cm->cm_state = MPR_CM_STATE_INQUEUE; in mpr_data_cb()
3735 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); in mpr_data_cb()
3752 * assumed that if you have a command in-hand, then you have enough credits
3760 if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) { in mpr_map_command()
3761 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, in mpr_map_command()
3762 &cm->cm_uio, mpr_data_cb2, cm, 0); in mpr_map_command()
3763 } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) { in mpr_map_command()
3764 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap, in mpr_map_command()
3765 cm->cm_data, mpr_data_cb, cm, 0); in mpr_map_command()
3766 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { in mpr_map_command()
3767 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, in mpr_map_command()
3768 cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0); in mpr_map_command()
3770 /* Add a zero-length element as needed */ in mpr_map_command()
3771 if (cm->cm_sge != NULL) in mpr_map_command()
3792 if (sc->mpr_flags & MPR_FLAGS_DIAGRESET) in mpr_wait_command()
3795 cm->cm_complete = NULL; in mpr_wait_command()
3796 cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP + MPR_CM_FLAGS_POLLED); in mpr_wait_command()
3804 if (curthread->td_no_sleeping) in mpr_wait_command()
3807 if (mtx_owned(&sc->mpr_mtx) && sleep_flag == CAN_SLEEP) { in mpr_wait_command()
3808 error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz); in mpr_wait_command()
3818 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { in mpr_wait_command()
3835 if (cm->cm_timeout_handler == NULL) { in mpr_wait_command()
3843 cm->cm_timeout_handler(sc, cm); in mpr_wait_command()
3844 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) { in mpr_wait_command()
3869 cm->cm_flags |= MPR_CM_FLAGS_POLLED; in mpr_request_polled()
3870 cm->cm_complete = NULL; in mpr_request_polled()
3874 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { in mpr_request_polled()
3877 if (mtx_owned(&sc->mpr_mtx)) in mpr_request_polled()
3878 msleep(&sc->msleep_fake_chan, &sc->mpr_mtx, 0, in mpr_request_polled()
3884 * Check for real-time timeout and fail if more than 60 seconds. in mpr_request_polled()
3894 cm->cm_state = MPR_CM_STATE_BUSY; in mpr_request_polled()
3901 if (sc->mpr_flags & MPR_FLAGS_REALLOCATED) { in mpr_request_polled()
3923 if (sc->mpr_flags & MPR_FLAGS_BUSY) { in mpr_read_config_page()
3932 req = (MPI2_CONFIG_REQUEST *)cm->cm_req; in mpr_read_config_page()
3933 req->Function = MPI2_FUNCTION_CONFIG; in mpr_read_config_page()
3934 req->Action = params->action; in mpr_read_config_page()
3935 req->SGLFlags = 0; in mpr_read_config_page()
3936 req->ChainOffset = 0; in mpr_read_config_page()
3937 req->PageAddress = params->page_address; in mpr_read_config_page()
3938 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { in mpr_read_config_page()
3941 hdr = ¶ms->hdr.Ext; in mpr_read_config_page()
3942 req->ExtPageType = hdr->ExtPageType; in mpr_read_config_page()
3943 req->ExtPageLength = hdr->ExtPageLength; in mpr_read_config_page()
3944 req->Header.PageType = MPI2_CONFIG_PAGETYPE_EXTENDED; in mpr_read_config_page()
3945 req->Header.PageLength = 0; /* Must be set to zero */ in mpr_read_config_page()
3946 req->Header.PageNumber = hdr->PageNumber; in mpr_read_config_page()
3947 req->Header.PageVersion = hdr->PageVersion; in mpr_read_config_page()
3951 hdr = ¶ms->hdr.Struct; in mpr_read_config_page()
3952 req->Header.PageType = hdr->PageType; in mpr_read_config_page()
3953 req->Header.PageNumber = hdr->PageNumber; in mpr_read_config_page()
3954 req->Header.PageLength = hdr->PageLength; in mpr_read_config_page()
3955 req->Header.PageVersion = hdr->PageVersion; in mpr_read_config_page()
3958 cm->cm_data = params->buffer; in mpr_read_config_page()
3959 cm->cm_length = params->length; in mpr_read_config_page()
3960 if (cm->cm_data != NULL) { in mpr_read_config_page()
3961 cm->cm_sge = &req->PageBufferSGE; in mpr_read_config_page()
3962 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); in mpr_read_config_page()
3963 cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; in mpr_read_config_page()
3965 cm->cm_sge = NULL; in mpr_read_config_page()
3966 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; in mpr_read_config_page()
3968 cm->cm_complete_data = params; in mpr_read_config_page()
3969 if (params->callback != NULL) { in mpr_read_config_page()
3970 cm->cm_complete = mpr_config_complete; in mpr_read_config_page()
4000 params = cm->cm_complete_data; in mpr_config_complete()
4002 if (cm->cm_data != NULL) { in mpr_config_complete()
4003 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, in mpr_config_complete()
4005 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); in mpr_config_complete()
4012 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { in mpr_config_complete()
4013 params->status = MPI2_IOCSTATUS_BUSY; in mpr_config_complete()
4017 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; in mpr_config_complete()
4019 params->status = MPI2_IOCSTATUS_BUSY; in mpr_config_complete()
4022 params->status = reply->IOCStatus; in mpr_config_complete()
4023 if (params->hdr.Struct.PageType == MPI2_CONFIG_PAGETYPE_EXTENDED) { in mpr_config_complete()
4024 params->hdr.Ext.ExtPageType = reply->ExtPageType; in mpr_config_complete()
4025 params->hdr.Ext.ExtPageLength = reply->ExtPageLength; in mpr_config_complete()
4026 params->hdr.Ext.PageType = reply->Header.PageType; in mpr_config_complete()
4027 params->hdr.Ext.PageNumber = reply->Header.PageNumber; in mpr_config_complete()
4028 params->hdr.Ext.PageVersion = reply->Header.PageVersion; in mpr_config_complete()
4030 params->hdr.Struct.PageType = reply->Header.PageType; in mpr_config_complete()
4031 params->hdr.Struct.PageNumber = reply->Header.PageNumber; in mpr_config_complete()
4032 params->hdr.Struct.PageLength = reply->Header.PageLength; in mpr_config_complete()
4033 params->hdr.Struct.PageVersion = reply->Header.PageVersion; in mpr_config_complete()
4038 if (params->callback != NULL) in mpr_config_complete()
4039 params->callback(sc, params); in mpr_config_complete()
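
A hedged usage sketch for this config-page path, touching only the mpr_config_params fields referenced above; leaving callback NULL makes mpr_read_config_page() run synchronously, and the completion copies the returned header and IOCStatus back into params:

	struct mpr_config_params params;
	int error;

	bzero(&params, sizeof(params));
	params.action = MPI2_CONFIG_ACTION_PAGE_HEADER;
	params.page_address = 0;
	params.hdr.Struct.PageType = MPI2_CONFIG_PAGETYPE_IOC;
	params.hdr.Struct.PageNumber = 8;
	params.buffer = NULL;		/* header fetch only */
	params.length = 0;
	params.callback = NULL;		/* run synchronously */
	error = mpr_read_config_page(sc, &params);
	if (error == 0 &&
	    (params.status & MPI2_IOCSTATUS_MASK) == MPI2_IOCSTATUS_SUCCESS) {
		/* params.hdr now valid; follow with a PAGE_READ_CURRENT. */
	}
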