Lines matching refs: cm — uses of the struct mpr_command pointer cm in the FreeBSD mpr(4) driver
100 struct mpr_command *cm);
103 static void mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm);
106 static void mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm);
758 struct mpr_command *cm; in mpr_iocfacts_free() local
816 cm = &sc->commands[i]; in mpr_iocfacts_free()
817 bus_dmamap_destroy(sc->buffer_dmat, cm->cm_dmamap); in mpr_iocfacts_free()
1127 mpr_enqueue_request(struct mpr_softc *sc, struct mpr_command *cm) in mpr_enqueue_request() argument
1133 cm->cm_desc.Default.SMID, cm, cm->cm_ccb); in mpr_enqueue_request()
1142 KASSERT(cm->cm_state == MPR_CM_STATE_BUSY, in mpr_enqueue_request()
1143 ("command not busy, state = %u\n", cm->cm_state)); in mpr_enqueue_request()
1144 cm->cm_state = MPR_CM_STATE_INQUEUE; in mpr_enqueue_request()
1147 rd.u.low = cm->cm_desc.Words.Low; in mpr_enqueue_request()
1151 rd.u.low = htole32(cm->cm_desc.Words.Low); in mpr_enqueue_request()
1152 rd.u.high = htole32(cm->cm_desc.Words.High); in mpr_enqueue_request()
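
The mpr_enqueue_request() matches capture two details: a command must be in the BUSY state when it is posted (the KASSERT at 1142) and is moved to INQUEUE, and on one of the two posting paths shown (1151-1152) the 64-bit request descriptor is split into two 32-bit words converted to little-endian with htole32() before being written to the controller. A tiny, self-contained sketch of that word split; sketch_htole32() and sketch_descriptor_words() are hypothetical stand-ins, not driver functions:

#include <stdint.h>

/*
 * Hypothetical stand-in for FreeBSD's htole32(): identity on little-endian
 * hosts, byte swap on big-endian ones.
 */
static inline uint32_t
sketch_htole32(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return (__builtin_bswap32(v));
#else
        return (v);
#endif
}

/*
 * Split a 64-bit request descriptor into two little-endian 32-bit words,
 * mirroring the rd.u.low/rd.u.high assignments at 1151-1152.
 */
static void
sketch_descriptor_words(uint64_t desc, uint32_t *lo, uint32_t *hi)
{
        *lo = sketch_htole32((uint32_t)(desc & 0xffffffffU));
        *hi = sketch_htole32((uint32_t)(desc >> 32));
}
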
1484 struct mpr_command *cm; in mpr_alloc_requests() local
1583 cm = &sc->commands[i]; in mpr_alloc_requests()
1584 cm->cm_req = sc->req_frames + i * sc->reqframesz; in mpr_alloc_requests()
1585 cm->cm_req_busaddr = sc->req_busaddr + i * sc->reqframesz; in mpr_alloc_requests()
1586 cm->cm_sense = &sc->sense_frames[i]; in mpr_alloc_requests()
1587 cm->cm_sense_busaddr = sc->sense_busaddr + i * MPR_SENSE_LEN; in mpr_alloc_requests()
1588 cm->cm_desc.Default.SMID = htole16(i); in mpr_alloc_requests()
1589 cm->cm_sc = sc; in mpr_alloc_requests()
1590 cm->cm_state = MPR_CM_STATE_BUSY; in mpr_alloc_requests()
1591 TAILQ_INIT(&cm->cm_chain_list); in mpr_alloc_requests()
1592 TAILQ_INIT(&cm->cm_prp_page_list); in mpr_alloc_requests()
1593 callout_init_mtx(&cm->cm_callout, &sc->mpr_mtx, 0); in mpr_alloc_requests()
1596 if (bus_dmamap_create(sc->buffer_dmat, 0, &cm->cm_dmamap) in mpr_alloc_requests()
1599 mpr_free_high_priority_command(sc, cm); in mpr_alloc_requests()
1601 mpr_free_command(sc, cm); in mpr_alloc_requests()
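
Taken together, the mpr_alloc_requests() matches show how each command slot is wired up: the i-th command gets the i-th request frame (virtual and bus address), the i-th sense buffer, and SMID i stored little-endian, plus empty chain/PRP lists, a callout, and a DMA map. A minimal sketch of just the indexing, using hypothetical stand-in types rather than the driver's real structures:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-in for the driver's per-command bookkeeping. */
struct sketch_cmd {
        uint8_t         *req;           /* request frame (virtual address) */
        uint64_t         req_busaddr;   /* request frame (bus address) */
        uint8_t         *sense;         /* per-command sense buffer */
        uint16_t         smid;          /* stored with htole16() in the driver */
};

/*
 * Carve one request frame and one sense buffer out of the contiguous DMA
 * allocations for every command; the SMID is simply the array index.
 * MPI reserves SMID 0, so indexing starts at 1.
 */
static void
sketch_init_commands(struct sketch_cmd *cmds, int ncmds,
    uint8_t *req_frames, uint64_t req_busaddr, size_t reqframesz,
    uint8_t *sense_frames, size_t sense_len)
{
        for (int i = 1; i < ncmds; i++) {
                cmds[i].req = req_frames + (size_t)i * reqframesz;
                cmds[i].req_busaddr = req_busaddr + (uint64_t)i * reqframesz;
                cmds[i].sense = sense_frames + (size_t)i * sense_len;
                cmds[i].smid = (uint16_t)i;
        }
}
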
2094 struct mpr_command *cm; in mpr_dump_reqs() local
2122 cm = &sc->commands[i]; in mpr_dump_reqs()
2123 if ((sc->dump_reqs_alltypes == 0) && (cm->cm_state != state)) in mpr_dump_reqs()
2126 hdr.state = cm->cm_state; in mpr_dump_reqs()
2128 hdr.deschi = cm->cm_desc.Words.High; in mpr_dump_reqs()
2129 hdr.desclo = cm->cm_desc.Words.Low; in mpr_dump_reqs()
2130 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, in mpr_dump_reqs()
2134 sbuf_bcat(sb, cm->cm_req, 128); in mpr_dump_reqs()
2135 TAILQ_FOREACH_SAFE(chain, &cm->cm_chain_list, chain_link, in mpr_dump_reqs()
2379 mpr_complete_command(struct mpr_softc *sc, struct mpr_command *cm) in mpr_complete_command() argument
2383 if (cm == NULL) { in mpr_complete_command()
2388 KASSERT(cm->cm_state == MPR_CM_STATE_INQUEUE, in mpr_complete_command()
2389 ("command not inqueue, state = %u\n", cm->cm_state)); in mpr_complete_command()
2390 cm->cm_state = MPR_CM_STATE_BUSY; in mpr_complete_command()
2391 if (cm->cm_flags & MPR_CM_FLAGS_POLLED) in mpr_complete_command()
2392 cm->cm_flags |= MPR_CM_FLAGS_COMPLETE; in mpr_complete_command()
2394 if (cm->cm_complete != NULL) { in mpr_complete_command()
2397 __func__, cm, cm->cm_complete, cm->cm_complete_data, in mpr_complete_command()
2398 cm->cm_reply); in mpr_complete_command()
2399 cm->cm_complete(sc, cm); in mpr_complete_command()
2402 if (cm->cm_flags & MPR_CM_FLAGS_WAKEUP) { in mpr_complete_command()
2403 mpr_dprint(sc, MPR_TRACE, "waking up %p\n", cm); in mpr_complete_command()
2404 wakeup(cm); in mpr_complete_command()
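
The mpr_complete_command() matches outline the hand-off from the interrupt path back to the submitter: the command must be INQUEUE and is moved back to BUSY, polled commands get a COMPLETE flag for their poller to observe, and then a completion callback runs and/or a sleeping waiter is woken. A simplified, self-contained version of that flow; the types, flag values, and sketch_wakeup() below are stand-ins for the driver's own definitions and for wakeup(9):

#include <assert.h>
#include <stddef.h>

enum sketch_state { SKETCH_FREE, SKETCH_BUSY, SKETCH_INQUEUE };

#define SKETCH_FLAG_POLLED      0x01    /* submitter polls for COMPLETE */
#define SKETCH_FLAG_COMPLETE    0x02    /* set here, consumed by the poller */
#define SKETCH_FLAG_WAKEUP      0x04    /* submitter sleeps on the command */

struct sketch_softc;
struct sketch_cmd {
        enum sketch_state         state;
        unsigned                  flags;
        void                    (*complete)(struct sketch_softc *,
                                    struct sketch_cmd *);
};

/* Stand-in for wakeup(9); a real driver wakes the thread sleeping on cmd. */
static void
sketch_wakeup(struct sketch_cmd *cmd)
{
        (void)cmd;
}

static void
sketch_complete_command(struct sketch_softc *sc, struct sketch_cmd *cmd)
{
        if (cmd == NULL)
                return;

        /* The driver KASSERTs this: only queued commands may complete. */
        assert(cmd->state == SKETCH_INQUEUE);
        cmd->state = SKETCH_BUSY;

        if (cmd->flags & SKETCH_FLAG_POLLED)
                cmd->flags |= SKETCH_FLAG_COMPLETE;

        if (cmd->complete != NULL)
                cmd->complete(sc, cmd);

        if (cmd->flags & SKETCH_FLAG_WAKEUP)
                sketch_wakeup(cmd);
}
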
2523 struct mpr_command *cm = NULL; in mpr_intr_locked() local
2535 cm = NULL; in mpr_intr_locked()
2568 cm = &sc->commands[le16toh(desc->SCSIIOSuccess.SMID)]; in mpr_intr_locked()
2569 cm->cm_reply = NULL; in mpr_intr_locked()
2636 cm = &sc->commands[ in mpr_intr_locked()
2638 if (cm->cm_state == MPR_CM_STATE_INQUEUE) { in mpr_intr_locked()
2639 cm->cm_reply = reply; in mpr_intr_locked()
2640 cm->cm_reply_data = in mpr_intr_locked()
2647 cm->cm_state, cm); in mpr_intr_locked()
2659 cm = NULL; in mpr_intr_locked()
2663 if (cm != NULL) { in mpr_intr_locked()
2665 if (cm->cm_reply) in mpr_intr_locked()
2666 mpr_display_reply_info(sc,cm->cm_reply); in mpr_intr_locked()
2667 mpr_complete_command(sc, cm); in mpr_intr_locked()
2709 mpr_reregister_events_complete(struct mpr_softc *sc, struct mpr_command *cm) in mpr_reregister_events_complete() argument
2713 if (cm->cm_reply) in mpr_reregister_events_complete()
2715 (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply); in mpr_reregister_events_complete()
2717 mpr_free_command(sc, cm); in mpr_reregister_events_complete()
2752 struct mpr_command *cm = NULL; in mpr_update_events() local
2767 if ((cm = mpr_alloc_command(sc)) == NULL) in mpr_update_events()
2769 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; in mpr_update_events()
2784 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; in mpr_update_events()
2785 cm->cm_data = NULL; in mpr_update_events()
2787 error = mpr_request_polled(sc, &cm); in mpr_update_events()
2788 if (cm != NULL) in mpr_update_events()
2789 reply = (MPI2_EVENT_NOTIFICATION_REPLY *)cm->cm_reply; in mpr_update_events()
2799 if (cm != NULL) in mpr_update_events()
2800 mpr_free_command(sc, cm); in mpr_update_events()
2808 struct mpr_command *cm; in mpr_reregister_events() local
2823 if ((cm = mpr_alloc_command(sc)) == NULL) in mpr_reregister_events()
2825 evtreq = (MPI2_EVENT_NOTIFICATION_REQUEST *)cm->cm_req; in mpr_reregister_events()
2840 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; in mpr_reregister_events()
2841 cm->cm_data = NULL; in mpr_reregister_events()
2842 cm->cm_complete = mpr_reregister_events_complete; in mpr_reregister_events()
2844 error = mpr_map_command(sc, cm); in mpr_reregister_events()
2909 mpr_build_nvme_prp(struct mpr_softc *sc, struct mpr_command *cm, in mpr_build_nvme_prp() argument
2955 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); in mpr_build_nvme_prp()
3105 mpr_check_pcie_native_sgl(struct mpr_softc *sc, struct mpr_command *cm, in mpr_check_pcie_native_sgl() argument
3131 if ((cm->cm_targ->MDTS > 0) && (buff_len > cm->cm_targ->MDTS)) in mpr_check_pcie_native_sgl()
3202 main_chain_element = (pMpi25IeeeSgeChain64_t)cm->cm_sge; in mpr_check_pcie_native_sgl()
3227 TAILQ_INSERT_TAIL(&cm->cm_prp_page_list, prp_page_info, prp_page_link); in mpr_check_pcie_native_sgl()
3269 ptr_first_sgl = (uint32_t *)cm->cm_sge; in mpr_check_pcie_native_sgl()
3379 mpr_add_chain(struct mpr_command *cm, int segsleft) in mpr_add_chain() argument
3381 struct mpr_softc *sc = cm->cm_sc; in mpr_add_chain()
3393 if (cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE) { in mpr_add_chain()
3400 if (cm->cm_sglsize < sgc_size) in mpr_add_chain()
3403 chain = mpr_alloc_chain(cm->cm_sc); in mpr_add_chain()
3411 TAILQ_INSERT_TAIL(&cm->cm_chain_list, chain, chain_link); in mpr_add_chain()
3419 if (cm->cm_sglsize < (sgc_size * segsleft)) { in mpr_add_chain()
3430 current_segs = (cm->cm_sglsize / sgc_size) - 1; in mpr_add_chain()
3437 ieee_sgc = &((MPI25_SGE_IO_UNION *)cm->cm_sge)->IeeeChain; in mpr_add_chain()
3446 cm->cm_sge = &((MPI25_SGE_IO_UNION *)chain->chain)->IeeeSimple; in mpr_add_chain()
3447 req = (MPI2_REQUEST_HEADER *)cm->cm_req; in mpr_add_chain()
3450 cm->cm_sglsize = sc->chain_frame_size; in mpr_add_chain()
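
The mpr_add_chain() matches show the space accounting behind SGL chaining: if the segments still to be described do not fit in the remaining SGL space (cm_sglsize, line 3419), the last slot of the current frame must hold a chain element, so only cm_sglsize / sge_size - 1 more simple SGEs fit before the list continues in a freshly allocated chain frame (line 3430), whose chain_frame_size then becomes the new cm_sglsize. A small sketch of that arithmetic with hypothetical names; for example, with 128 bytes of SGL space, 16-byte IEEE SGEs, and 12 segments left, 7 SGEs are placed here and the 8th slot takes the chain element:

#include <stddef.h>

/*
 * Given the bytes left in the current SGL area, the size of one SGE, and
 * the number of segments still to be described, return how many simple
 * SGEs may be placed in this frame before a chain element is required.
 * A return value equal to segsleft means no chain is needed.
 */
static int
sketch_sges_before_chain(size_t sglsize, size_t sge_size, int segsleft)
{
        if (sglsize >= sge_size * (size_t)segsleft)
                return (segsleft);              /* everything fits */
        return ((int)(sglsize / sge_size) - 1); /* reserve the last slot */
}
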
3461 mpr_push_sge(struct mpr_command *cm, MPI2_SGE_SIMPLE64 *sge, size_t len, in mpr_push_sge() argument
3472 if (cm->cm_sglsize < (segsleft * sizeof(MPI2_SGE_SIMPLE64))) { in mpr_push_sge()
3473 mpr_dprint(cm->cm_sc, MPR_ERROR, in mpr_push_sge()
3500 if (cm->cm_out_len) { in mpr_push_sge()
3501 sge->FlagsLength = cm->cm_out_len | in mpr_push_sge()
3507 cm->cm_sglsize -= len; in mpr_push_sge()
3511 bcopy(sge, cm->cm_sge, len); in mpr_push_sge()
3512 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); in mpr_push_sge()
3521 if (cm->cm_flags & MPR_CM_FLAGS_DATAIN) { in mpr_push_sge()
3533 cm->cm_sglsize -= len; in mpr_push_sge()
3537 bcopy(sge, cm->cm_sge, len); in mpr_push_sge()
3538 cm->cm_sge = (MPI2_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + len); in mpr_push_sge()
3548 mpr_push_ieee_sge(struct mpr_command *cm, void *sgep, int segsleft) in mpr_push_ieee_sge() argument
3565 if (cm->cm_sglsize < ieee_sge_size) in mpr_push_ieee_sge()
3568 if ((segsleft >= 2) && (cm->cm_sglsize < (ieee_sge_size * 2))) { in mpr_push_ieee_sge()
3569 if ((error = mpr_add_chain(cm, segsleft)) != 0) in mpr_push_ieee_sge()
3587 if (cm->cm_out_len) { in mpr_push_ieee_sge()
3588 sge->Length = cm->cm_out_len; in mpr_push_ieee_sge()
3591 cm->cm_sglsize -= ieee_sge_size; in mpr_push_ieee_sge()
3595 bcopy(sgep, cm->cm_sge, ieee_sge_size); in mpr_push_ieee_sge()
3596 cm->cm_sge = in mpr_push_ieee_sge()
3597 (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + in mpr_push_ieee_sge()
3608 cm->cm_sglsize -= ieee_sge_size; in mpr_push_ieee_sge()
3612 bcopy(sgep, cm->cm_sge, ieee_sge_size); in mpr_push_ieee_sge()
3613 cm->cm_sge = (MPI25_SGE_IO_UNION *)((uintptr_t)cm->cm_sge + in mpr_push_ieee_sge()
3622 mpr_add_dmaseg(struct mpr_command *cm, vm_paddr_t pa, size_t len, u_int flags, in mpr_add_dmaseg() argument
3628 if (!(cm->cm_flags & MPR_CM_FLAGS_SGE_SIMPLE)) { in mpr_add_dmaseg()
3634 return (mpr_push_ieee_sge(cm, &ieee_sge, segsleft)); in mpr_add_dmaseg()
3646 return (mpr_push_sge(cm, &sge, sizeof sge, segsleft)); in mpr_add_dmaseg()
3654 struct mpr_command *cm; in mpr_data_cb() local
3657 cm = (struct mpr_command *)arg; in mpr_data_cb()
3658 sc = cm->cm_sc; in mpr_data_cb()
3664 if ((cm->cm_max_segs != 0) && (nsegs > cm->cm_max_segs)) { in mpr_data_cb()
3667 cm->cm_max_segs); in mpr_data_cb()
3675 if (cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) { in mpr_data_cb()
3697 } else if (cm->cm_flags & MPR_CM_FLAGS_DATAOUT) { in mpr_data_cb()
3704 if (cm->cm_targ && cm->cm_targ->is_nvme && in mpr_data_cb()
3705 mpr_check_pcie_native_sgl(sc, cm, segs, nsegs) == 0) { in mpr_data_cb()
3711 if ((cm->cm_flags & MPR_CM_FLAGS_SMP_PASS) && (i != 0)) { in mpr_data_cb()
3714 error = mpr_add_dmaseg(cm, segs[i].ds_addr, segs[i].ds_len, in mpr_data_cb()
3721 cm->cm_flags |= MPR_CM_FLAGS_CHAIN_FAILED; in mpr_data_cb()
3728 cm->cm_state = MPR_CM_STATE_INQUEUE; in mpr_data_cb()
3729 mpr_complete_command(sc, cm); in mpr_data_cb()
3735 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, dir); in mpr_data_cb()
3736 mpr_enqueue_request(sc, cm); in mpr_data_cb()
3756 mpr_map_command(struct mpr_softc *sc, struct mpr_command *cm) in mpr_map_command() argument
3760 if (cm->cm_flags & MPR_CM_FLAGS_USE_UIO) { in mpr_map_command()
3761 error = bus_dmamap_load_uio(sc->buffer_dmat, cm->cm_dmamap, in mpr_map_command()
3762 &cm->cm_uio, mpr_data_cb2, cm, 0); in mpr_map_command()
3763 } else if (cm->cm_flags & MPR_CM_FLAGS_USE_CCB) { in mpr_map_command()
3764 error = bus_dmamap_load_ccb(sc->buffer_dmat, cm->cm_dmamap, in mpr_map_command()
3765 cm->cm_data, mpr_data_cb, cm, 0); in mpr_map_command()
3766 } else if ((cm->cm_data != NULL) && (cm->cm_length != 0)) { in mpr_map_command()
3767 error = bus_dmamap_load(sc->buffer_dmat, cm->cm_dmamap, in mpr_map_command()
3768 cm->cm_data, cm->cm_length, mpr_data_cb, cm, 0); in mpr_map_command()
3771 if (cm->cm_sge != NULL) in mpr_map_command()
3772 mpr_add_dmaseg(cm, 0, 0, 0, 1); in mpr_map_command()
3773 mpr_enqueue_request(sc, cm); in mpr_map_command()
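
mpr_map_command() is the dispatch point for describing a command's data to the DMA engine: a UIO goes through bus_dmamap_load_uio(), a CAM CCB through bus_dmamap_load_ccb(), a plain kernel buffer through bus_dmamap_load(), and a command with no data skips the load callback and is enqueued immediately (after terminating any SGL it started). A condensed sketch of that decision; all types and helpers below are hypothetical stand-ins, with the real busdma calls noted in comments:

#include <stddef.h>

#define SKETCH_FLAG_USE_UIO     0x01    /* data described by a struct uio */
#define SKETCH_FLAG_USE_CCB     0x02    /* data described by a CAM CCB */

struct sketch_cmd {
        unsigned         flags;
        void            *data;          /* plain kernel buffer, or NULL */
        size_t           length;
};

/* Trivial stubs for the three busdma load paths and the post step. */
static int  sketch_load_uio(struct sketch_cmd *c)    { (void)c; return (0); }
static int  sketch_load_ccb(struct sketch_cmd *c)    { (void)c; return (0); }
static int  sketch_load_buffer(struct sketch_cmd *c) { (void)c; return (0); }
static void sketch_enqueue(struct sketch_cmd *c)     { (void)c; }

static int
sketch_map_command(struct sketch_cmd *cmd)
{
        int error = 0;

        if (cmd->flags & SKETCH_FLAG_USE_UIO)
                error = sketch_load_uio(cmd);       /* bus_dmamap_load_uio() */
        else if (cmd->flags & SKETCH_FLAG_USE_CCB)
                error = sketch_load_ccb(cmd);       /* bus_dmamap_load_ccb() */
        else if (cmd->data != NULL && cmd->length != 0)
                error = sketch_load_buffer(cmd);    /* bus_dmamap_load() */
        else {
                /*
                 * No data phase: the driver closes any started SGL with an
                 * empty final SGE, then posts the request directly.
                 */
                sketch_enqueue(cmd);
        }
        return (error);
}
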
3790 struct mpr_command *cm = *cmp; in mpr_wait_command() local
3795 cm->cm_complete = NULL; in mpr_wait_command()
3796 cm->cm_flags |= (MPR_CM_FLAGS_WAKEUP + MPR_CM_FLAGS_POLLED); in mpr_wait_command()
3797 error = mpr_map_command(sc, cm); in mpr_wait_command()
3808 error = msleep(cm, &sc->mpr_mtx, 0, "mprwait", timeout*hz); in mpr_wait_command()
3818 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { in mpr_wait_command()
3835 if (cm->cm_timeout_handler == NULL) { in mpr_wait_command()
3843 cm->cm_timeout_handler(sc, cm); in mpr_wait_command()
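
mpr_wait_command() is the blocking submit-and-wait pattern: clear the completion callback, set the WAKEUP and POLLED flags, map the command, then either msleep() on it (when sleeping is allowed) or busy-poll the COMPLETE flag while servicing interrupts; on timeout the command's timeout handler is invoked. A simplified sketch, with trivial stand-ins for mpr_map_command(), msleep(9), and interrupt polling, and with the timeout bookkeeping on the busy-poll path omitted:

#include <stddef.h>

#define SKETCH_FLAG_POLLED      0x01    /* completion sets COMPLETE for us */
#define SKETCH_FLAG_COMPLETE    0x02
#define SKETCH_FLAG_WAKEUP      0x04    /* completion wakes the sleeper */

struct sketch_softc;
struct sketch_cmd {
        unsigned          flags;
        void            (*complete)(struct sketch_softc *, struct sketch_cmd *);
        void            (*timeout_handler)(struct sketch_softc *,
                            struct sketch_cmd *);
};

/* Inert stand-ins for mpr_map_command(), msleep(9), and interrupt polling. */
static int
sketch_map_command(struct sketch_softc *sc, struct sketch_cmd *cmd)
{
        (void)sc; (void)cmd;
        return (0);
}

static int
sketch_sleep_on(struct sketch_cmd *cmd, int timeout_ticks)
{
        (void)cmd; (void)timeout_ticks;
        return (0);                     /* 0 = woken up, nonzero = timed out */
}

static void
sketch_poll_interrupts(struct sketch_softc *sc)
{
        (void)sc;
}

static int
sketch_wait_command(struct sketch_softc *sc, struct sketch_cmd *cmd,
    int timeout_ticks, int can_sleep)
{
        int error, timed_out = 0;

        cmd->complete = NULL;
        cmd->flags |= SKETCH_FLAG_WAKEUP | SKETCH_FLAG_POLLED;
        error = sketch_map_command(sc, cmd);

        if (can_sleep) {
                /* Sleep until the completion path wakes us on cmd. */
                if (sketch_sleep_on(cmd, timeout_ticks) != 0)
                        timed_out = 1;
        } else {
                /*
                 * Busy-poll: service interrupts until COMPLETE appears.
                 * The real code also enforces the timeout inside this loop.
                 */
                while ((cmd->flags & SKETCH_FLAG_COMPLETE) == 0)
                        sketch_poll_interrupts(sc);
        }

        if (timed_out && cmd->timeout_handler != NULL)
                cmd->timeout_handler(sc, cmd);

        return (error);
}
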
3865 struct mpr_command *cm = *cmp; in mpr_request_polled() local
3869 cm->cm_flags |= MPR_CM_FLAGS_POLLED; in mpr_request_polled()
3870 cm->cm_complete = NULL; in mpr_request_polled()
3871 mpr_map_command(sc, cm); in mpr_request_polled()
3874 while ((cm->cm_flags & MPR_CM_FLAGS_COMPLETE) == 0) { in mpr_request_polled()
3894 cm->cm_state = MPR_CM_STATE_BUSY; in mpr_request_polled()
3920 struct mpr_command *cm; in mpr_read_config_page() local
3927 cm = mpr_alloc_command(sc); in mpr_read_config_page()
3928 if (cm == NULL) { in mpr_read_config_page()
3932 req = (MPI2_CONFIG_REQUEST *)cm->cm_req; in mpr_read_config_page()
3958 cm->cm_data = params->buffer; in mpr_read_config_page()
3959 cm->cm_length = params->length; in mpr_read_config_page()
3960 if (cm->cm_data != NULL) { in mpr_read_config_page()
3961 cm->cm_sge = &req->PageBufferSGE; in mpr_read_config_page()
3962 cm->cm_sglsize = sizeof(MPI2_SGE_IO_UNION); in mpr_read_config_page()
3963 cm->cm_flags = MPR_CM_FLAGS_SGE_SIMPLE | MPR_CM_FLAGS_DATAIN; in mpr_read_config_page()
3965 cm->cm_sge = NULL; in mpr_read_config_page()
3966 cm->cm_desc.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE; in mpr_read_config_page()
3968 cm->cm_complete_data = params; in mpr_read_config_page()
3970 cm->cm_complete = mpr_config_complete; in mpr_read_config_page()
3971 return (mpr_map_command(sc, cm)); in mpr_read_config_page()
3973 error = mpr_wait_command(sc, &cm, 0, CAN_SLEEP); in mpr_read_config_page()
3977 if (cm != NULL) in mpr_read_config_page()
3978 mpr_free_command(sc, cm); in mpr_read_config_page()
3981 mpr_config_complete(sc, cm); in mpr_read_config_page()
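
mpr_read_config_page() shows the setup for a data-in request end to end: the caller's buffer and length land in cm_data/cm_length, cm_sge points at the request's PageBufferSGE with room for exactly one SGE, the SGE_SIMPLE and DATAIN flags are set, and the command is then either given the mpr_config_complete() callback and mapped asynchronously or waited on synchronously. A compact sketch of just the data-in wiring; the types, flag values, and parameter names are hypothetical stand-ins:

#include <stddef.h>

#define SKETCH_FLAG_SGE_SIMPLE  0x08    /* build MPI2 simple SGEs */
#define SKETCH_FLAG_DATAIN      0x10    /* device-to-host transfer */

struct sketch_cmd {
        void            *data;          /* caller's page buffer */
        size_t           length;
        void            *sge;           /* where the single SGE is built */
        size_t           sglsize;       /* room for exactly one SGE */
        unsigned         flags;
};

/*
 * Wire a caller-supplied buffer into a command as a single data-in SGE.
 * sge_slot/sge_size stand in for &req->PageBufferSGE and
 * sizeof(MPI2_SGE_IO_UNION) in the matches above.
 */
static void
sketch_setup_config_datain(struct sketch_cmd *cmd, void *buf, size_t len,
    void *sge_slot, size_t sge_size)
{
        cmd->data = buf;
        cmd->length = len;
        if (buf != NULL) {
                cmd->sge = sge_slot;
                cmd->sglsize = sge_size;
                cmd->flags = SKETCH_FLAG_SGE_SIMPLE | SKETCH_FLAG_DATAIN;
        } else {
                cmd->sge = NULL;        /* no data phase for this request */
        }
}
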
3994 mpr_config_complete(struct mpr_softc *sc, struct mpr_command *cm) in mpr_config_complete() argument
4000 params = cm->cm_complete_data; in mpr_config_complete()
4002 if (cm->cm_data != NULL) { in mpr_config_complete()
4003 bus_dmamap_sync(sc->buffer_dmat, cm->cm_dmamap, in mpr_config_complete()
4005 bus_dmamap_unload(sc->buffer_dmat, cm->cm_dmamap); in mpr_config_complete()
4012 if ((cm->cm_flags & MPR_CM_FLAGS_ERROR_MASK) != 0) { in mpr_config_complete()
4017 reply = (MPI2_CONFIG_REPLY *)cm->cm_reply; in mpr_config_complete()
4037 mpr_free_command(sc, cm); in mpr_config_complete()