/freebsd/sys/dev/cxgbe/tom/

t4_connect.c
  do_act_establish():
      76: const struct cpl_act_establish *cpl = (const void *)(rss + 1);
      77: u_int tid = GET_TID(cpl);
      78: u_int atid = G_TID_TID(ntohl(cpl->tos_atid));
      97: send_reset(sc, toep, be32toh(cpl->snd_isn));
     101: make_established(toep, be32toh(cpl->snd_isn) - 1,
     102:     be32toh(cpl->rcv_isn) - 1, cpl->tcp_opt);
  do_act_open_rpl():
     150: const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
     151: u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
     152: u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
     166: release_tid(sc, GET_TID(cpl), toep->ctrlq);
  [all …]

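The t4_connect.c handlers above all start the same way: the CPL message sits immediately after the RSS header, and the tid/atid fields are unpacked with the GET_TID()/G_TID_TID() macros. A minimal sketch of that shared prologue, assuming the usual cxgbe handler signature; do_example_rpl() is hypothetical and not a function in the tree:

/* Sketch only; the cxgbe kernel headers (adapter.h, common/t4_msg.h) are omitted. */
static int
do_example_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	/* The CPL payload immediately follows the RSS header. */
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	/* atid and status are packed into one big-endian word. */
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));

	/* ... look up the connection for atid and act on status ... */
	(void)iq; (void)m; (void)atid; (void)status;
	return (0);
}
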
t4_tls.c
  write_tlstx_cpl():
     383: write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,
     391: cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |
     394: cpl->pld_len = htobe32(plen);
     396: cpl->type_protover = htobe32(
     398: cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |
     400: cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);
     401: cpl->scmd1 = htobe64(seqno);
  t4_push_ktls():
     486: struct cpl_tx_tls_sfo *cpl;
     655: cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);
     662: write_tlstx_cpl(cpl, toe
  do_tls_data():
     725: const struct cpl_tls_data *cpl = mtod(m, const void *);
  do_rx_tls_cmp():
     786: const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);
  do_rx_data_tls():
     989: do_rx_data_tls(const struct cpl_rx_data *cpl, struct toepcb *toep, struct mbuf *m)
  do_tls_tcb_rpl():
    1276: const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
  [all …]

t4_listen.c
  do_pass_open_rpl():
     883: const struct cpl_pass_open_rpl *cpl = (const void *)(rss + 1);
     884: int stid = GET_TID(cpl);
     885: unsigned int status = cpl->status;
     889: unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
  do_close_server_rpl():
     959: const struct cpl_close_listsvr_rpl *cpl = (const void *)(rss + 1);
     960: int stid = GET_TID(cpl);
     961: unsigned int status = cpl->status;
     965: unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));
  do_abort_req_synqe():
    1026: const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);
    1027: unsigned int tid = GET_TID(cpl);
  [all …]

t4_cpl_io.c
  write_tx_data_iso():
     920: struct cpl_tx_data_iso *cpl;
     940: cpl = (struct cpl_tx_data_iso *)dst;
     941: cpl->op_to_scsi = htonl(V_CPL_TX_DATA_ISO_OP(CPL_TX_DATA_ISO) |
     949: cpl->ahs_len = 0;
     950: cpl->mpdu = htons(DIV_ROUND_UP(mss, 4));
     951: cpl->burst_size = htonl(DIV_ROUND_UP(burst_size, 4));
     952: cpl->len = htonl(len);
     953: cpl->reserved2_seglen_offset = htonl(0);
     954: cpl->datasn_offset = htonl(0);
     955: cpl->buffer_offset = htonl(0);
  [all …]

/freebsd/sys/dev/nvme/

nvme_qpair.c
  nvme_qpair_print_completion():
     336: struct nvme_completion *cpl)
     340: sct = NVME_STATUS_GET_SCT(cpl->status);
     341: sc = NVME_STATUS_GET_SC(cpl->status);
     342: crd = NVME_STATUS_GET_CRD(cpl->status);
     343: m = NVME_STATUS_GET_M(cpl->status);
     344: dnr = NVME_STATUS_GET_DNR(cpl->status);
     345: p = NVME_STATUS_GET_P(cpl->status);
     350: cpl->sqid, cpl->cid, cpl->cdw0);
  nvme_completion_is_retry():
     354: nvme_completion_is_retry(const struct nvme_completion *cpl)
     358: sct = NVME_STATUS_GET_SCT(cpl->status);
  [all …]

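nvme_qpair_print_completion() above unpacks the completion's 16-bit status word field by field. A small sketch of the same decode, reusing the NVME_STATUS_GET_* accessors seen there; example_cpl_failed() is made up, NVME_SCT_GENERIC/NVME_SC_SUCCESS are assumed to be the generic success codes, and byte-order handling is elided:

/* Sketch only; the nvme headers that define the accessors are omitted. */
static bool
example_cpl_failed(const struct nvme_completion *cpl)
{
	uint8_t sct = NVME_STATUS_GET_SCT(cpl->status);	/* status code type */
	uint8_t sc = NVME_STATUS_GET_SC(cpl->status);	/* status code */

	/* Anything other than generic/success is treated as an error here. */
	return (sct != NVME_SCT_GENERIC || sc != NVME_SC_SUCCESS);
}
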
nvme_ctrlr.c
  nvme_ctrlr_identify():
     464: if (nvme_completion_is_error(&status.cpl)) {
  nvme_ctrlr_set_num_qpairs():
     494: if (nvme_completion_is_error(&status.cpl)) {
     504: sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;
     505: cq_allocated = (status.cpl.cdw0 >> 16) + 1;
  nvme_ctrlr_create_qpairs():
     534: if (nvme_completion_is_error(&status.cpl)) {
     543: if (nvme_completion_is_error(&status.cpl)) {
  nvme_ctrlr_delete_qpairs():
     565: if (nvme_completion_is_error(&status.cpl)) {
     574: if (nvme_completion_is_error(&status.cpl)) {
  nvme_ctrlr_async_event_log_page_cb():
     683: nvme_ctrlr_async_event_log_page_cb(void *arg, const struct nvme_completion *cpl)
     696: if (nvme_completion_is_error(cpl))
  [all …]

nvme_test.c
  nvme_ns_io_test_cb():
     160: nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl)
     167: if (nvme_completion_is_error(cpl)) {
  nvme_ns_io_test():
     202: struct nvme_completion cpl;
     214: memset(&cpl, 0, sizeof(cpl));
     216: nvme_ns_io_test_cb(tth, &cpl);

nvme.c
  nvme_completion_poll_cb():
     303: nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)
     312: memcpy(&status->cpl, cpl, sizeof(*cpl));

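nvme_completion_poll_cb() is the callback half of the driver's synchronous-wait idiom: the submitter hands in a poll-status structure, the callback copies the completion into its cpl member, and the submitter later checks it with nvme_completion_is_error() (as the nvme_ctrlr.c hits do). A sketch of that contract with a made-up status type, assumed to resemble the in-tree struct nvme_completion_poll_status:

/* Sketch only; nvme headers and the submit/wait plumbing are omitted. */
struct example_poll_status {
	struct nvme_completion	cpl;	/* filled in by the callback */
	bool			done;	/* polled by the waiting submitter */
};

static void
example_poll_cb(void *arg, const struct nvme_completion *cpl)
{
	struct example_poll_status *status = arg;

	memcpy(&status->cpl, cpl, sizeof(*cpl));
	/* A real implementation would publish "done" with a release store. */
	status->done = true;
}
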
nvme_sim.c
  nvme_sim_nvmeio_done():
      63: nvme_sim_nvmeio_done(void *ccb_arg, const struct nvme_completion *cpl)
      73: memcpy(&ccb->nvmeio.cpl, cpl, sizeof(*cpl));
      75: if (nvme_completion_is_error(cpl)) {

nvme_ns.c
  (file scope):
      50: const struct nvme_completion *cpl);
  nvme_ns_strategy_done():
     137: nvme_ns_strategy_done(void *arg, const struct nvme_completion *cpl)
     145: if (nvme_completion_is_error(cpl)) {
  nvme_bio_child_done():
     309: nvme_bio_child_done(void *arg, const struct nvme_completion *cpl)
     317: bio_error = nvme_completion_is_error(cpl) ? EIO : 0;
  nvme_ns_construct():
     548: if (nvme_completion_is_error(&status.cpl)) {

/freebsd/sys/dev/nvmf/controller/

nvmft_qpair.c
  _nvmft_send_response():
     188: struct nvme_completion cpl;
     193: memcpy(&cpl, cqe, sizeof(cpl));
     205: cpl.sqhd = htole16(qp->sqhd);
     207: cpl.sqhd = 0;
     210: rc = nvmf_allocate_response(nq, &cpl, M_WAITOK);
  nvmft_send_response():
     234: const struct nvme_completion *cpl = cqe;
     237: KASSERT(BIT_ISSET(NUM_CIDS, cpl->cid, qp->cids),
     238:     ("%s: CID %u not busy", __func__, cpl->cid));
     240: BIT_CLR_ATOMIC(NUM_CIDS, cpl->cid, qp->cids);
  nvmft_init_cqe():
     247: struct nvme_completion *cpl = cqe;
  [all …]

/freebsd/sys/dev/cxgbe/cxgbei/

cxgbei.c
  do_rx_iscsi_hdr():
     196: struct cpl_iscsi_hdr *cpl = mtod(m, struct cpl_iscsi_hdr *);
     197: u_int tid = GET_TID(cpl);
     201: uint16_t len_ddp = be16toh(cpl->pdu_len_ddp);
     202: uint16_t len = be16toh(cpl->len);
     205: MPASS(m->m_pkthdr.len == len + sizeof(*cpl));
     210: m_copydata(m, sizeof(*cpl), ISCSI_BHS_SIZE, (caddr_t)ip->ip_bhs);
     213: icp->icp_seq = ntohl(cpl->seq);
  do_rx_iscsi_data():
     233: struct cpl_iscsi_data *cpl = mtod(m, struct cpl_iscsi_data *);
     234: u_int tid = GET_TID(cpl);
     240: MPASS(m->m_pkthdr.len == be16toh(cpl->len) + sizeof(*cpl));
  [all …]

/freebsd/sys/dev/cxgbe/crypto/

t6_kern_tls.c
  mk_ktls_act_open_req():
     154: struct cpl_act_open_req *cpl;
     159: cpl = (struct cpl_act_open_req *)cpl6;
     163: OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
     165: inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
     166:     &cpl->peer_ip, &cpl->peer_port);
     171: cpl->opt0 = htobe64(options);
     176: cpl->opt2 = htobe32(options);
  mk_ktls_act_open_req6():
     185: struct cpl_act_open_req6 *cpl;
     190: cpl = (struct cpl_act_open_req6 *)cpl6;
     194: OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
  [all …]

t4_crypto.c
  ccr_write_phys_dsgl():
     337: struct cpl_rx_phys_dsgl *cpl;
     344: cpl = dst;
     345: cpl->op_to_tid = htobe32(V_CPL_RX_PHYS_DSGL_OPCODE(CPL_RX_PHYS_DSGL) |
     347: cpl->pcirlxorder_to_noofsgentr = htobe32(
     352: cpl->rss_hdr_int.opcode = CPL_RX_PHYS_ADDR;
     353: cpl->rss_hdr_int.qid = htobe16(s->port->rxq->iq.abs_id);
     354: cpl->rss_hdr_int.hash_val = 0;
     355: cpl->rss_hdr_int.channel = s->port->rx_channel_id;
     356: sgl = (struct phys_sge_pairs *)(cpl + 1);
     378: MPASS(j + 8 * (sgl - (struct phys_sge_pairs *)(cpl
  ccr_hash_done():
     601: ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
  ccr_cipher_done():
     783: ccr_cipher_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
  ccr_eta_done():
    1106: ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
  ccr_gcm_done():
    1377: ccr_gcm_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
  ccr_ccm_done():
    1743: ccr_ccm_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
  do_cpl6_fw_pld():
    2654: const struct cpl_fw6_pld *cpl;
  [all …]

/freebsd/sys/dev/cxgb/

cxgb_offload.h
  (file scope):
      94: #define M_GETHDR_OFLD(qset, ctrl, cpl) \
      95:     m_gethdr_ofld(qset, ctrl, sizeof(*cpl), (void **)&cpl)
  m_gethdr_ofld():
      97: m_gethdr_ofld(int qset, int ctrl, int cpllen, void **cpl)
     108: *cpl = (void *)(oh + 1);

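M_GETHDR_OFLD() above sizes the allocation from the type of the CPL pointer it is given and leaves that pointer aimed at the payload just past the ofld_hdr. A hypothetical usage sketch; cpl_close_con_req is only an example CPL type, and m_gethdr_ofld() is assumed to return the allocated mbuf or NULL:

/* Sketch only; cxgb_offload.h and the T3 CPL definitions are assumed included. */
static struct mbuf *
example_alloc_close_req(int qset, int ctrl)
{
	struct cpl_close_con_req *req;
	struct mbuf *m;

	/* sizeof(*req) sets the CPL length; req ends up pointing at it. */
	m = M_GETHDR_OFLD(qset, ctrl, req);
	if (m == NULL)
		return (NULL);
	/* ... fill in *req (opcode, tid, ...) before queueing it ... */
	return (m);
}
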
/freebsd/sbin/nvmecontrol/

ns.c
  nsactive():
     431: if (nvme_completion_is_error(&pt.cpl))
  nsallocated():
     476: if (nvme_completion_is_error(&pt.cpl))
  nscontrollers():
     520: if (nvme_completion_is_error(&pt.cpl))
  nscreate():
     621: if (nvme_completion_is_error(&pt.cpl)) {
     623:     get_res_str(NVMEV(NVME_STATUS_SC, pt.cpl.status)));
     625: printf("namespace %d created\n", pt.cpl.cdw0);
  nsdelete():
     673: if (nvme_completion_is_error(&pt.cpl)) {
     675:     get_res_str(NVMEV(NVME_STATUS_SC, pt.cpl.status)));
  nsattach():
     737: if (nvme_completion_is_error(&pt.cpl))
     758: if (nvme_completion_is_error(&pt.cpl)) {
  [all …]

power.c
  power_set():
     114: if (nvme_completion_is_error(&pt.cpl))
  power_show():
     130: if (nvme_completion_is_error(&pt.cpl))
     133: printf("Current Power State is %d\n", pt.cpl.cdw0 & 0x1F);
     134: printf("Current Workload Hint is %d\n", pt.cpl.cdw0 >> 5);

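power_show() above decodes cdw0 of the completion (low 5 bits = power state, the rest = workload hint) after the usual nvmecontrol passthrough sequence: fill a struct nvme_pt_command, issue it, then check pt.cpl. A hedged sketch of that sequence; the NVME_PASSTHROUGH_CMD ioctl and the NVME_OPC_GET_FEATURES / NVME_FEAT_POWER_MANAGEMENT constants are assumed to be the interfaces nvmecontrol itself relies on:

#include <sys/ioctl.h>
#include <sys/endian.h>
#include <dev/nvme/nvme.h>
#include <err.h>
#include <stdio.h>
#include <string.h>

static void
example_show_power_state(int fd)
{
	struct nvme_pt_command pt;

	memset(&pt, 0, sizeof(pt));
	pt.cmd.opc = NVME_OPC_GET_FEATURES;
	pt.cmd.cdw10 = htole32(NVME_FEAT_POWER_MANAGEMENT);

	if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0)
		err(1, "get features ioctl failed");
	if (nvme_completion_is_error(&pt.cpl))
		errx(1, "get features request returned error");

	/* cdw0 of the completion carries the current feature value. */
	printf("power state %u, workload hint %u\n",
	    pt.cpl.cdw0 & 0x1F, pt.cpl.cdw0 >> 5);
}
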
selftest.c
  selftest_op():
      68: if (NVME_STATUS_GET_SCT(pt.cpl.status) == NVME_SCT_COMMAND_SPECIFIC &&
      69:     NVME_STATUS_GET_SC(pt.cpl.status) == NVME_SC_SELF_TEST_IN_PROGRESS)
      71: else if (nvme_completion_is_error(&pt.cpl))

nvmecontrol.c
  read_controller_data():
     116: if (nvme_completion_is_error(&pt.cpl))
  read_namespace_data():
     140: if (nvme_completion_is_error(&pt.cpl))
  read_active_namespaces():
     164: if (nvme_completion_is_error(&pt.cpl))

firmware.c
  update_firmware():
     191: if (nvme_completion_is_error(&pt.cpl))
  activate_firmware():
     214: sct = NVME_STATUS_GET_SCT(pt.cpl.status);
     215: sc = NVME_STATUS_GET_SC(pt.cpl.status);
     221: if (nvme_completion_is_error(&pt.cpl))

/freebsd/lib/libnvmf/

nvmf_controller.c
  nvmf_init_cqe():
      21: struct nvme_completion *cpl = cqe;
      24: memset(cpl, 0, sizeof(*cpl));
      25: cpl->cid = cmd->cid;
      26: cpl->status = htole16(status);
  nvmf_simple_response():
      33: struct nvme_completion cpl;
      38: nvmf_init_cqe(&cpl, nc, status);
      39: return (nvmf_allocate_response(nc->nc_qpair, &cpl));

/freebsd/usr.sbin/bhyve/amd64/

task_switch.c
  read_tss_descriptor():
     271: sup_paging.cpl = 0;	/* implicit supervisor mode */
  validate_seg_desc():
     314: int cpl, dpl, rpl;
     369: sup_paging.cpl = 0;	/* implicit supervisor mode */
     396: cpl = cs & SEL_RPL_MASK;
     400: if (stackseg && (rpl != cpl || dpl != cpl)) {
     407: if ((conforming && (cpl < dpl)) ||
     408:     (!conforming && (cpl != dpl))) {
     424: if (!conforming && (rpl > dpl || cpl > dpl)) {
  tss32_restore():
     588: ts->paging.cpl = tss->tss_cs & SEL_RPL_MASK;
  push_errcode():
     673: if (vie_alignment_check(paging->cpl, bytes, cr0, rflags, gla)) {
  [all …]

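The validate_seg_desc() hits apply the x86 privilege rules during a task switch: a stack segment needs RPL == CPL == DPL, a conforming code segment may be entered with CPL >= DPL while a non-conforming one needs CPL == DPL, and a data segment requires that neither RPL nor CPL exceed DPL (numerically). A consolidated sketch of those checks; the function and the segment-type encoding are illustrative, not bhyve's:

#include <stdbool.h>

enum example_seg { EX_SEG_DATA, EX_SEG_CODE, EX_SEG_STACK };

static bool
example_seg_priv_ok(enum example_seg seg, bool conforming, int cpl, int dpl,
    int rpl)
{
	switch (seg) {
	case EX_SEG_STACK:	/* SS: RPL and DPL must both equal CPL */
		return (rpl == cpl && dpl == cpl);
	case EX_SEG_CODE:	/* CS: conforming allows entry from CPL >= DPL */
		return (conforming ? cpl >= dpl : cpl == dpl);
	default:		/* data: neither RPL nor CPL may exceed DPL */
		return (rpl <= dpl && cpl <= dpl);
	}
}
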
/freebsd/sys/dev/cxgbe/

t4_filter.c
  t4_hashfilter_ao_rpl():
    1298: const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
    1299: u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
    1300: u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
    1310: f->tid = GET_TID(cpl);
    1328: release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);
  t4_del_hashfilter_rpl():
    1382: const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
    1383: unsigned int tid = GET_TID(cpl);
    1394: if (cpl->status == 0) {
  mk_act_open_req6():
    1507: uint64_t ftuple, struct cpl_act_open_req6 *cpl)
    1509: struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
  [all …]

t4_sge.c
  set_tcb_rpl_handler():
     398: const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
     404: tid = GET_TID(cpl);
     413: cookie = G_COOKIE(cpl->cookie);
  act_open_rpl_handler():
     438: const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
     439: u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status)));
  fw4_ack_handler():
     467: const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);
     468: unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));
  eth_rx():
    1940: const struct cpl_rx_pkt *cpl;
    2016: cpl = (const void *)(&d->rss + 1);
    2018: const uint16_t ev = be16toh(cpl->err_vec);
  [all …]

t4_netmap.c
  cxgbe_nm_tx():
     975: struct cpl_tx_pkt_core *cpl;
     989: cpl = (void *)(wr + 1);
     996: cpl->ctrl0 = nm_txq->cpl_ctrl0;
     997: cpl->pack = 0;
     998: cpl->len = htobe16(slot->len);
     999: cpl->ctrl1 = nm_txcsum ? 0 :
    1002: usgl = (void *)(cpl + 1);
    1009: cpl = (void *)(usgl + 1);
  unwrap_nm_fw6_msg():
    1303: unwrap_nm_fw6_msg(const struct cpl_fw6_msg *cpl)
    1306: MPASS(cpl->type == FW_TYPE_RSSCPL || cpl->type == FW6_TYPE_RSSCPL);
  [all …]