| /freebsd/sys/dev/cxgbe/tom/ |
| t4_connect.c |
     76: const struct cpl_act_establish *cpl = (const void *)(rss + 1);  in do_act_establish() (local)
     77: u_int tid = GET_TID(cpl);  in do_act_establish()
     78: u_int atid = G_TID_TID(ntohl(cpl->tos_atid));  in do_act_establish()
    103: send_reset(sc, toep, be32toh(cpl->snd_isn));  in do_act_establish()
    107: make_established(toep, be32toh(cpl->snd_isn) - 1,  in do_act_establish()
    108:     be32toh(cpl->rcv_isn) - 1, cpl->tcp_opt);  in do_act_establish()
    156: const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);  in do_act_open_rpl() (local)
    157: u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));  in do_act_open_rpl()
    158: u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));  in do_act_open_rpl()
    312: struct cpl_act_open_req6 *cpl = wrtod(wr);  in t4_connect() (local)
    356: struct cpl_act_open_req *cpl = wrtod(wr);  in t4_connect() (local)
    [all …] |
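The do_act_establish() and do_act_open_rpl() fragments in the t4_connect.c entry above share one handler shape: the CPL message sits immediately behind the RSS header in the ingress payload, GET_TID() pulls the hardware tid out of the common CPL header, and reply-specific fields such as atid_status are unpacked with the G_* accessors after a byte swap. A minimal sketch of that shape, assuming the usual cxgbe CPL-handler signature; the handler name is hypothetical and the body only repeats the decoding already shown in the listing:

static int
do_example_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        /* The CPL message is laid out directly after the RSS header. */
        const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
        /* The hardware tid lives in the common CPL header. */
        u_int tid = GET_TID(cpl);
        /* atid and the open status share the big-endian atid_status word. */
        u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
        u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));

        (void)iq; (void)m; (void)tid; (void)atid; (void)status;
        return (0);
}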
| t4_tls.c |
    383: write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep, struct tls_hdr *tls_hdr, unsigned int plen, uint64_t seqno)  in write_tlstx_cpl() (argument)
    396: write_tlstx_cpl(struct cpl_tx_tls_sfo *cpl, struct toepcb *toep,  in write_tlstx_cpl()
    405: cpl->op_to_seg_len = htobe32(V_CPL_TX_TLS_SFO_OPCODE(CPL_TX_TLS_SFO) |  in write_tlstx_cpl()
    408: cpl->pld_len = htobe32(plen);  in write_tlstx_cpl()
    410: cpl->type_protover = htobe32(V_CPL_TX_TLS_SFO_TYPE(rec_type));  in write_tlstx_cpl()
    411: cpl->seqno_numivs = htobe32(tls_ofld->scmd0.seqno_numivs |  in write_tlstx_cpl()
    413: cpl->ivgen_hdrlen = htobe32(tls_ofld->scmd0.ivgen_hdrlen);  in write_tlstx_cpl()
    414: cpl->scmd1 = htobe64(seqno);  in write_tlstx_cpl()
    499: struct cpl_tx_tls_sfo *cpl;  in t4_push_ktls() (local)
    685: cpl = (struct cpl_tx_tls_sfo *)(txwr + 1);  in t4_push_ktls()
    693: write_tlstx_cpl(cpl, toep, …  in t4_push_ktls()
    740: const struct cpl_tls_data *cpl = mtod(m, const void *);  in do_tls_data() (local)
    801: const struct cpl_rx_tls_cmp *cpl = mtod(m, const void *);  in do_rx_tls_cmp() (local)
   1004: do_rx_data_tls(const struct cpl_rx_data *cpl, struct toepcb *toep, struct mbuf *m)  in do_rx_data_tls() (argument)
   1255: const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);  in do_tls_tcb_rpl() (local)
    [all …] |
| t4_listen.c |
    884: const struct cpl_pass_open_rpl *cpl = (const void *)(rss + 1);  in do_pass_open_rpl() (local)
    885: int stid = GET_TID(cpl);  in do_pass_open_rpl()
    886: unsigned int status = cpl->status;  in do_pass_open_rpl()
    890: unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));  in do_pass_open_rpl()
    960: const struct cpl_close_listsvr_rpl *cpl = (const void *)(rss + 1);  in do_close_server_rpl() (local)
    961: int stid = GET_TID(cpl);  in do_close_server_rpl()
    962: unsigned int status = cpl->status;  in do_close_server_rpl()
    966: unsigned int opcode = G_CPL_OPCODE(be32toh(OPCODE_TID(cpl)));  in do_close_server_rpl()
   1027: const struct cpl_abort_req_rss *cpl = (const void *)(rss + 1);  in do_abort_req_synqe() (local)
   1028: unsigned int tid = GET_TID(cpl);  in do_abort_req_synqe()
   1076: const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);  in do_abort_rpl_synqe() (local)
   1158: encapsulated_syn(struct adapter *sc, const struct cpl_pass_accept_req *cpl)  in encapsulated_syn() (argument)
   1172: const struct cpl_pass_accept_req *cpl = mtod(m, const void *);  in pass_accept_req_to_protohdrs() (local)
   1333: const struct cpl_pass_accept_req *cpl = mtod(m, const void *);  in do_pass_accept_req() (local)
   1587: synqe_to_protohdrs(struct adapter *sc, struct synq_entry *synqe, const struct cpl_pass_establish *cpl, struct in_conninfo *inc, struct tcphdr *th, struct tcpopt *to)  in synqe_to_protohdrs() (argument)
   1614: const struct cpl_pass_establish *cpl = (const void *)(rss + 1);  in do_pass_establish() (local)
    [all …] |
| t4_cpl_io.c |
   1061: struct cpl_tx_data_iso *cpl;  in write_iscsi_tx_data_iso() (local)
   1081: cpl = (struct cpl_tx_data_iso *)dst;  in write_iscsi_tx_data_iso()
   1082: cpl->op_to_scsi = htonl(V_CPL_TX_DATA_ISO_OP(CPL_TX_DATA_ISO) |  in write_iscsi_tx_data_iso()
   1090: cpl->ahs_len = 0;  in write_iscsi_tx_data_iso()
   1091: cpl->mpdu = htons(DIV_ROUND_UP(mss, 4));  in write_iscsi_tx_data_iso()
   1092: cpl->burst_size = htonl(DIV_ROUND_UP(burst_size, 4));  in write_iscsi_tx_data_iso()
   1093: cpl->len = htonl(len);  in write_iscsi_tx_data_iso()
   1094: cpl->reserved2_seglen_offset = htonl(0);  in write_iscsi_tx_data_iso()
   1095: cpl->datasn_offset = htonl(0);  in write_iscsi_tx_data_iso()
   1096: cpl->buffer_offset = htonl(0);  in write_iscsi_tx_data_iso()
    [all …]
|
| /freebsd/sys/dev/nvme/ |
| nvme_qpair.c |
    120: get_status_string(const struct nvme_completion *cpl, char *buf, size_t len)  in get_status_string() (argument)
    125: nvme_sc_sbuf(cpl, &sb);  in get_status_string()
    133: struct nvme_completion *cpl)  in nvme_qpair_print_completion() (argument)
    138: crd = NVME_STATUS_GET_CRD(cpl->status);  in nvme_qpair_print_completion()
    139: m = NVME_STATUS_GET_M(cpl->status);  in nvme_qpair_print_completion()
    140: dnr = NVME_STATUS_GET_DNR(cpl->status);  in nvme_qpair_print_completion()
    141: p = NVME_STATUS_GET_P(cpl->status);  in nvme_qpair_print_completion()
    145: get_status_string(cpl, buf, sizeof(buf)), crd, m, dnr, p,  in nvme_qpair_print_completion()
    146: cpl->sqid, cpl…  in nvme_qpair_print_completion()
    150: nvme_completion_is_retry(const struct nvme_completion *cpl)  in nvme_completion_is_retry() (argument)
    211: nvme_qpair_complete_tracker(struct nvme_tracker *tr, struct nvme_completion *cpl, error_print_t print_on_error)  in nvme_qpair_complete_tracker() (argument)
    300: struct nvme_completion cpl;  in nvme_qpair_manual_complete_tracker() (local)
    318: struct nvme_completion cpl;  in nvme_qpair_manual_complete_request() (local)
    342: struct nvme_completion cpl;  in _nvme_qpair_process_completions() (local)
    [all …] |
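nvme_qpair_print_completion() above pulls the individual status fields out of cpl->status before logging them. A compressed sketch of the same decoding, using only the NVME_STATUS_GET_* accessors and fields visible in the listing; the helper name and message format are illustrative, not taken from nvme_qpair.c:

static void
example_log_completion(const struct nvme_completion *cpl)
{
        /* Each accessor extracts one field of the 16-bit completion status. */
        printf("crd=%u m=%u dnr=%u p=%u sqid=%u\n",
            NVME_STATUS_GET_CRD(cpl->status), NVME_STATUS_GET_M(cpl->status),
            NVME_STATUS_GET_DNR(cpl->status), NVME_STATUS_GET_P(cpl->status),
            cpl->sqid);
}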
| nvme_ctrlr.c |
    469: if (nvme_completion_is_error(&status.cpl)) {  in nvme_ctrlr_identify()
    499: if (nvme_completion_is_error(&status.cpl)) {  in nvme_ctrlr_set_num_qpairs()
    509: sq_allocated = (status.cpl.cdw0 & 0xFFFF) + 1;  in nvme_ctrlr_set_num_qpairs()
    510: cq_allocated = (status.cpl.cdw0 >> 16) + 1;  in nvme_ctrlr_set_num_qpairs()
    539: if (nvme_completion_is_error(&status.cpl)) {  in nvme_ctrlr_create_qpairs()
    548: if (nvme_completion_is_error(&status.cpl)) {  in nvme_ctrlr_create_qpairs()
    570: if (nvme_completion_is_error(&status.cpl)) {  in nvme_ctrlr_delete_qpairs()
    579: if (nvme_completion_is_error(&status.cpl)) {  in nvme_ctrlr_delete_qpairs()
    686: nvme_ctrlr_async_event_cb(void *arg, const struct nvme_completion *cpl)  in nvme_ctrlr_async_event_cb() (argument)
    690: if (nvme_completion_is_error(cpl)) {  in nvme_ctrlr_async_event_cb()
    [all …]
|
| nvme.c |
    134: nvme_completion_poll_cb(void *arg, const struct nvme_completion *cpl)  in nvme_completion_poll_cb() (argument)
    143: memcpy(&status->cpl, cpl, sizeof(*cpl));  in nvme_completion_poll_cb()
|
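nvme_completion_poll_cb() does nothing but copy the completion into the caller-supplied status structure; the nvme_ctrlr.c entry above then checks status.cpl with nvme_completion_is_error() and reads results such as cdw0 out of it. A hedged sketch of that round trip; the submit helper, the done-flag handling, and the delay are assumptions, and only nvme_completion_poll_cb, status.cpl, and nvme_completion_is_error() are taken from the listing:

struct nvme_completion_poll_status status;

status.done = 0;                        /* assumed completion flag */
/* hypothetical submit helper; the real callers issue specific admin commands */
submit_admin_command(ctrlr, nvme_completion_poll_cb, &status);
while (!status.done)                    /* set once the callback has run */
        DELAY(5);
if (nvme_completion_is_error(&status.cpl))
        printf("admin command failed\n");
else
        printf("cdw0 = 0x%x\n", status.cpl.cdw0);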
| nvme_test.c |
    160: nvme_ns_io_test_cb(void *arg, const struct nvme_completion *cpl)  in nvme_ns_io_test_cb() (argument)
    167: if (nvme_completion_is_error(cpl)) {  in nvme_ns_io_test_cb()
    202: struct nvme_completion cpl;  in nvme_ns_io_test() (local)
    214: memset(&cpl, 0, sizeof(cpl));  in nvme_ns_io_test()
    216: nvme_ns_io_test_cb(tth, &cpl);  in nvme_ns_io_test()
|
| nvme_util.c |
    215: nvme_sc_sbuf(const struct nvme_completion *cpl, struct sbuf *sb)  in nvme_sc_sbuf() (argument)
    220: status = le16toh(cpl->status);  in nvme_sc_sbuf()
    259: nvme_cpl_sbuf(const struct nvme_completion *cpl, struct sbuf *sb)  in nvme_cpl_sbuf() (argument)
    263: status = le16toh(cpl->status);  in nvme_cpl_sbuf()
    264: nvme_sc_sbuf(cpl, sb);  in nvme_cpl_sbuf()
|
| nvme_sim.c |
     68: nvme_sim_nvmeio_done(void *ccb_arg, const struct nvme_completion *cpl)  in nvme_sim_nvmeio_done() (argument)
     78: memcpy(&ccb->nvmeio.cpl, cpl, sizeof(*cpl));  in nvme_sim_nvmeio_done()
     80: if (nvme_completion_is_error(cpl)) {  in nvme_sim_nvmeio_done()
    461: nvme_sim_handle_aen(device_t dev, const struct nvme_completion *cpl,  in nvme_sim_handle_aen() (argument)
|
| /freebsd/sys/dev/nvmf/controller/ |
| nvmft_qpair.c |
    194: struct nvme_completion cpl;  in _nvmft_send_response() (local)
    199: memcpy(&cpl, cqe, sizeof(cpl));  in _nvmft_send_response()
    211: cpl.sqhd = htole16(qp->sqhd);  in _nvmft_send_response()
    213: cpl.sqhd = 0;  in _nvmft_send_response()
    216: rc = nvmf_allocate_response(nq, &cpl, M_WAITOK);  in _nvmft_send_response()
    240: const struct nvme_completion *cpl = cqe;  in nvmft_send_response() (local)
    243: KASSERT(BIT_ISSET(NUM_CIDS, cpl->cid, qp->cids),  in nvmft_send_response()
    244:     ("%s: CID %u not busy", __func__, cpl->cid));  in nvmft_send_response()
    246: BIT_CLR_ATOMIC(NUM_CIDS, cpl->cid, qp->cids);  in nvmft_send_response()
    253: struct nvme_completion *cpl = cqe;  in nvmft_init_cqe() (local)
    [all …]
|
| /freebsd/sys/dev/cxgbe/cxgbei/ |
| cxgbei.c |
    196: struct cpl_iscsi_hdr *cpl = mtod(m, struct cpl_iscsi_hdr *);  in do_rx_iscsi_hdr() (local)
    197: u_int tid = GET_TID(cpl);  in do_rx_iscsi_hdr()
    201: uint16_t len_ddp = be16toh(cpl->pdu_len_ddp);  in do_rx_iscsi_hdr()
    202: uint16_t len = be16toh(cpl->len);  in do_rx_iscsi_hdr()
    205: MPASS(m->m_pkthdr.len == len + sizeof(*cpl));  in do_rx_iscsi_hdr()
    210: m_copydata(m, sizeof(*cpl), ISCSI_BHS_SIZE, (caddr_t)ip->ip_bhs);  in do_rx_iscsi_hdr()
    213: icp->icp_seq = ntohl(cpl->seq);  in do_rx_iscsi_hdr()
    233: struct cpl_iscsi_data *cpl = mtod(m, struct cpl_iscsi_data *);  in do_rx_iscsi_data() (local)
    234: u_int tid = GET_TID(cpl);  in do_rx_iscsi_data()
    240: MPASS(m->m_pkthdr.len == be16toh(cpl->len) + sizeof(*cpl));  in do_rx_iscsi_data()
    [all …] |
|
| /freebsd/sys/dev/cxgb/ |
| cxgb_offload.h |
     94: #define M_GETHDR_OFLD(qset, ctrl, cpl) \  in M_GETHDR_OFLD() (argument)
     95:     m_gethdr_ofld(qset, ctrl, sizeof(*cpl), (void **)&cpl)
     97: m_gethdr_ofld(int qset, int ctrl, int cpllen, void **cpl)  in m_gethdr_ofld() (argument)
    108: *cpl = (void *)(oh + 1);  in m_gethdr_ofld()
|
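M_GETHDR_OFLD() wraps m_gethdr_ofld() so that the caller receives an offload mbuf and a cpl pointer aimed at the space just past the ofld_hdr (line 108 above). A usage sketch under two assumptions that the fragment does not confirm: that m_gethdr_ofld() returns the mbuf (or NULL on failure), and that struct cpl_abort_req is a representative payload type:

struct cpl_abort_req *req;
struct mbuf *m;

/* Allocate an offload mbuf sized for the CPL; req ends up pointing at oh + 1. */
m = M_GETHDR_OFLD(qset, ctrl, req);
if (m == NULL)
        return (ENOMEM);
/* req can now be filled in as the CPL payload of the offload request. */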
| /freebsd/sys/dev/cxgbe/crypto/ |
| t6_kern_tls.c |
    154: struct cpl_act_open_req *cpl;  in mk_ktls_act_open_req() (local)
    159: cpl = (struct cpl_act_open_req *)cpl6;  in mk_ktls_act_open_req()
    163: OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,  in mk_ktls_act_open_req()
    165: inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,  in mk_ktls_act_open_req()
    166:     &cpl->peer_ip, &cpl->peer_port);  in mk_ktls_act_open_req()
    171: cpl->opt0 = htobe64(options);  in mk_ktls_act_open_req()
    176: cpl->opt2 = htobe32(options);  in mk_ktls_act_open_req()
    185: struct cpl_act_open_req6 *cpl;  in mk_ktls_act_open_req6() (local)
    250: const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);  in ktls_act_open_rpl() (local)
    281: struct cpl_set_tcb_field_core *cpl;  in write_set_tcb_field_ulp() (local)
   1114: struct cpl_tx_pkt_core *cpl;  in ktls_write_tcp_options() (local)
   1209: struct cpl_tx_pkt_core *cpl;  in ktls_write_tunnel_packet() (local)
   1860: struct cpl_tx_pkt_core *cpl;  in ktls_write_tcp_fin() (local)
    [all …] |
| t7_kern_tls.c |
    962: write_lso_cpl(void *cpl, struct mbuf *m0, uint16_t mss, uint16_t eh_type,  in write_lso_cpl() (argument)
    981: lso = cpl;  in write_lso_cpl()
    994: struct cpl_tx_tls_ack *cpl;  in write_tx_tls_ack() (local)
    998: cpl = dst;  in write_tx_tls_ack()
    999: cpl->op_to_Rsvd2 = htobe32(V_CPL_TX_TLS_ACK_OPCODE(CPL_TX_TLS_ACK) |  in write_tx_tls_ack()
   1004: cpl->PldLen = htobe32(V_CPL_TX_TLS_ACK_PLDLEN(32 + 16 + hash_len));  in write_tx_tls_ack()
   1005: cpl->Rsvd3 = 0;  in write_tx_tls_ack()
   1007: return (cpl + 1);  in write_tx_tls_ack()
   1015: struct cpl_fw6_pld *cpl;  in write_fw6_pld() (local)
   1023: cpl = (void *)(rss + 1);  in write_fw6_pld()
    [all …]
|
| /freebsd/sys/dev/ufshci/ |
| ufshci.c |
     51: ufshci_completion_poll_cb(void *arg, const struct ufshci_completion *cpl,  in ufshci_completion_poll_cb() (argument)
     61: memcpy(&status->cpl.response_upiu, &cpl->response_upiu, cpl->size);  in ufshci_completion_poll_cb()
|
| ufshci_req_queue.c |
    181: struct ufshci_completion cpl;  in ufshci_req_queue_manual_complete_request() (local)
    184: memset(&cpl, 0, sizeof(cpl));  in ufshci_req_queue_manual_complete_request()
    185: cpl.response_upiu.header.response = rc;  in ufshci_req_queue_manual_complete_request()
    187:     &cpl.response_upiu);  in ufshci_req_queue_manual_complete_request()
    195: req->cb_fn(req->cb_arg, &cpl, error);  in ufshci_req_queue_manual_complete_request()
    244: struct ufshci_completion cpl;  in ufshci_req_queue_complete_tracker() (local)
    252: cpl.size = tr->response_size;  in ufshci_req_queue_complete_tracker()
    254: memcpy(&cpl.response_upiu,  in ufshci_req_queue_complete_tracker()
    255:     (void *)hwq->utmrd[tr->slot_num].response_upiu, cpl.size);  in ufshci_req_queue_complete_tracker()
    262: memcpy(&cpl.response_upiu, (void *)tr->ucd->response_upiu,  in ufshci_req_queue_complete_tracker()
    [all …]
|
| /freebsd/sbin/nvmecontrol/ |
| ns.c |
    431: if (nvme_completion_is_error(&pt.cpl))  in nsactive()
    476: if (nvme_completion_is_error(&pt.cpl))  in nsallocated()
    520: if (nvme_completion_is_error(&pt.cpl))  in nscontrollers()
    621: if (nvme_completion_is_error(&pt.cpl)) {  in nscreate()
    623:     get_res_str(NVMEV(NVME_STATUS_SC, pt.cpl.status)));  in nscreate()
    625: printf("namespace %d created\n", pt.cpl.cdw0);  in nscreate()
    673: if (nvme_completion_is_error(&pt.cpl)) {  in nsdelete()
    675:     get_res_str(NVMEV(NVME_STATUS_SC, pt.cpl.status)));  in nsdelete()
    737: if (nvme_completion_is_error(&pt.cpl))  in nsattach()
    758: if (nvme_completion_is_error(&pt.cpl)) {  in nsattach()
    [all …]
|
| power.c |
    114: if (nvme_completion_is_error(&pt.cpl))  in power_set()
    130: if (nvme_completion_is_error(&pt.cpl))  in power_show()
    133: printf("Current Power State is %d\n", pt.cpl.cdw0 & 0x1F);  in power_show()
    134: printf("Current Workload Hint is %d\n", pt.cpl.cdw0 >> 5);  in power_show()
|
| selftest.c |
     68: if (NVME_STATUS_GET_SCT(pt.cpl.status) == NVME_SCT_COMMAND_SPECIFIC &&  in selftest_op()
     69:     NVME_STATUS_GET_SC(pt.cpl.status) == NVME_SC_SELF_TEST_IN_PROGRESS)  in selftest_op()
     71: else if (nvme_completion_is_error(&pt.cpl))  in selftest_op()
|
| nvmecontrol.c |
    116: if (nvme_completion_is_error(&pt.cpl))  in read_controller_data()
    140: if (nvme_completion_is_error(&pt.cpl))  in read_namespace_data()
    164: if (nvme_completion_is_error(&pt.cpl))  in read_active_namespaces()
|
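All of the nvmecontrol fragments above follow the same passthrough pattern: fill a struct nvme_pt_command, hand it to the kernel with the NVME_PASSTHROUGH_CMD ioctl, then inspect pt.cpl. A hedged sketch of that pattern for an identify-controller request; the CNS value, buffer handling, and error messages are reconstructed from common NVMe usage rather than copied from nvmecontrol.c:

struct nvme_pt_command pt;
struct nvme_controller_data cdata;

memset(&pt, 0, sizeof(pt));
pt.cmd.opc = NVME_OPC_IDENTIFY;
pt.cmd.cdw10 = htole32(1);      /* CNS 1: identify controller (assumed) */
pt.buf = &cdata;
pt.len = sizeof(cdata);
pt.is_read = 1;

if (ioctl(fd, NVME_PASSTHROUGH_CMD, &pt) < 0)
        err(1, "identify request failed");
if (nvme_completion_is_error(&pt.cpl))
        errx(1, "identify request returned error");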
| /freebsd/lib/libnvmf/ |
| nvmf_controller.c |
     71: struct nvme_completion *cpl = cqe;  in nvmf_init_cqe() (local)
     74: memset(cpl, 0, sizeof(*cpl));  in nvmf_init_cqe()
     75: cpl->cid = cmd->cid;  in nvmf_init_cqe()
     76: cpl->status = htole16(status);  in nvmf_init_cqe()
     83: struct nvme_completion cpl;  in nvmf_simple_response() (local)
     88: nvmf_init_cqe(&cpl, nc, status);  in nvmf_simple_response()
     89: return (nvmf_allocate_response(nc->nc_qpair, &cpl));  in nvmf_simple_response()
|
| /freebsd/usr.sbin/bhyve/amd64/ |
| task_switch.c |
    271: sup_paging.cpl = 0; /* implicit supervisor mode */  in read_tss_descriptor()
    314: int cpl, dpl, rpl;  in validate_seg_desc() (local)
    369: sup_paging.cpl = 0; /* implicit supervisor mode */  in validate_seg_desc()
    396: cpl = cs & SEL_RPL_MASK;  in validate_seg_desc()
    400: if (stackseg && (rpl != cpl || dpl != cpl)) {  in validate_seg_desc()
    407: if ((conforming && (cpl < dpl)) ||  in validate_seg_desc()
    408:     (!conforming && (cpl != dpl))) {  in validate_seg_desc()
    424: if (!conforming && (rpl > dpl || cpl > dpl)) {  in validate_seg_desc()
    588: ts->paging.cpl = tss->tss_cs & SEL_RPL_MASK;  in tss32_restore()
    673: if (vie_alignment_check(paging->cpl, bytes, cr0, rflags, gla)) {  in push_errcode()
    [all …]
|
| /freebsd/sys/dev/cxgbe/ |
| t4_filter.c |
   1298: const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);  in t4_hashfilter_ao_rpl() (local)
   1382: const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);  in t4_del_hashfilter_rpl() (local)
   1507: mk_act_open_req6(struct adapter *sc, struct filter_entry *f, int atid, uint64_t ftuple, struct cpl_act_open_req6 *cpl)  in mk_act_open_req6() (argument)
   1508: const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);  in mk_act_open_req6()
   1509: u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));  in mk_act_open_req6()
   1510: u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));  in mk_act_open_req6()
   1520: f->tid = GET_TID(cpl);  in mk_act_open_req6()
   1538: release_tid(sc, GET_TID(cpl), &sc->sge.ctrlq[0]);  in mk_act_open_req6()
   1551: mk_act_open_req(struct adapter *sc, struct filter_entry *f, int atid, uint64_t ftuple, struct cpl_act_open_req *cpl)  in mk_act_open_req() (argument)
   1592: const struct cpl_abort_rpl_rss *cpl = (const void *)(rss + 1);
   1593: unsigned int tid = GET_TID(cpl);
   1604: if (cpl->status == 0) {  in act_open_cpl_len16()
   1717:     uint64_t ftuple, struct cpl_act_open_req6 *cpl)  in mk_abort_req_ulp()
   1719: struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;  in mk_abort_req_ulp()
    [all …] |
| t4_sge.c |
    401: const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);  in set_tcb_rpl_handler() (local)
    407: tid = GET_TID(cpl);  in set_tcb_rpl_handler()
    416: cookie = G_COOKIE(cpl->cookie);  in set_tcb_rpl_handler()
    441: const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);  in act_open_rpl_handler() (local)
    442: u_int cookie = G_TID_COOKIE(G_AOPEN_ATID(be32toh(cpl->atid_status)));  in act_open_rpl_handler()
    470: const struct cpl_fw4_ack *cpl = (const void *)(rss + 1);  in fw4_ack_handler() (local)
    471: unsigned int tid = G_CPL_FW4_ACK_FLOWID(be32toh(OPCODE_TID(cpl)));  in fw4_ack_handler()
    486: const struct cpl_fw6_pld *cpl;  in fw6_pld_handler() (local)
    490: cpl = mtod(m, const void *);  in fw6_pld_handler()
    492: cpl = (const void *)(rss + 1);  in fw6_pld_handler()
    [all …]
|