Lines Matching +full:conf +full:- +full:rst

1 /*-
45 #include <sys/conf.h>
106 #define TT_HASH(icc, tt) (G_PPOD_TAG(tt) & (icc)->cmp_hash_mask)
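
TT_HASH() masks the page-pod tag down to a bucket index in the cmp_table allocated with hashinit(); hashinit() sizes the table to a power of two and returns size - 1 as the mask, so the AND is a cheap modulo. A minimal userland sketch of that bucket selection, assuming an illustrative 64-bucket table (tag_bucket and the sample tags are not from the driver):

#include <stdio.h>
#include <stdint.h>

/*
 * hashinit(64, ...) rounds up to a power of two and hands back
 * hash_mask = nbuckets - 1, so "tag & mask" selects a bucket.
 */
#define NBUCKETS        64                      /* power of two */
#define HASH_MASK       (NBUCKETS - 1)

static unsigned int
tag_bucket(uint32_t tag)
{
        return (tag & HASH_MASK);               /* same as tag % NBUCKETS */
}

int
main(void)
{
        uint32_t tags[] = { 0x0, 0x3f, 0x40, 0x1234 };

        for (size_t i = 0; i < sizeof(tags) / sizeof(tags[0]); i++)
                printf("tag 0x%x -> bucket %u\n", (unsigned)tags[i],
                    tag_bucket(tags[i]));
        return (0);
}
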
177 KASSERT(icp->ref_cnt != 0, ("freeing deleted PDU")); in icl_cxgbei_conn_pdu_free()
178 MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE); in icl_cxgbei_conn_pdu_free()
179 MPASS(ic == ip->ip_conn); in icl_cxgbei_conn_pdu_free()
181 m_freem(ip->ip_ahs_mbuf); in icl_cxgbei_conn_pdu_free()
182 m_freem(ip->ip_data_mbuf); in icl_cxgbei_conn_pdu_free()
183 m_freem(ip->ip_bhs_mbuf); in icl_cxgbei_conn_pdu_free()
185 KASSERT(ic != NULL || icp->ref_cnt == 1, in icl_cxgbei_conn_pdu_free()
188 if (atomic_fetchadd_int(&icp->ref_cnt, -1) != 1) in icl_cxgbei_conn_pdu_free()
194 refcount_release(&ic->ic_outstanding_pdus); in icl_cxgbei_conn_pdu_free()
203 MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE); in icl_cxgbei_pdu_call_cb()
205 if (icp->cb != NULL) in icl_cxgbei_pdu_call_cb()
206 icp->cb(ip, icp->error); in icl_cxgbei_pdu_call_cb()
208 if (__predict_true(ip->ip_conn != NULL)) in icl_cxgbei_pdu_call_cb()
209 refcount_release(&ip->ip_conn->ic_outstanding_pdus); in icl_cxgbei_pdu_call_cb()
220 icp->error = error; in icl_cxgbei_pdu_done()
222 m_freem(ip->ip_ahs_mbuf); in icl_cxgbei_pdu_done()
223 ip->ip_ahs_mbuf = NULL; in icl_cxgbei_pdu_done()
224 m_freem(ip->ip_data_mbuf); in icl_cxgbei_pdu_done()
225 ip->ip_data_mbuf = NULL; in icl_cxgbei_pdu_done()
226 m_freem(ip->ip_bhs_mbuf); in icl_cxgbei_pdu_done()
227 ip->ip_bhs_mbuf = NULL; in icl_cxgbei_pdu_done()
233 if (atomic_fetchadd_int(&icp->ref_cnt, -1) == 1) in icl_cxgbei_pdu_done()
243 struct icl_cxgbei_pdu *icp = (struct icl_cxgbei_pdu *)mb->m_ext.ext_arg1; in icl_cxgbei_mbuf_done()
250 icl_cxgbei_pdu_call_cb(&icp->ip); in icl_cxgbei_mbuf_done()
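
The PDU teardown paths above share one idiom: every holder drops its reference with atomic_fetchadd_int(&ref_cnt, -1), and only the caller that observes the previous value 1 (the last reference) runs the final callback or free. A hedged userland analogue of that pattern using C11 atomics (the struct and function names here are illustrative, not the driver's):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pdu {
        atomic_int ref_cnt;
        /* payload fields would live here */
};

/* Drop one reference; free only when the old count was 1. */
static void
pdu_release(struct pdu *p)
{
        /*
         * atomic_fetch_sub returns the value before the subtraction,
         * just like atomic_fetchadd_int(&ref_cnt, -1) in the listing.
         */
        if (atomic_fetch_sub(&p->ref_cnt, 1) == 1) {
                printf("last reference dropped, freeing\n");
                free(p);
        }
}

int
main(void)
{
        struct pdu *p = malloc(sizeof(*p));

        if (p == NULL)
                return (1);
        atomic_init(&p->ref_cnt, 2);    /* e.g. owner + in-flight mbuf */
        pdu_release(p);                 /* old value 2: not freed yet */
        pdu_release(p);                 /* old value 1: freed here */
        return (0);
}
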
264 icp->icp_signature = CXGBEI_PDU_SIGNATURE; in icl_cxgbei_new_pdu()
265 icp->ref_cnt = 1; in icl_cxgbei_new_pdu()
266 ip = &icp->ip; in icl_cxgbei_new_pdu()
274 ip->ip_bhs_mbuf = m; in icl_cxgbei_new_pdu()
275 ip->ip_bhs = mtod(m, struct iscsi_bhs *); in icl_cxgbei_new_pdu()
276 memset(ip->ip_bhs, 0, sizeof(*ip->ip_bhs)); in icl_cxgbei_new_pdu()
277 m->m_len = sizeof(struct iscsi_bhs); in icl_cxgbei_new_pdu()
278 m->m_pkthdr.len = m->m_len; in icl_cxgbei_new_pdu()
287 ip->ip_conn = ic; in icl_cxgbei_new_pdu_set_conn()
289 refcount_acquire(&ic->ic_outstanding_pdus); in icl_cxgbei_new_pdu_set_conn()
314 len += request->ip_bhs->bhs_data_segment_len[0]; in icl_pdu_data_segment_length()
316 len += request->ip_bhs->bhs_data_segment_len[1]; in icl_pdu_data_segment_length()
318 len += request->ip_bhs->bhs_data_segment_len[2]; in icl_pdu_data_segment_length()
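
These three additions accumulate the 24-bit, big-endian DataSegmentLength from the BHS; the shifts between them did not match the search query, so only the += lines appear above. A self-contained sketch of decoding that field, plus the mirror-image encoding that finalize_pdu performs further down (byte [2] = len, [1] = len >> 8, [0] = len >> 16):

#include <stdint.h>
#include <stdio.h>

/* DataSegmentLength is a 24-bit big-endian field in the BHS. */
static uint32_t
dsl_decode(const uint8_t dsl[3])
{
        uint32_t len = 0;

        len += dsl[0];          /* most significant byte */
        len <<= 8;
        len += dsl[1];
        len <<= 8;
        len += dsl[2];          /* least significant byte */
        return (len);
}

static void
dsl_encode(uint8_t dsl[3], uint32_t len)
{
        dsl[2] = len;           /* mirrors bhs_data_segment_len[2] = data_len */
        dsl[1] = len >> 8;
        dsl[0] = len >> 16;
}

int
main(void)
{
        uint8_t dsl[3];

        dsl_encode(dsl, 8192);
        printf("encoded %02x %02x %02x -> decoded %u\n",
            dsl[0], dsl[1], dsl[2], dsl_decode(dsl));
        return (0);
}
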
334 struct icl_pdu *ip = &icp->ip; in finalize_pdu()
343 m = ip->ip_data_mbuf; in finalize_pdu()
344 ulp_submode = icc->ulp_submode; in finalize_pdu()
352 padding = roundup2(ip->ip_data_len, 4) - ip->ip_data_len; in finalize_pdu()
355 bzero(mtod(last, uint8_t *) + last->m_len, padding); in finalize_pdu()
356 last->m_len += padding; in finalize_pdu()
359 MPASS(ip->ip_data_len == 0); in finalize_pdu()
367 m = ip->ip_bhs_mbuf; in finalize_pdu()
368 MPASS(m->m_pkthdr.len == sizeof(struct iscsi_bhs)); in finalize_pdu()
369 MPASS(m->m_len == sizeof(struct iscsi_bhs)); in finalize_pdu()
371 bhs = ip->ip_bhs; in finalize_pdu()
372 data_len = ip->ip_data_len; in finalize_pdu()
373 if (data_len > icc->ic.ic_max_send_data_segment_length) { in finalize_pdu()
379 switch (bhs->bhs_opcode) { in finalize_pdu()
387 panic("invalid opcode %#x for ISO", bhs->bhs_opcode); in finalize_pdu()
389 data_len = icc->ic.ic_max_send_data_segment_length; in finalize_pdu()
391 if (bhsdi->bhsdi_flags & BHSDI_FLAGS_F) { in finalize_pdu()
397 bhsdi->bhsdi_flags &= ~BHSDI_FLAGS_F; in finalize_pdu()
404 bhs->bhs_data_segment_len[2] = data_len; in finalize_pdu()
405 bhs->bhs_data_segment_len[1] = data_len >> 8; in finalize_pdu()
406 bhs->bhs_data_segment_len[0] = data_len >> 16; in finalize_pdu()
411 m->m_pkthdr.len += ip->ip_data_len + padding; in finalize_pdu()
412 m->m_next = ip->ip_data_mbuf; in finalize_pdu()
414 ip->ip_bhs_mbuf = NULL; in finalize_pdu()
415 ip->ip_data_mbuf = NULL; in finalize_pdu()
416 ip->ip_bhs = NULL; in finalize_pdu()
420 * still be held by zero-copy PDU buffers (ICL_NOCOPY). in finalize_pdu()
422 if (atomic_fetchadd_int(&icp->ref_cnt, -1) == 1) in finalize_pdu()
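
finalize_pdu pads the data segment to a 4-byte boundary before chaining it behind the BHS mbuf; roundup2(len, 4) - len yields the 0-3 bytes of padding. A small sketch of that arithmetic (roundup2 reproduced from sys/param.h so the example builds in userland):

#include <stdio.h>

/* roundup2() as defined in sys/param.h; y must be a power of two. */
#define roundup2(x, y)  (((x) + ((y) - 1)) & ~((y) - 1))

int
main(void)
{
        for (unsigned len = 0; len <= 8; len++) {
                unsigned padding = roundup2(len, 4) - len;

                printf("data_len %u -> padding %u (padded total %u)\n",
                    len, padding, len + padding);
        }
        return (0);
}
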
433 struct icl_conn *ic = &icc->ic; in icl_cxgbei_tx_main()
434 struct toepcb *toep = icc->toep; in icl_cxgbei_tx_main()
435 struct socket *so = ic->ic_socket; in icl_cxgbei_tx_main()
445 while (__predict_true(!ic->ic_disconnecting)) { in icl_cxgbei_tx_main()
446 while (STAILQ_EMPTY(&icc->sent_pdus)) { in icl_cxgbei_tx_main()
447 icc->tx_active = false; in icl_cxgbei_tx_main()
448 mtx_sleep(&icc->tx_active, ic->ic_lock, 0, "-", 0); in icl_cxgbei_tx_main()
449 if (__predict_false(ic->ic_disconnecting)) in icl_cxgbei_tx_main()
451 MPASS(icc->tx_active); in icl_cxgbei_tx_main()
454 STAILQ_SWAP(&icc->sent_pdus, &tx_pdus, icl_pdu); in icl_cxgbei_tx_main()
462 MPASS((m->m_pkthdr.len & 3) == 0); in icl_cxgbei_tx_main()
468 if (__predict_false(ic->ic_disconnecting) || in icl_cxgbei_tx_main()
469 __predict_false(ic->ic_socket == NULL)) { in icl_cxgbei_tx_main()
474 CURVNET_SET(toep->vnet); in icl_cxgbei_tx_main()
479 if (__predict_false(inp->inp_flags & INP_DROPPED) || in icl_cxgbei_tx_main()
480 __predict_false((toep->flags & TPF_ATTACHED) == 0)) { in icl_cxgbei_tx_main()
483 mbufq_concat(&toep->ulp_pduq, &mq); in icl_cxgbei_tx_main()
484 t4_push_pdus(icc->sc, toep, 0); in icl_cxgbei_tx_main()
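
The transmit thread sleeps on tx_active until a queued PDU wakes it, then moves the entire sent_pdus list into a local queue with STAILQ_SWAP and pushes the batch with the connection lock dropped. A hedged userland analogue of that queue hand-off, built on FreeBSD's sys/queue.h and pthreads (struct item, the pending queue, and the wakeup protocol below are illustrative, not the driver's):

#include <sys/queue.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct item {
        int                     id;
        STAILQ_ENTRY(item)      link;
};
STAILQ_HEAD(itemq, item);

static struct itemq     pending = STAILQ_HEAD_INITIALIZER(pending);
static pthread_mutex_t  lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t   active = PTHREAD_COND_INITIALIZER;
static bool             done;

static void *
tx_main(void *arg)
{
        struct itemq local;
        struct item *it;

        (void)arg;
        pthread_mutex_lock(&lock);
        for (;;) {
                while (STAILQ_EMPTY(&pending)) {
                        if (done)
                                goto out;
                        pthread_cond_wait(&active, &lock);
                }
                /* Take the whole list in O(1), as STAILQ_SWAP does above. */
                STAILQ_INIT(&local);
                STAILQ_SWAP(&pending, &local, item);
                pthread_mutex_unlock(&lock);

                /* Process the batch with the lock dropped. */
                while ((it = STAILQ_FIRST(&local)) != NULL) {
                        STAILQ_REMOVE_HEAD(&local, link);
                        printf("tx item %d\n", it->id);
                }
                pthread_mutex_lock(&lock);
        }
out:
        pthread_mutex_unlock(&lock);
        return (NULL);
}

int
main(void)
{
        static struct item items[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
        pthread_t tid;

        pthread_create(&tid, NULL, tx_main, NULL);
        pthread_mutex_lock(&lock);
        for (int i = 0; i < 3; i++)
                STAILQ_INSERT_TAIL(&pending, &items[i], link);
        done = true;            /* thread drains pending before exiting */
        pthread_cond_signal(&active);
        pthread_mutex_unlock(&lock);
        pthread_join(tid, NULL);
        return (0);
}
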
502 struct icl_conn *ic = &icc->ic; in icl_cxgbei_rx_main()
508 sb = &ic->ic_socket->so_rcv; in icl_cxgbei_rx_main()
510 while (__predict_true(!ic->ic_disconnecting)) { in icl_cxgbei_rx_main()
511 while (STAILQ_EMPTY(&icc->rcvd_pdus)) { in icl_cxgbei_rx_main()
512 icc->rx_active = false; in icl_cxgbei_rx_main()
513 mtx_sleep(&icc->rx_active, SOCKBUF_MTX(sb), 0, "-", 0); in icl_cxgbei_rx_main()
514 if (__predict_false(ic->ic_disconnecting)) in icl_cxgbei_rx_main()
516 MPASS(icc->rx_active); in icl_cxgbei_rx_main()
528 cantrcvmore = (sb->sb_state & SBS_CANTRCVMORE) != 0; in icl_cxgbei_rx_main()
530 STAILQ_SWAP(&icc->rcvd_pdus, &rx_pdus, icl_pdu); in icl_cxgbei_rx_main()
539 ic->ic_receive(ip); in icl_cxgbei_rx_main()
551 while (!icc->rx_exiting) in icl_cxgbei_rx_main()
552 mtx_sleep(&icc->rx_active, SOCKBUF_MTX(sb), 0, "-", 0); in icl_cxgbei_rx_main()
571 icp = m->m_ext.ext_arg1; in cxgbei_free_mext_pg()
572 if (atomic_fetchadd_int(&icp->ref_cnt, -1) == 1) in cxgbei_free_mext_pg()
573 icl_cxgbei_pdu_call_cb(&icp->ip); in cxgbei_free_mext_pg()
588 /* Fall back to non-jumbo mbufs. */ in cxgbei_getm()
596 m_tail->m_next = m; in cxgbei_getm()
599 len -= MJUM16BYTES; in cxgbei_getm()
612 m_tail->m_next = m; in cxgbei_getm()
629 MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE); in icl_cxgbei_conn_pdu_append_bio()
630 MPASS(ic == ip->ip_conn); in icl_cxgbei_conn_pdu_append_bio()
633 m_tail = ip->ip_data_mbuf; in icl_cxgbei_conn_pdu_append_bio()
635 for (; m_tail->m_next != NULL; m_tail = m_tail->m_next) in icl_cxgbei_conn_pdu_append_bio()
638 MPASS(bp->bio_flags & BIO_UNMAPPED); in icl_cxgbei_conn_pdu_append_bio()
639 if (offset < PAGE_SIZE - bp->bio_ma_offset) { in icl_cxgbei_conn_pdu_append_bio()
640 page_offset = bp->bio_ma_offset + offset; in icl_cxgbei_conn_pdu_append_bio()
643 offset -= PAGE_SIZE - bp->bio_ma_offset; in icl_cxgbei_conn_pdu_append_bio()
645 offset -= PAGE_SIZE; in icl_cxgbei_conn_pdu_append_bio()
657 atomic_add_int(&icp->ref_cnt, 1); in icl_cxgbei_conn_pdu_append_bio()
658 m->m_ext.ext_arg1 = icp; in icl_cxgbei_conn_pdu_append_bio()
659 m->m_epg_1st_off = page_offset; in icl_cxgbei_conn_pdu_append_bio()
662 todo = MIN(len, PAGE_SIZE - page_offset); in icl_cxgbei_conn_pdu_append_bio()
664 m->m_epg_pa[m->m_epg_npgs] = in icl_cxgbei_conn_pdu_append_bio()
665 VM_PAGE_TO_PHYS(bp->bio_ma[i]); in icl_cxgbei_conn_pdu_append_bio()
666 m->m_epg_npgs++; in icl_cxgbei_conn_pdu_append_bio()
667 m->m_epg_last_len = todo; in icl_cxgbei_conn_pdu_append_bio()
668 m->m_len += todo; in icl_cxgbei_conn_pdu_append_bio()
669 m->m_ext.ext_size += PAGE_SIZE; in icl_cxgbei_conn_pdu_append_bio()
672 if (m->m_epg_npgs == MBUF_PEXT_MAX_PGS) { in icl_cxgbei_conn_pdu_append_bio()
674 m_tail->m_next = m; in icl_cxgbei_conn_pdu_append_bio()
676 ip->ip_data_mbuf = m; in icl_cxgbei_conn_pdu_append_bio()
678 ip->ip_data_len += m->m_len; in icl_cxgbei_conn_pdu_append_bio()
683 len -= todo; in icl_cxgbei_conn_pdu_append_bio()
689 m_tail->m_next = m; in icl_cxgbei_conn_pdu_append_bio()
691 ip->ip_data_mbuf = m; in icl_cxgbei_conn_pdu_append_bio()
692 ip->ip_data_len += m->m_len; in icl_cxgbei_conn_pdu_append_bio()
701 if (ip->ip_data_mbuf == NULL) { in icl_cxgbei_conn_pdu_append_bio()
702 ip->ip_data_mbuf = m; in icl_cxgbei_conn_pdu_append_bio()
703 ip->ip_data_len = len; in icl_cxgbei_conn_pdu_append_bio()
705 m_tail->m_next = m; in icl_cxgbei_conn_pdu_append_bio()
706 ip->ip_data_len += len; in icl_cxgbei_conn_pdu_append_bio()
710 todo = MIN(len, PAGE_SIZE - page_offset); in icl_cxgbei_conn_pdu_append_bio()
712 mapped = pmap_map_io_transient(bp->bio_ma + i, &vaddr, 1, in icl_cxgbei_conn_pdu_append_bio()
716 mtodo = min(todo, M_SIZE(m) - m->m_len); in icl_cxgbei_conn_pdu_append_bio()
717 memcpy(mtod(m, char *) + m->m_len, (char *)vaddr + in icl_cxgbei_conn_pdu_append_bio()
719 m->m_len += mtodo; in icl_cxgbei_conn_pdu_append_bio()
720 if (m->m_len == M_SIZE(m)) in icl_cxgbei_conn_pdu_append_bio()
721 m = m->m_next; in icl_cxgbei_conn_pdu_append_bio()
723 todo -= mtodo; in icl_cxgbei_conn_pdu_append_bio()
727 pmap_unmap_io_transient(bp->bio_ma + i, &vaddr, 1, in icl_cxgbei_conn_pdu_append_bio()
731 len -= todo; in icl_cxgbei_conn_pdu_append_bio()
735 MPASS(ip->ip_data_len <= max(ic->ic_max_send_data_segment_length, in icl_cxgbei_conn_pdu_append_bio()
736 ic->ic_hw_isomax)); in icl_cxgbei_conn_pdu_append_bio()
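
icl_cxgbei_conn_pdu_append_bio has to translate a byte offset within an unmapped bio into a starting page index and an offset inside that page, accounting for bio_ma_offset (where the data begins in the first page). A hedged sketch of just that arithmetic (bio_page_index and the sample values are illustrative, not from the driver):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE       4096

/*
 * Given the data offset within the bio's first page (ma_offset) and a
 * byte offset into the bio, compute the page index and in-page offset,
 * mirroring the loop structure shown in the listing.
 */
static void
bio_page_index(size_t ma_offset, size_t offset, size_t *idx, size_t *page_off)
{
        size_t i;

        if (offset < PAGE_SIZE - ma_offset) {
                *idx = 0;
                *page_off = ma_offset + offset;
                return;
        }
        offset -= PAGE_SIZE - ma_offset;        /* rest of the first page */
        for (i = 1; offset >= PAGE_SIZE; i++)
                offset -= PAGE_SIZE;            /* skip whole pages */
        *idx = i;
        *page_off = offset;
}

int
main(void)
{
        size_t idx, off;

        bio_page_index(512, 100, &idx, &off);   /* still in page 0 */
        printf("page %zu offset %zu\n", idx, off);
        bio_page_index(512, 8000, &idx, &off);  /* spills into page 2 */
        printf("page %zu offset %zu\n", idx, off);
        return (0);
}
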
749 MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE); in icl_cxgbei_conn_pdu_append_data()
750 MPASS(ic == ip->ip_conn); in icl_cxgbei_conn_pdu_append_data()
753 m_tail = ip->ip_data_mbuf; in icl_cxgbei_conn_pdu_append_data()
755 for (; m_tail->m_next != NULL; m_tail = m_tail->m_next) in icl_cxgbei_conn_pdu_append_data()
765 m->m_flags |= M_RDONLY; in icl_cxgbei_conn_pdu_append_data()
766 m_extaddref(m, __DECONST(char *, addr), len, &icp->ref_cnt, in icl_cxgbei_conn_pdu_append_data()
768 m->m_len = len; in icl_cxgbei_conn_pdu_append_data()
769 if (ip->ip_data_mbuf == NULL) { in icl_cxgbei_conn_pdu_append_data()
770 ip->ip_data_mbuf = m; in icl_cxgbei_conn_pdu_append_data()
771 ip->ip_data_len = len; in icl_cxgbei_conn_pdu_append_data()
773 m_tail->m_next = m; in icl_cxgbei_conn_pdu_append_data()
774 m_tail = m_tail->m_next; in icl_cxgbei_conn_pdu_append_data()
775 ip->ip_data_len += len; in icl_cxgbei_conn_pdu_append_data()
785 if (ip->ip_data_mbuf == NULL) { in icl_cxgbei_conn_pdu_append_data()
786 ip->ip_data_mbuf = m; in icl_cxgbei_conn_pdu_append_data()
787 ip->ip_data_len = len; in icl_cxgbei_conn_pdu_append_data()
789 m_tail->m_next = m; in icl_cxgbei_conn_pdu_append_data()
790 ip->ip_data_len += len; in icl_cxgbei_conn_pdu_append_data()
793 for (; m != NULL; m = m->m_next) { in icl_cxgbei_conn_pdu_append_data()
794 m->m_len = min(len, M_SIZE(m)); in icl_cxgbei_conn_pdu_append_data()
795 memcpy(mtod(m, void *), src, m->m_len); in icl_cxgbei_conn_pdu_append_data()
796 src += m->m_len; in icl_cxgbei_conn_pdu_append_data()
797 len -= m->m_len; in icl_cxgbei_conn_pdu_append_data()
801 MPASS(ip->ip_data_len <= max(ic->ic_max_send_data_segment_length, in icl_cxgbei_conn_pdu_append_data()
802 ic->ic_hw_isomax)); in icl_cxgbei_conn_pdu_append_data()
817 if (icp->icp_flags & ICPF_RX_DDP) in icl_cxgbei_conn_pdu_get_bio()
820 MPASS(bp->bio_flags & BIO_UNMAPPED); in icl_cxgbei_conn_pdu_get_bio()
821 if (bio_off < PAGE_SIZE - bp->bio_ma_offset) { in icl_cxgbei_conn_pdu_get_bio()
822 page_offset = bp->bio_ma_offset + bio_off; in icl_cxgbei_conn_pdu_get_bio()
825 bio_off -= PAGE_SIZE - bp->bio_ma_offset; in icl_cxgbei_conn_pdu_get_bio()
827 bio_off -= PAGE_SIZE; in icl_cxgbei_conn_pdu_get_bio()
832 todo = MIN(len, PAGE_SIZE - page_offset); in icl_cxgbei_conn_pdu_get_bio()
834 mapped = pmap_map_io_transient(bp->bio_ma + i, &vaddr, 1, in icl_cxgbei_conn_pdu_get_bio()
836 m_copydata(ip->ip_data_mbuf, pdu_off, todo, (char *)vaddr + in icl_cxgbei_conn_pdu_get_bio()
839 pmap_unmap_io_transient(bp->bio_ma + i, &vaddr, 1, in icl_cxgbei_conn_pdu_get_bio()
844 len -= todo; in icl_cxgbei_conn_pdu_get_bio()
855 if (icp->icp_flags & ICPF_RX_DDP) in icl_cxgbei_conn_pdu_get_data()
857 m_copydata(ip->ip_data_mbuf, off, len, addr); in icl_cxgbei_conn_pdu_get_data()
872 struct socket *so = ic->ic_socket; in icl_cxgbei_conn_pdu_queue_cb()
874 MPASS(ic == ip->ip_conn); in icl_cxgbei_conn_pdu_queue_cb()
875 MPASS(ip->ip_bhs_mbuf != NULL); in icl_cxgbei_conn_pdu_queue_cb()
877 MPASS(ip->ip_ahs_mbuf == NULL && ip->ip_ahs_len == 0); in icl_cxgbei_conn_pdu_queue_cb()
881 icp->cb = cb; in icl_cxgbei_conn_pdu_queue_cb()
884 if (ic->ic_disconnecting || so == NULL || !sowriteable(so)) { in icl_cxgbei_conn_pdu_queue_cb()
889 STAILQ_INSERT_TAIL(&icc->sent_pdus, ip, ip_next); in icl_cxgbei_conn_pdu_queue_cb()
890 if (!icc->tx_active) { in icl_cxgbei_conn_pdu_queue_cb()
891 icc->tx_active = true; in icl_cxgbei_conn_pdu_queue_cb()
892 wakeup(&icc->tx_active); in icl_cxgbei_conn_pdu_queue_cb()
906 icc->icc_signature = CXGBEI_CONN_SIGNATURE; in icl_cxgbei_new_conn()
907 STAILQ_INIT(&icc->rcvd_pdus); in icl_cxgbei_new_conn()
908 STAILQ_INIT(&icc->sent_pdus); in icl_cxgbei_new_conn()
910 icc->cmp_table = hashinit(64, M_CXGBEI, &icc->cmp_hash_mask); in icl_cxgbei_new_conn()
911 mtx_init(&icc->cmp_lock, "cxgbei_cmp", NULL, MTX_DEF); in icl_cxgbei_new_conn()
913 ic = &icc->ic; in icl_cxgbei_new_conn()
914 ic->ic_lock = lock; in icl_cxgbei_new_conn()
917 refcount_init(&ic->ic_outstanding_pdus, 0); in icl_cxgbei_new_conn()
919 ic->ic_name = name; in icl_cxgbei_new_conn()
920 ic->ic_offload = "cxgbei"; in icl_cxgbei_new_conn()
921 ic->ic_unmapped = true; in icl_cxgbei_new_conn()
933 MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE); in icl_cxgbei_conn_free()
937 mtx_destroy(&icc->cmp_lock); in icl_cxgbei_conn_free()
938 hashdestroy(icc->cmp_table, M_CXGBEI, icc->cmp_hash_mask); in icl_cxgbei_conn_free()
956 SOCKBUF_LOCK(&so->so_snd); in icl_cxgbei_setsockopt()
957 so->so_snd.sb_flags |= SB_AUTOSIZE; in icl_cxgbei_setsockopt()
958 SOCKBUF_UNLOCK(&so->so_snd); in icl_cxgbei_setsockopt()
959 SOCKBUF_LOCK(&so->so_rcv); in icl_cxgbei_setsockopt()
960 so->so_rcv.sb_flags |= SB_AUTOSIZE; in icl_cxgbei_setsockopt()
961 SOCKBUF_UNLOCK(&so->so_rcv); in icl_cxgbei_setsockopt()
991 struct socket *so = fa->so; in find_offload_adapter()
992 struct tom_data *td = sc->tom_softc; in find_offload_adapter()
996 /* Non-TCP were filtered out earlier. */ in find_offload_adapter()
997 MPASS(so->so_proto->pr_protocol == IPPROTO_TCP); in find_offload_adapter()
999 if (fa->sc != NULL) in find_offload_adapter()
1007 if ((inp->inp_flags & INP_DROPPED) == 0) { in find_offload_adapter()
1009 if (tp->t_flags & TF_TOE && tp->tod == &td->tod) in find_offload_adapter()
1010 fa->sc = sc; /* Found. */ in find_offload_adapter()
1036 struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx]; in send_iscsi_flowc_wr()
1040 wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq); in send_iscsi_flowc_wr()
1046 memset(flowc, 0, wr->wr_len); in send_iscsi_flowc_wr()
1048 flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) | in send_iscsi_flowc_wr()
1050 flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) | in send_iscsi_flowc_wr()
1051 V_FW_WR_FLOWID(toep->tid)); in send_iscsi_flowc_wr()
1053 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX; in send_iscsi_flowc_wr()
1054 flowc->mnemval[0].val = htobe32(maxlen); in send_iscsi_flowc_wr()
1056 txsd->tx_credits = howmany(flowclen, 16); in send_iscsi_flowc_wr()
1057 txsd->plen = 0; in send_iscsi_flowc_wr()
1058 KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0, in send_iscsi_flowc_wr()
1059 ("%s: not enough credits (%d)", __func__, toep->tx_credits)); in send_iscsi_flowc_wr()
1060 toep->tx_credits -= txsd->tx_credits; in send_iscsi_flowc_wr()
1061 if (__predict_false(++toep->txsd_pidx == toep->txsd_total)) in send_iscsi_flowc_wr()
1062 toep->txsd_pidx = 0; in send_iscsi_flowc_wr()
1063 toep->txsd_avail--; in send_iscsi_flowc_wr()
1074 __func__, toep->tid, ulp_submode); in set_ulp_mode_iscsi()
1077 t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_ULP_TYPE, in set_ulp_mode_iscsi()
1082 t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS, val, val, 0, 0); in set_ulp_mode_iscsi()
1106 MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE); in icl_cxgbei_conn_handoff()
1116 if (fp->f_type != DTYPE_SOCKET) { in icl_cxgbei_conn_handoff()
1120 so = fp->f_data; in icl_cxgbei_conn_handoff()
1121 if (so->so_type != SOCK_STREAM || in icl_cxgbei_conn_handoff()
1122 so->so_proto->pr_protocol != IPPROTO_TCP) { in icl_cxgbei_conn_handoff()
1128 if (ic->ic_socket != NULL) { in icl_cxgbei_conn_handoff()
1133 ic->ic_disconnecting = false; in icl_cxgbei_conn_handoff()
1134 ic->ic_socket = so; in icl_cxgbei_conn_handoff()
1135 fp->f_ops = &badfileops; in icl_cxgbei_conn_handoff()
1136 fp->f_data = NULL; in icl_cxgbei_conn_handoff()
1148 icc->sc = fa.sc; in icl_cxgbei_conn_handoff()
1150 max_rx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_recv_data_segment_length; in icl_cxgbei_conn_handoff()
1151 max_tx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_send_data_segment_length; in icl_cxgbei_conn_handoff()
1152 if (ic->ic_header_crc32c) { in icl_cxgbei_conn_handoff()
1156 if (ic->ic_data_crc32c) { in icl_cxgbei_conn_handoff()
1164 if (inp->inp_flags & INP_DROPPED) { in icl_cxgbei_conn_handoff()
1173 MPASS(tp->t_flags & TF_TOE); in icl_cxgbei_conn_handoff()
1174 MPASS(tp->tod != NULL); in icl_cxgbei_conn_handoff()
1175 MPASS(tp->t_toe != NULL); in icl_cxgbei_conn_handoff()
1176 toep = tp->t_toe; in icl_cxgbei_conn_handoff()
1177 MPASS(toep->vi->adapter == icc->sc); in icl_cxgbei_conn_handoff()
1185 icc->toep = toep; in icl_cxgbei_conn_handoff()
1187 icc->ulp_submode = 0; in icl_cxgbei_conn_handoff()
1188 if (ic->ic_header_crc32c) in icl_cxgbei_conn_handoff()
1189 icc->ulp_submode |= ULP_CRC_HEADER; in icl_cxgbei_conn_handoff()
1190 if (ic->ic_data_crc32c) in icl_cxgbei_conn_handoff()
1191 icc->ulp_submode |= ULP_CRC_DATA; in icl_cxgbei_conn_handoff()
1193 if (icc->sc->tt.iso && chip_id(icc->sc) >= CHELSIO_T5 && in icl_cxgbei_conn_handoff()
1194 !is_memfree(icc->sc)) { in icl_cxgbei_conn_handoff()
1196 tp->t_maxseg); in icl_cxgbei_conn_handoff()
1198 ic->ic_hw_isomax = max_iso_pdus * in icl_cxgbei_conn_handoff()
1199 ic->ic_max_send_data_segment_length; in icl_cxgbei_conn_handoff()
1203 toep->params.ulp_mode = ULP_MODE_ISCSI; in icl_cxgbei_conn_handoff()
1204 toep->ulpcb = icc; in icl_cxgbei_conn_handoff()
1206 send_iscsi_flowc_wr(icc->sc, toep, in icl_cxgbei_conn_handoff()
1207 roundup(max_iso_pdus * max_tx_pdu_len, tp->t_maxseg)); in icl_cxgbei_conn_handoff()
1208 set_ulp_mode_iscsi(icc->sc, toep, icc->ulp_submode); in icl_cxgbei_conn_handoff()
1211 error = kthread_add(icl_cxgbei_tx_main, icc, NULL, &icc->tx_thread, 0, in icl_cxgbei_conn_handoff()
1212 0, "%stx (cxgbei)", ic->ic_name); in icl_cxgbei_conn_handoff()
1216 error = kthread_add(icl_cxgbei_rx_main, icc, NULL, &icc->rx_thread, 0, in icl_cxgbei_conn_handoff()
1217 0, "%srx (cxgbei)", ic->ic_name); in icl_cxgbei_conn_handoff()
1236 struct toepcb *toep = icc->toep; in icl_cxgbei_conn_close()
1238 MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE); in icl_cxgbei_conn_close()
1242 so = ic->ic_socket; in icl_cxgbei_conn_close()
1243 if (ic->ic_disconnecting || so == NULL) { in icl_cxgbei_conn_close()
1245 __func__, icc, ic->ic_disconnecting, so); in icl_cxgbei_conn_close()
1249 ic->ic_disconnecting = true; in icl_cxgbei_conn_close()
1252 KASSERT(ic->ic_outstanding_pdus == 0, in icl_cxgbei_conn_close()
1254 ic->ic_outstanding_pdus)); in icl_cxgbei_conn_close()
1257 CTR3(KTR_CXGBE, "%s: tid %d, icc %p", __func__, toep ? toep->tid : -1, in icl_cxgbei_conn_close()
1264 if (icc->tx_thread != NULL) { in icl_cxgbei_conn_close()
1265 wakeup(&icc->tx_active); in icl_cxgbei_conn_close()
1266 mtx_sleep(icc->tx_thread, ic->ic_lock, 0, "conclo", 0); in icl_cxgbei_conn_close()
1270 while (!STAILQ_EMPTY(&icc->sent_pdus)) { in icl_cxgbei_conn_close()
1271 ip = STAILQ_FIRST(&icc->sent_pdus); in icl_cxgbei_conn_close()
1272 STAILQ_REMOVE_HEAD(&icc->sent_pdus, ip_next); in icl_cxgbei_conn_close()
1278 sb = &so->so_rcv; in icl_cxgbei_conn_close()
1285 if (icc->rx_thread != NULL) { in icl_cxgbei_conn_close()
1286 icc->rx_exiting = true; in icl_cxgbei_conn_close()
1287 wakeup(&icc->rx_active); in icl_cxgbei_conn_close()
1288 mtx_sleep(icc->rx_thread, SOCKBUF_MTX(sb), 0, "conclo", 0); in icl_cxgbei_conn_close()
1294 while (!STAILQ_EMPTY(&icc->rcvd_pdus)) { in icl_cxgbei_conn_close()
1295 ip = STAILQ_FIRST(&icc->rcvd_pdus); in icl_cxgbei_conn_close()
1296 STAILQ_REMOVE_HEAD(&icc->rcvd_pdus, ip_next); in icl_cxgbei_conn_close()
1303 toep->ulpcb = NULL; in icl_cxgbei_conn_close()
1306 mbufq_drain(&toep->ulp_pduq); in icl_cxgbei_conn_close()
1310 * CPL to be received. If toep->inp is NULL, then in icl_cxgbei_conn_close()
1312 * due to the peer sending a RST). in icl_cxgbei_conn_close()
1314 if (toep->inp != NULL) { in icl_cxgbei_conn_close()
1316 toep->flags |= TPF_WAITING_FOR_FINAL; in icl_cxgbei_conn_close()
1323 ic->ic_socket = NULL; in icl_cxgbei_conn_close()
1327 * XXXNP: we should send RST instead of FIN when PDUs held in various in icl_cxgbei_conn_close()
1344 while ((toep->flags & TPF_WAITING_FOR_FINAL) != 0) in icl_cxgbei_conn_close()
1359 cmp->tt = tt; in cxgbei_insert_cmp()
1361 mtx_lock(&icc->cmp_lock); in cxgbei_insert_cmp()
1363 LIST_FOREACH(cmp2, &icc->cmp_table[TT_HASH(icc, tt)], link) { in cxgbei_insert_cmp()
1364 KASSERT(cmp2->tt != tt, ("%s: duplicate cmp", __func__)); in cxgbei_insert_cmp()
1367 LIST_INSERT_HEAD(&icc->cmp_table[TT_HASH(icc, tt)], cmp, link); in cxgbei_insert_cmp()
1368 mtx_unlock(&icc->cmp_lock); in cxgbei_insert_cmp()
1376 mtx_lock(&icc->cmp_lock); in cxgbei_find_cmp()
1377 LIST_FOREACH(cmp, &icc->cmp_table[TT_HASH(icc, tt)], link) { in cxgbei_find_cmp()
1378 if (cmp->tt == tt) in cxgbei_find_cmp()
1381 mtx_unlock(&icc->cmp_lock); in cxgbei_find_cmp()
1392 mtx_lock(&icc->cmp_lock); in cxgbei_rm_cmp()
1395 LIST_FOREACH(cmp2, &icc->cmp_table[TT_HASH(icc, cmp->tt)], link) { in cxgbei_rm_cmp()
1403 mtx_unlock(&icc->cmp_lock); in cxgbei_rm_cmp()
1411 struct toepcb *toep = icc->toep; in icl_cxgbei_conn_task_setup()
1412 struct adapter *sc = icc->sc; in icl_cxgbei_conn_task_setup()
1413 struct cxgbei_data *ci = sc->iscsi_ulp_softc; in icl_cxgbei_conn_task_setup()
1414 struct ppod_region *pr = &ci->pr; in icl_cxgbei_conn_task_setup()
1428 if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN || in icl_cxgbei_conn_task_setup()
1429 csio->dxfer_len < ci->ddp_threshold || ic->ic_disconnecting || in icl_cxgbei_conn_task_setup()
1430 ic->ic_socket == NULL) { in icl_cxgbei_conn_task_setup()
1438 itt = V_PPOD_TAG(itt) | pr->pr_invalid_bit; in icl_cxgbei_conn_task_setup()
1443 toep->ofld_rxq->rx_iscsi_ddp_setup_error, 1); in icl_cxgbei_conn_task_setup()
1456 prsv = &ddp->prsv; in icl_cxgbei_conn_task_setup()
1459 switch (csio->ccb_h.flags & CAM_DATA_MASK) { in icl_cxgbei_conn_task_setup()
1462 (struct bio *)csio->data_ptr, prsv); in icl_cxgbei_conn_task_setup()
1469 (struct bio *)csio->data_ptr, &mq); in icl_cxgbei_conn_task_setup()
1478 rc = t4_alloc_page_pods_for_buf(pr, (vm_offset_t)csio->data_ptr, in icl_cxgbei_conn_task_setup()
1479 csio->dxfer_len, prsv); in icl_cxgbei_conn_task_setup()
1486 (vm_offset_t)csio->data_ptr, csio->dxfer_len, &mq); in icl_cxgbei_conn_task_setup()
1501 * Do not get inp from toep->inp as the toepcb might have in icl_cxgbei_conn_task_setup()
1504 inp = sotoinpcb(ic->ic_socket); in icl_cxgbei_conn_task_setup()
1506 if ((inp->inp_flags & INP_DROPPED) != 0) { in icl_cxgbei_conn_task_setup()
1513 mbufq_concat(&toep->ulp_pduq, &mq); in icl_cxgbei_conn_task_setup()
1516 ddp->cmp.last_datasn = -1; in icl_cxgbei_conn_task_setup()
1517 cxgbei_insert_cmp(icc, &ddp->cmp, prsv->prsv_tag); in icl_cxgbei_conn_task_setup()
1518 *ittp = htobe32(prsv->prsv_tag); in icl_cxgbei_conn_task_setup()
1520 counter_u64_add(toep->ofld_rxq->rx_iscsi_ddp_setup_ok, 1); in icl_cxgbei_conn_task_setup()
1531 cxgbei_rm_cmp(ic_to_icc(ic), &ddp->cmp); in icl_cxgbei_conn_task_done()
1532 t4_free_page_pods(&ddp->prsv); in icl_cxgbei_conn_task_done()
1545 if (((vm_offset_t)sg[--entries].addr & 3U) != 0) in ddp_sgl_check()
1552 while (--entries >= 0) { in ddp_sgl_check()
1565 #define io_to_ddp_state(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr)
1572 struct toepcb *toep = icc->toep; in icl_cxgbei_conn_transfer_setup()
1573 struct ctl_scsiio *ctsio = &io->scsiio; in icl_cxgbei_conn_transfer_setup()
1574 struct adapter *sc = icc->sc; in icl_cxgbei_conn_transfer_setup()
1575 struct cxgbei_data *ci = sc->iscsi_ulp_softc; in icl_cxgbei_conn_transfer_setup()
1576 struct ppod_region *pr = &ci->pr; in icl_cxgbei_conn_transfer_setup()
1582 int sg_entries = ctsio->kern_sg_entries; in icl_cxgbei_conn_transfer_setup()
1590 if (ctsio->ext_data_filled == 0) { in icl_cxgbei_conn_transfer_setup()
1595 MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE); in icl_cxgbei_conn_transfer_setup()
1596 MPASS(ic == ip->ip_conn); in icl_cxgbei_conn_transfer_setup()
1597 MPASS(ip->ip_bhs_mbuf != NULL); in icl_cxgbei_conn_transfer_setup()
1610 xferlen = ctsio->kern_data_len; in icl_cxgbei_conn_transfer_setup()
1612 xferlen - first_burst < ci->ddp_threshold) { in icl_cxgbei_conn_transfer_setup()
1620 ttt = V_PPOD_TAG(ttt) | pr->pr_invalid_bit; in icl_cxgbei_conn_transfer_setup()
1625 toep->ofld_rxq->rx_iscsi_ddp_setup_error, 1); in icl_cxgbei_conn_transfer_setup()
1631 sgl->len = xferlen; in icl_cxgbei_conn_transfer_setup()
1632 sgl->addr = (void *)ctsio->kern_data_ptr; in icl_cxgbei_conn_transfer_setup()
1635 sgl = (void *)ctsio->kern_data_ptr; in icl_cxgbei_conn_transfer_setup()
1650 prsv = &ddp->prsv; in icl_cxgbei_conn_transfer_setup()
1669 * Do not get inp from toep->inp as the toepcb might in icl_cxgbei_conn_transfer_setup()
1673 if (ic->ic_disconnecting || ic->ic_socket == NULL) { in icl_cxgbei_conn_transfer_setup()
1680 inp = sotoinpcb(ic->ic_socket); in icl_cxgbei_conn_transfer_setup()
1683 if ((inp->inp_flags & INP_DROPPED) != 0) { in icl_cxgbei_conn_transfer_setup()
1690 mbufq_concat(&toep->ulp_pduq, &mq); in icl_cxgbei_conn_transfer_setup()
1693 ddp->cmp.next_buffer_offset = ctsio->kern_rel_offset + in icl_cxgbei_conn_transfer_setup()
1695 ddp->cmp.last_datasn = -1; in icl_cxgbei_conn_transfer_setup()
1696 cxgbei_insert_cmp(icc, &ddp->cmp, prsv->prsv_tag); in icl_cxgbei_conn_transfer_setup()
1697 *tttp = htobe32(prsv->prsv_tag); in icl_cxgbei_conn_transfer_setup()
1700 counter_u64_add(toep->ofld_rxq->rx_iscsi_ddp_setup_ok, 1); in icl_cxgbei_conn_transfer_setup()
1705 * In the middle of an I/O. A non-NULL page pod reservation indicates in icl_cxgbei_conn_transfer_setup()
1711 prsv = &ddp->prsv; in icl_cxgbei_conn_transfer_setup()
1713 alias = (prsv->prsv_tag & pr->pr_alias_mask) >> pr->pr_alias_shift; in icl_cxgbei_conn_transfer_setup()
1715 prsv->prsv_tag &= ~pr->pr_alias_mask; in icl_cxgbei_conn_transfer_setup()
1716 prsv->prsv_tag |= alias << pr->pr_alias_shift & pr->pr_alias_mask; in icl_cxgbei_conn_transfer_setup()
1718 ddp->cmp.last_datasn = -1; in icl_cxgbei_conn_transfer_setup()
1719 cxgbei_insert_cmp(icc, &ddp->cmp, prsv->prsv_tag); in icl_cxgbei_conn_transfer_setup()
1720 *tttp = htobe32(prsv->prsv_tag); in icl_cxgbei_conn_transfer_setup()
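
When a transfer is restarted mid-I/O, the existing page pod reservation is reused but the alias bits inside the tag are bumped so the new target transfer tag differs from the old one. A sketch of that bit manipulation with illustrative mask and shift values (the real values come from the ppod_region's pr_alias_mask / pr_alias_shift, which are not shown here):

#include <stdint.h>
#include <stdio.h>

/* Illustrative alias field layout; the driver reads these from its
 * ppod_region rather than from compile-time constants. */
#define ALIAS_SHIFT     28
#define ALIAS_MASK      (0xfu << ALIAS_SHIFT)

static uint32_t
bump_alias(uint32_t tag)
{
        uint32_t alias = (tag & ALIAS_MASK) >> ALIAS_SHIFT;

        alias++;                        /* masking below makes the field wrap */
        tag &= ~ALIAS_MASK;
        tag |= (alias << ALIAS_SHIFT) & ALIAS_MASK;
        return (tag);
}

int
main(void)
{
        uint32_t tag = 0x10001234;

        for (int i = 0; i < 3; i++) {
                tag = bump_alias(tag);
                printf("tag 0x%x\n", (unsigned)tag);
        }
        return (0);
}
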
1737 cxgbei_rm_cmp(ic_to_icc(ic), &ddp->cmp); in icl_cxgbei_conn_transfer_done()
1738 if (ctsio->kern_data_len == ctsio->ext_data_filled || in icl_cxgbei_conn_transfer_done()
1739 ic->ic_disconnecting) { in icl_cxgbei_conn_transfer_done()
1740 t4_free_page_pods(&ddp->prsv); in icl_cxgbei_conn_transfer_done()
1759 ci = sc->iscsi_ulp_softc; in cxgbei_limits()
1763 max_dsl = ci->max_rx_data_len; in cxgbei_limits()
1764 if (idl->idl_max_recv_data_segment_length > max_dsl) in cxgbei_limits()
1765 idl->idl_max_recv_data_segment_length = max_dsl; in cxgbei_limits()
1767 max_dsl = ci->max_tx_data_len; in cxgbei_limits()
1768 if (idl->idl_max_send_data_segment_length > max_dsl) in cxgbei_limits()
1769 idl->idl_max_send_data_segment_length = max_dsl; in cxgbei_limits()
1791 if (fp->f_type != DTYPE_SOCKET) { in cxgbei_limits_fd()
1795 so = fp->f_data; in cxgbei_limits_fd()
1796 if (so->so_type != SOCK_STREAM || in cxgbei_limits_fd()
1797 so->so_proto->pr_protocol != IPPROTO_TCP) { in cxgbei_limits_fd()
1818 ci = sc->iscsi_ulp_softc; in cxgbei_limits_fd()
1821 idl->idl_max_recv_data_segment_length = ci->max_rx_data_len; in cxgbei_limits_fd()
1822 idl->idl_max_send_data_segment_length = ci->max_tx_data_len; in cxgbei_limits_fd()
1836 idl->idl_max_recv_data_segment_length = (1 << 24) - 1; in icl_cxgbei_limits()
1837 idl->idl_max_send_data_segment_length = (1 << 24) - 1; in icl_cxgbei_limits()
1840 idl->idl_max_burst_length = max_burst_length; in icl_cxgbei_limits()
1841 idl->idl_first_burst_length = first_burst_length; in icl_cxgbei_limits()
1860 rc = icl_register("cxgbei", false, -100, icl_cxgbei_limits, in icl_cxgbei_mod_load()