1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
100 * reuse of buffers (avoiding the need to re-fault in pages, hold
104 * is a trade-off for performance.
106 * If an application ping-pongs two buffers for a connection via
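
The comment fragment above alludes to an application that alternates two receive buffers on one connection with aio_read(2), so the driver can keep both buffers' page pods cached. A minimal userspace sketch of that ping-pong pattern, assuming a connected socket and a placeholder buffer size (neither is taken from the driver):

#include <aio.h>
#include <errno.h>
#include <string.h>

#define NBUF    2
#define BUFSZ   (256 * 1024)            /* placeholder buffer size */

/*
 * Alternate two fixed buffers on one connected socket so that a DDP-capable
 * TOE can reuse the resources it set up for each buffer between requests.
 */
static ssize_t
pingpong_read(int sock)
{
        static char bufs[NBUF][BUFSZ];
        struct aiocb cbs[NBUF];
        ssize_t total = 0, n;
        int idx;

        memset(cbs, 0, sizeof(cbs));
        for (idx = 0; idx < NBUF; idx++) {
                cbs[idx].aio_fildes = sock;
                cbs[idx].aio_buf = bufs[idx];
                cbs[idx].aio_nbytes = BUFSZ;
                if (aio_read(&cbs[idx]) != 0)
                        return (-1);
        }
        for (idx = 0;; idx ^= 1) {
                const struct aiocb *list[1] = { &cbs[idx] };

                /* Wait for the older request, consume it, re-arm the same buffer. */
                while (aio_error(&cbs[idx]) == EINPROGRESS)
                        (void)aio_suspend(list, 1, NULL);
                n = aio_return(&cbs[idx]);
                if (n <= 0)
                        return (n == 0 ? total : -1);
                total += n;             /* data is now in bufs[idx] */
                if (aio_read(&cbs[idx]) != 0)
                        return (-1);
        }
}
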
118 if (ps->prsv.prsv_nppods > 0) in free_pageset()
119 t4_free_page_pods(&ps->prsv); in free_pageset()
121 for (i = 0; i < ps->npages; i++) { in free_pageset()
122 p = ps->pages[i]; in free_pageset()
141 if (ps->vm) in ddp_free_orphan_pagesets()
142 vmspace_free(ps->vm); in ddp_free_orphan_pagesets()
154 if (!(toep->ddp.flags & DDP_DEAD)) { in recycle_pageset()
155 KASSERT(toep->ddp.cached_count + toep->ddp.active_count < in recycle_pageset()
156 nitems(toep->ddp.db), ("too many wired pagesets")); in recycle_pageset()
157 TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link); in recycle_pageset()
158 toep->ddp.cached_count++; in recycle_pageset()
160 free_pageset(toep->td, ps); in recycle_pageset()
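
recycle_pageset() parks a still-wired pageset at the head of a per-connection TAILQ cache; the hold_aio() fragments further down show lookups walking the same list and eviction of the coldest entry via TAILQ_LAST. A simplified userspace model of that most-recently-used cache using the same <sys/queue.h> macros (the entry type, helper names, and fixed capacity are illustrative only):

#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

#define CACHE_MAX 2     /* illustrative cap, in the spirit of nitems(toep->ddp.db) */

struct entry {
        int id;
        TAILQ_ENTRY(entry) link;
};
TAILQ_HEAD(entryq, entry);

static struct entryq cache = TAILQ_HEAD_INITIALIZER(cache);
static int cached_count;

/* Most recently used entries go to the head; evict from the tail when full. */
static void
cache_put(struct entry *e)
{
        struct entry *victim;

        if (cached_count == CACHE_MAX) {
                victim = TAILQ_LAST(&cache, entryq);
                TAILQ_REMOVE(&cache, victim, link);
                cached_count--;
                free(victim);
        }
        TAILQ_INSERT_HEAD(&cache, e, link);
        cached_count++;
}

/* Remove and return a matching cached entry, or NULL if it is not cached. */
static struct entry *
cache_get(int id)
{
        struct entry *e;

        TAILQ_FOREACH(e, &cache, link) {
                if (e->id == id) {
                        TAILQ_REMOVE(&cache, e, link);
                        cached_count--;
                        return (e);
                }
        }
        return (NULL);
}

int
main(void)
{
        for (int i = 0; i < 4; i++) {
                struct entry *e = cache_get(i);

                if (e == NULL) {
                        e = calloc(1, sizeof(*e));
                        e->id = i;
                }
                cache_put(e);   /* entry i is now the most recently used */
        }
        printf("cached_count = %d\n", cached_count);    /* capped at CACHE_MAX */
        return (0);
}
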
173 copied = job->aio_received; in ddp_complete_one()
177 aio_complete(job, -1, error); in ddp_complete_one()
183 t4_free_page_pods(&drb->prsv); in free_ddp_rcv_buffer()
184 free(drb->buf, M_CXGBE); in free_ddp_rcv_buffer()
186 counter_u64_add(toep->ofld_rxq->ddp_buffer_free, 1); in free_ddp_rcv_buffer()
194 if (!(toep->ddp.flags & DDP_DEAD) && in recycle_ddp_rcv_buffer()
195 toep->ddp.cached_count < t4_ddp_rcvbuf_cache) { in recycle_ddp_rcv_buffer()
196 TAILQ_INSERT_HEAD(&toep->ddp.cached_buffers, drb, link); in recycle_ddp_rcv_buffer()
197 toep->ddp.cached_count++; in recycle_ddp_rcv_buffer()
211 if (!TAILQ_EMPTY(&toep->ddp.cached_buffers)) { in alloc_cached_ddp_rcv_buffer()
212 drb = TAILQ_FIRST(&toep->ddp.cached_buffers); in alloc_cached_ddp_rcv_buffer()
213 TAILQ_REMOVE(&toep->ddp.cached_buffers, drb, link); in alloc_cached_ddp_rcv_buffer()
214 toep->ddp.cached_count--; in alloc_cached_ddp_rcv_buffer()
215 counter_u64_add(toep->ofld_rxq->ddp_buffer_reuse, 1); in alloc_cached_ddp_rcv_buffer()
225 struct tom_data *td = toep->td; in alloc_ddp_rcv_buffer()
234 drb->buf = contigmalloc(t4_ddp_rcvbuf_len, M_CXGBE, how, 0, ~0, in alloc_ddp_rcv_buffer()
236 if (drb->buf == NULL) { in alloc_ddp_rcv_buffer()
240 drb->len = t4_ddp_rcvbuf_len; in alloc_ddp_rcv_buffer()
241 drb->refs = 1; in alloc_ddp_rcv_buffer()
243 error = t4_alloc_page_pods_for_rcvbuf(&td->pr, drb); in alloc_ddp_rcv_buffer()
245 free(drb->buf, M_CXGBE); in alloc_ddp_rcv_buffer()
250 error = t4_write_page_pods_for_rcvbuf(sc, toep->ctrlq, toep->tid, drb); in alloc_ddp_rcv_buffer()
252 t4_free_page_pods(&drb->prsv); in alloc_ddp_rcv_buffer()
253 free(drb->buf, M_CXGBE); in alloc_ddp_rcv_buffer()
259 counter_u64_add(toep->ofld_rxq->ddp_buffer_alloc, 1); in alloc_ddp_rcv_buffer()
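
alloc_ddp_rcv_buffer() acquires its resources in order (the contiguous buffer, the page-pod reservation, the page-pod write to adapter memory) and releases the earlier ones whenever a later step fails. A stripped-down sketch of that unwind order, with hypothetical stand-ins for the three steps rather than the real driver calls:

#include <stdlib.h>

/* Hypothetical stand-ins for contigmalloc(), t4_alloc_page_pods_for_rcvbuf()
 * and t4_write_page_pods_for_rcvbuf(); each returns 0 on success. */
static int reserve_ppods(void *buf) { (void)buf; return (0); }
static int write_ppods(void *buf) { (void)buf; return (0); }
static void release_ppods(void *buf) { (void)buf; }

static void *
alloc_rcv_buffer(size_t len)
{
        void *buf;

        buf = malloc(len);
        if (buf == NULL)
                return (NULL);
        if (reserve_ppods(buf) != 0) {          /* step 2 failed: undo step 1 */
                free(buf);
                return (NULL);
        }
        if (write_ppods(buf) != 0) {            /* step 3 failed: undo steps 2 and 1 */
                release_ppods(buf);
                free(buf);
                return (NULL);
        }
        return (buf);
}
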
264 free_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db) in free_ddp_buffer() argument
266 if ((toep->ddp.flags & DDP_RCVBUF) != 0) { in free_ddp_buffer()
267 if (db->drb != NULL) in free_ddp_buffer()
268 free_ddp_rcv_buffer(toep, db->drb); in free_ddp_buffer()
270 db->drb = NULL; in free_ddp_buffer()
275 if (db->job) { in free_ddp_buffer()
277 * XXX: If we are un-offloading the socket then we in free_ddp_buffer()
282 if (!aio_clear_cancel_function(db->job)) in free_ddp_buffer()
283 ddp_complete_one(db->job, 0); in free_ddp_buffer()
285 db->job = NULL; in free_ddp_buffer()
289 if (db->ps) { in free_ddp_buffer()
290 free_pageset(toep->td, db->ps); in free_ddp_buffer()
292 db->ps = NULL; in free_ddp_buffer()
301 toep->ddp.flags = DDP_OK; in ddp_init_toep()
302 toep->ddp.active_id = -1; in ddp_init_toep()
303 mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF); in ddp_init_toep()
304 mtx_init(&toep->ddp.cache_lock, "t4 ddp cache", NULL, MTX_DEF); in ddp_init_toep()
311 mtx_destroy(&toep->ddp.lock); in ddp_uninit_toep()
312 mtx_destroy(&toep->ddp.cache_lock); in ddp_uninit_toep()
324 toep->ddp.flags |= DDP_DEAD; in release_ddp_resources()
326 for (i = 0; i < nitems(toep->ddp.db); i++) { in release_ddp_resources()
327 free_ddp_buffer(toep, &toep->ddp.db[i]); in release_ddp_resources()
329 if ((toep->ddp.flags & DDP_AIO) != 0) { in release_ddp_resources()
330 while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) { in release_ddp_resources()
331 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); in release_ddp_resources()
332 free_pageset(toep->td, ps); in release_ddp_resources()
336 if ((toep->ddp.flags & DDP_RCVBUF) != 0) { in release_ddp_resources()
338 while ((drb = TAILQ_FIRST(&toep->ddp.cached_buffers)) != NULL) { in release_ddp_resources()
339 TAILQ_REMOVE(&toep->ddp.cached_buffers, drb, link); in release_ddp_resources()
353 MPASS((toep->ddp.flags & (DDP_TASK_ACTIVE | DDP_DEAD)) != DDP_TASK_ACTIVE); in ddp_assert_empty()
354 for (i = 0; i < nitems(toep->ddp.db); i++) { in ddp_assert_empty()
355 if ((toep->ddp.flags & DDP_AIO) != 0) { in ddp_assert_empty()
356 MPASS(toep->ddp.db[i].job == NULL); in ddp_assert_empty()
357 MPASS(toep->ddp.db[i].ps == NULL); in ddp_assert_empty()
359 MPASS(toep->ddp.db[i].drb == NULL); in ddp_assert_empty()
361 if ((toep->ddp.flags & DDP_AIO) != 0) { in ddp_assert_empty()
362 MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets)); in ddp_assert_empty()
363 MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq)); in ddp_assert_empty()
365 if ((toep->ddp.flags & DDP_RCVBUF) != 0) in ddp_assert_empty()
366 MPASS(TAILQ_EMPTY(&toep->ddp.cached_buffers)); in ddp_assert_empty()
371 complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db, in complete_ddp_buffer() argument
377 toep->ddp.active_count--; in complete_ddp_buffer()
378 if (toep->ddp.active_id == db_idx) { in complete_ddp_buffer()
379 if (toep->ddp.active_count == 0) { in complete_ddp_buffer()
380 if ((toep->ddp.flags & DDP_AIO) != 0) in complete_ddp_buffer()
381 KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL, in complete_ddp_buffer()
384 KASSERT(toep->ddp.db[db_idx ^ 1].drb == NULL, in complete_ddp_buffer()
386 toep->ddp.active_id = -1; in complete_ddp_buffer()
388 toep->ddp.active_id ^= 1; in complete_ddp_buffer()
391 toep->tid, toep->ddp.active_id); in complete_ddp_buffer()
394 KASSERT(toep->ddp.active_count != 0 && in complete_ddp_buffer()
395 toep->ddp.active_id != -1, in complete_ddp_buffer()
399 if ((toep->ddp.flags & DDP_AIO) != 0) { in complete_ddp_buffer()
400 db->cancel_pending = 0; in complete_ddp_buffer()
401 db->job = NULL; in complete_ddp_buffer()
402 recycle_pageset(toep, db->ps); in complete_ddp_buffer()
403 db->ps = NULL; in complete_ddp_buffer()
405 drb = db->drb; in complete_ddp_buffer()
406 if (atomic_fetchadd_int(&drb->refs, -1) == 1) in complete_ddp_buffer()
408 db->drb = NULL; in complete_ddp_buffer()
409 db->placed = 0; in complete_ddp_buffer()
413 KASSERT(toep->ddp.flags & db_flag, in complete_ddp_buffer()
415 __func__, toep, toep->ddp.flags)); in complete_ddp_buffer()
416 toep->ddp.flags &= ~db_flag; in complete_ddp_buffer()
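
complete_ddp_buffer() maintains the two-slot ping-pong state: active_count tracks how many of the db[0]/db[1] slots are outstanding and active_id names the slot the chip fills next. A simplified userspace model of just that bookkeeping, omitting the slot contents and the DDP_BUF*_ACTIVE flag handling:

#include <assert.h>
#include <stdio.h>

#define NSLOTS 2

struct conn {
        int active_count;       /* slots currently handed to the hardware */
        int active_id;          /* slot being filled next, -1 if none */
        int busy[NSLOTS];
};

static void
queue_slot(struct conn *c, int idx)
{
        assert(!c->busy[idx]);
        c->busy[idx] = 1;
        c->active_count++;
        if (c->active_count == 1)
                c->active_id = idx;     /* first outstanding slot becomes active */
}

static void
complete_slot(struct conn *c, int idx)
{
        assert(c->busy[idx]);
        c->busy[idx] = 0;
        c->active_count--;
        if (c->active_id == idx) {
                if (c->active_count == 0)
                        c->active_id = -1;      /* nothing left outstanding */
                else
                        c->active_id ^= 1;      /* the other slot is now active */
        }
}

int
main(void)
{
        struct conn c = { .active_id = -1 };

        queue_slot(&c, 0);
        queue_slot(&c, 1);
        complete_slot(&c, 0);
        printf("active_id = %d\n", c.active_id);        /* 1 */
        complete_slot(&c, 1);
        printf("active_id = %d\n", c.active_id);        /* -1: idle */
        return (0);
}
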
423 struct toepcb *toep = m->m_ext.ext_arg1; in ddp_rcv_mbuf_done()
424 struct ddp_rcv_buffer *drb = m->m_ext.ext_arg2; in ddp_rcv_mbuf_done()
432 struct inpcb *inp = toep->inp; in queue_ddp_rcvbuf_mbuf()
434 struct ddp_buffer *db; in queue_ddp_rcvbuf_mbuf() local
443 m->m_pkthdr.rcvif = toep->vi->ifp; in queue_ddp_rcvbuf_mbuf()
445 db = &toep->ddp.db[db_idx]; in queue_ddp_rcvbuf_mbuf()
446 drb = db->drb; in queue_ddp_rcvbuf_mbuf()
447 m_extaddref(m, (char *)drb->buf + db->placed, len, &drb->refs, in queue_ddp_rcvbuf_mbuf()
449 m->m_pkthdr.len = len; in queue_ddp_rcvbuf_mbuf()
450 m->m_len = len; in queue_ddp_rcvbuf_mbuf()
452 sb = &inp->inp_socket->so_rcv; in queue_ddp_rcvbuf_mbuf()
456 db->placed += len; in queue_ddp_rcvbuf_mbuf()
457 toep->ofld_rxq->rx_toe_ddp_octets += len; in queue_ddp_rcvbuf_mbuf()
464 struct inpcb *inp = toep->inp; in insert_ddp_data()
466 struct ddp_buffer *db; in insert_ddp_data() local
479 ddp_rcvbuf = (toep->ddp.flags & DDP_RCVBUF) != 0; in insert_ddp_data()
480 tp->rcv_nxt += n; in insert_ddp_data()
482 KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__)); in insert_ddp_data()
483 tp->rcv_wnd -= n; in insert_ddp_data()
487 while (toep->ddp.active_count > 0) { in insert_ddp_data()
488 MPASS(toep->ddp.active_id != -1); in insert_ddp_data()
489 db_idx = toep->ddp.active_id; in insert_ddp_data()
493 MPASS((toep->ddp.flags & db_flag) != 0); in insert_ddp_data()
494 db = &toep->ddp.db[db_idx]; in insert_ddp_data()
497 if (placed > db->drb->len - db->placed) in insert_ddp_data()
498 placed = db->drb->len - db->placed; in insert_ddp_data()
501 complete_ddp_buffer(toep, db, db_idx); in insert_ddp_data()
502 n -= placed; in insert_ddp_data()
505 job = db->job; in insert_ddp_data()
506 copied = job->aio_received; in insert_ddp_data()
508 if (placed > job->uaiocb.aio_nbytes - copied) in insert_ddp_data()
509 placed = job->uaiocb.aio_nbytes - copied; in insert_ddp_data()
511 job->msgrcv = 1; in insert_ddp_data()
512 toep->ofld_rxq->rx_aio_ddp_jobs++; in insert_ddp_data()
514 toep->ofld_rxq->rx_aio_ddp_octets += placed; in insert_ddp_data()
521 job->aio_received += placed; in insert_ddp_data()
529 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list); in insert_ddp_data()
530 toep->ddp.waiting_count++; in insert_ddp_data()
533 n -= placed; in insert_ddp_data()
534 complete_ddp_buffer(toep, db, db_idx); in insert_ddp_data()
554 ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0)); in mk_rx_data_ack_ulp()
555 ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16)); in mk_rx_data_ack_ulp()
558 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); in mk_rx_data_ack_ulp()
559 ulpsc->len = htobe32(sizeof(*req)); in mk_rx_data_ack_ulp()
562 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid)); in mk_rx_data_ack_ulp()
563 req->credit_dack = htobe32(F_RX_MODULATE_RX); in mk_rx_data_ack_ulp()
567 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); in mk_rx_data_ack_ulp()
568 ulpsc->len = htobe32(0); in mk_rx_data_ack_ulp()
598 wr = alloc_wrqe(wrlen, toep->ctrlq); in mk_update_tcb_for_ddp()
606 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, in mk_update_tcb_for_ddp()
609 V_TCB_RX_DDP_BUF0_TAG(prsv->prsv_tag)); in mk_update_tcb_for_ddp()
613 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, in mk_update_tcb_for_ddp()
620 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, in mk_update_tcb_for_ddp()
628 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_RX_DDP_FLAGS, in mk_update_tcb_for_ddp()
643 struct inpcb *inp = toep->inp; in handle_ddp_data_aio()
644 struct ddp_buffer *db; in handle_ddp_data_aio() local
658 sb = &so->so_rcv; in handle_ddp_data_aio()
661 KASSERT(toep->ddp.active_id == db_idx, in handle_ddp_data_aio()
663 toep->ddp.active_id, toep->tid)); in handle_ddp_data_aio()
664 db = &toep->ddp.db[db_idx]; in handle_ddp_data_aio()
665 job = db->job; in handle_ddp_data_aio()
667 if (__predict_false(inp->inp_flags & INP_DROPPED)) { in handle_ddp_data_aio()
673 __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags); in handle_ddp_data_aio()
687 * For RX_DATA_DDP, len might be non-zero, but it is only the in handle_ddp_data_aio()
693 len += be32toh(rcv_nxt) - tp->rcv_nxt; in handle_ddp_data_aio()
694 tp->rcv_nxt += len; in handle_ddp_data_aio()
695 tp->t_rcvtime = ticks; in handle_ddp_data_aio()
697 KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__)); in handle_ddp_data_aio()
698 tp->rcv_wnd -= len; in handle_ddp_data_aio()
702 toep->tid, db_idx, len, report); in handle_ddp_data_aio()
706 MPASS(toep->vnet == so->so_vnet); in handle_ddp_data_aio()
707 CURVNET_SET(toep->vnet); in handle_ddp_data_aio()
709 if (sb->sb_flags & SB_AUTOSIZE && in handle_ddp_data_aio()
711 sb->sb_hiwat < V_tcp_autorcvbuf_max && in handle_ddp_data_aio()
713 struct adapter *sc = td_adapter(toep->td); in handle_ddp_data_aio()
714 unsigned int hiwat = sb->sb_hiwat; in handle_ddp_data_aio()
715 unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc, in handle_ddp_data_aio()
719 sb->sb_flags &= ~SB_AUTOSIZE; in handle_ddp_data_aio()
724 job->msgrcv = 1; in handle_ddp_data_aio()
725 toep->ofld_rxq->rx_aio_ddp_jobs++; in handle_ddp_data_aio()
726 toep->ofld_rxq->rx_aio_ddp_octets += len; in handle_ddp_data_aio()
727 if (db->cancel_pending) { in handle_ddp_data_aio()
732 job->aio_received += len; in handle_ddp_data_aio()
739 job->aio_received += len; in handle_ddp_data_aio()
741 copied = job->aio_received; in handle_ddp_data_aio()
745 __func__, toep->tid, job, copied, len); in handle_ddp_data_aio()
748 t4_rcvd(&toep->td->tod, tp); in handle_ddp_data_aio()
752 complete_ddp_buffer(toep, db, db_idx); in handle_ddp_data_aio()
753 if (toep->ddp.waiting_count > 0) in handle_ddp_data_aio()
765 struct adapter *sc = td_adapter(toep->td); in queue_ddp_rcvbuf()
766 struct ddp_buffer *db; in queue_ddp_rcvbuf() local
773 KASSERT((toep->ddp.flags & DDP_DEAD) == 0, ("%s: DDP_DEAD", __func__)); in queue_ddp_rcvbuf()
774 KASSERT(toep->ddp.active_count < nitems(toep->ddp.db), in queue_ddp_rcvbuf()
778 if (toep->ddp.db[0].drb == NULL) { in queue_ddp_rcvbuf()
781 MPASS(toep->ddp.db[1].drb == NULL); in queue_ddp_rcvbuf()
809 MPASS((toep->ddp.flags & buf_flag) == 0); in queue_ddp_rcvbuf()
810 if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) { in queue_ddp_rcvbuf()
812 MPASS(toep->ddp.active_id == -1); in queue_ddp_rcvbuf()
813 MPASS(toep->ddp.active_count == 0); in queue_ddp_rcvbuf()
822 wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &drb->prsv, 0, drb->len, in queue_ddp_rcvbuf()
833 toep->tid, db_idx, ddp_flags, ddp_flags_mask); in queue_ddp_rcvbuf()
839 drb->refs = 1; in queue_ddp_rcvbuf()
841 /* Give the chip the go-ahead. */ in queue_ddp_rcvbuf()
843 db = &toep->ddp.db[db_idx]; in queue_ddp_rcvbuf()
844 db->drb = drb; in queue_ddp_rcvbuf()
845 toep->ddp.flags |= buf_flag; in queue_ddp_rcvbuf()
846 toep->ddp.active_count++; in queue_ddp_rcvbuf()
847 if (toep->ddp.active_count == 1) { in queue_ddp_rcvbuf()
848 MPASS(toep->ddp.active_id == -1); in queue_ddp_rcvbuf()
849 toep->ddp.active_id = db_idx; in queue_ddp_rcvbuf()
851 toep->ddp.active_id); in queue_ddp_rcvbuf()
861 struct inpcb *inp = toep->inp; in handle_ddp_data_rcvbuf()
865 struct ddp_buffer *db; in handle_ddp_data_rcvbuf() local
876 sb = &so->so_rcv; in handle_ddp_data_rcvbuf()
879 KASSERT(toep->ddp.active_id == db_idx, in handle_ddp_data_rcvbuf()
881 toep->ddp.active_id, toep->tid)); in handle_ddp_data_rcvbuf()
882 db = &toep->ddp.db[db_idx]; in handle_ddp_data_rcvbuf()
884 if (__predict_false(inp->inp_flags & INP_DROPPED)) { in handle_ddp_data_rcvbuf()
890 __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags); in handle_ddp_data_rcvbuf()
892 complete_ddp_buffer(toep, db, db_idx); in handle_ddp_data_rcvbuf()
904 * For RX_DATA_DDP, len might be non-zero, but it is only the in handle_ddp_data_rcvbuf()
910 len += be32toh(rcv_nxt) - tp->rcv_nxt; in handle_ddp_data_rcvbuf()
911 tp->rcv_nxt += len; in handle_ddp_data_rcvbuf()
912 tp->t_rcvtime = ticks; in handle_ddp_data_rcvbuf()
914 KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__)); in handle_ddp_data_rcvbuf()
915 tp->rcv_wnd -= len; in handle_ddp_data_rcvbuf()
919 toep->tid, db_idx, len, report); in handle_ddp_data_rcvbuf()
923 MPASS(toep->vnet == so->so_vnet); in handle_ddp_data_rcvbuf()
924 CURVNET_SET(toep->vnet); in handle_ddp_data_rcvbuf()
926 if (sb->sb_flags & SB_AUTOSIZE && in handle_ddp_data_rcvbuf()
928 sb->sb_hiwat < V_tcp_autorcvbuf_max && in handle_ddp_data_rcvbuf()
930 struct adapter *sc = td_adapter(toep->td); in handle_ddp_data_rcvbuf()
931 unsigned int hiwat = sb->sb_hiwat; in handle_ddp_data_rcvbuf()
932 unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc, in handle_ddp_data_rcvbuf()
936 sb->sb_flags &= ~SB_AUTOSIZE; in handle_ddp_data_rcvbuf()
941 t4_rcvd_locked(&toep->td->tod, tp); in handle_ddp_data_rcvbuf()
948 complete_ddp_buffer(toep, db, db_idx); in handle_ddp_data_rcvbuf()
950 KASSERT(db->placed < db->drb->len, in handle_ddp_data_rcvbuf()
953 if (toep->ddp.active_count != nitems(toep->ddp.db)) { in handle_ddp_data_rcvbuf()
975 if ((toep->ddp.flags & DDP_RCVBUF) != 0) in handle_ddp_data()
986 if ((toep->ddp.flags & DDP_RCVBUF) != 0) { in handle_ddp_indicate()
995 MPASS(toep->ddp.active_count == 0); in handle_ddp_indicate()
996 MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0); in handle_ddp_indicate()
997 if (toep->ddp.waiting_count == 0) { in handle_ddp_indicate()
1007 toep->tid, toep->ddp.waiting_count); in handle_ddp_indicate()
1016 struct adapter *sc = iq->adapter; in do_ddp_tcb_rpl()
1022 struct ddp_buffer *db; in do_ddp_tcb_rpl() local
1026 if (cpl->status != CPL_ERR_NONE) in do_ddp_tcb_rpl()
1027 panic("XXX: tcp_rpl failed: %d", cpl->status); in do_ddp_tcb_rpl()
1030 inp = toep->inp; in do_ddp_tcb_rpl()
1031 switch (cpl->cookie) { in do_ddp_tcb_rpl()
1037 KASSERT((toep->ddp.flags & DDP_AIO) != 0, in do_ddp_tcb_rpl()
1039 db_idx = G_COOKIE(cpl->cookie) - CPL_COOKIE_DDP0; in do_ddp_tcb_rpl()
1040 MPASS(db_idx < nitems(toep->ddp.db)); in do_ddp_tcb_rpl()
1043 db = &toep->ddp.db[db_idx]; in do_ddp_tcb_rpl()
1049 MPASS(db != NULL); in do_ddp_tcb_rpl()
1050 MPASS(db->job != NULL); in do_ddp_tcb_rpl()
1051 MPASS(db->cancel_pending); in do_ddp_tcb_rpl()
1063 job = db->job; in do_ddp_tcb_rpl()
1064 copied = job->aio_received; in do_ddp_tcb_rpl()
1072 t4_rcvd(&toep->td->tod, intotcpcb(inp)); in do_ddp_tcb_rpl()
1075 complete_ddp_buffer(toep, db, db_idx); in do_ddp_tcb_rpl()
1076 if (toep->ddp.waiting_count > 0) in do_ddp_tcb_rpl()
1083 G_WORD(cpl->cookie), G_COOKIE(cpl->cookie)); in do_ddp_tcb_rpl()
1092 struct socket *so = toep->inp->inp_socket; in handle_ddp_close()
1093 struct sockbuf *sb = &so->so_rcv; in handle_ddp_close()
1094 struct ddp_buffer *db; in handle_ddp_close() local
1104 INP_WLOCK_ASSERT(toep->inp); in handle_ddp_close()
1107 ddp_rcvbuf = (toep->ddp.flags & DDP_RCVBUF) != 0; in handle_ddp_close()
1109 /* - 1 is to ignore the byte for FIN */ in handle_ddp_close()
1110 len = be32toh(rcv_nxt) - tp->rcv_nxt - 1; in handle_ddp_close()
1111 tp->rcv_nxt += len; in handle_ddp_close()
1114 toep->tid, len); in handle_ddp_close()
1115 while (toep->ddp.active_count > 0) { in handle_ddp_close()
1116 MPASS(toep->ddp.active_id != -1); in handle_ddp_close()
1117 db_idx = toep->ddp.active_id; in handle_ddp_close()
1121 MPASS((toep->ddp.flags & db_flag) != 0); in handle_ddp_close()
1122 db = &toep->ddp.db[db_idx]; in handle_ddp_close()
1125 if (placed > db->drb->len - db->placed) in handle_ddp_close()
1126 placed = db->drb->len - db->placed; in handle_ddp_close()
1133 complete_ddp_buffer(toep, db, db_idx); in handle_ddp_close()
1134 len -= placed; in handle_ddp_close()
1137 job = db->job; in handle_ddp_close()
1138 copied = job->aio_received; in handle_ddp_close()
1140 if (placed > job->uaiocb.aio_nbytes - copied) in handle_ddp_close()
1141 placed = job->uaiocb.aio_nbytes - copied; in handle_ddp_close()
1143 job->msgrcv = 1; in handle_ddp_close()
1144 toep->ofld_rxq->rx_aio_ddp_jobs++; in handle_ddp_close()
1146 toep->ofld_rxq->rx_aio_ddp_octets += placed; in handle_ddp_close()
1153 job->aio_received += placed; in handle_ddp_close()
1156 __func__, toep->tid, db_idx, placed); in handle_ddp_close()
1159 len -= placed; in handle_ddp_close()
1160 complete_ddp_buffer(toep, db, db_idx); in handle_ddp_close()
1164 if ((toep->ddp.flags & DDP_AIO) != 0) in handle_ddp_close()
1178 struct adapter *sc = iq->adapter; in do_rx_data_ddp()
1185 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); in do_rx_data_ddp()
1186 KASSERT(!(toep->flags & TPF_SYNQE), in do_rx_data_ddp()
1189 vld = be32toh(cpl->ddpvld); in do_rx_data_ddp()
1200 handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len)); in do_rx_data_ddp()
1209 struct adapter *sc = iq->adapter; in do_rx_ddp_complete()
1215 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); in do_rx_ddp_complete()
1216 KASSERT(!(toep->flags & TPF_SYNQE), in do_rx_ddp_complete()
1219 handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0); in do_rx_ddp_complete()
1227 struct adapter *sc = toep->vi->adapter; in set_ddp_ulp_mode()
1233 if (!sc->tt.ddp) in set_ddp_ulp_mode()
1253 wr = alloc_wrqe(len, toep->ctrlq); in set_ddp_ulp_mode()
1257 CTR(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); in set_ddp_ulp_mode()
1267 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26, in set_ddp_ulp_mode()
1271 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 28, in set_ddp_ulp_mode()
1275 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30, in set_ddp_ulp_mode()
1279 toep->params.ulp_mode = ULP_MODE_TCPDDP; in set_ddp_ulp_mode()
1280 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_TYPE, in set_ddp_ulp_mode()
1285 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS, in set_ddp_ulp_mode()
1300 KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK, in enable_ddp()
1302 __func__, toep, toep->ddp.flags)); in enable_ddp()
1305 __func__, toep->tid, time_uptime); in enable_ddp()
1308 if ((toep->ddp.flags & DDP_AIO) != 0) in enable_ddp()
1312 toep->ddp.flags |= DDP_SC_REQ; in enable_ddp()
1313 t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_RX_DDP_FLAGS, in enable_ddp()
1317 t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS, in enable_ddp()
1349 return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES)); in pages_to_nppods()
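
pages_to_nppods() first converts a count of PAGE_SIZE pages into a count of (larger) DDP pages by shifting, then rounds up to whole page pods. A worked example of the arithmetic; PAGE_SHIFT = 12 and four DDP-page addresses per pod are illustrative assumptions, not values read from the driver headers:

#include <stdio.h>

#define PAGE_SHIFT      12      /* 4KB VM pages (illustrative) */
#define PPOD_PAGES      4       /* DDP-page addresses per pod (illustrative) */
#define howmany(x, y)   (((x) + ((y) - 1)) / (y))

static int
pages_to_nppods(int npages, int ddp_page_shift)
{
        return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES));
}

int
main(void)
{
        /* A 1MB buffer is 256 4KB pages.  With 64KB DDP pages (shift 16) that
         * is 256 >> 4 = 16 DDP pages, i.e. howmany(16, 4) = 4 page pods. */
        printf("%d\n", pages_to_nppods(256, 16));       /* 4 */

        /* The same buffer described with 4KB DDP pages needs 64 pods. */
        printf("%d\n", pages_to_nppods(256, 12));       /* 64 */
        return (0);
}
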
1358 if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT, in alloc_page_pods()
1363 CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d", in alloc_page_pods()
1364 __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask, in alloc_page_pods()
1365 nppods, 1 << pr->pr_page_shift[pgsz_idx]); in alloc_page_pods()
1373 MPASS((addr & pr->pr_tag_mask) == addr); in alloc_page_pods()
1374 MPASS((addr & pr->pr_invalid_bit) == 0); in alloc_page_pods()
1376 prsv->prsv_pr = pr; in alloc_page_pods()
1377 prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr; in alloc_page_pods()
1378 prsv->prsv_nppods = nppods; in alloc_page_pods()
1399 while (i < npages - 1 && in t4_alloc_page_pods_for_vmpages()
1407 if (hcf < (1 << pr->pr_page_shift[1])) { in t4_alloc_page_pods_for_vmpages()
1413 #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) in t4_alloc_page_pods_for_vmpages()
1415 for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { in t4_alloc_page_pods_for_vmpages()
1424 nppods = pages_to_nppods(npages, pr->pr_page_shift[idx]); in t4_alloc_page_pods_for_vmpages()
1427 MPASS(prsv->prsv_nppods > 0); in t4_alloc_page_pods_for_vmpages()
1435 struct ppod_reservation *prsv = &ps->prsv; in t4_alloc_page_pods_for_ps()
1437 KASSERT(prsv->prsv_nppods == 0, in t4_alloc_page_pods_for_ps()
1440 return (t4_alloc_page_pods_for_vmpages(pr, ps->pages, ps->npages, in t4_alloc_page_pods_for_ps()
1449 MPASS(bp->bio_flags & BIO_UNMAPPED); in t4_alloc_page_pods_for_bio()
1451 return (t4_alloc_page_pods_for_vmpages(pr, bp->bio_ma, bp->bio_ma_n, in t4_alloc_page_pods_for_bio()
1474 end_pva = trunc_page(buf + len - 1); in t4_alloc_page_pods_for_buf()
1486 if (hcf < (1 << pr->pr_page_shift[1])) { in t4_alloc_page_pods_for_buf()
1492 #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) in t4_alloc_page_pods_for_buf()
1494 for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { in t4_alloc_page_pods_for_buf()
1504 npages += (end_pva - start_pva) >> pr->pr_page_shift[idx]; in t4_alloc_page_pods_for_buf()
1508 MPASS(prsv->prsv_nppods > 0); in t4_alloc_page_pods_for_buf()
1517 struct ppod_reservation *prsv = &drb->prsv; in t4_alloc_page_pods_for_rcvbuf()
1519 KASSERT(prsv->prsv_nppods == 0, in t4_alloc_page_pods_for_rcvbuf()
1522 return (t4_alloc_page_pods_for_buf(pr, (vm_offset_t)drb->buf, drb->len, in t4_alloc_page_pods_for_rcvbuf()
1546 for (i = entries - 1; i >= 0; i--) { in t4_alloc_page_pods_for_sgl()
1548 buf = (vm_offset_t)sge->addr; in t4_alloc_page_pods_for_sgl()
1549 len = sge->len; in t4_alloc_page_pods_for_sgl()
1551 end_pva = trunc_page(buf + len - 1); in t4_alloc_page_pods_for_sgl()
1564 if (hcf < (1 << pr->pr_page_shift[1])) { in t4_alloc_page_pods_for_sgl()
1570 #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) in t4_alloc_page_pods_for_sgl()
1572 for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { in t4_alloc_page_pods_for_sgl()
1582 while (entries--) { in t4_alloc_page_pods_for_sgl()
1584 start_pva = trunc_page((vm_offset_t)sgl->addr); in t4_alloc_page_pods_for_sgl()
1585 end_pva = trunc_page((vm_offset_t)sgl->addr + sgl->len - 1); in t4_alloc_page_pods_for_sgl()
1586 npages += (end_pva - start_pva) >> pr->pr_page_shift[idx]; in t4_alloc_page_pods_for_sgl()
1592 MPASS(prsv->prsv_nppods > 0); in t4_alloc_page_pods_for_sgl()
1599 struct ppod_region *pr = prsv->prsv_pr; in t4_free_page_pods()
1603 MPASS(prsv->prsv_nppods != 0); in t4_free_page_pods()
1605 addr = prsv->prsv_tag & pr->pr_tag_mask; in t4_free_page_pods()
1606 MPASS((addr & pr->pr_invalid_bit) == 0); in t4_free_page_pods()
1609 CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__, in t4_free_page_pods()
1610 pr->pr_arena, addr, prsv->prsv_nppods); in t4_free_page_pods()
1613 vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods)); in t4_free_page_pods()
1614 prsv->prsv_nppods = 0; in t4_free_page_pods()
1629 uint32_t cmd; in t4_write_page_pods_for_ps() local
1630 struct ppod_reservation *prsv = &ps->prsv; in t4_write_page_pods_for_ps()
1631 struct ppod_region *pr = prsv->prsv_pr; in t4_write_page_pods_for_ps()
1634 KASSERT(!(ps->flags & PS_PPODS_WRITTEN), in t4_write_page_pods_for_ps()
1636 MPASS(prsv->prsv_nppods > 0); in t4_write_page_pods_for_ps()
1638 cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); in t4_write_page_pods_for_ps()
1640 cmd |= htobe32(F_ULP_MEMIO_ORDER); in t4_write_page_pods_for_ps()
1642 cmd |= htobe32(F_T5_ULP_MEMIO_IMM); in t4_write_page_pods_for_ps()
1643 ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; in t4_write_page_pods_for_ps()
1644 ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); in t4_write_page_pods_for_ps()
1645 for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { in t4_write_page_pods_for_ps()
1647 n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); in t4_write_page_pods_for_ps()
1657 ulpmc->cmd = cmd; in t4_write_page_pods_for_ps()
1658 ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); in t4_write_page_pods_for_ps()
1659 ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); in t4_write_page_pods_for_ps()
1660 ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); in t4_write_page_pods_for_ps()
1663 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); in t4_write_page_pods_for_ps()
1664 ulpsc->len = htobe32(chunk); in t4_write_page_pods_for_ps()
1668 ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | in t4_write_page_pods_for_ps()
1669 V_PPOD_TID(tid) | prsv->prsv_tag); in t4_write_page_pods_for_ps()
1670 ppod->len_offset = htobe64(V_PPOD_LEN(ps->len) | in t4_write_page_pods_for_ps()
1671 V_PPOD_OFST(ps->offset)); in t4_write_page_pods_for_ps()
1672 ppod->rsvd = 0; in t4_write_page_pods_for_ps()
1674 for (k = 0; k < nitems(ppod->addr); k++) { in t4_write_page_pods_for_ps()
1675 if (idx < ps->npages) { in t4_write_page_pods_for_ps()
1676 pa = VM_PAGE_TO_PHYS(ps->pages[idx]); in t4_write_page_pods_for_ps()
1677 ppod->addr[k] = htobe64(pa); in t4_write_page_pods_for_ps()
1680 ppod->addr[k] = 0; in t4_write_page_pods_for_ps()
1683 "%s: tid %d ppod[%d]->addr[%d] = %p", in t4_write_page_pods_for_ps()
1685 be64toh(ppod->addr[k])); in t4_write_page_pods_for_ps()
1693 ps->flags |= PS_PPODS_WRITTEN; in t4_write_page_pods_for_ps()
1708 uint32_t cmd; in t4_write_page_pods_for_rcvbuf() local
1709 struct ppod_reservation *prsv = &drb->prsv; in t4_write_page_pods_for_rcvbuf()
1710 struct ppod_region *pr = prsv->prsv_pr; in t4_write_page_pods_for_rcvbuf()
1714 MPASS(prsv->prsv_nppods > 0); in t4_write_page_pods_for_rcvbuf()
1716 cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); in t4_write_page_pods_for_rcvbuf()
1718 cmd |= htobe32(F_ULP_MEMIO_ORDER); in t4_write_page_pods_for_rcvbuf()
1720 cmd |= htobe32(F_T5_ULP_MEMIO_IMM); in t4_write_page_pods_for_rcvbuf()
1721 ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; in t4_write_page_pods_for_rcvbuf()
1722 offset = (uintptr_t)drb->buf & PAGE_MASK; in t4_write_page_pods_for_rcvbuf()
1723 ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); in t4_write_page_pods_for_rcvbuf()
1724 pva = trunc_page((uintptr_t)drb->buf); in t4_write_page_pods_for_rcvbuf()
1725 end_pva = trunc_page((uintptr_t)drb->buf + drb->len - 1); in t4_write_page_pods_for_rcvbuf()
1726 for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { in t4_write_page_pods_for_rcvbuf()
1728 n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); in t4_write_page_pods_for_rcvbuf()
1739 ulpmc->cmd = cmd; in t4_write_page_pods_for_rcvbuf()
1740 ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); in t4_write_page_pods_for_rcvbuf()
1741 ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); in t4_write_page_pods_for_rcvbuf()
1742 ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); in t4_write_page_pods_for_rcvbuf()
1745 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); in t4_write_page_pods_for_rcvbuf()
1746 ulpsc->len = htobe32(chunk); in t4_write_page_pods_for_rcvbuf()
1750 ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | in t4_write_page_pods_for_rcvbuf()
1751 V_PPOD_TID(tid) | prsv->prsv_tag); in t4_write_page_pods_for_rcvbuf()
1752 ppod->len_offset = htobe64(V_PPOD_LEN(drb->len) | in t4_write_page_pods_for_rcvbuf()
1754 ppod->rsvd = 0; in t4_write_page_pods_for_rcvbuf()
1756 for (k = 0; k < nitems(ppod->addr); k++) { in t4_write_page_pods_for_rcvbuf()
1758 ppod->addr[k] = 0; in t4_write_page_pods_for_rcvbuf()
1761 ppod->addr[k] = htobe64(pa); in t4_write_page_pods_for_rcvbuf()
1766 "%s: tid %d ppod[%d]->addr[%d] = %p", in t4_write_page_pods_for_rcvbuf()
1768 be64toh(ppod->addr[k])); in t4_write_page_pods_for_rcvbuf()
1777 pva -= ddp_pgsz; in t4_write_page_pods_for_rcvbuf()
1801 m->m_pkthdr.len = len; in alloc_raw_wr_mbuf()
1802 m->m_len = len; in alloc_raw_wr_mbuf()
1816 uint32_t cmd; in t4_write_page_pods_for_bio() local
1817 struct ppod_region *pr = prsv->prsv_pr; in t4_write_page_pods_for_bio()
1821 MPASS(bp->bio_flags & BIO_UNMAPPED); in t4_write_page_pods_for_bio()
1823 cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); in t4_write_page_pods_for_bio()
1825 cmd |= htobe32(F_ULP_MEMIO_ORDER); in t4_write_page_pods_for_bio()
1827 cmd |= htobe32(F_T5_ULP_MEMIO_IMM); in t4_write_page_pods_for_bio()
1828 ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; in t4_write_page_pods_for_bio()
1829 ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); in t4_write_page_pods_for_bio()
1830 for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { in t4_write_page_pods_for_bio()
1833 n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); in t4_write_page_pods_for_bio()
1843 INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); in t4_write_page_pods_for_bio()
1844 ulpmc->cmd = cmd; in t4_write_page_pods_for_bio()
1845 ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); in t4_write_page_pods_for_bio()
1846 ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); in t4_write_page_pods_for_bio()
1847 ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); in t4_write_page_pods_for_bio()
1850 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); in t4_write_page_pods_for_bio()
1851 ulpsc->len = htobe32(chunk); in t4_write_page_pods_for_bio()
1855 ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | in t4_write_page_pods_for_bio()
1856 V_PPOD_TID(toep->tid) | in t4_write_page_pods_for_bio()
1857 (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); in t4_write_page_pods_for_bio()
1858 ppod->len_offset = htobe64(V_PPOD_LEN(bp->bio_bcount) | in t4_write_page_pods_for_bio()
1859 V_PPOD_OFST(bp->bio_ma_offset)); in t4_write_page_pods_for_bio()
1860 ppod->rsvd = 0; in t4_write_page_pods_for_bio()
1862 for (k = 0; k < nitems(ppod->addr); k++) { in t4_write_page_pods_for_bio()
1863 if (idx < bp->bio_ma_n) { in t4_write_page_pods_for_bio()
1864 pa = VM_PAGE_TO_PHYS(bp->bio_ma[idx]); in t4_write_page_pods_for_bio()
1865 ppod->addr[k] = htobe64(pa); in t4_write_page_pods_for_bio()
1868 ppod->addr[k] = 0; in t4_write_page_pods_for_bio()
1871 "%s: tid %d ppod[%d]->addr[%d] = %p", in t4_write_page_pods_for_bio()
1872 __func__, toep->tid, i, k, in t4_write_page_pods_for_bio()
1873 be64toh(ppod->addr[k])); in t4_write_page_pods_for_bio()
1894 uint32_t cmd; in t4_write_page_pods_for_buf() local
1895 struct ppod_region *pr = prsv->prsv_pr; in t4_write_page_pods_for_buf()
1900 cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); in t4_write_page_pods_for_buf()
1902 cmd |= htobe32(F_ULP_MEMIO_ORDER); in t4_write_page_pods_for_buf()
1904 cmd |= htobe32(F_T5_ULP_MEMIO_IMM); in t4_write_page_pods_for_buf()
1905 ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; in t4_write_page_pods_for_buf()
1907 ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); in t4_write_page_pods_for_buf()
1909 end_pva = trunc_page(buf + buflen - 1); in t4_write_page_pods_for_buf()
1910 for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { in t4_write_page_pods_for_buf()
1913 n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); in t4_write_page_pods_for_buf()
1923 INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); in t4_write_page_pods_for_buf()
1924 ulpmc->cmd = cmd; in t4_write_page_pods_for_buf()
1925 ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); in t4_write_page_pods_for_buf()
1926 ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); in t4_write_page_pods_for_buf()
1927 ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); in t4_write_page_pods_for_buf()
1930 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); in t4_write_page_pods_for_buf()
1931 ulpsc->len = htobe32(chunk); in t4_write_page_pods_for_buf()
1935 ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | in t4_write_page_pods_for_buf()
1936 V_PPOD_TID(toep->tid) | in t4_write_page_pods_for_buf()
1937 (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); in t4_write_page_pods_for_buf()
1938 ppod->len_offset = htobe64(V_PPOD_LEN(buflen) | in t4_write_page_pods_for_buf()
1940 ppod->rsvd = 0; in t4_write_page_pods_for_buf()
1942 for (k = 0; k < nitems(ppod->addr); k++) { in t4_write_page_pods_for_buf()
1944 ppod->addr[k] = 0; in t4_write_page_pods_for_buf()
1947 ppod->addr[k] = htobe64(pa); in t4_write_page_pods_for_buf()
1952 "%s: tid %d ppod[%d]->addr[%d] = %p", in t4_write_page_pods_for_buf()
1953 __func__, toep->tid, i, k, in t4_write_page_pods_for_buf()
1954 be64toh(ppod->addr[k])); in t4_write_page_pods_for_buf()
1963 pva -= ddp_pgsz; in t4_write_page_pods_for_buf()
1984 uint32_t cmd; in t4_write_page_pods_for_sgl() local
1985 struct ppod_region *pr = prsv->prsv_pr; in t4_write_page_pods_for_sgl()
1992 cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); in t4_write_page_pods_for_sgl()
1994 cmd |= htobe32(F_ULP_MEMIO_ORDER); in t4_write_page_pods_for_sgl()
1996 cmd |= htobe32(F_T5_ULP_MEMIO_IMM); in t4_write_page_pods_for_sgl()
1997 ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; in t4_write_page_pods_for_sgl()
1998 offset = (vm_offset_t)sgl->addr & PAGE_MASK; in t4_write_page_pods_for_sgl()
1999 ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); in t4_write_page_pods_for_sgl()
2000 pva = trunc_page((vm_offset_t)sgl->addr); in t4_write_page_pods_for_sgl()
2001 for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { in t4_write_page_pods_for_sgl()
2004 n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); in t4_write_page_pods_for_sgl()
2014 INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); in t4_write_page_pods_for_sgl()
2015 ulpmc->cmd = cmd; in t4_write_page_pods_for_sgl()
2016 ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); in t4_write_page_pods_for_sgl()
2017 ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); in t4_write_page_pods_for_sgl()
2018 ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); in t4_write_page_pods_for_sgl()
2021 ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); in t4_write_page_pods_for_sgl()
2022 ulpsc->len = htobe32(chunk); in t4_write_page_pods_for_sgl()
2026 ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | in t4_write_page_pods_for_sgl()
2027 V_PPOD_TID(toep->tid) | in t4_write_page_pods_for_sgl()
2028 (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); in t4_write_page_pods_for_sgl()
2029 ppod->len_offset = htobe64(V_PPOD_LEN(xferlen) | in t4_write_page_pods_for_sgl()
2031 ppod->rsvd = 0; in t4_write_page_pods_for_sgl()
2033 for (k = 0; k < nitems(ppod->addr); k++) { in t4_write_page_pods_for_sgl()
2036 ppod->addr[k] = htobe64(pa); in t4_write_page_pods_for_sgl()
2038 ppod->addr[k] = 0; in t4_write_page_pods_for_sgl()
2042 "%s: tid %d ppod[%d]->addr[%d] = %p", in t4_write_page_pods_for_sgl()
2043 __func__, toep->tid, i, k, in t4_write_page_pods_for_sgl()
2044 be64toh(ppod->addr[k])); in t4_write_page_pods_for_sgl()
2052 if (k + 1 == nitems(ppod->addr)) in t4_write_page_pods_for_sgl()
2063 if (sg_offset == sgl->len) { in t4_write_page_pods_for_sgl()
2068 entries--; in t4_write_page_pods_for_sgl()
2073 (vm_offset_t)sgl->addr); in t4_write_page_pods_for_sgl()
2090 struct tom_data *td = sc->tom_softc; in prep_pageset()
2092 if (ps->prsv.prsv_nppods == 0 && in prep_pageset()
2093 t4_alloc_page_pods_for_ps(&td->pr, ps) != 0) { in prep_pageset()
2096 if (!(ps->flags & PS_PPODS_WRITTEN) && in prep_pageset()
2097 t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) { in prep_pageset()
2111 MPASS(r->size > 0); in t4_init_ppod_region()
2113 pr->pr_start = r->start; in t4_init_ppod_region()
2114 pr->pr_len = r->size; in t4_init_ppod_region()
2115 pr->pr_page_shift[0] = 12 + G_HPZ0(psz); in t4_init_ppod_region()
2116 pr->pr_page_shift[1] = 12 + G_HPZ1(psz); in t4_init_ppod_region()
2117 pr->pr_page_shift[2] = 12 + G_HPZ2(psz); in t4_init_ppod_region()
2118 pr->pr_page_shift[3] = 12 + G_HPZ3(psz); in t4_init_ppod_region()
2120 /* The SGL -> page pod algorithm requires the sizes to be in order. */ in t4_init_ppod_region()
2121 for (i = 1; i < nitems(pr->pr_page_shift); i++) { in t4_init_ppod_region()
2122 if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1]) in t4_init_ppod_region()
2126 pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG); in t4_init_ppod_region()
2127 pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask; in t4_init_ppod_region()
2128 if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0) in t4_init_ppod_region()
2130 pr->pr_alias_shift = fls(pr->pr_tag_mask); in t4_init_ppod_region()
2131 pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1); in t4_init_ppod_region()
2133 pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0, in t4_init_ppod_region()
2135 if (pr->pr_arena == NULL) in t4_init_ppod_region()
2147 if (pr->pr_arena) in t4_free_ppod_region()
2148 vmem_destroy(pr->pr_arena); in t4_free_ppod_region()
2157 if (ps->start != start || ps->npages != npages || in pscmp()
2158 ps->offset != pgoff || ps->len != len) in pscmp()
2161 return (ps->vm != vm || ps->vm_timestamp != vm->vm_map.timestamp); in pscmp()
2180 vm = job->userproc->p_vmspace; in hold_aio()
2181 map = &vm->vm_map; in hold_aio()
2182 start = (uintptr_t)job->uaiocb.aio_buf; in hold_aio()
2184 end = round_page(start + job->uaiocb.aio_nbytes); in hold_aio()
2187 if (end - start > MAX_DDP_BUFFER_SIZE) { in hold_aio()
2199 __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes, in hold_aio()
2200 (unsigned long)(end - (start + pgoff))); in hold_aio()
2201 job->uaiocb.aio_nbytes = end - (start + pgoff); in hold_aio()
2206 n = atop(end - start); in hold_aio()
2211 TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) { in hold_aio()
2213 job->uaiocb.aio_nbytes) == 0) { in hold_aio()
2214 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); in hold_aio()
2215 toep->ddp.cached_count--; in hold_aio()
2225 KASSERT(toep->ddp.active_count + toep->ddp.cached_count <= in hold_aio()
2226 nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__)); in hold_aio()
2227 if (toep->ddp.active_count + toep->ddp.cached_count == in hold_aio()
2228 nitems(toep->ddp.db)) { in hold_aio()
2229 KASSERT(toep->ddp.cached_count > 0, in hold_aio()
2231 ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq); in hold_aio()
2232 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); in hold_aio()
2233 toep->ddp.cached_count--; in hold_aio()
2234 free_pageset(toep->td, ps); in hold_aio()
2241 ps->pages = (vm_page_t *)(ps + 1); in hold_aio()
2242 ps->vm_timestamp = map->timestamp; in hold_aio()
2243 ps->npages = vm_fault_quick_hold_pages(map, start, end - start, in hold_aio()
2244 VM_PROT_WRITE, ps->pages, n); in hold_aio()
2247 if (ps->npages < 0) { in hold_aio()
2252 KASSERT(ps->npages == n, ("hold_aio: page count mismatch: %d vs %d", in hold_aio()
2253 ps->npages, n)); in hold_aio()
2255 ps->offset = pgoff; in hold_aio()
2256 ps->len = job->uaiocb.aio_nbytes; in hold_aio()
2257 refcount_acquire(&vm->vm_refcnt); in hold_aio()
2258 ps->vm = vm; in hold_aio()
2259 ps->start = start; in hold_aio()
2262 __func__, toep->tid, ps, job, ps->npages); in hold_aio()
2273 KASSERT((toep->ddp.flags & DDP_AIO) != 0, ("%s: DDP_RCVBUF", __func__)); in ddp_complete_all()
2274 while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) { in ddp_complete_all()
2275 job = TAILQ_FIRST(&toep->ddp.aiojobq); in ddp_complete_all()
2276 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); in ddp_complete_all()
2277 toep->ddp.waiting_count--; in ddp_complete_all()
2293 copied = job->aio_received; in aio_ddp_cancel_one()
2310 if (!(toep->ddp.flags & DDP_DEAD) && in aio_ddp_requeue_one()
2312 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list); in aio_ddp_requeue_one()
2313 toep->ddp.waiting_count++; in aio_ddp_requeue_one()
2321 struct adapter *sc = td_adapter(toep->td); in aio_ddp_requeue()
2326 struct ddp_buffer *db; in aio_ddp_requeue() local
2337 if (toep->ddp.flags & DDP_DEAD) { in aio_ddp_requeue()
2338 MPASS(toep->ddp.waiting_count == 0); in aio_ddp_requeue()
2339 MPASS(toep->ddp.active_count == 0); in aio_ddp_requeue()
2343 if (toep->ddp.waiting_count == 0 || in aio_ddp_requeue()
2344 toep->ddp.active_count == nitems(toep->ddp.db)) { in aio_ddp_requeue()
2348 job = TAILQ_FIRST(&toep->ddp.aiojobq); in aio_ddp_requeue()
2349 so = job->fd_file->f_data; in aio_ddp_requeue()
2350 sb = &so->so_rcv; in aio_ddp_requeue()
2354 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { in aio_ddp_requeue()
2360 KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0, in aio_ddp_requeue()
2365 if (so->so_error && sbavail(sb) == 0) { in aio_ddp_requeue()
2366 toep->ddp.waiting_count--; in aio_ddp_requeue()
2367 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); in aio_ddp_requeue()
2378 copied = job->aio_received; in aio_ddp_requeue()
2384 error = so->so_error; in aio_ddp_requeue()
2385 so->so_error = 0; in aio_ddp_requeue()
2387 aio_complete(job, -1, error); in aio_ddp_requeue()
2396 if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) { in aio_ddp_requeue()
2398 if (toep->ddp.active_count != 0) in aio_ddp_requeue()
2408 if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) { in aio_ddp_requeue()
2422 if ((toep->ddp.flags & DDP_SC_REQ) == 0) in aio_ddp_requeue()
2432 if (toep->ddp.queueing != NULL) in aio_ddp_requeue()
2436 toep->ddp.waiting_count--; in aio_ddp_requeue()
2437 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); in aio_ddp_requeue()
2440 toep->ddp.queueing = job; in aio_ddp_requeue()
2446 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2451 if (so->so_error && sbavail(sb) == 0) { in aio_ddp_requeue()
2452 copied = job->aio_received; in aio_ddp_requeue()
2457 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2461 error = so->so_error; in aio_ddp_requeue()
2462 so->so_error = 0; in aio_ddp_requeue()
2465 aio_complete(job, -1, error); in aio_ddp_requeue()
2466 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2470 if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) { in aio_ddp_requeue()
2473 if (toep->ddp.active_count != 0) { in aio_ddp_requeue()
2480 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2485 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2494 MPASS(!(toep->ddp.flags & DDP_DEAD)); in aio_ddp_requeue()
2502 offset = ps->offset + job->aio_received; in aio_ddp_requeue()
2503 MPASS(job->aio_received <= job->uaiocb.aio_nbytes); in aio_ddp_requeue()
2504 resid = job->uaiocb.aio_nbytes - job->aio_received; in aio_ddp_requeue()
2505 m = sb->sb_mb; in aio_ddp_requeue()
2506 KASSERT(m == NULL || toep->ddp.active_count == 0, in aio_ddp_requeue()
2516 iov[0].iov_len = m->m_len; in aio_ddp_requeue()
2526 error = uiomove_fromphys(ps->pages, offset + copied, in aio_ddp_requeue()
2529 uiomove_fromphys(ps->pages, offset + copied, uio.uio_resid, &uio); in aio_ddp_requeue()
2533 resid -= uio.uio_offset; in aio_ddp_requeue()
2534 m = m->m_next; in aio_ddp_requeue()
2538 job->aio_received += copied; in aio_ddp_requeue()
2539 job->msgrcv = 1; in aio_ddp_requeue()
2540 copied = job->aio_received; in aio_ddp_requeue()
2561 t4_rcvd_locked(&toep->td->tod, intotcpcb(inp)); in aio_ddp_requeue()
2563 if (resid == 0 || toep->ddp.flags & DDP_DEAD) { in aio_ddp_requeue()
2573 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2582 if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) { in aio_ddp_requeue()
2586 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2603 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2614 if (toep->ddp.db[0].job == NULL) { in aio_ddp_requeue()
2617 MPASS(toep->ddp.db[1].job == NULL); in aio_ddp_requeue()
2625 if (so->so_state & SS_NBIO) in aio_ddp_requeue()
2633 if (so->so_state & SS_NBIO) in aio_ddp_requeue()
2640 MPASS((toep->ddp.flags & buf_flag) == 0); in aio_ddp_requeue()
2641 if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) { in aio_ddp_requeue()
2643 MPASS(toep->ddp.active_id == -1); in aio_ddp_requeue()
2644 MPASS(toep->ddp.active_count == 0); in aio_ddp_requeue()
2656 wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &ps->prsv, in aio_ddp_requeue()
2657 job->aio_received, ps->len, ddp_flags, ddp_flags_mask); in aio_ddp_requeue()
2661 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2679 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2686 toep->tid, job, db_idx, ddp_flags, ddp_flags_mask); in aio_ddp_requeue()
2688 /* Give the chip the go-ahead. */ in aio_ddp_requeue()
2690 db = &toep->ddp.db[db_idx]; in aio_ddp_requeue()
2691 db->cancel_pending = 0; in aio_ddp_requeue()
2692 db->job = job; in aio_ddp_requeue()
2693 db->ps = ps; in aio_ddp_requeue()
2694 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2695 toep->ddp.flags |= buf_flag; in aio_ddp_requeue()
2696 toep->ddp.active_count++; in aio_ddp_requeue()
2697 if (toep->ddp.active_count == 1) { in aio_ddp_requeue()
2698 MPASS(toep->ddp.active_id == -1); in aio_ddp_requeue()
2699 toep->ddp.active_id = db_idx; in aio_ddp_requeue()
2701 toep->ddp.active_id); in aio_ddp_requeue()
2711 if (toep->ddp.flags & DDP_TASK_ACTIVE) in ddp_queue_toep()
2713 toep->ddp.flags |= DDP_TASK_ACTIVE; in ddp_queue_toep()
2715 soaio_enqueue(&toep->ddp.requeue_task); in ddp_queue_toep()
2725 toep->ddp.flags &= ~DDP_TASK_ACTIVE; in aio_ddp_requeue_task()
2734 struct socket *so = job->fd_file->f_data; in t4_aio_cancel_active()
2736 struct toepcb *toep = tp->t_toe; in t4_aio_cancel_active()
2737 struct adapter *sc = td_adapter(toep->td); in t4_aio_cancel_active()
2748 for (i = 0; i < nitems(toep->ddp.db); i++) { in t4_aio_cancel_active()
2749 if (toep->ddp.db[i].job == job) { in t4_aio_cancel_active()
2751 MPASS(toep->ddp.db[i].cancel_pending == 0); in t4_aio_cancel_active()
2760 t4_set_tcb_field(sc, toep->ctrlq, toep, in t4_aio_cancel_active()
2763 toep->ddp.db[i].cancel_pending = 1; in t4_aio_cancel_active()
2775 struct socket *so = job->fd_file->f_data; in t4_aio_cancel_queued()
2777 struct toepcb *toep = tp->t_toe; in t4_aio_cancel_queued()
2781 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); in t4_aio_cancel_queued()
2782 toep->ddp.waiting_count--; in t4_aio_cancel_queued()
2783 if (toep->ddp.waiting_count == 0) in t4_aio_cancel_queued()
2797 struct toepcb *toep = tp->t_toe; in t4_aio_queue_ddp()
2800 if (job->uaiocb.aio_lio_opcode != LIO_READ) in t4_aio_queue_ddp()
2818 if ((toep->ddp.flags & DDP_RCVBUF) != 0) { in t4_aio_queue_ddp()
2823 if ((toep->ddp.flags & DDP_AIO) == 0) { in t4_aio_queue_ddp()
2824 toep->ddp.flags |= DDP_AIO; in t4_aio_queue_ddp()
2825 TAILQ_INIT(&toep->ddp.cached_pagesets); in t4_aio_queue_ddp()
2826 TAILQ_INIT(&toep->ddp.aiojobq); in t4_aio_queue_ddp()
2827 TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task, in t4_aio_queue_ddp()
2838 CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid); in t4_aio_queue_ddp()
2842 TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list); in t4_aio_queue_ddp()
2843 toep->ddp.waiting_count++; in t4_aio_queue_ddp()
2865 if ((toep->ddp.flags & DDP_DEAD) != 0) { in ddp_rcvbuf_requeue()
2866 MPASS(toep->ddp.active_count == 0); in ddp_rcvbuf_requeue()
2871 if (toep->ddp.active_count == nitems(toep->ddp.db)) { in ddp_rcvbuf_requeue()
2875 inp = toep->inp; in ddp_rcvbuf_requeue()
2876 so = inp->inp_socket; in ddp_rcvbuf_requeue()
2877 sb = &so->so_rcv; in ddp_rcvbuf_requeue()
2892 if ((toep->ddp.flags & DDP_DEAD) != 0 || in ddp_rcvbuf_requeue()
2893 toep->ddp.active_count == nitems(toep->ddp.db)) { in ddp_rcvbuf_requeue()
2900 if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { in ddp_rcvbuf_requeue()
2907 if (so->so_error != 0 || (sb->sb_state & SBS_CANTRCVMORE) != 0) { in ddp_rcvbuf_requeue()
2935 toep->ddp.flags &= ~DDP_TASK_ACTIVE; in ddp_rcvbuf_requeue_task()
2945 struct adapter *sc = td_adapter(toep->td); in t4_enable_ddp_rcv()
2967 if ((toep->ddp.flags & DDP_AIO) != 0) { in t4_enable_ddp_rcv()
2972 if ((toep->ddp.flags & DDP_RCVBUF) != 0) { in t4_enable_ddp_rcv()
2977 toep->ddp.flags |= DDP_RCVBUF; in t4_enable_ddp_rcv()
2978 TAILQ_INIT(&toep->ddp.cached_buffers); in t4_enable_ddp_rcv()
2980 TASK_INIT(&toep->ddp.requeue_task, 0, ddp_rcvbuf_requeue_task, toep); in t4_enable_ddp_rcv()