Lines matching refs:toep (cross-reference hits for the symbol toep in FreeBSD's Chelsio TOM DDP code, sys/dev/cxgbe/tom/t4_ddp.c). The leading number on each line is the source line number; the trailing "in f()" note names the enclosing function, with "argument" or "local" marking how the symbol is used there.
81 static void ddp_complete_all(struct toepcb *toep, int error);
150 recycle_pageset(struct toepcb *toep, struct pageset *ps) in recycle_pageset() argument
153 DDP_ASSERT_LOCKED(toep); in recycle_pageset()
154 if (!(toep->ddp.flags & DDP_DEAD)) { in recycle_pageset()
155 KASSERT(toep->ddp.cached_count + toep->ddp.active_count < in recycle_pageset()
156 nitems(toep->ddp.db), ("too many wired pagesets")); in recycle_pageset()
157 TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link); in recycle_pageset()
158 toep->ddp.cached_count++; in recycle_pageset()
160 free_pageset(toep->td, ps); in recycle_pageset()
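
The recycle_pageset() fragment above shows the recycle-or-free pattern used throughout this file: a completed pageset goes back onto a small per-connection LIFO cache unless the connection is being torn down (DDP_DEAD), in which case it is freed; the rcv-buffer variant below applies the same shape with an explicit cap (t4_ddp_rcvbuf_cache). A minimal userspace sketch of the pattern follows; the names (recycle_or_free, MAX_CACHED, pc->dead) are hypothetical stand-ins, not the driver's.

    #include <pthread.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct pageset {
        TAILQ_ENTRY(pageset) link;
        /* wired pages and page-pod reservation would live here */
    };

    struct ps_cache {
        pthread_mutex_t lock;
        TAILQ_HEAD(, pageset) cached;
        int cached_count;
        int dead;                   /* set once at teardown, cf. DDP_DEAD */
    };

    #define MAX_CACHED 2            /* hypothetical bound, cf. nitems(ddp.db) */

    static void
    recycle_or_free(struct ps_cache *pc, struct pageset *ps)
    {
        pthread_mutex_lock(&pc->lock);
        if (!pc->dead && pc->cached_count < MAX_CACHED) {
            /* LIFO insert: the most recently used mapping is the
             * likeliest to be asked for again by the next request. */
            TAILQ_INSERT_HEAD(&pc->cached, ps, link);
            pc->cached_count++;
            pthread_mutex_unlock(&pc->lock);
            return;
        }
        pthread_mutex_unlock(&pc->lock);
        free(ps);                   /* stands in for free_pageset() */
    }
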
181 free_ddp_rcv_buffer(struct toepcb *toep, struct ddp_rcv_buffer *drb) in free_ddp_rcv_buffer() argument
186 counter_u64_add(toep->ofld_rxq->ddp_buffer_free, 1); in free_ddp_rcv_buffer()
187 free_toepcb(toep); in free_ddp_rcv_buffer()
191 recycle_ddp_rcv_buffer(struct toepcb *toep, struct ddp_rcv_buffer *drb) in recycle_ddp_rcv_buffer() argument
193 DDP_CACHE_LOCK(toep); in recycle_ddp_rcv_buffer()
194 if (!(toep->ddp.flags & DDP_DEAD) && in recycle_ddp_rcv_buffer()
195 toep->ddp.cached_count < t4_ddp_rcvbuf_cache) { in recycle_ddp_rcv_buffer()
196 TAILQ_INSERT_HEAD(&toep->ddp.cached_buffers, drb, link); in recycle_ddp_rcv_buffer()
197 toep->ddp.cached_count++; in recycle_ddp_rcv_buffer()
198 DDP_CACHE_UNLOCK(toep); in recycle_ddp_rcv_buffer()
200 DDP_CACHE_UNLOCK(toep); in recycle_ddp_rcv_buffer()
201 free_ddp_rcv_buffer(toep, drb); in recycle_ddp_rcv_buffer()
206 alloc_cached_ddp_rcv_buffer(struct toepcb *toep) in alloc_cached_ddp_rcv_buffer() argument
210 DDP_CACHE_LOCK(toep); in alloc_cached_ddp_rcv_buffer()
211 if (!TAILQ_EMPTY(&toep->ddp.cached_buffers)) { in alloc_cached_ddp_rcv_buffer()
212 drb = TAILQ_FIRST(&toep->ddp.cached_buffers); in alloc_cached_ddp_rcv_buffer()
213 TAILQ_REMOVE(&toep->ddp.cached_buffers, drb, link); in alloc_cached_ddp_rcv_buffer()
214 toep->ddp.cached_count--; in alloc_cached_ddp_rcv_buffer()
215 counter_u64_add(toep->ofld_rxq->ddp_buffer_reuse, 1); in alloc_cached_ddp_rcv_buffer()
218 DDP_CACHE_UNLOCK(toep); in alloc_cached_ddp_rcv_buffer()
223 alloc_ddp_rcv_buffer(struct toepcb *toep, int how) in alloc_ddp_rcv_buffer() argument
225 struct tom_data *td = toep->td; in alloc_ddp_rcv_buffer()
250 error = t4_write_page_pods_for_rcvbuf(sc, toep->ctrlq, toep->tid, drb); in alloc_ddp_rcv_buffer()
258 hold_toepcb(toep); in alloc_ddp_rcv_buffer()
259 counter_u64_add(toep->ofld_rxq->ddp_buffer_alloc, 1); in alloc_ddp_rcv_buffer()
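
The alloc/free pair above also pins the connection for as long as a receive buffer exists: alloc_ddp_rcv_buffer() ends with hold_toepcb() and free_ddp_rcv_buffer() ends with free_toepcb(), so a buffer still attached to an mbuf chain keeps the toepcb from going away (the counter_u64_add() calls just feed the alloc/free/reuse statistics). A minimal model of that ownership pairing, names hypothetical:

    #include <stdlib.h>

    struct conn { int refs; };

    static void *
    buf_alloc(struct conn *c, size_t len)
    {
        void *buf = malloc(len);    /* stands in for pages + page pods */

        if (buf != NULL)
            c->refs++;              /* cf. hold_toepcb(): buffer pins conn */
        return (buf);
    }

    static void
    buf_free(struct conn *c, void *buf)
    {
        free(buf);                  /* release pages / ppod reservation */
        c->refs--;                  /* cf. free_toepcb(): may be last ref */
    }
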
264 free_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db) in free_ddp_buffer() argument
266 if ((toep->ddp.flags & DDP_RCVBUF) != 0) { in free_ddp_buffer()
268 free_ddp_rcv_buffer(toep, db->drb); in free_ddp_buffer()
290 free_pageset(toep->td, db->ps); in free_ddp_buffer()
298 ddp_init_toep(struct toepcb *toep) in ddp_init_toep() argument
301 toep->ddp.flags = DDP_OK; in ddp_init_toep()
302 toep->ddp.active_id = -1; in ddp_init_toep()
303 mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF); in ddp_init_toep()
304 mtx_init(&toep->ddp.cache_lock, "t4 ddp cache", NULL, MTX_DEF); in ddp_init_toep()
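
ddp_init_toep() names the two locks that order everything in this file: ddp.lock (DDP_LOCK) protects the buffer slots and job queues, while ddp.cache_lock (DDP_CACHE_LOCK) protects only the cached-buffer list. Piecing together the fields referenced throughout the listing gives roughly the following per-connection state; this is a reader's reconstruction with guessed types, and the real definition in the TOM headers is authoritative.

    /* Hypothetical reconstruction, inferred from the references above. */
    struct ddp_state_model {
        int flags;          /* DDP_OK | DDP_ON | DDP_SC_REQ | DDP_DEAD |
                               DDP_AIO | DDP_RCVBUF | DDP_TASK_ACTIVE |
                               DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE */
        int active_id;      /* oldest hardware-posted slot, or -1 */
        int active_count;   /* slots currently posted (0..2) */
        int cached_count;   /* entries on the cached free list */
        int waiting_count;  /* AIO jobs queued but not yet posted */
        struct {
            void *ps_or_drb;    /* pageset (AIO) or rcv buffer (RCVBUF) */
            void *job;          /* kaiocb, AIO flavor only */
            int cancel_pending;
        } db[2];            /* the two ping-pong hardware slots */
        /* plus: the two mutexes (lock, cache_lock), the cached_pagesets /
         * cached_buffers / aiojobq tail queues, a queueing cursor, and
         * the requeue_task seen elsewhere in the listing. */
    };
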
308 ddp_uninit_toep(struct toepcb *toep) in ddp_uninit_toep() argument
311 mtx_destroy(&toep->ddp.lock); in ddp_uninit_toep()
312 mtx_destroy(&toep->ddp.cache_lock); in ddp_uninit_toep()
316 release_ddp_resources(struct toepcb *toep) in release_ddp_resources() argument
322 DDP_LOCK(toep); in release_ddp_resources()
323 DDP_CACHE_LOCK(toep); in release_ddp_resources()
324 toep->ddp.flags |= DDP_DEAD; in release_ddp_resources()
325 DDP_CACHE_UNLOCK(toep); in release_ddp_resources()
326 for (i = 0; i < nitems(toep->ddp.db); i++) { in release_ddp_resources()
327 free_ddp_buffer(toep, &toep->ddp.db[i]); in release_ddp_resources()
329 if ((toep->ddp.flags & DDP_AIO) != 0) { in release_ddp_resources()
330 while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) { in release_ddp_resources()
331 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); in release_ddp_resources()
332 free_pageset(toep->td, ps); in release_ddp_resources()
334 ddp_complete_all(toep, 0); in release_ddp_resources()
336 if ((toep->ddp.flags & DDP_RCVBUF) != 0) { in release_ddp_resources()
337 DDP_CACHE_LOCK(toep); in release_ddp_resources()
338 while ((drb = TAILQ_FIRST(&toep->ddp.cached_buffers)) != NULL) { in release_ddp_resources()
339 TAILQ_REMOVE(&toep->ddp.cached_buffers, drb, link); in release_ddp_resources()
340 free_ddp_rcv_buffer(toep, drb); in release_ddp_resources()
342 DDP_CACHE_UNLOCK(toep); in release_ddp_resources()
344 DDP_UNLOCK(toep); in release_ddp_resources()
349 ddp_assert_empty(struct toepcb *toep) in ddp_assert_empty() argument
353 MPASS((toep->ddp.flags & (DDP_TASK_ACTIVE | DDP_DEAD)) != DDP_TASK_ACTIVE); in ddp_assert_empty()
354 for (i = 0; i < nitems(toep->ddp.db); i++) { in ddp_assert_empty()
355 if ((toep->ddp.flags & DDP_AIO) != 0) { in ddp_assert_empty()
356 MPASS(toep->ddp.db[i].job == NULL); in ddp_assert_empty()
357 MPASS(toep->ddp.db[i].ps == NULL); in ddp_assert_empty()
359 MPASS(toep->ddp.db[i].drb == NULL); in ddp_assert_empty()
361 if ((toep->ddp.flags & DDP_AIO) != 0) { in ddp_assert_empty()
362 MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets)); in ddp_assert_empty()
363 MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq)); in ddp_assert_empty()
365 if ((toep->ddp.flags & DDP_RCVBUF) != 0) in ddp_assert_empty()
366 MPASS(TAILQ_EMPTY(&toep->ddp.cached_buffers)); in ddp_assert_empty()
371 complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db, in complete_ddp_buffer() argument
377 toep->ddp.active_count--; in complete_ddp_buffer()
378 if (toep->ddp.active_id == db_idx) { in complete_ddp_buffer()
379 if (toep->ddp.active_count == 0) { in complete_ddp_buffer()
380 if ((toep->ddp.flags & DDP_AIO) != 0) in complete_ddp_buffer()
381 KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL, in complete_ddp_buffer()
384 KASSERT(toep->ddp.db[db_idx ^ 1].drb == NULL, in complete_ddp_buffer()
386 toep->ddp.active_id = -1; in complete_ddp_buffer()
388 toep->ddp.active_id ^= 1; in complete_ddp_buffer()
391 toep->tid, toep->ddp.active_id); in complete_ddp_buffer()
394 KASSERT(toep->ddp.active_count != 0 && in complete_ddp_buffer()
395 toep->ddp.active_id != -1, in complete_ddp_buffer()
399 if ((toep->ddp.flags & DDP_AIO) != 0) { in complete_ddp_buffer()
402 recycle_pageset(toep, db->ps); in complete_ddp_buffer()
407 recycle_ddp_rcv_buffer(toep, drb); in complete_ddp_buffer()
413 KASSERT(toep->ddp.flags & db_flag, in complete_ddp_buffer()
415 __func__, toep, toep->ddp.flags)); in complete_ddp_buffer()
416 toep->ddp.flags &= ~db_flag; in complete_ddp_buffer()
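
complete_ddp_buffer() retires one of the two hardware slots. Because there are exactly two, "the other slot" is always db_idx ^ 1, and ddp.active_id tracks the older of the two posted buffers so that completions normally arrive in order. A compact model of that bookkeeping, names hypothetical:

    #include <assert.h>

    struct two_slot {
        int active_count;   /* 0, 1 or 2 */
        int active_id;      /* oldest active slot, or -1 when idle */
    };

    static void
    complete_slot(struct two_slot *ts, int db_idx)
    {
        ts->active_count--;
        if (ts->active_id == db_idx) {
            if (ts->active_count == 0)
                ts->active_id = -1;     /* both slots now idle */
            else
                ts->active_id ^= 1;     /* the other slot is now oldest */
        } else {
            /* Completed out of order: the other slot must still be
             * active, so the oldest-slot pointer does not move. */
            assert(ts->active_count != 0 && ts->active_id != -1);
        }
    }
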
423 struct toepcb *toep = m->m_ext.ext_arg1; in ddp_rcv_mbuf_done() local
426 recycle_ddp_rcv_buffer(toep, drb); in ddp_rcv_mbuf_done()
430 queue_ddp_rcvbuf_mbuf(struct toepcb *toep, u_int db_idx, u_int len) in queue_ddp_rcvbuf_mbuf() argument
432 struct inpcb *inp = toep->inp; in queue_ddp_rcvbuf_mbuf()
443 m->m_pkthdr.rcvif = toep->vi->ifp; in queue_ddp_rcvbuf_mbuf()
445 db = &toep->ddp.db[db_idx]; in queue_ddp_rcvbuf_mbuf()
448 ddp_rcv_mbuf_done, toep, drb); in queue_ddp_rcvbuf_mbuf()
457 toep->ofld_rxq->rx_toe_ddp_octets += len; in queue_ddp_rcvbuf_mbuf()
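
queue_ddp_rcvbuf_mbuf() wraps the DDP-placed pages in an mbuf whose external storage points at the receive buffer, registering ddp_rcv_mbuf_done() to run when the last reference drops so the buffer can be recycled rather than freed. FreeBSD's real hook is the m_ext external-storage mechanism (see mbuf(9)); below is only a toy model of a buffer with a destructor callback, all names hypothetical.

    #include <stdlib.h>

    struct ext_buf {
        char *data;                 /* DDP-placed pages */
        unsigned len;
        void (*ext_free)(void *);   /* cf. ddp_rcv_mbuf_done */
        void *ext_arg;              /* cf. the ddp_rcv_buffer */
        int refs;
    };

    static void
    ext_buf_release(struct ext_buf *eb)
    {
        if (--eb->refs == 0) {
            /* The destructor may recycle the pages instead of freeing
             * them, which is why the driver routes this through
             * recycle_ddp_rcv_buffer(). */
            eb->ext_free(eb->ext_arg);
            free(eb);
        }
    }
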
462 insert_ddp_data(struct toepcb *toep, uint32_t n) in insert_ddp_data() argument
464 struct inpcb *inp = toep->inp; in insert_ddp_data()
477 DDP_ASSERT_LOCKED(toep); in insert_ddp_data()
479 ddp_rcvbuf = (toep->ddp.flags & DDP_RCVBUF) != 0; in insert_ddp_data()
487 while (toep->ddp.active_count > 0) { in insert_ddp_data()
488 MPASS(toep->ddp.active_id != -1); in insert_ddp_data()
489 db_idx = toep->ddp.active_id; in insert_ddp_data()
493 MPASS((toep->ddp.flags & db_flag) != 0); in insert_ddp_data()
494 db = &toep->ddp.db[db_idx]; in insert_ddp_data()
500 queue_ddp_rcvbuf_mbuf(toep, db_idx, placed); in insert_ddp_data()
501 complete_ddp_buffer(toep, db, db_idx); in insert_ddp_data()
512 toep->ofld_rxq->rx_aio_ddp_jobs++; in insert_ddp_data()
514 toep->ofld_rxq->rx_aio_ddp_octets += placed; in insert_ddp_data()
529 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list); in insert_ddp_data()
530 toep->ddp.waiting_count++; in insert_ddp_data()
534 complete_ddp_buffer(toep, db, db_idx); in insert_ddp_data()
549 mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep) in mk_rx_data_ack_ulp() argument
562 OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid)); in mk_rx_data_ack_ulp()
575 mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx, in mk_update_tcb_for_ddp() argument
598 wr = alloc_wrqe(wrlen, toep->ctrlq); in mk_update_tcb_for_ddp()
606 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, in mk_update_tcb_for_ddp()
613 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, in mk_update_tcb_for_ddp()
620 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, in mk_update_tcb_for_ddp()
628 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_RX_DDP_FLAGS, in mk_update_tcb_for_ddp()
632 ulpmc = mk_rx_data_ack_ulp(ulpmc, toep); in mk_update_tcb_for_ddp()
638 handle_ddp_data_aio(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, in handle_ddp_data_aio() argument
643 struct inpcb *inp = toep->inp; in handle_ddp_data_aio()
659 DDP_LOCK(toep); in handle_ddp_data_aio()
661 KASSERT(toep->ddp.active_id == db_idx, in handle_ddp_data_aio()
663 toep->ddp.active_id, toep->tid)); in handle_ddp_data_aio()
664 db = &toep->ddp.db[db_idx]; in handle_ddp_data_aio()
673 __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags); in handle_ddp_data_aio()
702 toep->tid, db_idx, len, report); in handle_ddp_data_aio()
706 MPASS(toep->vnet == so->so_vnet); in handle_ddp_data_aio()
707 CURVNET_SET(toep->vnet); in handle_ddp_data_aio()
713 struct adapter *sc = td_adapter(toep->td); in handle_ddp_data_aio()
725 toep->ofld_rxq->rx_aio_ddp_jobs++; in handle_ddp_data_aio()
726 toep->ofld_rxq->rx_aio_ddp_octets += len; in handle_ddp_data_aio()
745 __func__, toep->tid, job, copied, len); in handle_ddp_data_aio()
748 t4_rcvd(&toep->td->tod, tp); in handle_ddp_data_aio()
752 complete_ddp_buffer(toep, db, db_idx); in handle_ddp_data_aio()
753 if (toep->ddp.waiting_count > 0) in handle_ddp_data_aio()
754 ddp_queue_toep(toep); in handle_ddp_data_aio()
756 DDP_UNLOCK(toep); in handle_ddp_data_aio()
763 queue_ddp_rcvbuf(struct toepcb *toep, struct ddp_rcv_buffer *drb) in queue_ddp_rcvbuf() argument
765 struct adapter *sc = td_adapter(toep->td); in queue_ddp_rcvbuf()
771 DDP_ASSERT_LOCKED(toep); in queue_ddp_rcvbuf()
773 KASSERT((toep->ddp.flags & DDP_DEAD) == 0, ("%s: DDP_DEAD", __func__)); in queue_ddp_rcvbuf()
774 KASSERT(toep->ddp.active_count < nitems(toep->ddp.db), in queue_ddp_rcvbuf()
778 if (toep->ddp.db[0].drb == NULL) { in queue_ddp_rcvbuf()
781 MPASS(toep->ddp.db[1].drb == NULL); in queue_ddp_rcvbuf()
809 MPASS((toep->ddp.flags & buf_flag) == 0); in queue_ddp_rcvbuf()
810 if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) { in queue_ddp_rcvbuf()
812 MPASS(toep->ddp.active_id == -1); in queue_ddp_rcvbuf()
813 MPASS(toep->ddp.active_count == 0); in queue_ddp_rcvbuf()
822 wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &drb->prsv, 0, drb->len, in queue_ddp_rcvbuf()
825 recycle_ddp_rcv_buffer(toep, drb); in queue_ddp_rcvbuf()
833 toep->tid, db_idx, ddp_flags, ddp_flags_mask); in queue_ddp_rcvbuf()
843 db = &toep->ddp.db[db_idx]; in queue_ddp_rcvbuf()
845 toep->ddp.flags |= buf_flag; in queue_ddp_rcvbuf()
846 toep->ddp.active_count++; in queue_ddp_rcvbuf()
847 if (toep->ddp.active_count == 1) { in queue_ddp_rcvbuf()
848 MPASS(toep->ddp.active_id == -1); in queue_ddp_rcvbuf()
849 toep->ddp.active_id = db_idx; in queue_ddp_rcvbuf()
851 toep->ddp.active_id); in queue_ddp_rcvbuf()
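
queue_ddp_rcvbuf() posts a fresh buffer into whichever of the two slots is idle (slot 0 preferred), builds the TCB-update work request, and only after it is sent marks the slot active, possibly making it the oldest. A model of the slot selection and activation, names hypothetical:

    #include <assert.h>
    #include <stddef.h>

    struct slots {
        void *buf[2];       /* posted buffer per slot, NULL when idle */
        int active_count;
        int active_id;      /* oldest posted slot, or -1 */
    };

    /* Post a buffer into the first idle slot, as the tail of
     * queue_ddp_rcvbuf() does once the work request is sent. */
    static int
    post_buffer(struct slots *s, void *buf)
    {
        int db_idx;

        assert(s->active_count < 2);
        db_idx = (s->buf[0] == NULL) ? 0 : 1;   /* slot 0 preferred */
        assert(s->buf[db_idx] == NULL);
        s->buf[db_idx] = buf;
        s->active_count++;
        if (s->active_count == 1) {
            assert(s->active_id == -1);
            s->active_id = db_idx;      /* first buffer is the oldest */
        }
        return (db_idx);
    }
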
857 handle_ddp_data_rcvbuf(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, in handle_ddp_data_rcvbuf() argument
861 struct inpcb *inp = toep->inp; in handle_ddp_data_rcvbuf()
877 DDP_LOCK(toep); in handle_ddp_data_rcvbuf()
879 KASSERT(toep->ddp.active_id == db_idx, in handle_ddp_data_rcvbuf()
881 toep->ddp.active_id, toep->tid)); in handle_ddp_data_rcvbuf()
882 db = &toep->ddp.db[db_idx]; in handle_ddp_data_rcvbuf()
890 __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags); in handle_ddp_data_rcvbuf()
892 complete_ddp_buffer(toep, db, db_idx); in handle_ddp_data_rcvbuf()
919 toep->tid, db_idx, len, report); in handle_ddp_data_rcvbuf()
923 MPASS(toep->vnet == so->so_vnet); in handle_ddp_data_rcvbuf()
924 CURVNET_SET(toep->vnet); in handle_ddp_data_rcvbuf()
930 struct adapter *sc = td_adapter(toep->td); in handle_ddp_data_rcvbuf()
940 queue_ddp_rcvbuf_mbuf(toep, db_idx, len); in handle_ddp_data_rcvbuf()
941 t4_rcvd_locked(&toep->td->tod, tp); in handle_ddp_data_rcvbuf()
948 complete_ddp_buffer(toep, db, db_idx); in handle_ddp_data_rcvbuf()
953 if (toep->ddp.active_count != nitems(toep->ddp.db)) { in handle_ddp_data_rcvbuf()
954 drb = alloc_cached_ddp_rcv_buffer(toep); in handle_ddp_data_rcvbuf()
956 drb = alloc_ddp_rcv_buffer(toep, M_NOWAIT); in handle_ddp_data_rcvbuf()
958 ddp_queue_toep(toep); in handle_ddp_data_rcvbuf()
960 if (!queue_ddp_rcvbuf(toep, drb)) { in handle_ddp_data_rcvbuf()
961 ddp_queue_toep(toep); in handle_ddp_data_rcvbuf()
966 DDP_UNLOCK(toep); in handle_ddp_data_rcvbuf()
973 handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len) in handle_ddp_data() argument
975 if ((toep->ddp.flags & DDP_RCVBUF) != 0) in handle_ddp_data()
976 return (handle_ddp_data_rcvbuf(toep, ddp_report, rcv_nxt, len)); in handle_ddp_data()
978 return (handle_ddp_data_aio(toep, ddp_report, rcv_nxt, len)); in handle_ddp_data()
982 handle_ddp_indicate(struct toepcb *toep) in handle_ddp_indicate() argument
985 DDP_ASSERT_LOCKED(toep); in handle_ddp_indicate()
986 if ((toep->ddp.flags & DDP_RCVBUF) != 0) { in handle_ddp_indicate()
995 MPASS(toep->ddp.active_count == 0); in handle_ddp_indicate()
996 MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0); in handle_ddp_indicate()
997 if (toep->ddp.waiting_count == 0) { in handle_ddp_indicate()
1007 toep->tid, toep->ddp.waiting_count); in handle_ddp_indicate()
1008 ddp_queue_toep(toep); in handle_ddp_indicate()
1020 struct toepcb *toep; in do_ddp_tcb_rpl() local
1029 toep = lookup_tid(sc, tid); in do_ddp_tcb_rpl()
1030 inp = toep->inp; in do_ddp_tcb_rpl()
1037 KASSERT((toep->ddp.flags & DDP_AIO) != 0, in do_ddp_tcb_rpl()
1040 MPASS(db_idx < nitems(toep->ddp.db)); in do_ddp_tcb_rpl()
1042 DDP_LOCK(toep); in do_ddp_tcb_rpl()
1043 db = &toep->ddp.db[db_idx]; in do_ddp_tcb_rpl()
1072 t4_rcvd(&toep->td->tod, intotcpcb(inp)); in do_ddp_tcb_rpl()
1075 complete_ddp_buffer(toep, db, db_idx); in do_ddp_tcb_rpl()
1076 if (toep->ddp.waiting_count > 0) in do_ddp_tcb_rpl()
1077 ddp_queue_toep(toep); in do_ddp_tcb_rpl()
1078 DDP_UNLOCK(toep); in do_ddp_tcb_rpl()
1090 handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt) in handle_ddp_close() argument
1092 struct socket *so = toep->inp->inp_socket; in handle_ddp_close()
1104 INP_WLOCK_ASSERT(toep->inp); in handle_ddp_close()
1105 DDP_ASSERT_LOCKED(toep); in handle_ddp_close()
1107 ddp_rcvbuf = (toep->ddp.flags & DDP_RCVBUF) != 0; in handle_ddp_close()
1114 toep->tid, len); in handle_ddp_close()
1115 while (toep->ddp.active_count > 0) { in handle_ddp_close()
1116 MPASS(toep->ddp.active_id != -1); in handle_ddp_close()
1117 db_idx = toep->ddp.active_id; in handle_ddp_close()
1121 MPASS((toep->ddp.flags & db_flag) != 0); in handle_ddp_close()
1122 db = &toep->ddp.db[db_idx]; in handle_ddp_close()
1129 queue_ddp_rcvbuf_mbuf(toep, db_idx, placed); in handle_ddp_close()
1133 complete_ddp_buffer(toep, db, db_idx); in handle_ddp_close()
1144 toep->ofld_rxq->rx_aio_ddp_jobs++; in handle_ddp_close()
1146 toep->ofld_rxq->rx_aio_ddp_octets += placed; in handle_ddp_close()
1156 __func__, toep->tid, db_idx, placed); in handle_ddp_close()
1160 complete_ddp_buffer(toep, db, db_idx); in handle_ddp_close()
1164 if ((toep->ddp.flags & DDP_AIO) != 0) in handle_ddp_close()
1165 ddp_complete_all(toep, 0); in handle_ddp_close()
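
On a FIN, handle_ddp_close() (like insert_ddp_data() earlier) must account for bytes the hardware already placed but never reported: rcv_nxt gives the placed length, and the loop credits it to the active buffers oldest-first, completing every active slot even if the later ones get zero bytes. A standalone model of that drain, names hypothetical:

    /* Credit 'len' placed bytes to the active slots, oldest first. */
    static void
    drain_placed(unsigned buf_len[2], int *active_id, int *active_count,
        unsigned len)
    {
        while (*active_count > 0) {
            int idx = *active_id;
            unsigned placed = (len < buf_len[idx]) ? len : buf_len[idx];

            /* ... deliver 'placed' bytes from slot idx to the socket
             * buffer (RCVBUF flavor) or complete the aio job (AIO) ... */
            len -= placed;
            (*active_count)--;
            *active_id = (*active_count == 0) ? -1 : (idx ^ 1);
        }
    }
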
1182 struct toepcb *toep = lookup_tid(sc, tid); in do_rx_data_ddp() local
1185 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); in do_rx_data_ddp()
1186 KASSERT(!(toep->flags & TPF_SYNQE), in do_rx_data_ddp()
1187 ("%s: toep %p claims to be a synq entry", __func__, toep)); in do_rx_data_ddp()
1192 __func__, vld, tid, toep); in do_rx_data_ddp()
1195 if (ulp_mode(toep) == ULP_MODE_ISCSI) { in do_rx_data_ddp()
1200 handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len)); in do_rx_data_ddp()
1212 struct toepcb *toep = lookup_tid(sc, tid); in do_rx_ddp_complete() local
1215 KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); in do_rx_ddp_complete()
1216 KASSERT(!(toep->flags & TPF_SYNQE), in do_rx_ddp_complete()
1217 ("%s: toep %p claims to be a synq entry", __func__, toep)); in do_rx_ddp_complete()
1219 handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0); in do_rx_ddp_complete()
1225 set_ddp_ulp_mode(struct toepcb *toep) in set_ddp_ulp_mode() argument
1227 struct adapter *sc = toep->vi->adapter; in set_ddp_ulp_mode()
1253 wr = alloc_wrqe(len, toep->ctrlq); in set_ddp_ulp_mode()
1257 CTR(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); in set_ddp_ulp_mode()
1267 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26, in set_ddp_ulp_mode()
1271 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 28, in set_ddp_ulp_mode()
1275 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30, in set_ddp_ulp_mode()
1279 toep->params.ulp_mode = ULP_MODE_TCPDDP; in set_ddp_ulp_mode()
1280 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_TYPE, in set_ddp_ulp_mode()
1285 ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS, in set_ddp_ulp_mode()
1289 ddp_init_toep(toep); in set_ddp_ulp_mode()
1296 enable_ddp(struct adapter *sc, struct toepcb *toep) in enable_ddp() argument
1300 KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK, in enable_ddp()
1302 __func__, toep, toep->ddp.flags)); in enable_ddp()
1305 __func__, toep->tid, time_uptime); in enable_ddp()
1308 if ((toep->ddp.flags & DDP_AIO) != 0) in enable_ddp()
1311 DDP_ASSERT_LOCKED(toep); in enable_ddp()
1312 toep->ddp.flags |= DDP_SC_REQ; in enable_ddp()
1313 t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_RX_DDP_FLAGS, in enable_ddp()
1317 t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS, in enable_ddp()
1808 t4_write_page_pods_for_bio(struct adapter *sc, struct toepcb *toep, in t4_write_page_pods_for_bio() argument
1843 INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); in t4_write_page_pods_for_bio()
1856 V_PPOD_TID(toep->tid) | in t4_write_page_pods_for_bio()
1872 __func__, toep->tid, i, k, in t4_write_page_pods_for_bio()
1885 t4_write_page_pods_for_buf(struct adapter *sc, struct toepcb *toep, in t4_write_page_pods_for_buf() argument
1923 INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); in t4_write_page_pods_for_buf()
1936 V_PPOD_TID(toep->tid) | in t4_write_page_pods_for_buf()
1953 __func__, toep->tid, i, k, in t4_write_page_pods_for_buf()
1975 t4_write_page_pods_for_sgl(struct adapter *sc, struct toepcb *toep, in t4_write_page_pods_for_sgl() argument
2014 INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); in t4_write_page_pods_for_sgl()
2027 V_PPOD_TID(toep->tid) | in t4_write_page_pods_for_sgl()
2043 __func__, toep->tid, i, k, in t4_write_page_pods_for_sgl()
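
All three t4_write_page_pods_for_*() variants stream page pods, the descriptors that tell the card which physical pages back a DDP buffer, to the adapter in ULP_TX work requests. Under the assumption (worth verifying against the driver headers) that one pod describes four pages, the pod count for an n-page buffer is a simple ceiling division:

    #include <stdio.h>

    #define PPOD_PAGES 4    /* assumed pages per pod; check the headers */

    /* Pods needed for a buffer spanning npages pages. */
    static unsigned
    ppods_for(unsigned npages)
    {
        return ((npages + PPOD_PAGES - 1) / PPOD_PAGES);
    }

    int
    main(void)
    {
        /* a 1 MiB buffer of 4 KiB pages: 256 pages -> 64 page pods */
        printf("%u pods\n", ppods_for((1024 * 1024) / 4096));
        return (0);
    }
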
2088 prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps) in prep_pageset() argument
2097 t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) { in prep_pageset()
2165 hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps) in hold_aio() argument
2173 DDP_ASSERT_LOCKED(toep); in hold_aio()
2199 __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes, in hold_aio()
2211 TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) { in hold_aio()
2214 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); in hold_aio()
2215 toep->ddp.cached_count--; in hold_aio()
2225 KASSERT(toep->ddp.active_count + toep->ddp.cached_count <= in hold_aio()
2226 nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__)); in hold_aio()
2227 if (toep->ddp.active_count + toep->ddp.cached_count == in hold_aio()
2228 nitems(toep->ddp.db)) { in hold_aio()
2229 KASSERT(toep->ddp.cached_count > 0, in hold_aio()
2231 ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq); in hold_aio()
2232 TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); in hold_aio()
2233 toep->ddp.cached_count--; in hold_aio()
2234 free_pageset(toep->td, ps); in hold_aio()
2236 DDP_UNLOCK(toep); in hold_aio()
2246 DDP_LOCK(toep); in hold_aio()
2262 __func__, toep->tid, ps, job, ps->npages); in hold_aio()
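
hold_aio() wires the AIO job's user pages into a pageset, but first walks the per-connection cache for a pageset that already maps the same buffer; if the wired-pageset budget (active plus cached, bounded by nitems(ddp.db)) is full, the least recently used entry (TAILQ_LAST) is evicted to make room. A sketch of that find-or-evict policy; the match-by-pointer test is a simplification of the driver's page-level comparison, and all names are hypothetical.

    #include <assert.h>
    #include <stdlib.h>
    #include <sys/queue.h>

    struct ps_model {
        TAILQ_ENTRY(ps_model) link;
        void *start;        /* user buffer this pageset maps */
        size_t len;
    };
    TAILQ_HEAD(ps_list, ps_model);

    static struct ps_model *
    find_or_evict(struct ps_list *cache, int *cached, int active,
        int budget, void *start, size_t len)
    {
        struct ps_model *ps;

        TAILQ_FOREACH(ps, cache, link) {
            if (ps->start == start && ps->len == len) {
                TAILQ_REMOVE(cache, ps, link);
                (*cached)--;
                return (ps);    /* hit: pages are already wired */
            }
        }
        if (active + *cached == budget) {
            /* Budget full: evict the least recently used entry. */
            assert(*cached > 0);        /* cf. the driver's KASSERT */
            ps = TAILQ_LAST(cache, ps_list);
            TAILQ_REMOVE(cache, ps, link);
            (*cached)--;
            free(ps);           /* stands in for free_pageset() */
        }
        return (NULL);          /* miss: caller wires a fresh pageset */
    }
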
2268 ddp_complete_all(struct toepcb *toep, int error) in ddp_complete_all() argument
2272 DDP_ASSERT_LOCKED(toep); in ddp_complete_all()
2273 KASSERT((toep->ddp.flags & DDP_AIO) != 0, ("%s: DDP_RCVBUF", __func__)); in ddp_complete_all()
2274 while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) { in ddp_complete_all()
2275 job = TAILQ_FIRST(&toep->ddp.aiojobq); in ddp_complete_all()
2276 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); in ddp_complete_all()
2277 toep->ddp.waiting_count--; in ddp_complete_all()
2306 aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job) in aio_ddp_requeue_one() argument
2309 DDP_ASSERT_LOCKED(toep); in aio_ddp_requeue_one()
2310 if (!(toep->ddp.flags & DDP_DEAD) && in aio_ddp_requeue_one()
2312 TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list); in aio_ddp_requeue_one()
2313 toep->ddp.waiting_count++; in aio_ddp_requeue_one()
2319 aio_ddp_requeue(struct toepcb *toep) in aio_ddp_requeue() argument
2321 struct adapter *sc = td_adapter(toep->td); in aio_ddp_requeue()
2334 DDP_ASSERT_LOCKED(toep); in aio_ddp_requeue()
2337 if (toep->ddp.flags & DDP_DEAD) { in aio_ddp_requeue()
2338 MPASS(toep->ddp.waiting_count == 0); in aio_ddp_requeue()
2339 MPASS(toep->ddp.active_count == 0); in aio_ddp_requeue()
2343 if (toep->ddp.waiting_count == 0 || in aio_ddp_requeue()
2344 toep->ddp.active_count == nitems(toep->ddp.db)) { in aio_ddp_requeue()
2348 job = TAILQ_FIRST(&toep->ddp.aiojobq); in aio_ddp_requeue()
2356 ddp_complete_all(toep, ENOTCONN); in aio_ddp_requeue()
2360 KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0, in aio_ddp_requeue()
2366 toep->ddp.waiting_count--; in aio_ddp_requeue()
2367 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); in aio_ddp_requeue()
2398 if (toep->ddp.active_count != 0) in aio_ddp_requeue()
2400 ddp_complete_all(toep, 0); in aio_ddp_requeue()
2408 if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) { in aio_ddp_requeue()
2422 if ((toep->ddp.flags & DDP_SC_REQ) == 0) in aio_ddp_requeue()
2423 enable_ddp(sc, toep); in aio_ddp_requeue()
2432 if (toep->ddp.queueing != NULL) in aio_ddp_requeue()
2436 toep->ddp.waiting_count--; in aio_ddp_requeue()
2437 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); in aio_ddp_requeue()
2440 toep->ddp.queueing = job; in aio_ddp_requeue()
2443 error = hold_aio(toep, job, &ps); in aio_ddp_requeue()
2446 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2455 recycle_pageset(toep, ps); in aio_ddp_requeue()
2457 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2464 recycle_pageset(toep, ps); in aio_ddp_requeue()
2466 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2472 recycle_pageset(toep, ps); in aio_ddp_requeue()
2473 if (toep->ddp.active_count != 0) { in aio_ddp_requeue()
2479 aio_ddp_requeue_one(toep, job); in aio_ddp_requeue()
2480 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2484 ddp_complete_all(toep, 0); in aio_ddp_requeue()
2485 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2494 MPASS(!(toep->ddp.flags & DDP_DEAD)); in aio_ddp_requeue()
2506 KASSERT(m == NULL || toep->ddp.active_count == 0, in aio_ddp_requeue()
2550 DDP_UNLOCK(toep); in aio_ddp_requeue()
2552 DDP_LOCK(toep); in aio_ddp_requeue()
2561 t4_rcvd_locked(&toep->td->tod, intotcpcb(inp)); in aio_ddp_requeue()
2563 if (resid == 0 || toep->ddp.flags & DDP_DEAD) { in aio_ddp_requeue()
2571 recycle_pageset(toep, ps); in aio_ddp_requeue()
2573 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2582 if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) { in aio_ddp_requeue()
2584 recycle_pageset(toep, ps); in aio_ddp_requeue()
2585 aio_ddp_requeue_one(toep, job); in aio_ddp_requeue()
2586 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2600 if (prep_pageset(sc, toep, ps) == 0) { in aio_ddp_requeue()
2601 recycle_pageset(toep, ps); in aio_ddp_requeue()
2602 aio_ddp_requeue_one(toep, job); in aio_ddp_requeue()
2603 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2614 if (toep->ddp.db[0].job == NULL) { in aio_ddp_requeue()
2617 MPASS(toep->ddp.db[1].job == NULL); in aio_ddp_requeue()
2640 MPASS((toep->ddp.flags & buf_flag) == 0); in aio_ddp_requeue()
2641 if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) { in aio_ddp_requeue()
2643 MPASS(toep->ddp.active_id == -1); in aio_ddp_requeue()
2644 MPASS(toep->ddp.active_count == 0); in aio_ddp_requeue()
2656 wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &ps->prsv, in aio_ddp_requeue()
2659 recycle_pageset(toep, ps); in aio_ddp_requeue()
2660 aio_ddp_requeue_one(toep, job); in aio_ddp_requeue()
2661 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2677 recycle_pageset(toep, ps); in aio_ddp_requeue()
2679 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2686 toep->tid, job, db_idx, ddp_flags, ddp_flags_mask); in aio_ddp_requeue()
2690 db = &toep->ddp.db[db_idx]; in aio_ddp_requeue()
2694 toep->ddp.queueing = NULL; in aio_ddp_requeue()
2695 toep->ddp.flags |= buf_flag; in aio_ddp_requeue()
2696 toep->ddp.active_count++; in aio_ddp_requeue()
2697 if (toep->ddp.active_count == 1) { in aio_ddp_requeue()
2698 MPASS(toep->ddp.active_id == -1); in aio_ddp_requeue()
2699 toep->ddp.active_id = db_idx; in aio_ddp_requeue()
2701 toep->ddp.active_id); in aio_ddp_requeue()
2707 ddp_queue_toep(struct toepcb *toep) in ddp_queue_toep() argument
2710 DDP_ASSERT_LOCKED(toep); in ddp_queue_toep()
2711 if (toep->ddp.flags & DDP_TASK_ACTIVE) in ddp_queue_toep()
2713 toep->ddp.flags |= DDP_TASK_ACTIVE; in ddp_queue_toep()
2714 hold_toepcb(toep); in ddp_queue_toep()
2715 soaio_enqueue(&toep->ddp.requeue_task); in ddp_queue_toep()
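
ddp_queue_toep() is a schedule-once helper: DDP_TASK_ACTIVE suppresses a second enqueue while the requeue task is pending, hold_toepcb() keeps the connection alive until the task runs, and the task body (aio_ddp_requeue_task() / ddp_rcvbuf_requeue_task() below) clears the flag and drops the reference. A minimal model of the pattern, names hypothetical:

    #include <stdbool.h>

    struct conn {
        int refs;
        bool task_active;   /* cf. DDP_TASK_ACTIVE */
    };

    /* Called with the connection lock held, as DDP_ASSERT_LOCKED implies. */
    static void
    queue_work(struct conn *c, void (*enqueue)(struct conn *))
    {
        if (c->task_active)
            return;         /* already scheduled; it will see the new work */
        c->task_active = true;
        c->refs++;          /* cf. hold_toepcb(): task keeps conn alive */
        enqueue(c);
    }

    /* Tail of the task body, cf. aio_ddp_requeue_task(). */
    static void
    work_done(struct conn *c)
    {
        c->task_active = false;
        c->refs--;          /* cf. free_toepcb() */
    }
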
2721 struct toepcb *toep = context; in aio_ddp_requeue_task() local
2723 DDP_LOCK(toep); in aio_ddp_requeue_task()
2724 aio_ddp_requeue(toep); in aio_ddp_requeue_task()
2725 toep->ddp.flags &= ~DDP_TASK_ACTIVE; in aio_ddp_requeue_task()
2726 DDP_UNLOCK(toep); in aio_ddp_requeue_task()
2728 free_toepcb(toep); in aio_ddp_requeue_task()
2736 struct toepcb *toep = tp->t_toe; in t4_aio_cancel_active() local
2737 struct adapter *sc = td_adapter(toep->td); in t4_aio_cancel_active()
2741 DDP_LOCK(toep); in t4_aio_cancel_active()
2743 DDP_UNLOCK(toep); in t4_aio_cancel_active()
2748 for (i = 0; i < nitems(toep->ddp.db); i++) { in t4_aio_cancel_active()
2749 if (toep->ddp.db[i].job == job) { in t4_aio_cancel_active()
2751 MPASS(toep->ddp.db[i].cancel_pending == 0); in t4_aio_cancel_active()
2760 t4_set_tcb_field(sc, toep->ctrlq, toep, in t4_aio_cancel_active()
2763 toep->ddp.db[i].cancel_pending = 1; in t4_aio_cancel_active()
2769 DDP_UNLOCK(toep); in t4_aio_cancel_active()
2777 struct toepcb *toep = tp->t_toe; in t4_aio_cancel_queued() local
2779 DDP_LOCK(toep); in t4_aio_cancel_queued()
2781 TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); in t4_aio_cancel_queued()
2782 toep->ddp.waiting_count--; in t4_aio_cancel_queued()
2783 if (toep->ddp.waiting_count == 0) in t4_aio_cancel_queued()
2784 ddp_queue_toep(toep); in t4_aio_cancel_queued()
2787 DDP_UNLOCK(toep); in t4_aio_cancel_queued()
2797 struct toepcb *toep = tp->t_toe; in t4_aio_queue_ddp() local
2804 if (__predict_false(ulp_mode(toep) == ULP_MODE_NONE)) { in t4_aio_queue_ddp()
2805 if (!set_ddp_ulp_mode(toep)) { in t4_aio_queue_ddp()
2812 DDP_LOCK(toep); in t4_aio_queue_ddp()
2818 if ((toep->ddp.flags & DDP_RCVBUF) != 0) { in t4_aio_queue_ddp()
2819 DDP_UNLOCK(toep); in t4_aio_queue_ddp()
2823 if ((toep->ddp.flags & DDP_AIO) == 0) { in t4_aio_queue_ddp()
2824 toep->ddp.flags |= DDP_AIO; in t4_aio_queue_ddp()
2825 TAILQ_INIT(&toep->ddp.cached_pagesets); in t4_aio_queue_ddp()
2826 TAILQ_INIT(&toep->ddp.aiojobq); in t4_aio_queue_ddp()
2827 TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task, in t4_aio_queue_ddp()
2828 toep); in t4_aio_queue_ddp()
2838 CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid); in t4_aio_queue_ddp()
2842 TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list); in t4_aio_queue_ddp()
2843 toep->ddp.waiting_count++; in t4_aio_queue_ddp()
2850 aio_ddp_requeue(toep); in t4_aio_queue_ddp()
2851 DDP_UNLOCK(toep); in t4_aio_queue_ddp()
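
t4_aio_queue_ddp() shows the lazy, exclusive mode claim: the first aio_read() on a socket sets DDP_AIO and initializes the pageset cache, job queue, and requeue task, while a socket already in receive-buffer mode (DDP_RCVBUF) is refused; t4_enable_ddp_rcv() below enforces the mirror image. A toy version of the claim, names hypothetical:

    enum ddp_mode { MODE_NONE, MODE_AIO, MODE_RCVBUF };

    /* Returns 0 if 'want' now owns the socket, -1 if the other flavor
     * got there first (the driver would return EOPNOTSUPP). */
    static int
    claim_mode(enum ddp_mode *cur, enum ddp_mode want)
    {
        if (*cur != MODE_NONE && *cur != want)
            return (-1);
        if (*cur == MODE_NONE) {
            *cur = want;
            /* first claim: TAILQ_INIT the queues and TASK_INIT the
             * requeue task, as the fragments above and below do */
        }
        return (0);
    }
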
2856 ddp_rcvbuf_requeue(struct toepcb *toep) in ddp_rcvbuf_requeue() argument
2863 DDP_ASSERT_LOCKED(toep); in ddp_rcvbuf_requeue()
2865 if ((toep->ddp.flags & DDP_DEAD) != 0) { in ddp_rcvbuf_requeue()
2866 MPASS(toep->ddp.active_count == 0); in ddp_rcvbuf_requeue()
2871 if (toep->ddp.active_count == nitems(toep->ddp.db)) { in ddp_rcvbuf_requeue()
2875 inp = toep->inp; in ddp_rcvbuf_requeue()
2879 drb = alloc_cached_ddp_rcv_buffer(toep); in ddp_rcvbuf_requeue()
2880 DDP_UNLOCK(toep); in ddp_rcvbuf_requeue()
2883 drb = alloc_ddp_rcv_buffer(toep, M_WAITOK); in ddp_rcvbuf_requeue()
2886 DDP_LOCK(toep); in ddp_rcvbuf_requeue()
2891 DDP_LOCK(toep); in ddp_rcvbuf_requeue()
2892 if ((toep->ddp.flags & DDP_DEAD) != 0 || in ddp_rcvbuf_requeue()
2893 toep->ddp.active_count == nitems(toep->ddp.db)) { in ddp_rcvbuf_requeue()
2894 recycle_ddp_rcv_buffer(toep, drb); in ddp_rcvbuf_requeue()
2902 recycle_ddp_rcv_buffer(toep, drb); in ddp_rcvbuf_requeue()
2909 recycle_ddp_rcv_buffer(toep, drb); in ddp_rcvbuf_requeue()
2914 if (!queue_ddp_rcvbuf(toep, drb)) { in ddp_rcvbuf_requeue()
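
ddp_rcvbuf_requeue() cannot perform a sleeping (M_WAITOK) allocation while holding the DDP mutex, so it drops the lock, allocates, retakes the lock, and re-tests DDP_DEAD and the slot count before posting, recycling the buffer if either check now fails. The same unlock/allocate/relock/revalidate shape in a userspace sketch, names hypothetical:

    #include <pthread.h>
    #include <stdlib.h>

    struct rq_model {
        pthread_mutex_t lock;
        int dead;
        int active_count;
    };

    static void
    requeue_one(struct rq_model *rq)
    {
        void *buf;

        pthread_mutex_lock(&rq->lock);
        if (rq->dead || rq->active_count == 2) {
            pthread_mutex_unlock(&rq->lock);
            return;
        }
        pthread_mutex_unlock(&rq->lock);

        buf = malloc(65536);        /* may sleep, like M_WAITOK */

        pthread_mutex_lock(&rq->lock);
        if (rq->dead || rq->active_count == 2) {
            /* World changed while unlocked: give the buffer back. */
            pthread_mutex_unlock(&rq->lock);
            free(buf);
            return;
        }
        rq->active_count++;         /* post 'buf' to the hardware here */
        pthread_mutex_unlock(&rq->lock);
    }
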
2931 struct toepcb *toep = context; in ddp_rcvbuf_requeue_task() local
2933 DDP_LOCK(toep); in ddp_rcvbuf_requeue_task()
2934 ddp_rcvbuf_requeue(toep); in ddp_rcvbuf_requeue_task()
2935 toep->ddp.flags &= ~DDP_TASK_ACTIVE; in ddp_rcvbuf_requeue_task()
2936 DDP_UNLOCK(toep); in ddp_rcvbuf_requeue_task()
2938 free_toepcb(toep); in ddp_rcvbuf_requeue_task()
2942 t4_enable_ddp_rcv(struct socket *so, struct toepcb *toep) in t4_enable_ddp_rcv() argument
2945 struct adapter *sc = td_adapter(toep->td); in t4_enable_ddp_rcv()
2948 switch (ulp_mode(toep)) { in t4_enable_ddp_rcv()
2952 if (set_ddp_ulp_mode(toep)) in t4_enable_ddp_rcv()
2961 DDP_LOCK(toep); in t4_enable_ddp_rcv()
2967 if ((toep->ddp.flags & DDP_AIO) != 0) { in t4_enable_ddp_rcv()
2968 DDP_UNLOCK(toep); in t4_enable_ddp_rcv()
2972 if ((toep->ddp.flags & DDP_RCVBUF) != 0) { in t4_enable_ddp_rcv()
2973 DDP_UNLOCK(toep); in t4_enable_ddp_rcv()
2977 toep->ddp.flags |= DDP_RCVBUF; in t4_enable_ddp_rcv()
2978 TAILQ_INIT(&toep->ddp.cached_buffers); in t4_enable_ddp_rcv()
2979 enable_ddp(sc, toep); in t4_enable_ddp_rcv()
2980 TASK_INIT(&toep->ddp.requeue_task, 0, ddp_rcvbuf_requeue_task, toep); in t4_enable_ddp_rcv()
2981 ddp_queue_toep(toep); in t4_enable_ddp_rcv()
2982 DDP_UNLOCK(toep); in t4_enable_ddp_rcv()