xref: /freebsd/sys/dev/cxgbe/tom/t4_ddp.c (revision a5ff72cb0e51a7675d4e2b5810a2b6dad5b91960)
1 /*-
2  * Copyright (c) 2012 Chelsio Communications, Inc.
3  * All rights reserved.
4  * Written by: Navdeep Parhar <np@FreeBSD.org>
5  *
6  * Redistribution and use in source and binary forms, with or without
7  * modification, are permitted provided that the following conditions
8  * are met:
9  * 1. Redistributions of source code must retain the above copyright
10  *    notice, this list of conditions and the following disclaimer.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
25  * SUCH DAMAGE.
26  */
27 
28 #include <sys/cdefs.h>
29 __FBSDID("$FreeBSD$");
30 
31 #include "opt_inet.h"
32 
33 #include <sys/param.h>
34 #include <sys/types.h>
35 #include <sys/systm.h>
36 #include <sys/kernel.h>
37 #include <sys/ktr.h>
38 #include <sys/module.h>
39 #include <sys/protosw.h>
40 #include <sys/proc.h>
41 #include <sys/domain.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/uio.h>
45 #include <netinet/in.h>
46 #include <netinet/in_pcb.h>
47 #include <netinet/ip.h>
48 #include <netinet/tcp_var.h>
49 #define TCPSTATES
50 #include <netinet/tcp_fsm.h>
51 #include <netinet/toecore.h>
52 
53 #include <vm/vm.h>
54 #include <vm/vm_extern.h>
55 #include <vm/vm_param.h>
56 #include <vm/pmap.h>
57 #include <vm/vm_map.h>
58 #include <vm/vm_page.h>
59 #include <vm/vm_object.h>
60 
61 #ifdef TCP_OFFLOAD
62 #include "common/common.h"
63 #include "common/t4_msg.h"
64 #include "common/t4_regs.h"
65 #include "common/t4_tcb.h"
66 #include "tom/t4_tom.h"
67 
68 VNET_DECLARE(int, tcp_do_autorcvbuf);
69 #define V_tcp_do_autorcvbuf VNET(tcp_do_autorcvbuf)
70 VNET_DECLARE(int, tcp_autorcvbuf_inc);
71 #define V_tcp_autorcvbuf_inc VNET(tcp_autorcvbuf_inc)
72 VNET_DECLARE(int, tcp_autorcvbuf_max);
73 #define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
74 
75 static struct mbuf *get_ddp_mbuf(int len);
76 
77 #define PPOD_SZ(n)	((n) * sizeof(struct pagepod))
78 #define PPOD_SIZE	(PPOD_SZ(1))
79 
80 /* XXX: must match A_ULP_RX_TDDP_PSZ */
81 static int t4_ddp_pgsz[] = {4096, 4096 << 2, 4096 << 4, 4096 << 6};
82 
83 #define MAX_DDP_BUFFER_SIZE		(M_TCB_RX_DDP_BUF0_LEN)
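/*
 * Reserve room for "n" page pods in the adapter's DDP region.  The pods are
 * carved out of the vmem arena created in t4_init_ddp(); only the starting
 * address is returned here, the pods themselves are written out later by
 * write_page_pods().
 */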
84 static int
85 alloc_ppods(struct tom_data *td, int n, u_int *ppod_addr)
86 {
87 	vmem_addr_t v;
88 	int rc;
89 
90 	MPASS(n > 0);
91 
92 	rc = vmem_alloc(td->ppod_arena, PPOD_SZ(n), M_NOWAIT | M_FIRSTFIT, &v);
93 	*ppod_addr = (u_int)v;
94 
95 	return (rc);
96 }
97 
98 static void
99 free_ppods(struct tom_data *td, u_int ppod_addr, int n)
100 {
101 
102 	MPASS(n > 0);
103 
104 	vmem_free(td->ppod_arena, (vmem_addr_t)ppod_addr, PPOD_SZ(n));
105 }
106 
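/*
 * Number of page pods needed to describe a buffer of "npages" VM pages when
 * the DDP page size is ddp_pgsz: the buffer is viewed as
 * npages * PAGE_SIZE / ddp_pgsz DDP pages and each pod describes up to
 * PPOD_PAGES of them.
 */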
107 static inline int
108 pages_to_nppods(int npages, int ddp_pgsz)
109 {
110 	int nsegs = npages * PAGE_SIZE / ddp_pgsz;
111 
112 	return (howmany(nsegs, PPOD_PAGES));
113 }
114 
115 static void
116 free_ddp_buffer(struct tom_data *td, struct ddp_buffer *db)
117 {
118 
119 	if (db == NULL)
120 		return;
121 
122 	if (db->pages)
123 		free(db->pages, M_CXGBE);
124 
125 	if (db->nppods > 0)
126 		free_ppods(td, db->ppod_addr, db->nppods);
127 
128 	free(db, M_CXGBE);
129 }
130 
131 void
132 release_ddp_resources(struct toepcb *toep)
133 {
134 	int i;
135 
136 	for (i = 0; i < nitems(toep->db); i++) {
137 		if (toep->db[i] != NULL) {
138 			free_ddp_buffer(toep->td, toep->db[i]);
139 			toep->db[i] = NULL;
140 		}
141 	}
142 }
143 
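/*
 * Account for "n" bytes that the chip has already placed in the receive
 * window via DDP: advance rcv_nxt, adjust the receive window and rx credits,
 * and append a placeholder mbuf so the data can be claimed through the
 * socket buffer.
 */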
144 /* XXX: handle_ddp_data code duplication */
145 void
146 insert_ddp_data(struct toepcb *toep, uint32_t n)
147 {
148 	struct inpcb *inp = toep->inp;
149 	struct tcpcb *tp = intotcpcb(inp);
150 	struct sockbuf *sb = &inp->inp_socket->so_rcv;
151 	struct mbuf *m;
152 
153 	INP_WLOCK_ASSERT(inp);
154 	SOCKBUF_LOCK_ASSERT(sb);
155 
156 	m = get_ddp_mbuf(n);
157 	tp->rcv_nxt += n;
158 #ifndef USE_DDP_RX_FLOW_CONTROL
159 	KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
160 	tp->rcv_wnd -= n;
161 #endif
162 
163 	KASSERT(toep->sb_cc >= sbused(sb),
164 	    ("%s: sb %p has more data (%d) than last time (%d).",
165 	    __func__, sb, sbused(sb), toep->sb_cc));
166 	toep->rx_credits += toep->sb_cc - sbused(sb);
167 #ifdef USE_DDP_RX_FLOW_CONTROL
168 	toep->rx_credits -= n;	/* adjust for F_RX_FC_DDP */
169 #endif
170 	sbappendstream_locked(sb, m, 0);
171 	toep->sb_cc = sbused(sb);
172 }
173 
174 /* SET_TCB_FIELD sent as a ULP command looks like this */
175 #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
176     sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
177 
178 /* RX_DATA_ACK sent as a ULP command looks like this */
179 #define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \
180     sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core))
181 
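/*
 * Build a SET_TCB_FIELD command wrapped in a ULP_TX_PKT so that it can be
 * carried in a compound work request.  A ULP_TX_SC_NOOP is appended whenever
 * the command would otherwise stop short of a 16B boundary.  Returns the
 * location where the next ULP command should be built.
 */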
182 static inline void *
183 mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
184     uint64_t word, uint64_t mask, uint64_t val)
185 {
186 	struct ulptx_idata *ulpsc;
187 	struct cpl_set_tcb_field_core *req;
188 
189 	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
190 	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));
191 
192 	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
193 	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
194 	ulpsc->len = htobe32(sizeof(*req));
195 
196 	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
197 	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
198 	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
199 	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
200 	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
201 	req->mask = htobe64(mask);
202 	req->val = htobe64(val);
203 
204 	ulpsc = (struct ulptx_idata *)(req + 1);
205 	if (LEN__SET_TCB_FIELD_ULP % 16) {
206 		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
207 		ulpsc->len = htobe32(0);
208 		return (ulpsc + 1);
209 	}
210 	return (ulpsc);
211 }
212 
213 static inline void *
214 mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
215 {
216 	struct ulptx_idata *ulpsc;
217 	struct cpl_rx_data_ack_core *req;
218 
219 	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
220 	ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16));
221 
222 	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
223 	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
224 	ulpsc->len = htobe32(sizeof(*req));
225 
226 	req = (struct cpl_rx_data_ack_core *)(ulpsc + 1);
227 	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid));
228 	req->credit_dack = htobe32(F_RX_MODULATE_RX);
229 
230 	ulpsc = (struct ulptx_idata *)(req + 1);
231 	if (LEN__RX_DATA_ACK_ULP % 16) {
232 		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
233 		ulpsc->len = htobe32(0);
234 		return (ulpsc + 1);
235 	}
236 	return (ulpsc);
237 }
238 
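/*
 * DDP flags to hand to the chip along with the buffer: mark it valid and
 * make it the active buffer.  MSG_WAITALL additionally sets PUSH_DISABLE for
 * the buffer (keep filling it instead of completing it early), while a
 * non-blocking read requests a flush (complete it as soon as possible).
 */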
239 static inline uint64_t
240 select_ddp_flags(struct socket *so, int flags, int db_idx)
241 {
242 	uint64_t ddp_flags = V_TF_DDP_INDICATE_OUT(0);
243 	int waitall = flags & MSG_WAITALL;
244 	int nb = so->so_state & SS_NBIO || flags & (MSG_DONTWAIT | MSG_NBIO);
245 
246 	KASSERT(db_idx == 0 || db_idx == 1,
247 	    ("%s: bad DDP buffer index %d", __func__, db_idx));
248 
249 	if (db_idx == 0) {
250 		ddp_flags |= V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_ACTIVE_BUF(0);
251 		if (waitall)
252 			ddp_flags |= V_TF_DDP_PUSH_DISABLE_0(1);
253 		else if (nb)
254 			ddp_flags |= V_TF_DDP_BUF0_FLUSH(1);
255 		else
256 			ddp_flags |= V_TF_DDP_BUF0_FLUSH(0);
257 	} else {
258 		ddp_flags |= V_TF_DDP_BUF1_VALID(1) | V_TF_DDP_ACTIVE_BUF(1);
259 		if (waitall)
260 			ddp_flags |= V_TF_DDP_PUSH_DISABLE_1(1);
261 		else if (nb)
262 			ddp_flags |= V_TF_DDP_BUF1_FLUSH(1);
263 		else
264 			ddp_flags |= V_TF_DDP_BUF1_FLUSH(0);
265 	}
266 
267 	return (ddp_flags);
268 }
269 
270 static struct wrqe *
271 mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
272     int offset, uint64_t ddp_flags)
273 {
274 	struct ddp_buffer *db = toep->db[db_idx];
275 	struct wrqe *wr;
276 	struct work_request_hdr *wrh;
277 	struct ulp_txpkt *ulpmc;
278 	int len;
279 
280 	KASSERT(db_idx == 0 || db_idx == 1,
281 	    ("%s: bad DDP buffer index %d", __func__, db_idx));
282 
283 	/*
284 	 * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
285 	 * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
286 	 *
287 	 * The work request header is 16B and always ends at a 16B boundary.
288 	 * The ULPTX master commands that follow must all end at 16B boundaries
289 	 * too so we round up the size to 16.
290 	 */
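	/*
	 * As an illustration (the exact sizes follow from the CPL structure
	 * definitions): with a 16B header, three 40B SET_TCB_FIELD ULP
	 * commands rounded up to 48B each, and a 24B RX_DATA_ACK rounded up
	 * to 32B, len works out to 16 + 3 * 48 + 32 = 192 bytes.
	 */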
291 	len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
292 	    roundup2(LEN__RX_DATA_ACK_ULP, 16);
293 
294 	wr = alloc_wrqe(len, toep->ctrlq);
295 	if (wr == NULL)
296 		return (NULL);
297 	wrh = wrtod(wr);
298 	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
299 	ulpmc = (struct ulp_txpkt *)(wrh + 1);
300 
301 	/* Write the buffer's tag */
302 	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
303 	    W_TCB_RX_DDP_BUF0_TAG + db_idx,
304 	    V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
305 	    V_TCB_RX_DDP_BUF0_TAG(db->tag));
306 
307 	/* Update the current offset in the DDP buffer and its total length */
308 	if (db_idx == 0)
309 		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
310 		    W_TCB_RX_DDP_BUF0_OFFSET,
311 		    V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
312 		    V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
313 		    V_TCB_RX_DDP_BUF0_OFFSET(offset) |
314 		    V_TCB_RX_DDP_BUF0_LEN(db->len));
315 	else
316 		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
317 		    W_TCB_RX_DDP_BUF1_OFFSET,
318 		    V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
319 		    V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
320 		    V_TCB_RX_DDP_BUF1_OFFSET(offset) |
321 		    V_TCB_RX_DDP_BUF1_LEN((u64)db->len << 32));
322 
323 	/* Update DDP flags */
324 	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS,
325 	    V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF1_FLUSH(1) |
326 	    V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PUSH_DISABLE_1(1) |
327 	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1) |
328 	    V_TF_DDP_ACTIVE_BUF(1) | V_TF_DDP_INDICATE_OUT(1), ddp_flags);
329 
330 	/* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
331 	ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);
332 
333 	return (wr);
334 }
335 
336 static void
337 discourage_ddp(struct toepcb *toep)
338 {
339 
340 	if (toep->ddp_score && --toep->ddp_score == 0) {
341 		toep->ddp_flags &= ~DDP_OK;
342 		toep->ddp_disabled = time_uptime;
343 		CTR3(KTR_CXGBE, "%s: tid %u !DDP_OK @ %u",
344 		    __func__, toep->tid, time_uptime);
345 	}
346 }
347 
348 static int
349 handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
350 {
351 	uint32_t report = be32toh(ddp_report);
352 	unsigned int db_flag;
353 	struct inpcb *inp = toep->inp;
354 	struct tcpcb *tp;
355 	struct socket *so;
356 	struct sockbuf *sb;
357 	struct mbuf *m;
358 
359 	db_flag = report & F_DDP_BUF_IDX ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
360 
361 	if (__predict_false(!(report & F_DDP_INV)))
362 		CXGBE_UNIMPLEMENTED("DDP buffer still valid");
363 
364 	INP_WLOCK(inp);
365 	so = inp_inpcbtosocket(inp);
366 	sb = &so->so_rcv;
367 	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
368 
369 		/*
370 		 * XXX: think a bit more.
371 		 * tcpcb probably gone, but socket should still be around
372 		 * because we always wait for DDP completion in soreceive no
373 		 * matter what.  Just wake it up and let it clean up.
374 		 */
375 
376 		CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
377 		    __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
378 		SOCKBUF_LOCK(sb);
379 		goto wakeup;
380 	}
381 
382 	tp = intotcpcb(inp);
383 
384 	/*
385 	 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
386 	 * sequence number of the next byte to receive.  The length of
387 	 * the data received for this message must be computed by
388 	 * comparing the new and old values of rcv_nxt.
389 	 *
390 	 * For RX_DATA_DDP, len might be non-zero, but it is only the
391 	 * length of the most recent DMA.  It does not include the
392 	 * total length of the data received since the previous update
393 	 * for this DDP buffer.  rcv_nxt is the sequence number of the
394 	 * first received byte from the most recent DMA.
395 	 */
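	/*
	 * For example, if tp->rcv_nxt was 1000 and an RX_DATA_DDP arrives
	 * with rcv_nxt 5000 and len 96, then 4000 bytes had been placed
	 * before this last 96B DMA and the total accounted for here is 4096.
	 */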
396 	len += be32toh(rcv_nxt) - tp->rcv_nxt;
397 	tp->rcv_nxt += len;
398 	tp->t_rcvtime = ticks;
399 #ifndef USE_DDP_RX_FLOW_CONTROL
400 	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
401 	tp->rcv_wnd -= len;
402 #endif
403 	m = get_ddp_mbuf(len);
404 
405 	SOCKBUF_LOCK(sb);
406 	if (report & F_DDP_BUF_COMPLETE)
407 		toep->ddp_score = DDP_HIGH_SCORE;
408 	else
409 		discourage_ddp(toep);
410 
411 	/* receive buffer autosize */
412 	if (sb->sb_flags & SB_AUTOSIZE &&
413 	    V_tcp_do_autorcvbuf &&
414 	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
415 	    len > (sbspace(sb) / 8 * 7)) {
416 		unsigned int hiwat = sb->sb_hiwat;
417 		unsigned int newsize = min(hiwat + V_tcp_autorcvbuf_inc,
418 		    V_tcp_autorcvbuf_max);
419 
420 		if (!sbreserve_locked(sb, newsize, so, NULL))
421 			sb->sb_flags &= ~SB_AUTOSIZE;
422 		else
423 			toep->rx_credits += newsize - hiwat;
424 	}
425 
426 	KASSERT(toep->sb_cc >= sbused(sb),
427 	    ("%s: sb %p has more data (%d) than last time (%d).",
428 	    __func__, sb, sbused(sb), toep->sb_cc));
429 	toep->rx_credits += toep->sb_cc - sbused(sb);
430 #ifdef USE_DDP_RX_FLOW_CONTROL
431 	toep->rx_credits -= len;	/* adjust for F_RX_FC_DDP */
432 #endif
433 	sbappendstream_locked(sb, m, 0);
434 	toep->sb_cc = sbused(sb);
435 wakeup:
436 	KASSERT(toep->ddp_flags & db_flag,
437 	    ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x, report 0x%x",
438 	    __func__, toep, toep->ddp_flags, report));
439 	toep->ddp_flags &= ~db_flag;
440 	sorwakeup_locked(so);
441 	SOCKBUF_UNLOCK_ASSERT(sb);
442 
443 	INP_WUNLOCK(inp);
444 	return (0);
445 }
446 
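/*
 * The connection is being closed while DDP is still active.  Account for any
 * data already placed in the DDP buffer but not yet reported, and clear the
 * active-buffer flags so that handle_ddp() stops waiting for a completion
 * that will never arrive.
 */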
447 void
448 handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, struct sockbuf *sb,
449     __be32 rcv_nxt)
450 {
451 	struct mbuf *m;
452 	int len;
453 
454 	SOCKBUF_LOCK_ASSERT(sb);
455 	INP_WLOCK_ASSERT(toep->inp);
456 	len = be32toh(rcv_nxt) - tp->rcv_nxt;
457 
458 	/* Signal handle_ddp() to break out of its sleep loop. */
459 	toep->ddp_flags &= ~(DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE);
460 	if (len == 0)
461 		return;
462 
463 	tp->rcv_nxt += len;
464 	KASSERT(toep->sb_cc >= sbused(sb),
465 	    ("%s: sb %p has more data (%d) than last time (%d).",
466 	    __func__, sb, sbused(sb), toep->sb_cc));
467 	toep->rx_credits += toep->sb_cc - sbused(sb);
468 #ifdef USE_DDP_RX_FLOW_CONTROL
469 	toep->rx_credits -= len;	/* adjust for F_RX_FC_DDP */
470 #endif
471 
472 	m = get_ddp_mbuf(len);
473 
474 	sbappendstream_locked(sb, m, 0);
475 	toep->sb_cc = sbused(sb);
476 }
477 
478 #define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\
479 	 F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\
480 	 F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\
481 	 F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR)
482 
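/*
 * CPL_RX_DATA_DDP handler.  A DDP error reported by the chip is fatal.
 * iSCSI connections are diverted to the CPL_RX_ISCSI_DDP handler; for
 * everything else the DDP report is handed to handle_ddp_data().
 */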
483 static int
484 do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
485 {
486 	struct adapter *sc = iq->adapter;
487 	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
488 	unsigned int tid = GET_TID(cpl);
489 	uint32_t vld;
490 	struct toepcb *toep = lookup_tid(sc, tid);
491 
492 	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
493 	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
494 	KASSERT(!(toep->flags & TPF_SYNQE),
495 	    ("%s: toep %p claims to be a synq entry", __func__, toep));
496 
497 	vld = be32toh(cpl->ddpvld);
498 	if (__predict_false(vld & DDP_ERR)) {
499 		panic("%s: DDP error 0x%x (tid %d, toep %p)",
500 		    __func__, vld, tid, toep);
501 	}
502 
503 	if (toep->ulp_mode == ULP_MODE_ISCSI) {
504 		sc->cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
505 		return (0);
506 	}
507 
508 	handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len));
509 
510 	return (0);
511 }
512 
513 static int
514 do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss,
515     struct mbuf *m)
516 {
517 	struct adapter *sc = iq->adapter;
518 	const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1);
519 	unsigned int tid = GET_TID(cpl);
520 	struct toepcb *toep = lookup_tid(sc, tid);
521 
522 	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
523 	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
524 	KASSERT(!(toep->flags & TPF_SYNQE),
525 	    ("%s: toep %p claims to be a synq entry", __func__, toep));
526 
527 	handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0);
528 
529 	return (0);
530 }
531 
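/*
 * Ask the chip to start placing payload directly: clear TF_DDP_OFF, put both
 * buffers in indicate mode, and disable receive coalescing.  DDP_SC_REQ
 * records that a DDP state change has been requested but not yet confirmed.
 */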
532 void
533 enable_ddp(struct adapter *sc, struct toepcb *toep)
534 {
535 
536 	KASSERT((toep->ddp_flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
537 	    ("%s: toep %p has bad ddp_flags 0x%x",
538 	    __func__, toep, toep->ddp_flags));
539 
540 	CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
541 	    __func__, toep->tid, time_uptime);
542 
543 	toep->ddp_flags |= DDP_SC_REQ;
544 	t4_set_tcb_field(sc, toep, 1, W_TCB_RX_DDP_FLAGS,
545 	    V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
546 	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
547 	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1),
548 	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1));
549 	t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS,
550 	    V_TF_RCV_COALESCE_ENABLE(1), 0);
551 }
552 
553 static inline void
554 disable_ddp(struct adapter *sc, struct toepcb *toep)
555 {
556 
557 	KASSERT((toep->ddp_flags & (DDP_ON | DDP_SC_REQ)) == DDP_ON,
558 	    ("%s: toep %p has bad ddp_flags 0x%x",
559 	    __func__, toep, toep->ddp_flags));
560 
561 	CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
562 	    __func__, toep->tid, time_uptime);
563 
564 	toep->ddp_flags |= DDP_SC_REQ;
565 	t4_set_tcb_field(sc, toep, 1, W_TCB_T_FLAGS,
566 	    V_TF_RCV_COALESCE_ENABLE(1), V_TF_RCV_COALESCE_ENABLE(1));
567 	t4_set_tcb_field(sc, toep, 1, W_TCB_RX_DDP_FLAGS, V_TF_DDP_OFF(1),
568 	    V_TF_DDP_OFF(1));
569 }
570 
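/*
 * Fault in and hold the user pages backing the uio's single iovec.  On
 * success *ppages points to a malloc'd array of the held pages (the caller
 * is responsible for freeing it) and *pnpages is the number of pages.
 * Buffers larger than the largest DDP buffer the TCB can describe are
 * rejected.
 */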
571 static int
572 hold_uio(struct uio *uio, vm_page_t **ppages, int *pnpages)
573 {
574 	struct vm_map *map;
575 	struct iovec *iov;
576 	vm_offset_t start, end;
577 	vm_page_t *pp;
578 	int n;
579 
580 	KASSERT(uio->uio_iovcnt == 1,
581 	    ("%s: uio_iovcnt %d", __func__, uio->uio_iovcnt));
582 	KASSERT(uio->uio_td->td_proc == curproc,
583 	    ("%s: uio proc (%p) is not curproc (%p)",
584 	    __func__, uio->uio_td->td_proc, curproc));
585 
586 	map = &curproc->p_vmspace->vm_map;
587 	iov = &uio->uio_iov[0];
588 	start = trunc_page((uintptr_t)iov->iov_base);
589 	end = round_page((vm_offset_t)iov->iov_base + iov->iov_len);
590 	n = howmany(end - start, PAGE_SIZE);
591 
592 	if (end - start > MAX_DDP_BUFFER_SIZE)
593 		return (E2BIG);
594 
595 	pp = malloc(n * sizeof(vm_page_t), M_CXGBE, M_NOWAIT);
596 	if (pp == NULL)
597 		return (ENOMEM);
598 
599 	if (vm_fault_quick_hold_pages(map, (vm_offset_t)iov->iov_base,
600 	    iov->iov_len, VM_PROT_WRITE, pp, n) < 0) {
601 		free(pp, M_CXGBE);
602 		return (EFAULT);
603 	}
604 
605 	*ppages = pp;
606 	*pnpages = n;
607 
608 	return (0);
609 }
610 
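/*
 * Returns 0 if the page list describes exactly the same buffer (same
 * physical pages, offset, and length) as an existing DDP buffer, in which
 * case that buffer and its page pods can be reused as-is.
 */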
611 static int
612 bufcmp(struct ddp_buffer *db, vm_page_t *pages, int npages, int offset, int len)
613 {
614 	int i;
615 
616 	if (db == NULL || db->npages != npages || db->offset != offset ||
617 	    db->len != len)
618 		return (1);
619 
620 	for (i = 0; i < npages; i++) {
621 		if (pages[i]->phys_addr != db->pages[i]->phys_addr)
622 			return (1);
623 	}
624 
625 	return (0);
626 }
627 
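/*
 * Highest common factor of two segment lengths, via Euclid's algorithm.
 */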
628 static int
629 calculate_hcf(int n1, int n2)
630 {
631 	int a, b, t;
632 
633 	if (n1 <= n2) {
634 		a = n1;
635 		b = n2;
636 	} else {
637 		a = n2;
638 		b = n1;
639 	}
640 
641 	while (a != 0) {
642 		t = a;
643 		a = b % a;
644 		b = t;
645 	}
646 
647 	return (b);
648 }
649 
650 static struct ddp_buffer *
651 alloc_ddp_buffer(struct tom_data *td, vm_page_t *pages, int npages, int offset,
652     int len)
653 {
654 	int i, hcf, seglen, idx, ppod, nppods;
655 	struct ddp_buffer *db;
656 
657 	/*
658 	 * The DDP page size is unrelated to the VM page size.  We combine
659 	 * contiguous physical pages into larger segments to get the best DDP
660 	 * page size possible.  This is the largest of the four sizes in
661 	 * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes in
662 	 * the page list.
663 	 */
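	/*
	 * For example, a buffer made up of physically contiguous runs of 32KB
	 * and 48KB has an HCF of 16KB and gets the 16KB DDP page size, while
	 * a single 256KB run can use the largest (256KB) page size.
	 */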
664 	hcf = 0;
665 	for (i = 0; i < npages; i++) {
666 		seglen = PAGE_SIZE;
667 		while (i < npages - 1 &&
668 		    pages[i]->phys_addr + PAGE_SIZE == pages[i + 1]->phys_addr) {
669 			seglen += PAGE_SIZE;
670 			i++;
671 		}
672 
673 		hcf = calculate_hcf(hcf, seglen);
674 		if (hcf < t4_ddp_pgsz[1]) {
675 			idx = 0;
676 			goto have_pgsz;	/* give up, short circuit */
677 		}
678 	}
679 
680 	if (hcf % t4_ddp_pgsz[0] != 0) {
681 		/* hmmm.  This could only happen when PAGE_SIZE < 4K */
682 		KASSERT(PAGE_SIZE < 4096,
683 		    ("%s: PAGE_SIZE %d, hcf %d", __func__, PAGE_SIZE, hcf));
684 		CTR3(KTR_CXGBE, "%s: PAGE_SIZE %d, hcf %d",
685 		    __func__, PAGE_SIZE, hcf);
686 		return (NULL);
687 	}
688 
689 	for (idx = nitems(t4_ddp_pgsz) - 1; idx > 0; idx--) {
690 		if (hcf % t4_ddp_pgsz[idx] == 0)
691 			break;
692 	}
693 have_pgsz:
694 	MPASS(idx <= M_PPOD_PGSZ);
695 
696 	db = malloc(sizeof(*db), M_CXGBE, M_NOWAIT);
697 	if (db == NULL) {
698 		CTR1(KTR_CXGBE, "%s: malloc failed.", __func__);
699 		return (NULL);
700 	}
701 
702 	nppods = pages_to_nppods(npages, t4_ddp_pgsz[idx]);
703 	if (alloc_ppods(td, nppods, &db->ppod_addr) != 0) {
704 		free(db, M_CXGBE);
705 		CTR4(KTR_CXGBE, "%s: no pods, nppods %d, resid %d, pgsz %d",
706 		    __func__, nppods, len, t4_ddp_pgsz[idx]);
707 		return (NULL);
708 	}
709 	ppod = (db->ppod_addr - td->ppod_start) / PPOD_SIZE;
710 
711 	db->tag = V_PPOD_PGSZ(idx) | V_PPOD_TAG(ppod);
712 	db->nppods = nppods;
713 	db->npages = npages;
714 	db->pages = pages;
715 	db->offset = offset;
716 	db->len = len;
717 
718 	CTR6(KTR_CXGBE, "New DDP buffer.  "
719 	    "ddp_pgsz %d, ppod 0x%x, npages %d, nppods %d, offset %d, len %d",
720 	    t4_ddp_pgsz[idx], ppod, db->npages, db->nppods, db->offset,
721 	    db->len);
722 
723 	return (db);
724 }
725 
726 #define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE)
727 
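/*
 * Write the buffer's page pods into adapter memory with ULP_TX_MEM_WRITE
 * work requests.  The pods are carried as immediate data, so each request
 * holds at most NUM_ULP_TX_SC_IMM_PPODS pods (256 bytes' worth).
 */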
728 static int
729 write_page_pods(struct adapter *sc, struct toepcb *toep, struct ddp_buffer *db)
730 {
731 	struct wrqe *wr;
732 	struct ulp_mem_io *ulpmc;
733 	struct ulptx_idata *ulpsc;
734 	struct pagepod *ppod;
735 	int i, j, k, n, chunk, len, ddp_pgsz, idx;
736 	u_int ppod_addr;
737 	uint32_t cmd;
738 
739 	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
740 	if (is_t4(sc))
741 		cmd |= htobe32(F_ULP_MEMIO_ORDER);
742 	else
743 		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
744 	ddp_pgsz = t4_ddp_pgsz[G_PPOD_PGSZ(db->tag)];
745 	ppod_addr = db->ppod_addr;
746 	for (i = 0; i < db->nppods; ppod_addr += chunk) {
747 
748 		/* How many page pods are we writing in this cycle? */
749 		n = min(db->nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
750 		chunk = PPOD_SZ(n);
751 		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
752 
753 		wr = alloc_wrqe(len, toep->ctrlq);
754 		if (wr == NULL)
755 			return (ENOMEM);	/* ok to just bail out */
756 		ulpmc = wrtod(wr);
757 
758 		INIT_ULPTX_WR(ulpmc, len, 0, 0);
759 		ulpmc->cmd = cmd;
760 		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
761 		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
762 		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
763 
764 		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
765 		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
766 		ulpsc->len = htobe32(chunk);
767 
768 		ppod = (struct pagepod *)(ulpsc + 1);
769 		for (j = 0; j < n; i++, j++, ppod++) {
770 			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
771 			    V_PPOD_TID(toep->tid) | db->tag);
772 			ppod->len_offset = htobe64(V_PPOD_LEN(db->len) |
773 			    V_PPOD_OFST(db->offset));
774 			ppod->rsvd = 0;
775 			idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
776 			for (k = 0; k < nitems(ppod->addr); k++) {
777 				if (idx < db->npages) {
778 					ppod->addr[k] =
779 					    htobe64(db->pages[idx]->phys_addr);
780 					idx += ddp_pgsz / PAGE_SIZE;
781 				} else
782 					ppod->addr[k] = 0;
783 #if 0
784 				CTR5(KTR_CXGBE,
785 				    "%s: tid %d ppod[%d]->addr[%d] = %p",
786 				    __func__, toep->tid, i, k,
787 				    htobe64(ppod->addr[k]));
788 #endif
789 			}
790 
791 		}
792 
793 		t4_wrq_tx(sc, wr);
794 	}
795 
796 	return (0);
797 }
798 
799 /*
800  * Reuse, or allocate (and program the page pods for) a new DDP buffer.  The
801  * "pages" array is handed over to this function and should not be used in any
802  * way by the caller after that.
803  */
804 static int
805 select_ddp_buffer(struct adapter *sc, struct toepcb *toep, vm_page_t *pages,
806     int npages, int db_off, int db_len)
807 {
808 	struct ddp_buffer *db;
809 	struct tom_data *td = sc->tom_softc;
810 	int i, empty_slot = -1;
811 
812 	/* Try to reuse */
813 	for (i = 0; i < nitems(toep->db); i++) {
814 		if (bufcmp(toep->db[i], pages, npages, db_off, db_len) == 0) {
815 			free(pages, M_CXGBE);
816 			return (i);	/* pages still held */
817 		} else if (toep->db[i] == NULL && empty_slot < 0)
818 			empty_slot = i;
819 	}
820 
821 	/* Allocate new buffer, write its page pods. */
822 	db = alloc_ddp_buffer(td, pages, npages, db_off, db_len);
823 	if (db == NULL) {
824 		vm_page_unhold_pages(pages, npages);
825 		free(pages, M_CXGBE);
826 		return (-1);
827 	}
828 	if (write_page_pods(sc, toep, db) != 0) {
829 		vm_page_unhold_pages(pages, npages);
830 		free_ddp_buffer(td, db);
831 		return (-1);
832 	}
833 
834 	i = empty_slot;
835 	if (i < 0) {
836 		i = arc4random() % nitems(toep->db);
837 		free_ddp_buffer(td, toep->db[i]);
838 	}
839 	toep->db[i] = db;
840 
841 	CTR5(KTR_CXGBE, "%s: tid %d, DDP buffer[%d] = %p (tag 0x%x)",
842 	    __func__, toep->tid, i, db, db->tag);
843 
844 	return (i);
845 }
846 
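/*
 * Upgrade the transient hold on each page of the buffer to a wiring for the
 * duration of the DDP operation.  unwire_ddp_buffer() undoes this once the
 * operation completes.
 */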
847 static void
848 wire_ddp_buffer(struct ddp_buffer *db)
849 {
850 	int i;
851 	vm_page_t p;
852 
853 	for (i = 0; i < db->npages; i++) {
854 		p = db->pages[i];
855 		vm_page_lock(p);
856 		vm_page_wire(p);
857 		vm_page_unhold(p);
858 		vm_page_unlock(p);
859 	}
860 }
861 
862 static void
863 unwire_ddp_buffer(struct ddp_buffer *db)
864 {
865 	int i;
866 	vm_page_t p;
867 
868 	for (i = 0; i < db->npages; i++) {
869 		p = db->pages[i];
870 		vm_page_lock(p);
871 		vm_page_unwire(p, PQ_INACTIVE);
872 		vm_page_unlock(p);
873 	}
874 }
875 
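/*
 * Try to have the remainder of this receive request placed directly into the
 * user's buffer via DDP instead of staging it in the socket buffer.  Called
 * from t4_soreceive_ddp() with the socket buffer locked.  Returns 0 when the
 * caller should continue through the socket buffer (either DDP was not
 * attempted, or the placed data has already been accounted for there),
 * otherwise the error from waiting for the DDP operation to complete.
 */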
876 static int
877 handle_ddp(struct socket *so, struct uio *uio, int flags, int error)
878 {
879 	struct sockbuf *sb = &so->so_rcv;
880 	struct tcpcb *tp = so_sototcpcb(so);
881 	struct toepcb *toep = tp->t_toe;
882 	struct adapter *sc = td_adapter(toep->td);
883 	vm_page_t *pages;
884 	int npages, db_idx, rc, buf_flag;
885 	struct ddp_buffer *db;
886 	struct wrqe *wr;
887 	uint64_t ddp_flags;
888 
889 	SOCKBUF_LOCK_ASSERT(sb);
890 
891 #if 0
892 	if (sbused(sb) + sc->tt.ddp_thres > uio->uio_resid) {
893 		CTR4(KTR_CXGBE, "%s: sb_cc %d, threshold %d, resid %d",
894 		    __func__, sbused(sb), sc->tt.ddp_thres, uio->uio_resid);
895 	}
896 #endif
897 
898 	/* XXX: too eager to disable DDP, could handle NBIO better than this. */
899 	if (sbused(sb) >= uio->uio_resid || uio->uio_resid < sc->tt.ddp_thres ||
900 	    uio->uio_resid > MAX_DDP_BUFFER_SIZE || uio->uio_iovcnt > 1 ||
901 	    so->so_state & SS_NBIO || flags & (MSG_DONTWAIT | MSG_NBIO) ||
902 	    error || so->so_error || sb->sb_state & SBS_CANTRCVMORE)
903 		goto no_ddp;
904 
905 	/*
906 	 * Fault in and then hold the pages of the uio buffers.  We'll wire them
907 	 * a bit later if everything else works out.
908 	 */
909 	SOCKBUF_UNLOCK(sb);
910 	if (hold_uio(uio, &pages, &npages) != 0) {
911 		SOCKBUF_LOCK(sb);
912 		goto no_ddp;
913 	}
914 	SOCKBUF_LOCK(sb);
915 	if (__predict_false(so->so_error || sb->sb_state & SBS_CANTRCVMORE)) {
916 		vm_page_unhold_pages(pages, npages);
917 		free(pages, M_CXGBE);
918 		goto no_ddp;
919 	}
920 
921 	/*
922 	 * Figure out which one of the two DDP buffers to use this time.
923 	 */
924 	db_idx = select_ddp_buffer(sc, toep, pages, npages,
925 	    (uintptr_t)uio->uio_iov->iov_base & PAGE_MASK, uio->uio_resid);
926 	pages = NULL;	/* handed off to select_ddp_buffer */
927 	if (db_idx < 0)
928 		goto no_ddp;
929 	db = toep->db[db_idx];
930 	buf_flag = db_idx == 0 ? DDP_BUF0_ACTIVE : DDP_BUF1_ACTIVE;
931 
932 	/*
933 	 * Build the compound work request that tells the chip where to DMA the
934 	 * payload.
935 	 */
936 	ddp_flags = select_ddp_flags(so, flags, db_idx);
937 	wr = mk_update_tcb_for_ddp(sc, toep, db_idx, sbused(sb), ddp_flags);
938 	if (wr == NULL) {
939 		/*
940 		 * Just unhold the pages.  The DDP buffer's software state is
941 		 * left as-is in the toep.  The page pods were written
942 		 * successfully and we may have an opportunity to use it in the
943 		 * future.
944 		 */
945 		vm_page_unhold_pages(db->pages, db->npages);
946 		goto no_ddp;
947 	}
948 
949 	/* Wire (and then unhold) the pages, and give the chip the go-ahead. */
950 	wire_ddp_buffer(db);
951 	t4_wrq_tx(sc, wr);
952 	sb->sb_flags &= ~SB_DDP_INDICATE;
953 	toep->ddp_flags |= buf_flag;
954 
955 	/*
956 	 * Wait for the DDP operation to complete and then unwire the pages.
957 	 * The return code from the sbwait will be the final return code of this
958 	 * function.  But we do need to wait for DDP no matter what.
959 	 */
960 	rc = sbwait(sb);
961 	while (toep->ddp_flags & buf_flag) {
962 		/* XXXGL: shouldn't this be an sbwait() call? */
963 		sb->sb_flags |= SB_WAIT;
964 		msleep(&sb->sb_acc, &sb->sb_mtx, PSOCK, "sbwait", 0);
965 	}
966 	unwire_ddp_buffer(db);
967 	return (rc);
968 no_ddp:
969 	disable_ddp(sc, toep);
970 	discourage_ddp(toep);
971 	sb->sb_flags &= ~SB_DDP_INDICATE;
972 	return (0);
973 }
974 
975 void
976 t4_init_ddp(struct adapter *sc, struct tom_data *td)
977 {
978 
979 	td->ppod_start = sc->vres.ddp.start;
980 	td->ppod_arena = vmem_create("DDP page pods", sc->vres.ddp.start,
981 	    sc->vres.ddp.size, 1, 32, M_FIRSTFIT | M_NOWAIT);
982 
983 	t4_register_cpl_handler(sc, CPL_RX_DATA_DDP, do_rx_data_ddp);
984 	t4_register_cpl_handler(sc, CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
985 }
986 
987 void
988 t4_uninit_ddp(struct adapter *sc __unused, struct tom_data *td)
989 {
990 
991 	if (td->ppod_arena != NULL) {
992 		vmem_destroy(td->ppod_arena);
993 		td->ppod_arena = NULL;
994 	}
995 }
996 
997 #define	VNET_SO_ASSERT(so)						\
998 	VNET_ASSERT(curvnet != NULL,					\
999 	    ("%s:%d curvnet is NULL, so=%p", __func__, __LINE__, (so)));
1000 #define	SBLOCKWAIT(f)	(((f) & MSG_DONTWAIT) ? 0 : SBL_WAIT)
1001 static int
1002 soreceive_rcvoob(struct socket *so, struct uio *uio, int flags)
1003 {
1004 
1005 	CXGBE_UNIMPLEMENTED(__func__);
1006 }
1007 
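/*
 * Data received via DDP is already in the user's buffer, but the socket
 * buffer accounting still works in terms of mbufs.  get_ddp_mbuf() provides
 * a placeholder mbuf whose m_data points at ddp_magic_str; is_ddp_mbuf()
 * recognizes such mbufs, and m_mbuftouio_ddp() "copies" them with UIO_NOCOPY
 * so that only the uio position advances and the placed data is untouched.
 */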
1008 static char ddp_magic_str[] = "nothing to see here";
1009 
1010 static struct mbuf *
1011 get_ddp_mbuf(int len)
1012 {
1013 	struct mbuf *m;
1014 
1015 	m = m_get(M_NOWAIT, MT_DATA);
1016 	if (m == NULL)
1017 		CXGBE_UNIMPLEMENTED("mbuf alloc failure");
1018 	m->m_len = len;
1019 	m->m_data = &ddp_magic_str[0];
1020 
1021 	return (m);
1022 }
1023 
1024 static inline int
1025 is_ddp_mbuf(struct mbuf *m)
1026 {
1027 
1028 	return (m->m_data == &ddp_magic_str[0]);
1029 }
1030 
1031 /*
1032  * Copy an mbuf chain into a uio limited by len if set.
1033  * Copy an mbuf chain into a uio, limited to len bytes when len is set.
1034 static int
1035 m_mbuftouio_ddp(struct uio *uio, struct mbuf *m, int len)
1036 {
1037 	int error, length, total;
1038 	int progress = 0;
1039 
1040 	if (len > 0)
1041 		total = min(uio->uio_resid, len);
1042 	else
1043 		total = uio->uio_resid;
1044 
1045 	/* Fill the uio with data from the mbufs. */
1046 	for (; m != NULL; m = m->m_next) {
1047 		length = min(m->m_len, total - progress);
1048 
1049 		if (is_ddp_mbuf(m)) {
1050 			enum uio_seg segflag = uio->uio_segflg;
1051 
1052 			uio->uio_segflg	= UIO_NOCOPY;
1053 			error = uiomove(mtod(m, void *), length, uio);
1054 			uio->uio_segflg	= segflag;
1055 		} else
1056 			error = uiomove(mtod(m, void *), length, uio);
1057 		if (error)
1058 			return (error);
1059 
1060 		progress += length;
1061 	}
1062 
1063 	return (0);
1064 }
1065 
1066 /*
1067  * Based on soreceive_stream() in uipc_socket.c
1068  */
1069 int
1070 t4_soreceive_ddp(struct socket *so, struct sockaddr **psa, struct uio *uio,
1071     struct mbuf **mp0, struct mbuf **controlp, int *flagsp)
1072 {
1073 	int len = 0, error = 0, flags, oresid, ddp_handled = 0;
1074 	struct sockbuf *sb;
1075 	struct mbuf *m, *n = NULL;
1076 
1077 	/* We only do stream sockets. */
1078 	if (so->so_type != SOCK_STREAM)
1079 		return (EINVAL);
1080 	if (psa != NULL)
1081 		*psa = NULL;
1082 	if (controlp != NULL)
1083 		return (EINVAL);
1084 	if (flagsp != NULL)
1085 		flags = *flagsp &~ MSG_EOR;
1086 	else
1087 		flags = 0;
1088 	if (flags & MSG_OOB)
1089 		return (soreceive_rcvoob(so, uio, flags));
1090 	if (mp0 != NULL)
1091 		*mp0 = NULL;
1092 
1093 	sb = &so->so_rcv;
1094 
1095 	/* Prevent other readers from entering the socket. */
1096 	error = sblock(sb, SBLOCKWAIT(flags));
1097 	SOCKBUF_LOCK(sb);
1098 	if (error)
1099 		goto out;
1100 
1101 	/* Easy one, no space to copyout anything. */
1102 	if (uio->uio_resid == 0) {
1103 		error = EINVAL;
1104 		goto out;
1105 	}
1106 	oresid = uio->uio_resid;
1107 
1108 	/* We will never ever get anything unless we are or were connected. */
1109 	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
1110 		error = ENOTCONN;
1111 		goto out;
1112 	}
1113 
1114 restart:
1115 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1116 
1117 	if (sb->sb_flags & SB_DDP_INDICATE && !ddp_handled) {
1118 
1119 		/* uio should be just as it was at entry */
1120 		KASSERT(oresid == uio->uio_resid,
1121 		    ("%s: oresid = %d, uio_resid = %zd, sbavail = %d",
1122 		    __func__, oresid, uio->uio_resid, sbavail(sb)));
1123 
1124 		error = handle_ddp(so, uio, flags, 0);
1125 		ddp_handled = 1;
1126 		if (error)
1127 			goto out;
1128 	}
1129 
1130 	/* Abort if socket has reported problems. */
1131 	if (so->so_error) {
1132 		if (sbavail(sb))
1133 			goto deliver;
1134 		if (oresid > uio->uio_resid)
1135 			goto out;
1136 		error = so->so_error;
1137 		if (!(flags & MSG_PEEK))
1138 			so->so_error = 0;
1139 		goto out;
1140 	}
1141 
1142 	/* Door is closed.  Deliver what is left, if any. */
1143 	if (sb->sb_state & SBS_CANTRCVMORE) {
1144 		if (sbavail(sb))
1145 			goto deliver;
1146 		else
1147 			goto out;
1148 	}
1149 
1150 	/* Socket buffer is empty and we shall not block. */
1151 	if (sbavail(sb) == 0 &&
1152 	    ((so->so_state & SS_NBIO) || (flags & (MSG_DONTWAIT|MSG_NBIO)))) {
1153 		error = EAGAIN;
1154 		goto out;
1155 	}
1156 
1157 	/* Socket buffer got some data that we shall deliver now. */
1158 	if (sbavail(sb) > 0 && !(flags & MSG_WAITALL) &&
1159 	    ((so->so_state & SS_NBIO) ||
1160 	     (flags & (MSG_DONTWAIT|MSG_NBIO)) ||
1161 	     sbavail(sb) >= sb->sb_lowat ||
1162 	     sbavail(sb) >= uio->uio_resid ||
1163 	     sbavail(sb) >= sb->sb_hiwat)) {
1164 		goto deliver;
1165 	}
1166 
1167 	/* On MSG_WAITALL we must wait until all data or error arrives. */
1168 	if ((flags & MSG_WAITALL) &&
1169 	    (sbavail(sb) >= uio->uio_resid || sbavail(sb) >= sb->sb_lowat))
1170 		goto deliver;
1171 
1172 	/*
1173 	 * Wait and block until (more) data comes in.
1174 	 * NB: Drops the sockbuf lock during wait.
1175 	 */
1176 	error = sbwait(sb);
1177 	if (error) {
1178 		if (sb->sb_flags & SB_DDP_INDICATE && !ddp_handled) {
1179 			(void) handle_ddp(so, uio, flags, 1);
1180 			ddp_handled = 1;
1181 		}
1182 		goto out;
1183 	}
1184 	goto restart;
1185 
1186 deliver:
1187 	SOCKBUF_LOCK_ASSERT(&so->so_rcv);
1188 	KASSERT(sbavail(sb) > 0, ("%s: sockbuf empty", __func__));
1189 	KASSERT(sb->sb_mb != NULL, ("%s: sb_mb == NULL", __func__));
1190 
1191 	if (sb->sb_flags & SB_DDP_INDICATE && !ddp_handled)
1192 		goto restart;
1193 
1194 	/* Statistics. */
1195 	if (uio->uio_td)
1196 		uio->uio_td->td_ru.ru_msgrcv++;
1197 
1198 	/* Fill uio until full or current end of socket buffer is reached. */
1199 	len = min(uio->uio_resid, sbavail(sb));
1200 	if (mp0 != NULL) {
1201 		/* Dequeue as many mbufs as possible. */
1202 		if (!(flags & MSG_PEEK) && len >= sb->sb_mb->m_len) {
1203 			for (*mp0 = m = sb->sb_mb;
1204 			     m != NULL && m->m_len <= len;
1205 			     m = m->m_next) {
1206 				len -= m->m_len;
1207 				uio->uio_resid -= m->m_len;
1208 				sbfree(sb, m);
1209 				n = m;
1210 			}
1211 			sb->sb_mb = m;
1212 			if (sb->sb_mb == NULL)
1213 				SB_EMPTY_FIXUP(sb);
1214 			n->m_next = NULL;
1215 		}
1216 		/* Copy the remainder. */
1217 		if (len > 0) {
1218 			KASSERT(sb->sb_mb != NULL,
1219 			    ("%s: len > 0 && sb->sb_mb empty", __func__));
1220 
1221 			m = m_copym(sb->sb_mb, 0, len, M_NOWAIT);
1222 			if (m == NULL)
1223 				len = 0;	/* Don't flush data from sockbuf. */
1224 			else
1225 				uio->uio_resid -= m->m_len;
1226 			if (*mp0 != NULL)
1227 				n->m_next = m;
1228 			else
1229 				*mp0 = m;
1230 			if (*mp0 == NULL) {
1231 				error = ENOBUFS;
1232 				goto out;
1233 			}
1234 		}
1235 	} else {
1236 		/* NB: Must unlock socket buffer as uiomove may sleep. */
1237 		SOCKBUF_UNLOCK(sb);
1238 		error = m_mbuftouio_ddp(uio, sb->sb_mb, len);
1239 		SOCKBUF_LOCK(sb);
1240 		if (error)
1241 			goto out;
1242 	}
1243 	SBLASTRECORDCHK(sb);
1244 	SBLASTMBUFCHK(sb);
1245 
1246 	/*
1247 	 * Remove the delivered data from the socket buffer unless we
1248 	 * were only peeking.
1249 	 */
1250 	if (!(flags & MSG_PEEK)) {
1251 		if (len > 0)
1252 			sbdrop_locked(sb, len);
1253 
1254 		/* Notify protocol that we drained some data. */
1255 		if ((so->so_proto->pr_flags & PR_WANTRCVD) &&
1256 		    (((flags & MSG_WAITALL) && uio->uio_resid > 0) ||
1257 		     !(flags & MSG_SOCALLBCK))) {
1258 			SOCKBUF_UNLOCK(sb);
1259 			VNET_SO_ASSERT(so);
1260 			(*so->so_proto->pr_usrreqs->pru_rcvd)(so, flags);
1261 			SOCKBUF_LOCK(sb);
1262 		}
1263 	}
1264 
1265 	/*
1266 	 * For MSG_WAITALL we may have to loop again and wait for
1267 	 * more data to come in.
1268 	 */
1269 	if ((flags & MSG_WAITALL) && uio->uio_resid > 0)
1270 		goto restart;
1271 out:
1272 	SOCKBUF_LOCK_ASSERT(sb);
1273 	SBLASTRECORDCHK(sb);
1274 	SBLASTMBUFCHK(sb);
1275 	SOCKBUF_UNLOCK(sb);
1276 	sbunlock(sb);
1277 	return (error);
1278 }
1279 
1280 #endif
1281