xref: /freebsd/sys/dev/cxgbe/tom/t4_ddp.c (revision 70693a45381b687e40ea30710aa38cb9f24b6b02)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2012 Chelsio Communications, Inc.
5  * All rights reserved.
6  * Written by: Navdeep Parhar <np@FreeBSD.org>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 #include <sys/cdefs.h>
31 #include "opt_inet.h"
32 
33 #include <sys/param.h>
34 #include <sys/aio.h>
35 #include <sys/bio.h>
36 #include <sys/file.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/ktr.h>
40 #include <sys/module.h>
41 #include <sys/protosw.h>
42 #include <sys/proc.h>
43 #include <sys/domain.h>
44 #include <sys/socket.h>
45 #include <sys/socketvar.h>
46 #include <sys/taskqueue.h>
47 #include <sys/uio.h>
48 #include <netinet/in.h>
49 #include <netinet/in_pcb.h>
50 #include <netinet/ip.h>
51 #include <netinet/tcp_var.h>
52 #define TCPSTATES
53 #include <netinet/tcp_fsm.h>
54 #include <netinet/toecore.h>
55 
56 #include <vm/vm.h>
57 #include <vm/vm_extern.h>
58 #include <vm/vm_param.h>
59 #include <vm/pmap.h>
60 #include <vm/vm_map.h>
61 #include <vm/vm_page.h>
62 #include <vm/vm_object.h>
63 
64 #include <cam/scsi/scsi_all.h>
65 #include <cam/ctl/ctl_io.h>
66 
67 #ifdef TCP_OFFLOAD
68 #include "common/common.h"
69 #include "common/t4_msg.h"
70 #include "common/t4_regs.h"
71 #include "common/t4_tcb.h"
72 #include "tom/t4_tom.h"
73 
74 /*
75  * Use the 'backend3' field in AIO jobs to store the amount of data
76  * received by the AIO job so far.
77  */
78 #define	aio_received	backend3
79 
80 static void aio_ddp_requeue_task(void *context, int pending);
81 static void ddp_complete_all(struct toepcb *toep, int error);
82 static void t4_aio_cancel_active(struct kaiocb *job);
83 static void t4_aio_cancel_queued(struct kaiocb *job);
84 static int t4_alloc_page_pods_for_rcvbuf(struct ppod_region *pr,
85     struct ddp_rcv_buffer *drb);
86 static int t4_write_page_pods_for_rcvbuf(struct adapter *sc,
87     struct sge_wrq *wrq, int tid, struct ddp_rcv_buffer *drb);
88 
89 static TAILQ_HEAD(, pageset) ddp_orphan_pagesets;
90 static struct mtx ddp_orphan_pagesets_lock;
91 static struct task ddp_orphan_task;
92 
93 #define MAX_DDP_BUFFER_SIZE		(M_TCB_RX_DDP_BUF0_LEN)
94 
95 /*
96  * A page set holds information about a user buffer used for AIO DDP.
97  * The page set holds resources such as the VM pages backing the
98  * buffer (either held or wired) and the page pods associated with the
99  * buffer.  Recently used page sets are cached to allow for efficient
100  * reuse of buffers (avoiding the need to re-fault in pages, hold
101  * them, etc.).  Note that cached page sets keep the backing pages
102  * wired.  The number of wired pages is capped by only allowing for
103  * two wired pagesets per connection.  This is not a perfect cap, but
104  * is a trade-off for performance.
105  *
106  * If an application ping-pongs two buffers for a connection via
107  * aio_read(2) then those buffers should remain wired and expensive VM
108  * fault lookups should be avoided after each buffer has been used
109  * once.  If an application uses more than two buffers then this will
110  * fall back to doing expensive VM fault lookups for each operation.
111  */
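/*
 * Tear down a page set: free its page pods (if any), unwire the
 * backing pages, and hand the pageset to the orphan task so that the
 * final cleanup (including vmspace_free()) runs in taskqueue context.
 */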
112 static void
113 free_pageset(struct tom_data *td, struct pageset *ps)
114 {
115 	vm_page_t p;
116 	int i;
117 
118 	if (ps->prsv.prsv_nppods > 0)
119 		t4_free_page_pods(&ps->prsv);
120 
121 	for (i = 0; i < ps->npages; i++) {
122 		p = ps->pages[i];
123 		vm_page_unwire(p, PQ_INACTIVE);
124 	}
125 	mtx_lock(&ddp_orphan_pagesets_lock);
126 	TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link);
127 	taskqueue_enqueue(taskqueue_thread, &ddp_orphan_task);
128 	mtx_unlock(&ddp_orphan_pagesets_lock);
129 }
130 
131 static void
132 ddp_free_orphan_pagesets(void *context, int pending)
133 {
134 	struct pageset *ps;
135 
136 	mtx_lock(&ddp_orphan_pagesets_lock);
137 	while (!TAILQ_EMPTY(&ddp_orphan_pagesets)) {
138 		ps = TAILQ_FIRST(&ddp_orphan_pagesets);
139 		TAILQ_REMOVE(&ddp_orphan_pagesets, ps, link);
140 		mtx_unlock(&ddp_orphan_pagesets_lock);
141 		if (ps->vm)
142 			vmspace_free(ps->vm);
143 		free(ps, M_CXGBE);
144 		mtx_lock(&ddp_orphan_pagesets_lock);
145 	}
146 	mtx_unlock(&ddp_orphan_pagesets_lock);
147 }
148 
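/*
 * Return a cached pageset to the connection's cache unless the
 * connection is being torn down (DDP_DEAD), in which case free it.
 */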
149 static void
150 recycle_pageset(struct toepcb *toep, struct pageset *ps)
151 {
152 
153 	DDP_ASSERT_LOCKED(toep);
154 	if (!(toep->ddp.flags & DDP_DEAD)) {
155 		KASSERT(toep->ddp.cached_count + toep->ddp.active_count <
156 		    nitems(toep->ddp.db), ("too many wired pagesets"));
157 		TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link);
158 		toep->ddp.cached_count++;
159 	} else
160 		free_pageset(toep->td, ps);
161 }
162 
163 static void
164 ddp_complete_one(struct kaiocb *job, int error)
165 {
166 	long copied;
167 
168 	/*
169 	 * If this job had copied data out of the socket buffer before
170 	 * it was cancelled, report it as a short read rather than an
171 	 * error.
172 	 */
173 	copied = job->aio_received;
174 	if (copied != 0 || error == 0)
175 		aio_complete(job, copied, 0);
176 	else
177 		aio_complete(job, -1, error);
178 }
179 
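/*
 * Release a DDP receive buffer: free its page pods and contiguous
 * backing memory, and drop the toepcb reference taken when the buffer
 * was allocated.
 */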
180 static void
181 free_ddp_rcv_buffer(struct toepcb *toep, struct ddp_rcv_buffer *drb)
182 {
183 	t4_free_page_pods(&drb->prsv);
184 	free(drb->buf, M_CXGBE);
185 	free(drb, M_CXGBE);
186 	counter_u64_add(toep->ofld_rxq->ddp_buffer_free, 1);
187 	free_toepcb(toep);
188 }
189 
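/*
 * Return a receive buffer to the connection's cache if there is room
 * and the connection is still alive; otherwise free it.
 */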
190 static void
191 recycle_ddp_rcv_buffer(struct toepcb *toep, struct ddp_rcv_buffer *drb)
192 {
193 	DDP_CACHE_LOCK(toep);
194 	if (!(toep->ddp.flags & DDP_DEAD) &&
195 	    toep->ddp.cached_count < t4_ddp_rcvbuf_cache) {
196 		TAILQ_INSERT_HEAD(&toep->ddp.cached_buffers, drb, link);
197 		toep->ddp.cached_count++;
198 		DDP_CACHE_UNLOCK(toep);
199 	} else {
200 		DDP_CACHE_UNLOCK(toep);
201 		free_ddp_rcv_buffer(toep, drb);
202 	}
203 }
204 
205 static struct ddp_rcv_buffer *
206 alloc_cached_ddp_rcv_buffer(struct toepcb *toep)
207 {
208 	struct ddp_rcv_buffer *drb;
209 
210 	DDP_CACHE_LOCK(toep);
211 	if (!TAILQ_EMPTY(&toep->ddp.cached_buffers)) {
212 		drb = TAILQ_FIRST(&toep->ddp.cached_buffers);
213 		TAILQ_REMOVE(&toep->ddp.cached_buffers, drb, link);
214 		toep->ddp.cached_count--;
215 		counter_u64_add(toep->ofld_rxq->ddp_buffer_reuse, 1);
216 	} else
217 		drb = NULL;
218 	DDP_CACHE_UNLOCK(toep);
219 	return (drb);
220 }
221 
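/*
 * Allocate a new DDP receive buffer backed by physically contiguous
 * memory, allocate and write its page pods, and take a reference on
 * the toepcb that is released when the buffer is freed.
 */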
222 static struct ddp_rcv_buffer *
223 alloc_ddp_rcv_buffer(struct toepcb *toep, int how)
224 {
225 	struct tom_data *td = toep->td;
226 	struct adapter *sc = td_adapter(td);
227 	struct ddp_rcv_buffer *drb;
228 	int error;
229 
230 	drb = malloc(sizeof(*drb), M_CXGBE, how | M_ZERO);
231 	if (drb == NULL)
232 		return (NULL);
233 
234 	drb->buf = contigmalloc(t4_ddp_rcvbuf_len, M_CXGBE, how, 0, ~0,
235 	    t4_ddp_rcvbuf_len, 0);
236 	if (drb->buf == NULL) {
237 		free(drb, M_CXGBE);
238 		return (NULL);
239 	}
240 	drb->len = t4_ddp_rcvbuf_len;
241 	drb->refs = 1;
242 
243 	error = t4_alloc_page_pods_for_rcvbuf(&td->pr, drb);
244 	if (error != 0) {
245 		free(drb->buf, M_CXGBE);
246 		free(drb, M_CXGBE);
247 		return (NULL);
248 	}
249 
250 	error = t4_write_page_pods_for_rcvbuf(sc, toep->ctrlq, toep->tid, drb);
251 	if (error != 0) {
252 		t4_free_page_pods(&drb->prsv);
253 		free(drb->buf, M_CXGBE);
254 		free(drb, M_CXGBE);
255 		return (NULL);
256 	}
257 
258 	hold_toepcb(toep);
259 	counter_u64_add(toep->ofld_rxq->ddp_buffer_alloc, 1);
260 	return (drb);
261 }
262 
263 static void
264 free_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db)
265 {
266 	if ((toep->ddp.flags & DDP_RCVBUF) != 0) {
267 		if (db->drb != NULL)
268 			free_ddp_rcv_buffer(toep, db->drb);
269 #ifdef INVARIANTS
270 		db->drb = NULL;
271 #endif
272 		return;
273 	}
274 
275 	if (db->job) {
276 		/*
277 		 * XXX: If we are un-offloading the socket then we
278 		 * should requeue these on the socket somehow.  If we
279 		 * got a FIN from the remote end, then this completes
280 		 * any remaining requests with an EOF read.
281 		 */
282 		if (!aio_clear_cancel_function(db->job))
283 			ddp_complete_one(db->job, 0);
284 #ifdef INVARIANTS
285 		db->job = NULL;
286 #endif
287 	}
288 
289 	if (db->ps) {
290 		free_pageset(toep->td, db->ps);
291 #ifdef INVARIANTS
292 		db->ps = NULL;
293 #endif
294 	}
295 }
296 
297 static void
298 ddp_init_toep(struct toepcb *toep)
299 {
300 
301 	toep->ddp.flags = DDP_OK;
302 	toep->ddp.active_id = -1;
303 	mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF);
304 	mtx_init(&toep->ddp.cache_lock, "t4 ddp cache", NULL, MTX_DEF);
305 }
306 
307 void
308 ddp_uninit_toep(struct toepcb *toep)
309 {
310 
311 	mtx_destroy(&toep->ddp.lock);
312 	mtx_destroy(&toep->ddp.cache_lock);
313 }
314 
315 void
316 release_ddp_resources(struct toepcb *toep)
317 {
318 	struct ddp_rcv_buffer *drb;
319 	struct pageset *ps;
320 	int i;
321 
322 	DDP_LOCK(toep);
323 	DDP_CACHE_LOCK(toep);
324 	toep->ddp.flags |= DDP_DEAD;
325 	DDP_CACHE_UNLOCK(toep);
326 	for (i = 0; i < nitems(toep->ddp.db); i++) {
327 		free_ddp_buffer(toep, &toep->ddp.db[i]);
328 	}
329 	if ((toep->ddp.flags & DDP_AIO) != 0) {
330 		while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) {
331 			TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
332 			free_pageset(toep->td, ps);
333 		}
334 		ddp_complete_all(toep, 0);
335 	}
336 	if ((toep->ddp.flags & DDP_RCVBUF) != 0) {
337 		DDP_CACHE_LOCK(toep);
338 		while ((drb = TAILQ_FIRST(&toep->ddp.cached_buffers)) != NULL) {
339 			TAILQ_REMOVE(&toep->ddp.cached_buffers, drb, link);
340 			free_ddp_rcv_buffer(toep, drb);
341 		}
342 		DDP_CACHE_UNLOCK(toep);
343 	}
344 	DDP_UNLOCK(toep);
345 }
346 
347 #ifdef INVARIANTS
348 void
349 ddp_assert_empty(struct toepcb *toep)
350 {
351 	int i;
352 
353 	MPASS((toep->ddp.flags & (DDP_TASK_ACTIVE | DDP_DEAD)) != DDP_TASK_ACTIVE);
354 	for (i = 0; i < nitems(toep->ddp.db); i++) {
355 		if ((toep->ddp.flags & DDP_AIO) != 0) {
356 			MPASS(toep->ddp.db[i].job == NULL);
357 			MPASS(toep->ddp.db[i].ps == NULL);
358 		} else
359 			MPASS(toep->ddp.db[i].drb == NULL);
360 	}
361 	if ((toep->ddp.flags & DDP_AIO) != 0) {
362 		MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets));
363 		MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq));
364 	}
365 	if ((toep->ddp.flags & DDP_RCVBUF) != 0)
366 		MPASS(TAILQ_EMPTY(&toep->ddp.cached_buffers));
367 }
368 #endif
369 
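/*
 * A hardware DDP buffer has completed or been invalidated; fix up the
 * active buffer accounting and recycle the associated pageset (AIO
 * mode) or receive buffer (rcvbuf mode).
 */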
370 static void
371 complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db,
372     unsigned int db_idx)
373 {
374 	struct ddp_rcv_buffer *drb;
375 	unsigned int db_flag;
376 
377 	toep->ddp.active_count--;
378 	if (toep->ddp.active_id == db_idx) {
379 		if (toep->ddp.active_count == 0) {
380 			if ((toep->ddp.flags & DDP_AIO) != 0)
381 				KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL,
382 				    ("%s: active_count mismatch", __func__));
383 			else
384 				KASSERT(toep->ddp.db[db_idx ^ 1].drb == NULL,
385 				    ("%s: active_count mismatch", __func__));
386 			toep->ddp.active_id = -1;
387 		} else
388 			toep->ddp.active_id ^= 1;
389 #ifdef VERBOSE_TRACES
390 		CTR3(KTR_CXGBE, "%s: tid %u, ddp_active_id = %d", __func__,
391 		    toep->tid, toep->ddp.active_id);
392 #endif
393 	} else {
394 		KASSERT(toep->ddp.active_count != 0 &&
395 		    toep->ddp.active_id != -1,
396 		    ("%s: active count mismatch", __func__));
397 	}
398 
399 	if ((toep->ddp.flags & DDP_AIO) != 0) {
400 		db->cancel_pending = 0;
401 		db->job = NULL;
402 		recycle_pageset(toep, db->ps);
403 		db->ps = NULL;
404 	} else {
405 		drb = db->drb;
406 		if (atomic_fetchadd_int(&drb->refs, -1) == 1)
407 			recycle_ddp_rcv_buffer(toep, drb);
408 		db->drb = NULL;
409 		db->placed = 0;
410 	}
411 
412 	db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
413 	KASSERT(toep->ddp.flags & db_flag,
414 	    ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x",
415 	    __func__, toep, toep->ddp.flags));
416 	toep->ddp.flags &= ~db_flag;
417 }
418 
419 /* Called when m_free drops the last reference. */
420 static void
421 ddp_rcv_mbuf_done(struct mbuf *m)
422 {
423 	struct toepcb *toep = m->m_ext.ext_arg1;
424 	struct ddp_rcv_buffer *drb = m->m_ext.ext_arg2;
425 
426 	recycle_ddp_rcv_buffer(toep, drb);
427 }
428 
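/*
 * Wrap the newly placed portion of a DDP receive buffer in an
 * external mbuf that holds a reference on the buffer and append it to
 * the socket's receive buffer.
 */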
429 static void
430 queue_ddp_rcvbuf_mbuf(struct toepcb *toep, u_int db_idx, u_int len)
431 {
432 	struct inpcb *inp = toep->inp;
433 	struct sockbuf *sb;
434 	struct ddp_buffer *db;
435 	struct ddp_rcv_buffer *drb;
436 	struct mbuf *m;
437 
438 	m = m_gethdr(M_NOWAIT, MT_DATA);
439 	if (m == NULL) {
440 		printf("%s: failed to allocate mbuf\n", __func__);
441 		return;
442 	}
443 	m->m_pkthdr.rcvif = toep->vi->ifp;
444 
445 	db = &toep->ddp.db[db_idx];
446 	drb = db->drb;
447 	m_extaddref(m, (char *)drb->buf + db->placed, len, &drb->refs,
448 	    ddp_rcv_mbuf_done, toep, drb);
449 	m->m_pkthdr.len = len;
450 	m->m_len = len;
451 
452 	sb = &inp->inp_socket->so_rcv;
453 	SOCKBUF_LOCK_ASSERT(sb);
454 	sbappendstream_locked(sb, m, 0);
455 
456 	db->placed += len;
457 	toep->ofld_rxq->rx_toe_ddp_octets += len;
458 }
459 
460 /* XXX: handle_ddp_data code duplication */
461 void
462 insert_ddp_data(struct toepcb *toep, uint32_t n)
463 {
464 	struct inpcb *inp = toep->inp;
465 	struct tcpcb *tp = intotcpcb(inp);
466 	struct ddp_buffer *db;
467 	struct kaiocb *job;
468 	size_t placed;
469 	long copied;
470 	unsigned int db_idx;
471 #ifdef INVARIANTS
472 	unsigned int db_flag;
473 #endif
474 	bool ddp_rcvbuf;
475 
476 	INP_WLOCK_ASSERT(inp);
477 	DDP_ASSERT_LOCKED(toep);
478 
479 	ddp_rcvbuf = (toep->ddp.flags & DDP_RCVBUF) != 0;
480 	tp->rcv_nxt += n;
481 #ifndef USE_DDP_RX_FLOW_CONTROL
482 	KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
483 	tp->rcv_wnd -= n;
484 #endif
485 	CTR2(KTR_CXGBE, "%s: placed %u bytes before falling out of DDP",
486 	    __func__, n);
487 	while (toep->ddp.active_count > 0) {
488 		MPASS(toep->ddp.active_id != -1);
489 		db_idx = toep->ddp.active_id;
490 #ifdef INVARIANTS
491 		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
492 #endif
493 		MPASS((toep->ddp.flags & db_flag) != 0);
494 		db = &toep->ddp.db[db_idx];
495 		if (ddp_rcvbuf) {
496 			placed = n;
497 			if (placed > db->drb->len - db->placed)
498 				placed = db->drb->len - db->placed;
499 			if (placed != 0)
500 				queue_ddp_rcvbuf_mbuf(toep, db_idx, placed);
501 			complete_ddp_buffer(toep, db, db_idx);
502 			n -= placed;
503 			continue;
504 		}
505 		job = db->job;
506 		copied = job->aio_received;
507 		placed = n;
508 		if (placed > job->uaiocb.aio_nbytes - copied)
509 			placed = job->uaiocb.aio_nbytes - copied;
510 		if (placed > 0) {
511 			job->msgrcv = 1;
512 			toep->ofld_rxq->rx_aio_ddp_jobs++;
513 		}
514 		toep->ofld_rxq->rx_aio_ddp_octets += placed;
515 		if (!aio_clear_cancel_function(job)) {
516 			/*
517 			 * Update the copied length for when
518 			 * t4_aio_cancel_active() completes this
519 			 * request.
520 			 */
521 			job->aio_received += placed;
522 		} else if (copied + placed != 0) {
523 			CTR4(KTR_CXGBE,
524 			    "%s: completing %p (copied %ld, placed %lu)",
525 			    __func__, job, copied, placed);
526 			/* XXX: This always completes if there is some data. */
527 			aio_complete(job, copied + placed, 0);
528 		} else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
529 			TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
530 			toep->ddp.waiting_count++;
531 		} else
532 			aio_cancel(job);
533 		n -= placed;
534 		complete_ddp_buffer(toep, db, db_idx);
535 	}
536 
537 	MPASS(n == 0);
538 }
539 
540 /* SET_TCB_FIELD sent as a ULP command looks like this */
541 #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
542     sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))
543 
544 /* RX_DATA_ACK sent as a ULP command looks like this */
545 #define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \
546     sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core))
547 
548 static inline void *
549 mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
550 {
551 	struct ulptx_idata *ulpsc;
552 	struct cpl_rx_data_ack_core *req;
553 
554 	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
555 	ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16));
556 
557 	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
558 	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
559 	ulpsc->len = htobe32(sizeof(*req));
560 
561 	req = (struct cpl_rx_data_ack_core *)(ulpsc + 1);
562 	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid));
563 	req->credit_dack = htobe32(F_RX_MODULATE_RX);
564 
565 	ulpsc = (struct ulptx_idata *)(req + 1);
566 	if (LEN__RX_DATA_ACK_ULP % 16) {
567 		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
568 		ulpsc->len = htobe32(0);
569 		return (ulpsc + 1);
570 	}
571 	return (ulpsc);
572 }
573 
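/*
 * Build the compound work request that programs one DDP buffer: its
 * tag, offset and length, and the DDP flags, followed by an
 * RX_DATA_ACK.
 */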
574 static struct wrqe *
575 mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
576     struct ppod_reservation *prsv, int offset, uint32_t len,
577     uint64_t ddp_flags, uint64_t ddp_flags_mask)
578 {
579 	struct wrqe *wr;
580 	struct work_request_hdr *wrh;
581 	struct ulp_txpkt *ulpmc;
582 	int wrlen;
583 
584 	KASSERT(db_idx == 0 || db_idx == 1,
585 	    ("%s: bad DDP buffer index %d", __func__, db_idx));
586 
587 	/*
588 	 * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
589 	 * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
590 	 *
591 	 * The work request header is 16B and always ends at a 16B boundary.
592 	 * The ULPTX master commands that follow must all end at 16B boundaries
593 	 * too so we round up the size to 16.
594 	 */
595 	wrlen = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
596 	    roundup2(LEN__RX_DATA_ACK_ULP, 16);
597 
598 	wr = alloc_wrqe(wrlen, toep->ctrlq);
599 	if (wr == NULL)
600 		return (NULL);
601 	wrh = wrtod(wr);
602 	INIT_ULPTX_WRH(wrh, wrlen, 1, 0);	/* atomic */
603 	ulpmc = (struct ulp_txpkt *)(wrh + 1);
604 
605 	/* Write the buffer's tag */
606 	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
607 	    W_TCB_RX_DDP_BUF0_TAG + db_idx,
608 	    V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
609 	    V_TCB_RX_DDP_BUF0_TAG(prsv->prsv_tag));
610 
611 	/* Update the current offset in the DDP buffer and its total length */
612 	if (db_idx == 0)
613 		ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
614 		    W_TCB_RX_DDP_BUF0_OFFSET,
615 		    V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
616 		    V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
617 		    V_TCB_RX_DDP_BUF0_OFFSET(offset) |
618 		    V_TCB_RX_DDP_BUF0_LEN(len));
619 	else
620 		ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
621 		    W_TCB_RX_DDP_BUF1_OFFSET,
622 		    V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
623 		    V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
624 		    V_TCB_RX_DDP_BUF1_OFFSET(offset) |
625 		    V_TCB_RX_DDP_BUF1_LEN((u64)len << 32));
626 
627 	/* Update DDP flags */
628 	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_RX_DDP_FLAGS,
629 	    ddp_flags_mask, ddp_flags);
630 
631 	/* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
632 	ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);
633 
634 	return (wr);
635 }
636 
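/*
 * Handle data placed into an active AIO DDP buffer (RX_DATA_DDP or
 * RX_DDP_COMPLETE): advance rcv_nxt, autosize the receive buffer if
 * needed, and complete or update the associated AIO job.
 */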
637 static int
638 handle_ddp_data_aio(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt,
639     int len)
640 {
641 	uint32_t report = be32toh(ddp_report);
642 	unsigned int db_idx;
643 	struct inpcb *inp = toep->inp;
644 	struct ddp_buffer *db;
645 	struct tcpcb *tp;
646 	struct socket *so;
647 	struct sockbuf *sb;
648 	struct kaiocb *job;
649 	long copied;
650 
651 	db_idx = report & F_DDP_BUF_IDX ? 1 : 0;
652 
653 	if (__predict_false(!(report & F_DDP_INV)))
654 		CXGBE_UNIMPLEMENTED("DDP buffer still valid");
655 
656 	INP_WLOCK(inp);
657 	so = inp_inpcbtosocket(inp);
658 	sb = &so->so_rcv;
659 	DDP_LOCK(toep);
660 
661 	KASSERT(toep->ddp.active_id == db_idx,
662 	    ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx,
663 	    toep->ddp.active_id, toep->tid));
664 	db = &toep->ddp.db[db_idx];
665 	job = db->job;
666 
667 	if (__predict_false(inp->inp_flags & INP_DROPPED)) {
668 		/*
669 		 * This can happen due to an administrative tcpdrop(8).
670 		 * Just fail the request with ECONNRESET.
671 		 */
672 		CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
673 		    __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
674 		if (aio_clear_cancel_function(job))
675 			ddp_complete_one(job, ECONNRESET);
676 		goto completed;
677 	}
678 
679 	tp = intotcpcb(inp);
680 
681 	/*
682 	 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
683 	 * sequence number of the next byte to receive.  The length of
684 	 * the data received for this message must be computed by
685 	 * comparing the new and old values of rcv_nxt.
686 	 *
687 	 * For RX_DATA_DDP, len might be non-zero, but it is only the
688 	 * length of the most recent DMA.  It does not include the
689 	 * total length of the data received since the previous update
690 	 * for this DDP buffer.  rcv_nxt is the sequence number of the
691 	 * first received byte from the most recent DMA.
692 	 */
693 	len += be32toh(rcv_nxt) - tp->rcv_nxt;
694 	tp->rcv_nxt += len;
695 	tp->t_rcvtime = ticks;
696 #ifndef USE_DDP_RX_FLOW_CONTROL
697 	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
698 	tp->rcv_wnd -= len;
699 #endif
700 #ifdef VERBOSE_TRACES
701 	CTR5(KTR_CXGBE, "%s: tid %u, DDP[%d] placed %d bytes (%#x)", __func__,
702 	    toep->tid, db_idx, len, report);
703 #endif
704 
705 	/* receive buffer autosize */
706 	MPASS(toep->vnet == so->so_vnet);
707 	CURVNET_SET(toep->vnet);
708 	SOCKBUF_LOCK(sb);
709 	if (sb->sb_flags & SB_AUTOSIZE &&
710 	    V_tcp_do_autorcvbuf &&
711 	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
712 	    len > (sbspace(sb) / 8 * 7)) {
713 		struct adapter *sc = td_adapter(toep->td);
714 		unsigned int hiwat = sb->sb_hiwat;
715 		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
716 		    V_tcp_autorcvbuf_max);
717 
718 		if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
719 			sb->sb_flags &= ~SB_AUTOSIZE;
720 	}
721 	SOCKBUF_UNLOCK(sb);
722 	CURVNET_RESTORE();
723 
724 	job->msgrcv = 1;
725 	toep->ofld_rxq->rx_aio_ddp_jobs++;
726 	toep->ofld_rxq->rx_aio_ddp_octets += len;
727 	if (db->cancel_pending) {
728 		/*
729 		 * Update the job's length but defer completion to the
730 		 * TCB_RPL callback.
731 		 */
732 		job->aio_received += len;
733 		goto out;
734 	} else if (!aio_clear_cancel_function(job)) {
735 		/*
736 		 * Update the copied length for when
737 		 * t4_aio_cancel_active() completes this request.
738 		 */
739 		job->aio_received += len;
740 	} else {
741 		copied = job->aio_received;
742 #ifdef VERBOSE_TRACES
743 		CTR5(KTR_CXGBE,
744 		    "%s: tid %u, completing %p (copied %ld, placed %d)",
745 		    __func__, toep->tid, job, copied, len);
746 #endif
747 		aio_complete(job, copied + len, 0);
748 		t4_rcvd(&toep->td->tod, tp);
749 	}
750 
751 completed:
752 	complete_ddp_buffer(toep, db, db_idx);
753 	if (toep->ddp.waiting_count > 0)
754 		ddp_queue_toep(toep);
755 out:
756 	DDP_UNLOCK(toep);
757 	INP_WUNLOCK(inp);
758 
759 	return (0);
760 }
761 
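/*
 * Program a receive buffer into a free DDP buffer slot: send the TCB
 * update to the chip and mark the slot active.  Returns false if the
 * work request could not be allocated.
 */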
762 static bool
763 queue_ddp_rcvbuf(struct toepcb *toep, struct ddp_rcv_buffer *drb)
764 {
765 	struct adapter *sc = td_adapter(toep->td);
766 	struct ddp_buffer *db;
767 	struct wrqe *wr;
768 	uint64_t ddp_flags, ddp_flags_mask;
769 	int buf_flag, db_idx;
770 
771 	DDP_ASSERT_LOCKED(toep);
772 
773 	KASSERT((toep->ddp.flags & DDP_DEAD) == 0, ("%s: DDP_DEAD", __func__));
774 	KASSERT(toep->ddp.active_count < nitems(toep->ddp.db),
775 	    ("%s: no empty DDP buffer slot", __func__));
776 
777 	/* Determine which DDP buffer to use. */
778 	if (toep->ddp.db[0].drb == NULL) {
779 		db_idx = 0;
780 	} else {
781 		MPASS(toep->ddp.db[1].drb == NULL);
782 		db_idx = 1;
783 	}
784 
785 	/*
786 	 * Permit PSH to trigger a partial completion without
787 	 * invalidating the rest of the buffer, but disable the PUSH
788 	 * timer.
789 	 */
790 	ddp_flags = 0;
791 	ddp_flags_mask = 0;
792 	if (db_idx == 0) {
793 		ddp_flags |= V_TF_DDP_PSH_NO_INVALIDATE0(1) |
794 		    V_TF_DDP_PUSH_DISABLE_0(0) | V_TF_DDP_PSHF_ENABLE_0(1) |
795 		    V_TF_DDP_BUF0_VALID(1);
796 		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) |
797 		    V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) |
798 		    V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1);
799 		buf_flag = DDP_BUF0_ACTIVE;
800 	} else {
801 		ddp_flags |= V_TF_DDP_PSH_NO_INVALIDATE1(1) |
802 		    V_TF_DDP_PUSH_DISABLE_1(0) | V_TF_DDP_PSHF_ENABLE_1(1) |
803 		    V_TF_DDP_BUF1_VALID(1);
804 		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) |
805 		    V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) |
806 		    V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1);
807 		buf_flag = DDP_BUF1_ACTIVE;
808 	}
809 	MPASS((toep->ddp.flags & buf_flag) == 0);
810 	if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) {
811 		MPASS(db_idx == 0);
812 		MPASS(toep->ddp.active_id == -1);
813 		MPASS(toep->ddp.active_count == 0);
814 		ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1);
815 	}
816 
817 	/*
818 	 * The TID for this connection should still be valid.  If
819 	 * DDP_DEAD is set, SBS_CANTRCVMORE should be set, so we
820 	 * shouldn't be this far anyway.
821 	 */
822 	wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &drb->prsv, 0, drb->len,
823 	    ddp_flags, ddp_flags_mask);
824 	if (wr == NULL) {
825 		recycle_ddp_rcv_buffer(toep, drb);
826 		printf("%s: mk_update_tcb_for_ddp failed\n", __func__);
827 		return (false);
828 	}
829 
830 #ifdef VERBOSE_TRACES
831 	CTR(KTR_CXGBE,
832 	    "%s: tid %u, scheduling DDP[%d] (flags %#lx/%#lx)", __func__,
833 	    toep->tid, db_idx, ddp_flags, ddp_flags_mask);
834 #endif
835 	/*
836 	 * Hold a reference on scheduled buffers that is dropped in
837 	 * complete_ddp_buffer.
838 	 */
839 	drb->refs = 1;
840 
841 	/* Give the chip the go-ahead. */
842 	t4_wrq_tx(sc, wr);
843 	db = &toep->ddp.db[db_idx];
844 	db->drb = drb;
845 	toep->ddp.flags |= buf_flag;
846 	toep->ddp.active_count++;
847 	if (toep->ddp.active_count == 1) {
848 		MPASS(toep->ddp.active_id == -1);
849 		toep->ddp.active_id = db_idx;
850 		CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
851 		    toep->ddp.active_id);
852 	}
853 	return (true);
854 }
855 
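/*
 * Handle data placed into a DDP receive buffer (socket-option mode):
 * append the placed bytes to the socket buffer and, if a slot was
 * freed, try to queue a replacement buffer.
 */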
856 static int
857 handle_ddp_data_rcvbuf(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt,
858     int len)
859 {
860 	uint32_t report = be32toh(ddp_report);
861 	struct inpcb *inp = toep->inp;
862 	struct tcpcb *tp;
863 	struct socket *so;
864 	struct sockbuf *sb;
865 	struct ddp_buffer *db;
866 	struct ddp_rcv_buffer *drb;
867 	unsigned int db_idx;
868 	bool invalidated;
869 
870 	db_idx = report & F_DDP_BUF_IDX ? 1 : 0;
871 
872 	invalidated = (report & F_DDP_INV) != 0;
873 
874 	INP_WLOCK(inp);
875 	so = inp_inpcbtosocket(inp);
876 	sb = &so->so_rcv;
877 	DDP_LOCK(toep);
878 
879 	KASSERT(toep->ddp.active_id == db_idx,
880 	    ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx,
881 	    toep->ddp.active_id, toep->tid));
882 	db = &toep->ddp.db[db_idx];
883 
884 	if (__predict_false(inp->inp_flags & INP_DROPPED)) {
885 		/*
886 		 * This can happen due to an administrative tcpdrop(8).
887 		 * Just ignore the received data.
888 		 */
889 		CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
890 		    __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
891 		if (invalidated)
892 			complete_ddp_buffer(toep, db, db_idx);
893 		goto out;
894 	}
895 
896 	tp = intotcpcb(inp);
897 
898 	/*
899 	 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
900 	 * sequence number of the next byte to receive.  The length of
901 	 * the data received for this message must be computed by
902 	 * comparing the new and old values of rcv_nxt.
903 	 *
904 	 * For RX_DATA_DDP, len might be non-zero, but it is only the
905 	 * length of the most recent DMA.  It does not include the
906 	 * total length of the data received since the previous update
907 	 * for this DDP buffer.  rcv_nxt is the sequence number of the
908 	 * first received byte from the most recent DMA.
909 	 */
910 	len += be32toh(rcv_nxt) - tp->rcv_nxt;
911 	tp->rcv_nxt += len;
912 	tp->t_rcvtime = ticks;
913 #ifndef USE_DDP_RX_FLOW_CONTROL
914 	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
915 	tp->rcv_wnd -= len;
916 #endif
917 #ifdef VERBOSE_TRACES
918 	CTR5(KTR_CXGBE, "%s: tid %u, DDP[%d] placed %d bytes (%#x)", __func__,
919 	    toep->tid, db_idx, len, report);
920 #endif
921 
922 	/* receive buffer autosize */
923 	MPASS(toep->vnet == so->so_vnet);
924 	CURVNET_SET(toep->vnet);
925 	SOCKBUF_LOCK(sb);
926 	if (sb->sb_flags & SB_AUTOSIZE &&
927 	    V_tcp_do_autorcvbuf &&
928 	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
929 	    len > (sbspace(sb) / 8 * 7)) {
930 		struct adapter *sc = td_adapter(toep->td);
931 		unsigned int hiwat = sb->sb_hiwat;
932 		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
933 		    V_tcp_autorcvbuf_max);
934 
935 		if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
936 			sb->sb_flags &= ~SB_AUTOSIZE;
937 	}
938 
939 	if (len > 0) {
940 		queue_ddp_rcvbuf_mbuf(toep, db_idx, len);
941 		t4_rcvd_locked(&toep->td->tod, tp);
942 	}
943 	sorwakeup_locked(so);
944 	SOCKBUF_UNLOCK_ASSERT(sb);
945 	CURVNET_RESTORE();
946 
947 	if (invalidated)
948 		complete_ddp_buffer(toep, db, db_idx);
949 	else
950 		KASSERT(db->placed < db->drb->len,
951 		    ("%s: full DDP buffer not invalidated", __func__));
952 
953 	if (toep->ddp.active_count != nitems(toep->ddp.db)) {
954 		drb = alloc_cached_ddp_rcv_buffer(toep);
955 		if (drb == NULL)
956 			drb = alloc_ddp_rcv_buffer(toep, M_NOWAIT);
957 		if (drb == NULL)
958 			ddp_queue_toep(toep);
959 		else {
960 			if (!queue_ddp_rcvbuf(toep, drb)) {
961 				ddp_queue_toep(toep);
962 			}
963 		}
964 	}
965 out:
966 	DDP_UNLOCK(toep);
967 	INP_WUNLOCK(inp);
968 
969 	return (0);
970 }
971 
972 static int
973 handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
974 {
975 	if ((toep->ddp.flags & DDP_RCVBUF) != 0)
976 		return (handle_ddp_data_rcvbuf(toep, ddp_report, rcv_nxt, len));
977 	else
978 		return (handle_ddp_data_aio(toep, ddp_report, rcv_nxt, len));
979 }
980 
981 void
982 handle_ddp_indicate(struct toepcb *toep)
983 {
984 
985 	DDP_ASSERT_LOCKED(toep);
986 	if ((toep->ddp.flags & DDP_RCVBUF) != 0) {
987 		/*
988 		 * Indicates are not meaningful for RCVBUF since
989 		 * buffers are activated when the socket option is
990 		 * set.
991 		 */
992 		return;
993 	}
994 
995 	MPASS(toep->ddp.active_count == 0);
996 	MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0);
997 	if (toep->ddp.waiting_count == 0) {
998 		/*
999 		 * The pending requests that triggered the request for
1000 		 * an indicate were cancelled.  Those cancels should have
1001 		 * already disabled DDP.  Just ignore this as the data is
1002 		 * going into the socket buffer anyway.
1003 		 */
1004 		return;
1005 	}
1006 	CTR3(KTR_CXGBE, "%s: tid %d indicated (%d waiting)", __func__,
1007 	    toep->tid, toep->ddp.waiting_count);
1008 	ddp_queue_toep(toep);
1009 }
1010 
1011 CTASSERT(CPL_COOKIE_DDP0 + 1 == CPL_COOKIE_DDP1);
1012 
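/*
 * Handle the SET_TCB_FIELD reply that arrives after an active AIO DDP
 * buffer is invalidated on behalf of a cancel request.
 */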
1013 static int
1014 do_ddp_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1015 {
1016 	struct adapter *sc = iq->adapter;
1017 	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
1018 	unsigned int tid = GET_TID(cpl);
1019 	unsigned int db_idx;
1020 	struct toepcb *toep;
1021 	struct inpcb *inp;
1022 	struct ddp_buffer *db;
1023 	struct kaiocb *job;
1024 	long copied;
1025 
1026 	if (cpl->status != CPL_ERR_NONE)
1027 		panic("XXX: tcp_rpl failed: %d", cpl->status);
1028 
1029 	toep = lookup_tid(sc, tid);
1030 	inp = toep->inp;
1031 	switch (cpl->cookie) {
1032 	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP0):
1033 	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP1):
1034 		/*
1035 		 * XXX: This duplicates a lot of code with handle_ddp_data().
1036 		 */
1037 		KASSERT((toep->ddp.flags & DDP_AIO) != 0,
1038 		    ("%s: DDP_RCVBUF", __func__));
1039 		db_idx = G_COOKIE(cpl->cookie) - CPL_COOKIE_DDP0;
1040 		MPASS(db_idx < nitems(toep->ddp.db));
1041 		INP_WLOCK(inp);
1042 		DDP_LOCK(toep);
1043 		db = &toep->ddp.db[db_idx];
1044 
1045 		/*
1046 		 * handle_ddp_data() should leave the job around until
1047 		 * this callback runs once a cancel is pending.
1048 		 */
1049 		MPASS(db != NULL);
1050 		MPASS(db->job != NULL);
1051 		MPASS(db->cancel_pending);
1052 
1053 		/*
1054 		 * XXX: It's not clear what happens if there is data
1055 		 * placed when the buffer is invalidated.  I suspect we
1056 		 * need to read the TCB to see how much data was placed.
1057 		 *
1058 		 * For now this just pretends like nothing was placed.
1059 		 *
1060 		 * XXX: Note that if we did check the PCB we would need to
1061 		 * also take care of updating the tp, etc.
1062 		 */
1063 		job = db->job;
1064 		copied = job->aio_received;
1065 		if (copied == 0) {
1066 			CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
1067 			aio_cancel(job);
1068 		} else {
1069 			CTR3(KTR_CXGBE, "%s: completing %p (copied %ld)",
1070 			    __func__, job, copied);
1071 			aio_complete(job, copied, 0);
1072 			t4_rcvd(&toep->td->tod, intotcpcb(inp));
1073 		}
1074 
1075 		complete_ddp_buffer(toep, db, db_idx);
1076 		if (toep->ddp.waiting_count > 0)
1077 			ddp_queue_toep(toep);
1078 		DDP_UNLOCK(toep);
1079 		INP_WUNLOCK(inp);
1080 		break;
1081 	default:
1082 		panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
1083 		    G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
1084 	}
1085 
1086 	return (0);
1087 }
1088 
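/*
 * A FIN was received while DDP buffers were active.  Account for any
 * data placed in those buffers before the FIN, complete or update the
 * affected requests, and (in AIO mode) complete all remaining queued
 * jobs.
 */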
1089 void
1090 handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt)
1091 {
1092 	struct socket *so = toep->inp->inp_socket;
1093 	struct sockbuf *sb = &so->so_rcv;
1094 	struct ddp_buffer *db;
1095 	struct kaiocb *job;
1096 	long copied;
1097 	unsigned int db_idx;
1098 #ifdef INVARIANTS
1099 	unsigned int db_flag;
1100 #endif
1101 	int len, placed;
1102 	bool ddp_rcvbuf;
1103 
1104 	INP_WLOCK_ASSERT(toep->inp);
1105 	DDP_ASSERT_LOCKED(toep);
1106 
1107 	ddp_rcvbuf = (toep->ddp.flags & DDP_RCVBUF) != 0;
1108 
1109 	/* - 1 is to ignore the byte for FIN */
1110 	len = be32toh(rcv_nxt) - tp->rcv_nxt - 1;
1111 	tp->rcv_nxt += len;
1112 
1113 	CTR(KTR_CXGBE, "%s: tid %d placed %u bytes before FIN", __func__,
1114 	    toep->tid, len);
1115 	while (toep->ddp.active_count > 0) {
1116 		MPASS(toep->ddp.active_id != -1);
1117 		db_idx = toep->ddp.active_id;
1118 #ifdef INVARIANTS
1119 		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
1120 #endif
1121 		MPASS((toep->ddp.flags & db_flag) != 0);
1122 		db = &toep->ddp.db[db_idx];
1123 		if (ddp_rcvbuf) {
1124 			placed = len;
1125 			if (placed > db->drb->len - db->placed)
1126 				placed = db->drb->len - db->placed;
1127 			if (placed != 0) {
1128 				SOCKBUF_LOCK(sb);
1129 				queue_ddp_rcvbuf_mbuf(toep, db_idx, placed);
1130 				sorwakeup_locked(so);
1131 				SOCKBUF_UNLOCK_ASSERT(sb);
1132 			}
1133 			complete_ddp_buffer(toep, db, db_idx);
1134 			len -= placed;
1135 			continue;
1136 		}
1137 		job = db->job;
1138 		copied = job->aio_received;
1139 		placed = len;
1140 		if (placed > job->uaiocb.aio_nbytes - copied)
1141 			placed = job->uaiocb.aio_nbytes - copied;
1142 		if (placed > 0) {
1143 			job->msgrcv = 1;
1144 			toep->ofld_rxq->rx_aio_ddp_jobs++;
1145 		}
1146 		toep->ofld_rxq->rx_aio_ddp_octets += placed;
1147 		if (!aio_clear_cancel_function(job)) {
1148 			/*
1149 			 * Update the copied length for when
1150 			 * t4_aio_cancel_active() completes this
1151 			 * request.
1152 			 */
1153 			job->aio_received += placed;
1154 		} else {
1155 			CTR4(KTR_CXGBE, "%s: tid %d completed buf %d len %d",
1156 			    __func__, toep->tid, db_idx, placed);
1157 			aio_complete(job, copied + placed, 0);
1158 		}
1159 		len -= placed;
1160 		complete_ddp_buffer(toep, db, db_idx);
1161 	}
1162 
1163 	MPASS(len == 0);
1164 	if ((toep->ddp.flags & DDP_AIO) != 0)
1165 		ddp_complete_all(toep, 0);
1166 }
1167 
1168 #define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\
1169 	 F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\
1170 	 F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\
1171 	 F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR)
1172 
1173 extern cpl_handler_t t4_cpl_handler[];
1174 
1175 static int
1176 do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1177 {
1178 	struct adapter *sc = iq->adapter;
1179 	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
1180 	unsigned int tid = GET_TID(cpl);
1181 	uint32_t vld;
1182 	struct toepcb *toep = lookup_tid(sc, tid);
1183 
1184 	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
1185 	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
1186 	KASSERT(!(toep->flags & TPF_SYNQE),
1187 	    ("%s: toep %p claims to be a synq entry", __func__, toep));
1188 
1189 	vld = be32toh(cpl->ddpvld);
1190 	if (__predict_false(vld & DDP_ERR)) {
1191 		panic("%s: DDP error 0x%x (tid %d, toep %p)",
1192 		    __func__, vld, tid, toep);
1193 	}
1194 
1195 	if (ulp_mode(toep) == ULP_MODE_ISCSI) {
1196 		t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
1197 		return (0);
1198 	}
1199 
1200 	handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len));
1201 
1202 	return (0);
1203 }
1204 
1205 static int
1206 do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss,
1207     struct mbuf *m)
1208 {
1209 	struct adapter *sc = iq->adapter;
1210 	const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1);
1211 	unsigned int tid = GET_TID(cpl);
1212 	struct toepcb *toep = lookup_tid(sc, tid);
1213 
1214 	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
1215 	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
1216 	KASSERT(!(toep->flags & TPF_SYNQE),
1217 	    ("%s: toep %p claims to be a synq entry", __func__, toep));
1218 
1219 	handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0);
1220 
1221 	return (0);
1222 }
1223 
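/*
 * Switch the connection's ULP mode to ULP_MODE_TCPDDP with a compound
 * work request that clears the DDP overlay region of the TCB (leaving
 * DDP_OFF set) and, optionally, enables DDP receive flow control.
 */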
1224 static bool
1225 set_ddp_ulp_mode(struct toepcb *toep)
1226 {
1227 	struct adapter *sc = toep->vi->adapter;
1228 	struct wrqe *wr;
1229 	struct work_request_hdr *wrh;
1230 	struct ulp_txpkt *ulpmc;
1231 	int fields, len;
1232 
1233 	if (!sc->tt.ddp)
1234 		return (false);
1235 
1236 	fields = 0;
1237 
1238 	/* Overlay region including W_TCB_RX_DDP_FLAGS */
1239 	fields += 3;
1240 
1241 	/* W_TCB_ULP_TYPE */
1242 	fields++;
1243 
1244 #ifdef USE_DDP_RX_FLOW_CONTROL
1245 	/* W_TCB_T_FLAGS */
1246 	fields++;
1247 #endif
1248 
1249 	len = sizeof(*wrh) + fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
1250 	KASSERT(len <= SGE_MAX_WR_LEN,
1251 	    ("%s: WR with %d TCB field updates too large", __func__, fields));
1252 
1253 	wr = alloc_wrqe(len, toep->ctrlq);
1254 	if (wr == NULL)
1255 		return (false);
1256 
1257 	CTR(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);
1258 
1259 	wrh = wrtod(wr);
1260 	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
1261 	ulpmc = (struct ulp_txpkt *)(wrh + 1);
1262 
1263 	/*
1264 	 * Words 26/27 are zero except for the DDP_OFF flag in
1265 	 * W_TCB_RX_DDP_FLAGS (27).
1266 	 */
1267 	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26,
1268 	    0xffffffffffffffff, (uint64_t)V_TF_DDP_OFF(1) << 32);
1269 
1270 	/* Words 28/29 are zero. */
1271 	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 28,
1272 	    0xffffffffffffffff, 0);
1273 
1274 	/* Words 30/31 are zero. */
1275 	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30,
1276 	    0xffffffffffffffff, 0);
1277 
1278 	/* Set the ULP mode to ULP_MODE_TCPDDP. */
1279 	toep->params.ulp_mode = ULP_MODE_TCPDDP;
1280 	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_TYPE,
1281 	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), V_TCB_ULP_TYPE(ULP_MODE_TCPDDP));
1282 
1283 #ifdef USE_DDP_RX_FLOW_CONTROL
1284 	/* Set TF_RX_FLOW_CONTROL_DDP. */
1285 	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS,
1286 	    V_TF_RX_FLOW_CONTROL_DDP(1), V_TF_RX_FLOW_CONTROL_DDP(1));
1287 #endif
1288 
1289 	ddp_init_toep(toep);
1290 
1291 	t4_wrq_tx(sc, wr);
1292 	return (true);
1293 }
1294 
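/*
 * Turn DDP on for a connection: clear TF_DDP_OFF (enabling indicates
 * in AIO mode) and disable receive coalescing.
 */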
1295 static void
1296 enable_ddp(struct adapter *sc, struct toepcb *toep)
1297 {
1298 	uint64_t ddp_flags;
1299 
1300 	KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
1301 	    ("%s: toep %p has bad ddp_flags 0x%x",
1302 	    __func__, toep, toep->ddp.flags));
1303 
1304 	CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
1305 	    __func__, toep->tid, time_uptime);
1306 
1307 	ddp_flags = 0;
1308 	if ((toep->ddp.flags & DDP_AIO) != 0)
1309 		ddp_flags |= V_TF_DDP_BUF0_INDICATE(1) |
1310 		    V_TF_DDP_BUF1_INDICATE(1);
1311 	DDP_ASSERT_LOCKED(toep);
1312 	toep->ddp.flags |= DDP_SC_REQ;
1313 	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_RX_DDP_FLAGS,
1314 	    V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
1315 	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
1316 	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1), ddp_flags, 0, 0);
1317 	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
1318 	    V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0);
1319 }
1320 
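/*
 * Highest common factor (GCD) of two segment lengths, used to pick
 * the largest DDP page size that evenly divides all segments.
 */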
1321 static int
1322 calculate_hcf(int n1, int n2)
1323 {
1324 	int a, b, t;
1325 
1326 	if (n1 <= n2) {
1327 		a = n1;
1328 		b = n2;
1329 	} else {
1330 		a = n2;
1331 		b = n1;
1332 	}
1333 
1334 	while (a != 0) {
1335 		t = a;
1336 		a = b % a;
1337 		b = t;
1338 	}
1339 
1340 	return (b);
1341 }
1342 
1343 static inline int
1344 pages_to_nppods(int npages, int ddp_page_shift)
1345 {
1346 
1347 	MPASS(ddp_page_shift >= PAGE_SHIFT);
1348 
1349 	return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES));
1350 }
1351 
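/*
 * Reserve nppods page pods from the region's vmem arena and record
 * the resulting tag (which encodes the page size index) in the
 * reservation.
 */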
1352 static int
1353 alloc_page_pods(struct ppod_region *pr, u_int nppods, u_int pgsz_idx,
1354     struct ppod_reservation *prsv)
1355 {
1356 	vmem_addr_t addr;       /* relative to start of region */
1357 
1358 	if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT,
1359 	    &addr) != 0)
1360 		return (ENOMEM);
1361 
1362 #ifdef VERBOSE_TRACES
1363 	CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d",
1364 	    __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask,
1365 	    nppods, 1 << pr->pr_page_shift[pgsz_idx]);
1366 #endif
1367 
1368 	/*
1369 	 * The hardware tagmask includes an extra invalid bit but the arena was
1370 	 * seeded with valid values only.  An allocation out of this arena will
1371 	 * fit inside the tagmask but won't have the invalid bit set.
1372 	 */
1373 	MPASS((addr & pr->pr_tag_mask) == addr);
1374 	MPASS((addr & pr->pr_invalid_bit) == 0);
1375 
1376 	prsv->prsv_pr = pr;
1377 	prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr;
1378 	prsv->prsv_nppods = nppods;
1379 
1380 	return (0);
1381 }
1382 
1383 static int
1384 t4_alloc_page_pods_for_vmpages(struct ppod_region *pr, vm_page_t *pages,
1385     int npages, struct ppod_reservation *prsv)
1386 {
1387 	int i, hcf, seglen, idx, nppods;
1388 
1389 	/*
1390 	 * The DDP page size is unrelated to the VM page size.  We combine
1391 	 * contiguous physical pages into larger segments to get the best DDP
1392 	 * page size possible.  This is the largest of the four sizes in
1393 	 * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes in
1394 	 * the page list.
1395 	 */
1396 	hcf = 0;
1397 	for (i = 0; i < npages; i++) {
1398 		seglen = PAGE_SIZE;
1399 		while (i < npages - 1 &&
1400 		    VM_PAGE_TO_PHYS(pages[i]) + PAGE_SIZE ==
1401 		    VM_PAGE_TO_PHYS(pages[i + 1])) {
1402 			seglen += PAGE_SIZE;
1403 			i++;
1404 		}
1405 
1406 		hcf = calculate_hcf(hcf, seglen);
1407 		if (hcf < (1 << pr->pr_page_shift[1])) {
1408 			idx = 0;
1409 			goto have_pgsz;	/* give up, short circuit */
1410 		}
1411 	}
1412 
1413 #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
1414 	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
1415 	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
1416 		if ((hcf & PR_PAGE_MASK(idx)) == 0)
1417 			break;
1418 	}
1419 #undef PR_PAGE_MASK
1420 
1421 have_pgsz:
1422 	MPASS(idx <= M_PPOD_PGSZ);
1423 
1424 	nppods = pages_to_nppods(npages, pr->pr_page_shift[idx]);
1425 	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
1426 		return (ENOMEM);
1427 	MPASS(prsv->prsv_nppods > 0);
1428 
1429 	return (0);
1430 }
1431 
1432 int
1433 t4_alloc_page_pods_for_ps(struct ppod_region *pr, struct pageset *ps)
1434 {
1435 	struct ppod_reservation *prsv = &ps->prsv;
1436 
1437 	KASSERT(prsv->prsv_nppods == 0,
1438 	    ("%s: page pods already allocated", __func__));
1439 
1440 	return (t4_alloc_page_pods_for_vmpages(pr, ps->pages, ps->npages,
1441 	    prsv));
1442 }
1443 
1444 int
1445 t4_alloc_page_pods_for_bio(struct ppod_region *pr, struct bio *bp,
1446     struct ppod_reservation *prsv)
1447 {
1448 
1449 	MPASS(bp->bio_flags & BIO_UNMAPPED);
1450 
1451 	return (t4_alloc_page_pods_for_vmpages(pr, bp->bio_ma, bp->bio_ma_n,
1452 	    prsv));
1453 }
1454 
1455 int
1456 t4_alloc_page_pods_for_buf(struct ppod_region *pr, vm_offset_t buf, int len,
1457     struct ppod_reservation *prsv)
1458 {
1459 	int hcf, seglen, idx, npages, nppods;
1460 	uintptr_t start_pva, end_pva, pva, p1;
1461 
1462 	MPASS(buf > 0);
1463 	MPASS(len > 0);
1464 
1465 	/*
1466 	 * The DDP page size is unrelated to the VM page size.  We combine
1467 	 * contiguous physical pages into larger segments to get the best DDP
1468 	 * page size possible.  This is the largest of the four sizes in
1469 	 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes
1470 	 * in the page list.
1471 	 */
1472 	hcf = 0;
1473 	start_pva = trunc_page(buf);
1474 	end_pva = trunc_page(buf + len - 1);
1475 	pva = start_pva;
1476 	while (pva <= end_pva) {
1477 		seglen = PAGE_SIZE;
1478 		p1 = pmap_kextract(pva);
1479 		pva += PAGE_SIZE;
1480 		while (pva <= end_pva && p1 + seglen == pmap_kextract(pva)) {
1481 			seglen += PAGE_SIZE;
1482 			pva += PAGE_SIZE;
1483 		}
1484 
1485 		hcf = calculate_hcf(hcf, seglen);
1486 		if (hcf < (1 << pr->pr_page_shift[1])) {
1487 			idx = 0;
1488 			goto have_pgsz;	/* give up, short circuit */
1489 		}
1490 	}
1491 
1492 #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
1493 	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
1494 	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
1495 		if ((hcf & PR_PAGE_MASK(idx)) == 0)
1496 			break;
1497 	}
1498 #undef PR_PAGE_MASK
1499 
1500 have_pgsz:
1501 	MPASS(idx <= M_PPOD_PGSZ);
1502 
1503 	npages = 1;
1504 	npages += (end_pva - start_pva) >> pr->pr_page_shift[idx];
1505 	nppods = howmany(npages, PPOD_PAGES);
1506 	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
1507 		return (ENOMEM);
1508 	MPASS(prsv->prsv_nppods > 0);
1509 
1510 	return (0);
1511 }
1512 
1513 static int
1514 t4_alloc_page_pods_for_rcvbuf(struct ppod_region *pr,
1515     struct ddp_rcv_buffer *drb)
1516 {
1517 	struct ppod_reservation *prsv = &drb->prsv;
1518 
1519 	KASSERT(prsv->prsv_nppods == 0,
1520 	    ("%s: page pods already allocated", __func__));
1521 
1522 	return (t4_alloc_page_pods_for_buf(pr, (vm_offset_t)drb->buf, drb->len,
1523 	    prsv));
1524 }
1525 
1526 int
1527 t4_alloc_page_pods_for_sgl(struct ppod_region *pr, struct ctl_sg_entry *sgl,
1528     int entries, struct ppod_reservation *prsv)
1529 {
1530 	int hcf, seglen, idx = 0, npages, nppods, i, len;
1531 	uintptr_t start_pva, end_pva, pva, p1;
1532 	vm_offset_t buf;
1533 	struct ctl_sg_entry *sge;
1534 
1535 	MPASS(entries > 0);
1536 	MPASS(sgl);
1537 
1538 	/*
1539 	 * The DDP page size is unrelated to the VM page size.  We combine
1540 	 * contiguous physical pages into larger segments to get the best DDP
1541 	 * page size possible.  This is the largest of the four sizes in
1542 	 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes
1543 	 * in the page list.
1544 	 */
1545 	hcf = 0;
1546 	for (i = entries - 1; i >= 0; i--) {
1547 		sge = sgl + i;
1548 		buf = (vm_offset_t)sge->addr;
1549 		len = sge->len;
1550 		start_pva = trunc_page(buf);
1551 		end_pva = trunc_page(buf + len - 1);
1552 		pva = start_pva;
1553 		while (pva <= end_pva) {
1554 			seglen = PAGE_SIZE;
1555 			p1 = pmap_kextract(pva);
1556 			pva += PAGE_SIZE;
1557 			while (pva <= end_pva && p1 + seglen ==
1558 			    pmap_kextract(pva)) {
1559 				seglen += PAGE_SIZE;
1560 				pva += PAGE_SIZE;
1561 			}
1562 
1563 			hcf = calculate_hcf(hcf, seglen);
1564 			if (hcf < (1 << pr->pr_page_shift[1])) {
1565 				idx = 0;
1566 				goto have_pgsz; /* give up, short circuit */
1567 			}
1568 		}
1569 	}
1570 #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
1571 	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
1572 	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
1573 		if ((hcf & PR_PAGE_MASK(idx)) == 0)
1574 			break;
1575 	}
1576 #undef PR_PAGE_MASK
1577 
1578 have_pgsz:
1579 	MPASS(idx <= M_PPOD_PGSZ);
1580 
1581 	npages = 0;
1582 	while (entries--) {
1583 		npages++;
1584 		start_pva = trunc_page((vm_offset_t)sgl->addr);
1585 		end_pva = trunc_page((vm_offset_t)sgl->addr + sgl->len - 1);
1586 		npages += (end_pva - start_pva) >> pr->pr_page_shift[idx];
1587 		sgl = sgl + 1;
1588 	}
1589 	nppods = howmany(npages, PPOD_PAGES);
1590 	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
1591 		return (ENOMEM);
1592 	MPASS(prsv->prsv_nppods > 0);
1593 	return (0);
1594 }
1595 
1596 void
1597 t4_free_page_pods(struct ppod_reservation *prsv)
1598 {
1599 	struct ppod_region *pr = prsv->prsv_pr;
1600 	vmem_addr_t addr;
1601 
1602 	MPASS(prsv != NULL);
1603 	MPASS(prsv->prsv_nppods != 0);
1604 
1605 	addr = prsv->prsv_tag & pr->pr_tag_mask;
1606 	MPASS((addr & pr->pr_invalid_bit) == 0);
1607 
1608 #ifdef VERBOSE_TRACES
1609 	CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__,
1610 	    pr->pr_arena, addr, prsv->prsv_nppods);
1611 #endif
1612 
1613 	vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods));
1614 	prsv->prsv_nppods = 0;
1615 }
1616 
1617 #define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE)
1618 
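/*
 * Write a pageset's page pods to card memory using ULP_TX_MEM_WRITE
 * work requests, NUM_ULP_TX_SC_IMM_PPODS pods at a time.
 */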
1619 int
1620 t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid,
1621     struct pageset *ps)
1622 {
1623 	struct wrqe *wr;
1624 	struct ulp_mem_io *ulpmc;
1625 	struct ulptx_idata *ulpsc;
1626 	struct pagepod *ppod;
1627 	int i, j, k, n, chunk, len, ddp_pgsz, idx;
1628 	u_int ppod_addr;
1629 	uint32_t cmd;
1630 	struct ppod_reservation *prsv = &ps->prsv;
1631 	struct ppod_region *pr = prsv->prsv_pr;
1632 	vm_paddr_t pa;
1633 
1634 	KASSERT(!(ps->flags & PS_PPODS_WRITTEN),
1635 	    ("%s: page pods already written", __func__));
1636 	MPASS(prsv->prsv_nppods > 0);
1637 
1638 	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
1639 	if (is_t4(sc))
1640 		cmd |= htobe32(F_ULP_MEMIO_ORDER);
1641 	else
1642 		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
1643 	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
1644 	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
1645 	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {
1646 		/* How many page pods are we writing in this cycle */
1647 		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
1648 		chunk = PPOD_SZ(n);
1649 		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
1650 
1651 		wr = alloc_wrqe(len, wrq);
1652 		if (wr == NULL)
1653 			return (ENOMEM);	/* ok to just bail out */
1654 		ulpmc = wrtod(wr);
1655 
1656 		INIT_ULPTX_WR(ulpmc, len, 0, 0);
1657 		ulpmc->cmd = cmd;
1658 		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
1659 		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
1660 		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
1661 
1662 		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1663 		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1664 		ulpsc->len = htobe32(chunk);
1665 
1666 		ppod = (struct pagepod *)(ulpsc + 1);
1667 		for (j = 0; j < n; i++, j++, ppod++) {
1668 			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
1669 			    V_PPOD_TID(tid) | prsv->prsv_tag);
1670 			ppod->len_offset = htobe64(V_PPOD_LEN(ps->len) |
1671 			    V_PPOD_OFST(ps->offset));
1672 			ppod->rsvd = 0;
1673 			idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
1674 			for (k = 0; k < nitems(ppod->addr); k++) {
1675 				if (idx < ps->npages) {
1676 					pa = VM_PAGE_TO_PHYS(ps->pages[idx]);
1677 					ppod->addr[k] = htobe64(pa);
1678 					idx += ddp_pgsz / PAGE_SIZE;
1679 				} else
1680 					ppod->addr[k] = 0;
1681 #if 0
1682 				CTR5(KTR_CXGBE,
1683 				    "%s: tid %d ppod[%d]->addr[%d] = %p",
1684 				    __func__, tid, i, k,
1685 				    be64toh(ppod->addr[k]));
1686 #endif
1687 			}
1688 
1689 		}
1690 
1691 		t4_wrq_tx(sc, wr);
1692 	}
1693 	ps->flags |= PS_PPODS_WRITTEN;
1694 
1695 	return (0);
1696 }
1697 
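/*
 * Like t4_write_page_pods_for_ps(), but for the kernel buffer backing a
 * DDP receive buffer (drb->buf).  Physical addresses are looked up with
 * pmap_kextract() rather than coming from a pageset.
 */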
1698 static int
1699 t4_write_page_pods_for_rcvbuf(struct adapter *sc, struct sge_wrq *wrq, int tid,
1700     struct ddp_rcv_buffer *drb)
1701 {
1702 	struct wrqe *wr;
1703 	struct ulp_mem_io *ulpmc;
1704 	struct ulptx_idata *ulpsc;
1705 	struct pagepod *ppod;
1706 	int i, j, k, n, chunk, len, ddp_pgsz;
1707 	u_int ppod_addr, offset;
1708 	uint32_t cmd;
1709 	struct ppod_reservation *prsv = &drb->prsv;
1710 	struct ppod_region *pr = prsv->prsv_pr;
1711 	uintptr_t end_pva, pva;
1712 	vm_paddr_t pa;
1713 
1714 	MPASS(prsv->prsv_nppods > 0);
1715 
1716 	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
1717 	if (is_t4(sc))
1718 		cmd |= htobe32(F_ULP_MEMIO_ORDER);
1719 	else
1720 		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
1721 	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
1722 	offset = (uintptr_t)drb->buf & PAGE_MASK;
1723 	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
1724 	pva = trunc_page((uintptr_t)drb->buf);
1725 	end_pva = trunc_page((uintptr_t)drb->buf + drb->len - 1);
1726 	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {
1727 		/* How many page pods are we writing in this cycle */
1728 		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
1729 		MPASS(n > 0);
1730 		chunk = PPOD_SZ(n);
1731 		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
1732 
1733 		wr = alloc_wrqe(len, wrq);
1734 		if (wr == NULL)
1735 			return (ENOMEM);	/* ok to just bail out */
1736 		ulpmc = wrtod(wr);
1737 
1738 		INIT_ULPTX_WR(ulpmc, len, 0, 0);
1739 		ulpmc->cmd = cmd;
1740 		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
1741 		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
1742 		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
1743 
1744 		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1745 		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1746 		ulpsc->len = htobe32(chunk);
1747 
1748 		ppod = (struct pagepod *)(ulpsc + 1);
1749 		for (j = 0; j < n; i++, j++, ppod++) {
1750 			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
1751 			    V_PPOD_TID(tid) | prsv->prsv_tag);
1752 			ppod->len_offset = htobe64(V_PPOD_LEN(drb->len) |
1753 			    V_PPOD_OFST(offset));
1754 			ppod->rsvd = 0;
1755 
1756 			for (k = 0; k < nitems(ppod->addr); k++) {
1757 				if (pva > end_pva)
1758 					ppod->addr[k] = 0;
1759 				else {
1760 					pa = pmap_kextract(pva);
1761 					ppod->addr[k] = htobe64(pa);
1762 					pva += ddp_pgsz;
1763 				}
1764 #if 0
1765 				CTR5(KTR_CXGBE,
1766 				    "%s: tid %d ppod[%d]->addr[%d] = %p",
1767 				    __func__, tid, i, k,
1768 				    be64toh(ppod->addr[k]));
1769 #endif
1770 			}
1771 
1772 			/*
1773 			 * Walk back 1 segment so that the first address in the
1774 			 * next pod is the same as the last one in the current
1775 			 * pod.
1776 			 */
1777 			pva -= ddp_pgsz;
1778 		}
1779 
1780 		t4_wrq_tx(sc, wr);
1781 	}
1782 
1783 	MPASS(pva <= end_pva);
1784 
1785 	return (0);
1786 }
1787 
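/*
 * Allocate an mbuf large enough to hold a raw work request of 'len'
 * bytes and mark it as such.  Requests larger than a cluster are not
 * supported.
 */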
1788 static struct mbuf *
1789 alloc_raw_wr_mbuf(int len)
1790 {
1791 	struct mbuf *m;
1792 
1793 	if (len <= MHLEN)
1794 		m = m_gethdr(M_NOWAIT, MT_DATA);
1795 	else if (len <= MCLBYTES)
1796 		m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
1797 	else
1798 		m = NULL;
1799 	if (m == NULL)
1800 		return (NULL);
1801 	m->m_pkthdr.len = len;
1802 	m->m_len = len;
1803 	set_mbuf_raw_wr(m, true);
1804 	return (m);
1805 }
1806 
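/*
 * Write page pods for the pages backing an unmapped bio.  The work
 * requests are built in raw-WR mbufs and appended to the 'wrq' mbuf
 * queue for the caller to transmit, rather than being sent directly.
 */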
1807 int
1808 t4_write_page_pods_for_bio(struct adapter *sc, struct toepcb *toep,
1809     struct ppod_reservation *prsv, struct bio *bp, struct mbufq *wrq)
1810 {
1811 	struct ulp_mem_io *ulpmc;
1812 	struct ulptx_idata *ulpsc;
1813 	struct pagepod *ppod;
1814 	int i, j, k, n, chunk, len, ddp_pgsz, idx;
1815 	u_int ppod_addr;
1816 	uint32_t cmd;
1817 	struct ppod_region *pr = prsv->prsv_pr;
1818 	vm_paddr_t pa;
1819 	struct mbuf *m;
1820 
1821 	MPASS(bp->bio_flags & BIO_UNMAPPED);
1822 
1823 	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
1824 	if (is_t4(sc))
1825 		cmd |= htobe32(F_ULP_MEMIO_ORDER);
1826 	else
1827 		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
1828 	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
1829 	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
1830 	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {
1831 
1832 		/* How many page pods are we writing in this cycle */
1833 		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
1834 		MPASS(n > 0);
1835 		chunk = PPOD_SZ(n);
1836 		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
1837 
1838 		m = alloc_raw_wr_mbuf(len);
1839 		if (m == NULL)
1840 			return (ENOMEM);
1841 
1842 		ulpmc = mtod(m, struct ulp_mem_io *);
1843 		INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
1844 		ulpmc->cmd = cmd;
1845 		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
1846 		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
1847 		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
1848 
1849 		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1850 		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1851 		ulpsc->len = htobe32(chunk);
1852 
1853 		ppod = (struct pagepod *)(ulpsc + 1);
1854 		for (j = 0; j < n; i++, j++, ppod++) {
1855 			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
1856 			    V_PPOD_TID(toep->tid) |
1857 			    (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
1858 			ppod->len_offset = htobe64(V_PPOD_LEN(bp->bio_bcount) |
1859 			    V_PPOD_OFST(bp->bio_ma_offset));
1860 			ppod->rsvd = 0;
1861 			idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
1862 			for (k = 0; k < nitems(ppod->addr); k++) {
1863 				if (idx < bp->bio_ma_n) {
1864 					pa = VM_PAGE_TO_PHYS(bp->bio_ma[idx]);
1865 					ppod->addr[k] = htobe64(pa);
1866 					idx += ddp_pgsz / PAGE_SIZE;
1867 				} else
1868 					ppod->addr[k] = 0;
1869 #if 0
1870 				CTR5(KTR_CXGBE,
1871 				    "%s: tid %d ppod[%d]->addr[%d] = %p",
1872 				    __func__, toep->tid, i, k,
1873 				    be64toh(ppod->addr[k]));
1874 #endif
1875 			}
1876 		}
1877 
1878 		mbufq_enqueue(wrq, m);
1879 	}
1880 
1881 	return (0);
1882 }
1883 
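/*
 * Same as above, but for a kernel virtual buffer described by
 * (buf, buflen).
 */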
1884 int
1885 t4_write_page_pods_for_buf(struct adapter *sc, struct toepcb *toep,
1886     struct ppod_reservation *prsv, vm_offset_t buf, int buflen,
1887     struct mbufq *wrq)
1888 {
1889 	struct ulp_mem_io *ulpmc;
1890 	struct ulptx_idata *ulpsc;
1891 	struct pagepod *ppod;
1892 	int i, j, k, n, chunk, len, ddp_pgsz;
1893 	u_int ppod_addr, offset;
1894 	uint32_t cmd;
1895 	struct ppod_region *pr = prsv->prsv_pr;
1896 	uintptr_t end_pva, pva;
1897 	vm_paddr_t pa;
1898 	struct mbuf *m;
1899 
1900 	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
1901 	if (is_t4(sc))
1902 		cmd |= htobe32(F_ULP_MEMIO_ORDER);
1903 	else
1904 		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
1905 	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
1906 	offset = buf & PAGE_MASK;
1907 	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
1908 	pva = trunc_page(buf);
1909 	end_pva = trunc_page(buf + buflen - 1);
1910 	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {
1911 
1912 		/* How many page pods are we writing in this cycle */
1913 		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
1914 		MPASS(n > 0);
1915 		chunk = PPOD_SZ(n);
1916 		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
1917 
1918 		m = alloc_raw_wr_mbuf(len);
1919 		if (m == NULL)
1920 			return (ENOMEM);
1921 		ulpmc = mtod(m, struct ulp_mem_io *);
1922 
1923 		INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
1924 		ulpmc->cmd = cmd;
1925 		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
1926 		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
1927 		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
1928 
1929 		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
1930 		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
1931 		ulpsc->len = htobe32(chunk);
1932 
1933 		ppod = (struct pagepod *)(ulpsc + 1);
1934 		for (j = 0; j < n; i++, j++, ppod++) {
1935 			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
1936 			    V_PPOD_TID(toep->tid) |
1937 			    (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
1938 			ppod->len_offset = htobe64(V_PPOD_LEN(buflen) |
1939 			    V_PPOD_OFST(offset));
1940 			ppod->rsvd = 0;
1941 
1942 			for (k = 0; k < nitems(ppod->addr); k++) {
1943 				if (pva > end_pva)
1944 					ppod->addr[k] = 0;
1945 				else {
1946 					pa = pmap_kextract(pva);
1947 					ppod->addr[k] = htobe64(pa);
1948 					pva += ddp_pgsz;
1949 				}
1950 #if 0
1951 				CTR5(KTR_CXGBE,
1952 				    "%s: tid %d ppod[%d]->addr[%d] = %p",
1953 				    __func__, toep->tid, i, k,
1954 				    be64toh(ppod->addr[k]));
1955 #endif
1956 			}
1957 
1958 			/*
1959 			 * Walk back 1 segment so that the first address in the
1960 			 * next pod is the same as the last one in the current
1961 			 * pod.
1962 			 */
1963 			pva -= ddp_pgsz;
1964 		}
1965 
1966 		mbufq_enqueue(wrq, m);
1967 	}
1968 
1969 	MPASS(pva <= end_pva);
1970 
1971 	return (0);
1972 }
1973 
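/*
 * Write page pods covering a CTL scatter/gather list of 'entries'
 * segments totalling 'xferlen' bytes.  The resulting work requests are
 * queued on 'wrq' as raw-WR mbufs.
 */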
1974 int
1975 t4_write_page_pods_for_sgl(struct adapter *sc, struct toepcb *toep,
1976     struct ppod_reservation *prsv, struct ctl_sg_entry *sgl, int entries,
1977     int xferlen, struct mbufq *wrq)
1978 {
1979 	struct ulp_mem_io *ulpmc;
1980 	struct ulptx_idata *ulpsc;
1981 	struct pagepod *ppod;
1982 	int i, j, k, n, chunk, len, ddp_pgsz;
1983 	u_int ppod_addr, offset, sg_offset = 0;
1984 	uint32_t cmd;
1985 	struct ppod_region *pr = prsv->prsv_pr;
1986 	uintptr_t pva;
1987 	vm_paddr_t pa;
1988 	struct mbuf *m;
1989 
1990 	MPASS(sgl != NULL);
1991 	MPASS(entries > 0);
1992 	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
1993 	if (is_t4(sc))
1994 		cmd |= htobe32(F_ULP_MEMIO_ORDER);
1995 	else
1996 		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
1997 	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
1998 	offset = (vm_offset_t)sgl->addr & PAGE_MASK;
1999 	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
2000 	pva = trunc_page((vm_offset_t)sgl->addr);
2001 	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {
2002 
2003 		/* How many page pods are we writing in this cycle */
2004 		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
2005 		MPASS(n > 0);
2006 		chunk = PPOD_SZ(n);
2007 		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);
2008 
2009 		m = alloc_raw_wr_mbuf(len);
2010 		if (m == NULL)
2011 			return (ENOMEM);
2012 		ulpmc = mtod(m, struct ulp_mem_io *);
2013 
2014 		INIT_ULPTX_WR(ulpmc, len, 0, toep->tid);
2015 		ulpmc->cmd = cmd;
2016 		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
2017 		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
2018 		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));
2019 
2020 		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
2021 		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
2022 		ulpsc->len = htobe32(chunk);
2023 
2024 		ppod = (struct pagepod *)(ulpsc + 1);
2025 		for (j = 0; j < n; i++, j++, ppod++) {
2026 			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
2027 			    V_PPOD_TID(toep->tid) |
2028 			    (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
2029 			ppod->len_offset = htobe64(V_PPOD_LEN(xferlen) |
2030 			    V_PPOD_OFST(offset));
2031 			ppod->rsvd = 0;
2032 
2033 			for (k = 0; k < nitems(ppod->addr); k++) {
2034 				if (entries != 0) {
2035 					pa = pmap_kextract(pva + sg_offset);
2036 					ppod->addr[k] = htobe64(pa);
2037 				} else
2038 					ppod->addr[k] = 0;
2039 
2040 #if 0
2041 				CTR5(KTR_CXGBE,
2042 				    "%s: tid %d ppod[%d]->addr[%d] = %p",
2043 				    __func__, toep->tid, i, k,
2044 				    be64toh(ppod->addr[k]));
2045 #endif
2046 
2047 				/*
2048 				 * If this is the last entry in a pod,
2049 				 * reuse the same entry for first address
2050 				 * in the next pod.
2051 				 */
2052 				if (k + 1 == nitems(ppod->addr))
2053 					break;
2054 
2055 				/*
2056 				 * Don't move to the next DDP page if the
2057 				 * sgl is already finished.
2058 				 */
2059 				if (entries == 0)
2060 					continue;
2061 
2062 				sg_offset += ddp_pgsz;
2063 				if (sg_offset == sgl->len) {
2064 					/*
2065 					 * This sgl entry is done.  Go
2066 					 * to the next.
2067 					 */
2068 					entries--;
2069 					sgl++;
2070 					sg_offset = 0;
2071 					if (entries != 0)
2072 						pva = trunc_page(
2073 						    (vm_offset_t)sgl->addr);
2074 				}
2075 			}
2076 		}
2077 
2078 		mbufq_enqueue(wrq, m);
2079 	}
2080 
2081 	return (0);
2082 }
2083 
2084 /*
2085  * Prepare a pageset for DDP.  This sets up page pods.
2086  */
2087 static int
2088 prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps)
2089 {
2090 	struct tom_data *td = sc->tom_softc;
2091 
2092 	if (ps->prsv.prsv_nppods == 0 &&
2093 	    t4_alloc_page_pods_for_ps(&td->pr, ps) != 0) {
2094 		return (0);
2095 	}
2096 	if (!(ps->flags & PS_PPODS_WRITTEN) &&
2097 	    t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) {
2098 		return (0);
2099 	}
2100 
2101 	return (1);
2102 }
2103 
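/*
 * Initialize a page pod region covering the adapter memory range 'r'.
 * A vmem arena manages allocation of page pods within the region, and
 * the DDP tag layout (tag, alias, and invalid bits) is derived from the
 * region's size.
 */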
2104 int
2105 t4_init_ppod_region(struct ppod_region *pr, struct t4_range *r, u_int psz,
2106     const char *name)
2107 {
2108 	int i;
2109 
2110 	MPASS(pr != NULL);
2111 	MPASS(r->size > 0);
2112 
2113 	pr->pr_start = r->start;
2114 	pr->pr_len = r->size;
2115 	pr->pr_page_shift[0] = 12 + G_HPZ0(psz);
2116 	pr->pr_page_shift[1] = 12 + G_HPZ1(psz);
2117 	pr->pr_page_shift[2] = 12 + G_HPZ2(psz);
2118 	pr->pr_page_shift[3] = 12 + G_HPZ3(psz);
2119 
2120 	/* The SGL -> page pod algorithm requires the sizes to be in order. */
2121 	for (i = 1; i < nitems(pr->pr_page_shift); i++) {
2122 		if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1])
2123 			return (ENXIO);
2124 	}
2125 
2126 	pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG);
2127 	pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask;
2128 	if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0)
2129 		return (ENXIO);
2130 	pr->pr_alias_shift = fls(pr->pr_tag_mask);
2131 	pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1);
2132 
2133 	pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0,
2134 	    M_FIRSTFIT | M_NOWAIT);
2135 	if (pr->pr_arena == NULL)
2136 		return (ENOMEM);
2137 
2138 	return (0);
2139 }
2140 
2141 void
2142 t4_free_ppod_region(struct ppod_region *pr)
2143 {
2144 
2145 	MPASS(pr != NULL);
2146 
2147 	if (pr->pr_arena)
2148 		vmem_destroy(pr->pr_arena);
2149 	bzero(pr, sizeof(*pr));
2150 }
2151 
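/*
 * Returns 0 if the cached pageset still describes the same user buffer
 * and the address space has not changed since the pages were wired.
 */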
2152 static int
2153 pscmp(struct pageset *ps, struct vmspace *vm, vm_offset_t start, int npages,
2154     int pgoff, int len)
2155 {
2156 
2157 	if (ps->start != start || ps->npages != npages ||
2158 	    ps->offset != pgoff || ps->len != len)
2159 		return (1);
2160 
2161 	return (ps->vm != vm || ps->vm_timestamp != vm->vm_map.timestamp);
2162 }
2163 
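/*
 * Wire down the user pages backing an AIO read and return a pageset
 * describing them, reusing a cached pageset when possible.  Requests
 * larger than MAX_DDP_BUFFER_SIZE are truncated to a short read.
 */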
2164 static int
2165 hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps)
2166 {
2167 	struct vmspace *vm;
2168 	vm_map_t map;
2169 	vm_offset_t start, end, pgoff;
2170 	struct pageset *ps;
2171 	int n;
2172 
2173 	DDP_ASSERT_LOCKED(toep);
2174 
2175 	/*
2176 	 * The AIO subsystem will cancel and drain all requests before
2177 	 * permitting a process to exit or exec, so p_vmspace should
2178 	 * be stable here.
2179 	 */
2180 	vm = job->userproc->p_vmspace;
2181 	map = &vm->vm_map;
2182 	start = (uintptr_t)job->uaiocb.aio_buf;
2183 	pgoff = start & PAGE_MASK;
2184 	end = round_page(start + job->uaiocb.aio_nbytes);
2185 	start = trunc_page(start);
2186 
2187 	if (end - start > MAX_DDP_BUFFER_SIZE) {
2188 		/*
2189 		 * Truncate the request to a short read.
2190 		 * Alternatively, we could DDP in chunks to the larger
2191 		 * buffer, but that would be quite a bit more work.
2192 		 *
2193 		 * When truncating, round the request down to avoid
2194 		 * crossing a cache line on the final transaction.
2195 		 */
2196 		end = rounddown2(start + MAX_DDP_BUFFER_SIZE, CACHE_LINE_SIZE);
2197 #ifdef VERBOSE_TRACES
2198 		CTR4(KTR_CXGBE, "%s: tid %d, truncating size from %lu to %lu",
2199 		    __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes,
2200 		    (unsigned long)(end - (start + pgoff)));
2201 #endif
2202 		job->uaiocb.aio_nbytes = end - (start + pgoff);
2203 		end = round_page(end);
2204 	}
2205 
2206 	n = atop(end - start);
2207 
2208 	/*
2209 	 * Try to reuse a cached pageset.
2210 	 */
2211 	TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) {
2212 		if (pscmp(ps, vm, start, n, pgoff,
2213 		    job->uaiocb.aio_nbytes) == 0) {
2214 			TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
2215 			toep->ddp.cached_count--;
2216 			*pps = ps;
2217 			return (0);
2218 		}
2219 	}
2220 
2221 	/*
2222 	 * If there are too many cached pagesets to create a new one,
2223 	 * free a pageset before creating a new one.
2224 	 */
2225 	KASSERT(toep->ddp.active_count + toep->ddp.cached_count <=
2226 	    nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__));
2227 	if (toep->ddp.active_count + toep->ddp.cached_count ==
2228 	    nitems(toep->ddp.db)) {
2229 		KASSERT(toep->ddp.cached_count > 0,
2230 		    ("no cached pageset to free"));
2231 		ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq);
2232 		TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
2233 		toep->ddp.cached_count--;
2234 		free_pageset(toep->td, ps);
2235 	}
2236 	DDP_UNLOCK(toep);
2237 
2238 	/* Create a new pageset. */
2239 	ps = malloc(sizeof(*ps) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
2240 	    M_ZERO);
2241 	ps->pages = (vm_page_t *)(ps + 1);
2242 	ps->vm_timestamp = map->timestamp;
2243 	ps->npages = vm_fault_quick_hold_pages(map, start, end - start,
2244 	    VM_PROT_WRITE, ps->pages, n);
2245 
2246 	DDP_LOCK(toep);
2247 	if (ps->npages < 0) {
2248 		free(ps, M_CXGBE);
2249 		return (EFAULT);
2250 	}
2251 
2252 	KASSERT(ps->npages == n, ("hold_aio: page count mismatch: %d vs %d",
2253 	    ps->npages, n));
2254 
2255 	ps->offset = pgoff;
2256 	ps->len = job->uaiocb.aio_nbytes;
2257 	refcount_acquire(&vm->vm_refcnt);
2258 	ps->vm = vm;
2259 	ps->start = start;
2260 
2261 	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
2262 	    __func__, toep->tid, ps, job, ps->npages);
2263 	*pps = ps;
2264 	return (0);
2265 }
2266 
2267 static void
2268 ddp_complete_all(struct toepcb *toep, int error)
2269 {
2270 	struct kaiocb *job;
2271 
2272 	DDP_ASSERT_LOCKED(toep);
2273 	KASSERT((toep->ddp.flags & DDP_AIO) != 0, ("%s: DDP_RCVBUF", __func__));
2274 	while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) {
2275 		job = TAILQ_FIRST(&toep->ddp.aiojobq);
2276 		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2277 		toep->ddp.waiting_count--;
2278 		if (aio_clear_cancel_function(job))
2279 			ddp_complete_one(job, error);
2280 	}
2281 }
2282 
2283 static void
2284 aio_ddp_cancel_one(struct kaiocb *job)
2285 {
2286 	long copied;
2287 
2288 	/*
2289 	 * If this job had copied data out of the socket buffer before
2290 	 * it was cancelled, report it as a short read rather than an
2291 	 * error.
2292 	 */
2293 	copied = job->aio_received;
2294 	if (copied != 0)
2295 		aio_complete(job, copied, 0);
2296 	else
2297 		aio_cancel(job);
2298 }
2299 
2300 /*
2301  * Called when the main loop wants to requeue a job to retry it later.
2302  * Deals with the race of the job being cancelled while it was being
2303  * examined.
2304  */
2305 static void
2306 aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job)
2307 {
2308 
2309 	DDP_ASSERT_LOCKED(toep);
2310 	if (!(toep->ddp.flags & DDP_DEAD) &&
2311 	    aio_set_cancel_function(job, t4_aio_cancel_queued)) {
2312 		TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
2313 		toep->ddp.waiting_count++;
2314 	} else
2315 		aio_ddp_cancel_one(job);
2316 }
2317 
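/*
 * Main loop for AIO DDP.  Pull the next queued job, copy out any data
 * already sitting in the socket buffer, and if the job is still
 * incomplete, program one of the two hardware DDP buffers with the
 * job's pages.
 */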
2318 static void
2319 aio_ddp_requeue(struct toepcb *toep)
2320 {
2321 	struct adapter *sc = td_adapter(toep->td);
2322 	struct socket *so;
2323 	struct sockbuf *sb;
2324 	struct inpcb *inp;
2325 	struct kaiocb *job;
2326 	struct ddp_buffer *db;
2327 	size_t copied, offset, resid;
2328 	struct pageset *ps;
2329 	struct mbuf *m;
2330 	uint64_t ddp_flags, ddp_flags_mask;
2331 	struct wrqe *wr;
2332 	int buf_flag, db_idx, error;
2333 
2334 	DDP_ASSERT_LOCKED(toep);
2335 
2336 restart:
2337 	if (toep->ddp.flags & DDP_DEAD) {
2338 		MPASS(toep->ddp.waiting_count == 0);
2339 		MPASS(toep->ddp.active_count == 0);
2340 		return;
2341 	}
2342 
2343 	if (toep->ddp.waiting_count == 0 ||
2344 	    toep->ddp.active_count == nitems(toep->ddp.db)) {
2345 		return;
2346 	}
2347 
2348 	job = TAILQ_FIRST(&toep->ddp.aiojobq);
2349 	so = job->fd_file->f_data;
2350 	sb = &so->so_rcv;
2351 	SOCKBUF_LOCK(sb);
2352 
2353 	/* We will never get anything unless we are or were connected. */
2354 	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
2355 		SOCKBUF_UNLOCK(sb);
2356 		ddp_complete_all(toep, ENOTCONN);
2357 		return;
2358 	}
2359 
2360 	KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0,
2361 	    ("%s: pending sockbuf data and DDP is active", __func__));
2362 
2363 	/* Abort if socket has reported problems. */
2364 	/* XXX: Wait for any queued DDP's to finish and/or flush them? */
2365 	if (so->so_error && sbavail(sb) == 0) {
2366 		toep->ddp.waiting_count--;
2367 		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2368 		if (!aio_clear_cancel_function(job)) {
2369 			SOCKBUF_UNLOCK(sb);
2370 			goto restart;
2371 		}
2372 
2373 		/*
2374 		 * If this job has previously copied some data, report
2375 		 * a short read and leave the error to be reported by
2376 		 * a future request.
2377 		 */
2378 		copied = job->aio_received;
2379 		if (copied != 0) {
2380 			SOCKBUF_UNLOCK(sb);
2381 			aio_complete(job, copied, 0);
2382 			goto restart;
2383 		}
2384 		error = so->so_error;
2385 		so->so_error = 0;
2386 		SOCKBUF_UNLOCK(sb);
2387 		aio_complete(job, -1, error);
2388 		goto restart;
2389 	}
2390 
2391 	/*
2392 	 * Door is closed.  If there is pending data in the socket buffer,
2393 	 * deliver it.  If there are pending DDP requests, wait for those
2394 	 * to complete.  Once they have completed, return EOF reads.
2395 	 */
2396 	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
2397 		SOCKBUF_UNLOCK(sb);
2398 		if (toep->ddp.active_count != 0)
2399 			return;
2400 		ddp_complete_all(toep, 0);
2401 		return;
2402 	}
2403 
2404 	/*
2405 	 * If DDP is not enabled and there is no pending socket buffer
2406 	 * data, try to enable DDP.
2407 	 */
2408 	if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) {
2409 		SOCKBUF_UNLOCK(sb);
2410 
2411 		/*
2412 		 * Wait for the card to ACK that DDP is enabled before
2413 		 * queueing any buffers.  Currently this waits for an
2414 		 * indicate to arrive.  This could use a TCB_SET_FIELD_RPL
2415 		 * message to know that DDP was enabled instead of waiting
2416 		 * for the indicate which would avoid copying the indicate
2417 		 * if no data is pending.
2418 		 *
2419 		 * XXX: Might want to limit the indicate size to the size
2420 		 * of the first queued request.
2421 		 */
2422 		if ((toep->ddp.flags & DDP_SC_REQ) == 0)
2423 			enable_ddp(sc, toep);
2424 		return;
2425 	}
2426 	SOCKBUF_UNLOCK(sb);
2427 
2428 	/*
2429 	 * If another thread is queueing a buffer for DDP, let it
2430 	 * drain any work and return.
2431 	 */
2432 	if (toep->ddp.queueing != NULL)
2433 		return;
2434 
2435 	/* Take the next job to prep it for DDP. */
2436 	toep->ddp.waiting_count--;
2437 	TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2438 	if (!aio_clear_cancel_function(job))
2439 		goto restart;
2440 	toep->ddp.queueing = job;
2441 
2442 	/* NB: This drops DDP_LOCK while it holds the backing VM pages. */
2443 	error = hold_aio(toep, job, &ps);
2444 	if (error != 0) {
2445 		ddp_complete_one(job, error);
2446 		toep->ddp.queueing = NULL;
2447 		goto restart;
2448 	}
2449 
2450 	SOCKBUF_LOCK(sb);
2451 	if (so->so_error && sbavail(sb) == 0) {
2452 		copied = job->aio_received;
2453 		if (copied != 0) {
2454 			SOCKBUF_UNLOCK(sb);
2455 			recycle_pageset(toep, ps);
2456 			aio_complete(job, copied, 0);
2457 			toep->ddp.queueing = NULL;
2458 			goto restart;
2459 		}
2460 
2461 		error = so->so_error;
2462 		so->so_error = 0;
2463 		SOCKBUF_UNLOCK(sb);
2464 		recycle_pageset(toep, ps);
2465 		aio_complete(job, -1, error);
2466 		toep->ddp.queueing = NULL;
2467 		goto restart;
2468 	}
2469 
2470 	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
2471 		SOCKBUF_UNLOCK(sb);
2472 		recycle_pageset(toep, ps);
2473 		if (toep->ddp.active_count != 0) {
2474 			/*
2475 			 * The door is closed, but there are still pending
2476 			 * DDP buffers.  Requeue.  These jobs will all be
2477 			 * completed once those buffers drain.
2478 			 */
2479 			aio_ddp_requeue_one(toep, job);
2480 			toep->ddp.queueing = NULL;
2481 			return;
2482 		}
2483 		ddp_complete_one(job, 0);
2484 		ddp_complete_all(toep, 0);
2485 		toep->ddp.queueing = NULL;
2486 		return;
2487 	}
2488 
2489 sbcopy:
2490 	/*
2491 	 * If the toep is dead, there shouldn't be any data in the socket
2492 	 * buffer, so the above case should have handled this.
2493 	 */
2494 	MPASS(!(toep->ddp.flags & DDP_DEAD));
2495 
2496 	/*
2497 	 * If there is pending data in the socket buffer (either
2498 	 * from before the requests were queued or a DDP indicate),
2499 	 * copy those mbufs out directly.
2500 	 */
2501 	copied = 0;
2502 	offset = ps->offset + job->aio_received;
2503 	MPASS(job->aio_received <= job->uaiocb.aio_nbytes);
2504 	resid = job->uaiocb.aio_nbytes - job->aio_received;
2505 	m = sb->sb_mb;
2506 	KASSERT(m == NULL || toep->ddp.active_count == 0,
2507 	    ("%s: sockbuf data with active DDP", __func__));
2508 	while (m != NULL && resid > 0) {
2509 		struct iovec iov[1];
2510 		struct uio uio;
2511 #ifdef INVARIANTS
2512 		int error;
2513 #endif
2514 
2515 		iov[0].iov_base = mtod(m, void *);
2516 		iov[0].iov_len = m->m_len;
2517 		if (iov[0].iov_len > resid)
2518 			iov[0].iov_len = resid;
2519 		uio.uio_iov = iov;
2520 		uio.uio_iovcnt = 1;
2521 		uio.uio_offset = 0;
2522 		uio.uio_resid = iov[0].iov_len;
2523 		uio.uio_segflg = UIO_SYSSPACE;
2524 		uio.uio_rw = UIO_WRITE;
2525 #ifdef INVARIANTS
2526 		error = uiomove_fromphys(ps->pages, offset + copied,
2527 		    uio.uio_resid, &uio);
2528 #else
2529 		uiomove_fromphys(ps->pages, offset + copied, uio.uio_resid, &uio);
2530 #endif
2531 		MPASS(error == 0 && uio.uio_resid == 0);
2532 		copied += uio.uio_offset;
2533 		resid -= uio.uio_offset;
2534 		m = m->m_next;
2535 	}
2536 	if (copied != 0) {
2537 		sbdrop_locked(sb, copied);
2538 		job->aio_received += copied;
2539 		job->msgrcv = 1;
2540 		copied = job->aio_received;
2541 		inp = sotoinpcb(so);
2542 		if (!INP_TRY_WLOCK(inp)) {
2543 			/*
2544 			 * The reference on the socket file descriptor in
2545 			 * the AIO job should keep 'sb' and 'inp' stable.
2546 			 * Our caller has a reference on the 'toep' that
2547 			 * keeps it stable.
2548 			 */
2549 			SOCKBUF_UNLOCK(sb);
2550 			DDP_UNLOCK(toep);
2551 			INP_WLOCK(inp);
2552 			DDP_LOCK(toep);
2553 			SOCKBUF_LOCK(sb);
2554 
2555 			/*
2556 			 * If the socket has been closed, we should detect
2557 			 * that and complete this request if needed on
2558 			 * the next trip around the loop.
2559 			 */
2560 		}
2561 		t4_rcvd_locked(&toep->td->tod, intotcpcb(inp));
2562 		INP_WUNLOCK(inp);
2563 		if (resid == 0 || toep->ddp.flags & DDP_DEAD) {
2564 			/*
2565 			 * We filled the entire buffer with socket
2566 			 * data, DDP is not being used, or the socket
2567 			 * is being shut down, so complete the
2568 			 * request.
2569 			 */
2570 			SOCKBUF_UNLOCK(sb);
2571 			recycle_pageset(toep, ps);
2572 			aio_complete(job, copied, 0);
2573 			toep->ddp.queueing = NULL;
2574 			goto restart;
2575 		}
2576 
2577 		/*
2578 		 * If DDP is not enabled, requeue this request and restart.
2579 		 * This will either enable DDP or wait for more data to
2580 		 * arrive on the socket buffer.
2581 		 */
2582 		if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) {
2583 			SOCKBUF_UNLOCK(sb);
2584 			recycle_pageset(toep, ps);
2585 			aio_ddp_requeue_one(toep, job);
2586 			toep->ddp.queueing = NULL;
2587 			goto restart;
2588 		}
2589 
2590 		/*
2591 		 * An indicate might have arrived and been added to
2592 		 * the socket buffer while it was unlocked after the
2593 		 * copy to lock the INP.  If so, restart the copy.
2594 		 */
2595 		if (sbavail(sb) != 0)
2596 			goto sbcopy;
2597 	}
2598 	SOCKBUF_UNLOCK(sb);
2599 
2600 	if (prep_pageset(sc, toep, ps) == 0) {
2601 		recycle_pageset(toep, ps);
2602 		aio_ddp_requeue_one(toep, job);
2603 		toep->ddp.queueing = NULL;
2604 
2605 		/*
2606 		 * XXX: Need to retry this later.  Mostly need a trigger
2607 		 * when page pods are freed up.
2608 		 */
2609 		printf("%s: prep_pageset failed\n", __func__);
2610 		return;
2611 	}
2612 
2613 	/* Determine which DDP buffer to use. */
2614 	if (toep->ddp.db[0].job == NULL) {
2615 		db_idx = 0;
2616 	} else {
2617 		MPASS(toep->ddp.db[1].job == NULL);
2618 		db_idx = 1;
2619 	}
2620 
2621 	ddp_flags = 0;
2622 	ddp_flags_mask = 0;
2623 	if (db_idx == 0) {
2624 		ddp_flags |= V_TF_DDP_BUF0_VALID(1);
2625 		if (so->so_state & SS_NBIO)
2626 			ddp_flags |= V_TF_DDP_BUF0_FLUSH(1);
2627 		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) |
2628 		    V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) |
2629 		    V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1);
2630 		buf_flag = DDP_BUF0_ACTIVE;
2631 	} else {
2632 		ddp_flags |= V_TF_DDP_BUF1_VALID(1);
2633 		if (so->so_state & SS_NBIO)
2634 			ddp_flags |= V_TF_DDP_BUF1_FLUSH(1);
2635 		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) |
2636 		    V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) |
2637 		    V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1);
2638 		buf_flag = DDP_BUF1_ACTIVE;
2639 	}
2640 	MPASS((toep->ddp.flags & buf_flag) == 0);
2641 	if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) {
2642 		MPASS(db_idx == 0);
2643 		MPASS(toep->ddp.active_id == -1);
2644 		MPASS(toep->ddp.active_count == 0);
2645 		ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1);
2646 	}
2647 
2648 	/*
2649 	 * The TID for this connection should still be valid.  If DDP_DEAD
2650 	 * is set, SBS_CANTRCVMORE should be set, so we shouldn't be
2651 	 * this far anyway.  Even if the socket is closing on the other
2652 	 * end, the AIO job holds a reference on this end of the socket
2653 	 * which will keep it open and keep the TCP PCB attached until
2654 	 * after the job is completed.
2655 	 */
2656 	wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &ps->prsv,
2657 	    job->aio_received, ps->len, ddp_flags, ddp_flags_mask);
2658 	if (wr == NULL) {
2659 		recycle_pageset(toep, ps);
2660 		aio_ddp_requeue_one(toep, job);
2661 		toep->ddp.queueing = NULL;
2662 
2663 		/*
2664 		 * XXX: Need a way to kick a retry here.
2665 		 *
2666 		 * XXX: We know the fixed size needed and could
2667 		 * preallocate this using a blocking request at the
2668 		 * start of the task to avoid having to handle this
2669 		 * edge case.
2670 		 */
2671 		printf("%s: mk_update_tcb_for_ddp failed\n", __func__);
2672 		return;
2673 	}
2674 
2675 	if (!aio_set_cancel_function(job, t4_aio_cancel_active)) {
2676 		free_wrqe(wr);
2677 		recycle_pageset(toep, ps);
2678 		aio_ddp_cancel_one(job);
2679 		toep->ddp.queueing = NULL;
2680 		goto restart;
2681 	}
2682 
2683 #ifdef VERBOSE_TRACES
2684 	CTR6(KTR_CXGBE,
2685 	    "%s: tid %u, scheduling %p for DDP[%d] (flags %#lx/%#lx)", __func__,
2686 	    toep->tid, job, db_idx, ddp_flags, ddp_flags_mask);
2687 #endif
2688 	/* Give the chip the go-ahead. */
2689 	t4_wrq_tx(sc, wr);
2690 	db = &toep->ddp.db[db_idx];
2691 	db->cancel_pending = 0;
2692 	db->job = job;
2693 	db->ps = ps;
2694 	toep->ddp.queueing = NULL;
2695 	toep->ddp.flags |= buf_flag;
2696 	toep->ddp.active_count++;
2697 	if (toep->ddp.active_count == 1) {
2698 		MPASS(toep->ddp.active_id == -1);
2699 		toep->ddp.active_id = db_idx;
2700 		CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
2701 		    toep->ddp.active_id);
2702 	}
2703 	goto restart;
2704 }
2705 
2706 void
2707 ddp_queue_toep(struct toepcb *toep)
2708 {
2709 
2710 	DDP_ASSERT_LOCKED(toep);
2711 	if (toep->ddp.flags & DDP_TASK_ACTIVE)
2712 		return;
2713 	toep->ddp.flags |= DDP_TASK_ACTIVE;
2714 	hold_toepcb(toep);
2715 	soaio_enqueue(&toep->ddp.requeue_task);
2716 }
2717 
2718 static void
2719 aio_ddp_requeue_task(void *context, int pending)
2720 {
2721 	struct toepcb *toep = context;
2722 
2723 	DDP_LOCK(toep);
2724 	aio_ddp_requeue(toep);
2725 	toep->ddp.flags &= ~DDP_TASK_ACTIVE;
2726 	DDP_UNLOCK(toep);
2727 
2728 	free_toepcb(toep);
2729 }
2730 
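/*
 * Cancel a job whose buffer is currently programmed into one of the
 * hardware DDP buffers.  The buffer is invalidated via SET_TCB_FIELD
 * and the job is cancelled or partially completed once the card
 * acknowledges the invalidate.
 */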
2731 static void
2732 t4_aio_cancel_active(struct kaiocb *job)
2733 {
2734 	struct socket *so = job->fd_file->f_data;
2735 	struct tcpcb *tp = sototcpcb(so);
2736 	struct toepcb *toep = tp->t_toe;
2737 	struct adapter *sc = td_adapter(toep->td);
2738 	uint64_t valid_flag;
2739 	int i;
2740 
2741 	DDP_LOCK(toep);
2742 	if (aio_cancel_cleared(job)) {
2743 		DDP_UNLOCK(toep);
2744 		aio_ddp_cancel_one(job);
2745 		return;
2746 	}
2747 
2748 	for (i = 0; i < nitems(toep->ddp.db); i++) {
2749 		if (toep->ddp.db[i].job == job) {
2750 			/* Should only ever get one cancel request for a job. */
2751 			MPASS(toep->ddp.db[i].cancel_pending == 0);
2752 
2753 			/*
2754 			 * Invalidate this buffer.  It will be
2755 			 * cancelled or partially completed once the
2756 			 * card ACKs the invalidate.
2757 			 */
2758 			valid_flag = i == 0 ? V_TF_DDP_BUF0_VALID(1) :
2759 			    V_TF_DDP_BUF1_VALID(1);
2760 			t4_set_tcb_field(sc, toep->ctrlq, toep,
2761 			    W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1,
2762 			    CPL_COOKIE_DDP0 + i);
2763 			toep->ddp.db[i].cancel_pending = 1;
2764 			CTR2(KTR_CXGBE, "%s: request %p marked pending",
2765 			    __func__, job);
2766 			break;
2767 		}
2768 	}
2769 	DDP_UNLOCK(toep);
2770 }
2771 
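/*
 * Cancel a job that is still waiting on the aiojobq and has not been
 * handed to the hardware; it can be removed and completed immediately.
 */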
2772 static void
2773 t4_aio_cancel_queued(struct kaiocb *job)
2774 {
2775 	struct socket *so = job->fd_file->f_data;
2776 	struct tcpcb *tp = sototcpcb(so);
2777 	struct toepcb *toep = tp->t_toe;
2778 
2779 	DDP_LOCK(toep);
2780 	if (!aio_cancel_cleared(job)) {
2781 		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2782 		toep->ddp.waiting_count--;
2783 		if (toep->ddp.waiting_count == 0)
2784 			ddp_queue_toep(toep);
2785 	}
2786 	CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job);
2787 	DDP_UNLOCK(toep);
2788 
2789 	aio_ddp_cancel_one(job);
2790 }
2791 
2792 int
2793 t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
2794 {
2795 	struct inpcb *inp = sotoinpcb(so);
2796 	struct tcpcb *tp = intotcpcb(inp);
2797 	struct toepcb *toep = tp->t_toe;
2798 
2799 	/* Ignore writes. */
2800 	if (job->uaiocb.aio_lio_opcode != LIO_READ)
2801 		return (EOPNOTSUPP);
2802 
2803 	INP_WLOCK(inp);
2804 	if (__predict_false(ulp_mode(toep) == ULP_MODE_NONE)) {
2805 		if (!set_ddp_ulp_mode(toep)) {
2806 			INP_WUNLOCK(inp);
2807 			return (EOPNOTSUPP);
2808 		}
2809 	}
2810 	INP_WUNLOCK(inp);
2811 
2812 	DDP_LOCK(toep);
2813 
2814 	/*
2815 	 * If DDP is being used for all normal receive, don't use it
2816 	 * for AIO.
2817 	 */
2818 	if ((toep->ddp.flags & DDP_RCVBUF) != 0) {
2819 		DDP_UNLOCK(toep);
2820 		return (EOPNOTSUPP);
2821 	}
2822 
2823 	if ((toep->ddp.flags & DDP_AIO) == 0) {
2824 		toep->ddp.flags |= DDP_AIO;
2825 		TAILQ_INIT(&toep->ddp.cached_pagesets);
2826 		TAILQ_INIT(&toep->ddp.aiojobq);
2827 		TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task,
2828 		    toep);
2829 	}
2830 
2831 	/*
2832 	 * XXX: Think about possibly returning errors for ENOTCONN,
2833 	 * etc.  Perhaps the caller would only queue the request
2834 	 * if it failed with EOPNOTSUPP?
2835 	 */
2836 
2837 #ifdef VERBOSE_TRACES
2838 	CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid);
2839 #endif
2840 	if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
2841 		panic("new job was cancelled");
2842 	TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list);
2843 	toep->ddp.waiting_count++;
2844 
2845 	/*
2846 	 * Try to handle this request synchronously.  If this has
2847 	 * to block because the task is running, it will just bail
2848 	 * and let the task handle it instead.
2849 	 */
2850 	aio_ddp_requeue(toep);
2851 	DDP_UNLOCK(toep);
2852 	return (0);
2853 }
2854 
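/*
 * Keep the hardware DDP buffers populated with receive buffers while
 * DDP_RCVBUF mode is in use, allocating or recycling buffers as needed.
 */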
2855 static void
2856 ddp_rcvbuf_requeue(struct toepcb *toep)
2857 {
2858 	struct socket *so;
2859 	struct sockbuf *sb;
2860 	struct inpcb *inp;
2861 	struct ddp_rcv_buffer *drb;
2862 
2863 	DDP_ASSERT_LOCKED(toep);
2864 restart:
2865 	if ((toep->ddp.flags & DDP_DEAD) != 0) {
2866 		MPASS(toep->ddp.active_count == 0);
2867 		return;
2868 	}
2869 
2870 	/* If both buffers are active, nothing to do. */
2871 	if (toep->ddp.active_count == nitems(toep->ddp.db)) {
2872 		return;
2873 	}
2874 
2875 	inp = toep->inp;
2876 	so = inp->inp_socket;
2877 	sb = &so->so_rcv;
2878 
2879 	drb = alloc_cached_ddp_rcv_buffer(toep);
2880 	DDP_UNLOCK(toep);
2881 
2882 	if (drb == NULL) {
2883 		drb = alloc_ddp_rcv_buffer(toep, M_WAITOK);
2884 		if (drb == NULL) {
2885 			printf("%s: failed to allocate buffer\n", __func__);
2886 			DDP_LOCK(toep);
2887 			return;
2888 		}
2889 	}
2890 
2891 	DDP_LOCK(toep);
2892 	if ((toep->ddp.flags & DDP_DEAD) != 0 ||
2893 	    toep->ddp.active_count == nitems(toep->ddp.db)) {
2894 		recycle_ddp_rcv_buffer(toep, drb);
2895 		return;
2896 	}
2897 
2898 	/* We will never get anything unless we are or were connected. */
2899 	SOCKBUF_LOCK(sb);
2900 	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
2901 		SOCKBUF_UNLOCK(sb);
2902 		recycle_ddp_rcv_buffer(toep, drb);
2903 		return;
2904 	}
2905 
2906 	/* Abort if socket has reported problems or is closed. */
2907 	if (so->so_error != 0 || (sb->sb_state & SBS_CANTRCVMORE) != 0) {
2908 		SOCKBUF_UNLOCK(sb);
2909 		recycle_ddp_rcv_buffer(toep, drb);
2910 		return;
2911 	}
2912 	SOCKBUF_UNLOCK(sb);
2913 
2914 	if (!queue_ddp_rcvbuf(toep, drb)) {
2915 		/*
2916 		 * XXX: Need a way to kick a retry here.
2917 		 *
2918 		 * XXX: We know the fixed size needed and could
2919 		 * preallocate the work request using a blocking
2920 		 * request at the start of the task to avoid having to
2921 		 * handle this edge case.
2922 		 */
2923 		return;
2924 	}
2925 	goto restart;
2926 }
2927 
2928 static void
2929 ddp_rcvbuf_requeue_task(void *context, int pending)
2930 {
2931 	struct toepcb *toep = context;
2932 
2933 	DDP_LOCK(toep);
2934 	ddp_rcvbuf_requeue(toep);
2935 	toep->ddp.flags &= ~DDP_TASK_ACTIVE;
2936 	DDP_UNLOCK(toep);
2937 
2938 	free_toepcb(toep);
2939 }
2940 
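/*
 * Switch a connection to DDP for all normal receive.  This is mutually
 * exclusive with using DDP for AIO on the same connection.
 */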
2941 int
2942 t4_enable_ddp_rcv(struct socket *so, struct toepcb *toep)
2943 {
2944 	struct inpcb *inp = sotoinpcb(so);
2945 	struct adapter *sc = td_adapter(toep->td);
2946 
2947 	INP_WLOCK(inp);
2948 	switch (ulp_mode(toep)) {
2949 	case ULP_MODE_TCPDDP:
2950 		break;
2951 	case ULP_MODE_NONE:
2952 		if (set_ddp_ulp_mode(toep))
2953 			break;
2954 		/* FALLTHROUGH */
2955 	default:
2956 		INP_WUNLOCK(inp);
2957 		return (EOPNOTSUPP);
2958 	}
2959 	INP_WUNLOCK(inp);
2960 
2961 	DDP_LOCK(toep);
2962 
2963 	/*
2964 	 * If DDP is being used for AIO already, don't use it for
2965 	 * normal receive.
2966 	 */
2967 	if ((toep->ddp.flags & DDP_AIO) != 0) {
2968 		DDP_UNLOCK(toep);
2969 		return (EOPNOTSUPP);
2970 	}
2971 
2972 	if ((toep->ddp.flags & DDP_RCVBUF) != 0) {
2973 		DDP_UNLOCK(toep);
2974 		return (EBUSY);
2975 	}
2976 
2977 	toep->ddp.flags |= DDP_RCVBUF;
2978 	TAILQ_INIT(&toep->ddp.cached_buffers);
2979 	enable_ddp(sc, toep);
2980 	TASK_INIT(&toep->ddp.requeue_task, 0, ddp_rcvbuf_requeue_task, toep);
2981 	ddp_queue_toep(toep);
2982 	DDP_UNLOCK(toep);
2983 	return (0);
2984 }
2985 
2986 void
2987 t4_ddp_mod_load(void)
2988 {
2989 	if (t4_ddp_rcvbuf_len < PAGE_SIZE)
2990 		t4_ddp_rcvbuf_len = PAGE_SIZE;
2991 	if (t4_ddp_rcvbuf_len > MAX_DDP_BUFFER_SIZE)
2992 		t4_ddp_rcvbuf_len = MAX_DDP_BUFFER_SIZE;
2993 	if (!powerof2(t4_ddp_rcvbuf_len))
2994 		t4_ddp_rcvbuf_len = 1 << fls(t4_ddp_rcvbuf_len);
2995 
2996 	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
2997 	    CPL_COOKIE_DDP0);
2998 	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
2999 	    CPL_COOKIE_DDP1);
3000 	t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp);
3001 	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
3002 	TAILQ_INIT(&ddp_orphan_pagesets);
3003 	mtx_init(&ddp_orphan_pagesets_lock, "ddp orphans", NULL, MTX_DEF);
3004 	TASK_INIT(&ddp_orphan_task, 0, ddp_free_orphan_pagesets, NULL);
3005 }
3006 
3007 void
3008 t4_ddp_mod_unload(void)
3009 {
3010 
3011 	taskqueue_drain(taskqueue_thread, &ddp_orphan_task);
3012 	MPASS(TAILQ_EMPTY(&ddp_orphan_pagesets));
3013 	mtx_destroy(&ddp_orphan_pagesets_lock);
3014 	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP0);
3015 	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP1);
3016 	t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL);
3017 	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL);
3018 }
3019 #endif
3020