1e682d02eSNavdeep Parhar /*- 24d846d26SWarner Losh * SPDX-License-Identifier: BSD-2-Clause 3718cf2ccSPedro F. Giffuni * 4e682d02eSNavdeep Parhar * Copyright (c) 2012 Chelsio Communications, Inc. 5e682d02eSNavdeep Parhar * All rights reserved. 6e682d02eSNavdeep Parhar * Written by: Navdeep Parhar <np@FreeBSD.org> 7e682d02eSNavdeep Parhar * 8e682d02eSNavdeep Parhar * Redistribution and use in source and binary forms, with or without 9e682d02eSNavdeep Parhar * modification, are permitted provided that the following conditions 10e682d02eSNavdeep Parhar * are met: 11e682d02eSNavdeep Parhar * 1. Redistributions of source code must retain the above copyright 12e682d02eSNavdeep Parhar * notice, this list of conditions and the following disclaimer. 13e682d02eSNavdeep Parhar * 2. Redistributions in binary form must reproduce the above copyright 14e682d02eSNavdeep Parhar * notice, this list of conditions and the following disclaimer in the 15e682d02eSNavdeep Parhar * documentation and/or other materials provided with the distribution. 16e682d02eSNavdeep Parhar * 17e682d02eSNavdeep Parhar * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 18e682d02eSNavdeep Parhar * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19e682d02eSNavdeep Parhar * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20e682d02eSNavdeep Parhar * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 21e682d02eSNavdeep Parhar * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22e682d02eSNavdeep Parhar * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23e682d02eSNavdeep Parhar * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24e682d02eSNavdeep Parhar * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25e682d02eSNavdeep Parhar * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26e682d02eSNavdeep Parhar * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27e682d02eSNavdeep Parhar * SUCH DAMAGE. 28e682d02eSNavdeep Parhar */ 29e682d02eSNavdeep Parhar 30e682d02eSNavdeep Parhar #include <sys/cdefs.h> 31e682d02eSNavdeep Parhar #include "opt_inet.h" 32e682d02eSNavdeep Parhar 33e682d02eSNavdeep Parhar #include <sys/param.h> 34dc964385SJohn Baldwin #include <sys/aio.h> 352beaefe8SJohn Baldwin #include <sys/bio.h> 36dc964385SJohn Baldwin #include <sys/file.h> 37e682d02eSNavdeep Parhar #include <sys/systm.h> 38e682d02eSNavdeep Parhar #include <sys/kernel.h> 39e682d02eSNavdeep Parhar #include <sys/ktr.h> 40e682d02eSNavdeep Parhar #include <sys/module.h> 41e682d02eSNavdeep Parhar #include <sys/protosw.h> 42e682d02eSNavdeep Parhar #include <sys/proc.h> 43e682d02eSNavdeep Parhar #include <sys/domain.h> 44e682d02eSNavdeep Parhar #include <sys/socket.h> 45e682d02eSNavdeep Parhar #include <sys/socketvar.h> 46dc964385SJohn Baldwin #include <sys/taskqueue.h> 47e682d02eSNavdeep Parhar #include <sys/uio.h> 48e682d02eSNavdeep Parhar #include <netinet/in.h> 49e682d02eSNavdeep Parhar #include <netinet/in_pcb.h> 50e682d02eSNavdeep Parhar #include <netinet/ip.h> 51e682d02eSNavdeep Parhar #include <netinet/tcp_var.h> 52e682d02eSNavdeep Parhar #define TCPSTATES 53e682d02eSNavdeep Parhar #include <netinet/tcp_fsm.h> 54e682d02eSNavdeep Parhar #include <netinet/toecore.h> 55e682d02eSNavdeep Parhar 56e682d02eSNavdeep Parhar #include <vm/vm.h> 57e682d02eSNavdeep Parhar #include <vm/vm_extern.h> 58e682d02eSNavdeep Parhar #include <vm/vm_param.h> 59e682d02eSNavdeep Parhar #include <vm/pmap.h> 60e682d02eSNavdeep Parhar #include <vm/vm_map.h> 61e682d02eSNavdeep Parhar #include <vm/vm_page.h> 62e682d02eSNavdeep Parhar #include <vm/vm_object.h> 63e682d02eSNavdeep Parhar 6446bee804SJohn Baldwin #include <cam/scsi/scsi_all.h> 6546bee804SJohn Baldwin #include <cam/ctl/ctl_io.h> 6646bee804SJohn Baldwin 67e682d02eSNavdeep Parhar #ifdef TCP_OFFLOAD 68e682d02eSNavdeep Parhar #include "common/common.h" 69e682d02eSNavdeep Parhar #include "common/t4_msg.h" 70e682d02eSNavdeep Parhar #include "common/t4_regs.h" 71e682d02eSNavdeep Parhar #include "common/t4_tcb.h" 72e682d02eSNavdeep Parhar #include "tom/t4_tom.h" 73e682d02eSNavdeep Parhar 74fe0bdd1dSJohn Baldwin /* 75fe0bdd1dSJohn Baldwin * Use the 'backend3' field in AIO jobs to store the amount of data 76fe0bdd1dSJohn Baldwin * received by the AIO job so far. 77fe0bdd1dSJohn Baldwin */ 78fe0bdd1dSJohn Baldwin #define aio_received backend3 79fe0bdd1dSJohn Baldwin 80dc964385SJohn Baldwin static void aio_ddp_requeue_task(void *context, int pending); 81dc964385SJohn Baldwin static void ddp_complete_all(struct toepcb *toep, int error); 82dc964385SJohn Baldwin static void t4_aio_cancel_active(struct kaiocb *job); 83dc964385SJohn Baldwin static void t4_aio_cancel_queued(struct kaiocb *job); 84*eba13bbcSJohn Baldwin static int t4_alloc_page_pods_for_rcvbuf(struct ppod_region *pr, 85*eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb); 86*eba13bbcSJohn Baldwin static int t4_write_page_pods_for_rcvbuf(struct adapter *sc, 87*eba13bbcSJohn Baldwin struct sge_wrq *wrq, int tid, struct ddp_rcv_buffer *drb); 88b12c0a9eSJohn Baldwin 89dc964385SJohn Baldwin static TAILQ_HEAD(, pageset) ddp_orphan_pagesets; 90dc964385SJohn Baldwin static struct mtx ddp_orphan_pagesets_lock; 91dc964385SJohn Baldwin static struct task ddp_orphan_task; 92dc964385SJohn Baldwin 93e682d02eSNavdeep Parhar #define MAX_DDP_BUFFER_SIZE (M_TCB_RX_DDP_BUF0_LEN) 94e682d02eSNavdeep Parhar 95dc964385SJohn Baldwin /* 96*eba13bbcSJohn Baldwin * A page set holds information about a user buffer used for AIO DDP. 97*eba13bbcSJohn Baldwin * The page set holds resources such as the VM pages backing the 98*eba13bbcSJohn Baldwin * buffer (either held or wired) and the page pods associated with the 99*eba13bbcSJohn Baldwin * buffer. Recently used page sets are cached to allow for efficient 100*eba13bbcSJohn Baldwin * reuse of buffers (avoiding the need to re-fault in pages, hold 101*eba13bbcSJohn Baldwin * them, etc.). Note that cached page sets keep the backing pages 102*eba13bbcSJohn Baldwin * wired. The number of wired pages is capped by only allowing for 103*eba13bbcSJohn Baldwin * two wired pagesets per connection. This is not a perfect cap, but 104*eba13bbcSJohn Baldwin * is a trade-off for performance. 105dc964385SJohn Baldwin * 106dc964385SJohn Baldwin * If an application ping-pongs two buffers for a connection via 107dc964385SJohn Baldwin * aio_read(2) then those buffers should remain wired and expensive VM 108dc964385SJohn Baldwin * fault lookups should be avoided after each buffer has been used 109dc964385SJohn Baldwin * once. If an application uses more than two buffers then this will 110dc964385SJohn Baldwin * fall back to doing expensive VM fault lookups for each operation. 111dc964385SJohn Baldwin */ 112dc964385SJohn Baldwin static void 113dc964385SJohn Baldwin free_pageset(struct tom_data *td, struct pageset *ps) 114dc964385SJohn Baldwin { 115dc964385SJohn Baldwin vm_page_t p; 116dc964385SJohn Baldwin int i; 117dc964385SJohn Baldwin 118968267fdSNavdeep Parhar if (ps->prsv.prsv_nppods > 0) 119968267fdSNavdeep Parhar t4_free_page_pods(&ps->prsv); 120dc964385SJohn Baldwin 121dc964385SJohn Baldwin for (i = 0; i < ps->npages; i++) { 122dc964385SJohn Baldwin p = ps->pages[i]; 123dc964385SJohn Baldwin vm_page_unwire(p, PQ_INACTIVE); 124dc964385SJohn Baldwin } 125dc964385SJohn Baldwin mtx_lock(&ddp_orphan_pagesets_lock); 126dc964385SJohn Baldwin TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link); 127dc964385SJohn Baldwin taskqueue_enqueue(taskqueue_thread, &ddp_orphan_task); 128dc964385SJohn Baldwin mtx_unlock(&ddp_orphan_pagesets_lock); 129dc964385SJohn Baldwin } 130dc964385SJohn Baldwin 131dc964385SJohn Baldwin static void 132dc964385SJohn Baldwin ddp_free_orphan_pagesets(void *context, int pending) 133dc964385SJohn Baldwin { 134dc964385SJohn Baldwin struct pageset *ps; 135dc964385SJohn Baldwin 136dc964385SJohn Baldwin mtx_lock(&ddp_orphan_pagesets_lock); 137dc964385SJohn Baldwin while (!TAILQ_EMPTY(&ddp_orphan_pagesets)) { 138dc964385SJohn Baldwin ps = TAILQ_FIRST(&ddp_orphan_pagesets); 139dc964385SJohn Baldwin TAILQ_REMOVE(&ddp_orphan_pagesets, ps, link); 140dc964385SJohn Baldwin mtx_unlock(&ddp_orphan_pagesets_lock); 141dc964385SJohn Baldwin if (ps->vm) 142dc964385SJohn Baldwin vmspace_free(ps->vm); 143dc964385SJohn Baldwin free(ps, M_CXGBE); 144dc964385SJohn Baldwin mtx_lock(&ddp_orphan_pagesets_lock); 145dc964385SJohn Baldwin } 146dc964385SJohn Baldwin mtx_unlock(&ddp_orphan_pagesets_lock); 147dc964385SJohn Baldwin } 148dc964385SJohn Baldwin 149dc964385SJohn Baldwin static void 150dc964385SJohn Baldwin recycle_pageset(struct toepcb *toep, struct pageset *ps) 151dc964385SJohn Baldwin { 152dc964385SJohn Baldwin 153dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 154eeacb3b0SMark Johnston if (!(toep->ddp.flags & DDP_DEAD)) { 155125d42feSJohn Baldwin KASSERT(toep->ddp.cached_count + toep->ddp.active_count < 156125d42feSJohn Baldwin nitems(toep->ddp.db), ("too many wired pagesets")); 157125d42feSJohn Baldwin TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link); 158125d42feSJohn Baldwin toep->ddp.cached_count++; 159dc964385SJohn Baldwin } else 160dc964385SJohn Baldwin free_pageset(toep->td, ps); 161dc964385SJohn Baldwin } 162dc964385SJohn Baldwin 163dc964385SJohn Baldwin static void 164dc964385SJohn Baldwin ddp_complete_one(struct kaiocb *job, int error) 165dc964385SJohn Baldwin { 166dc964385SJohn Baldwin long copied; 167dc964385SJohn Baldwin 168dc964385SJohn Baldwin /* 169dc964385SJohn Baldwin * If this job had copied data out of the socket buffer before 170dc964385SJohn Baldwin * it was cancelled, report it as a short read rather than an 171dc964385SJohn Baldwin * error. 172dc964385SJohn Baldwin */ 173fe0bdd1dSJohn Baldwin copied = job->aio_received; 174dc964385SJohn Baldwin if (copied != 0 || error == 0) 175dc964385SJohn Baldwin aio_complete(job, copied, 0); 176dc964385SJohn Baldwin else 177dc964385SJohn Baldwin aio_complete(job, -1, error); 178dc964385SJohn Baldwin } 179dc964385SJohn Baldwin 180e682d02eSNavdeep Parhar static void 181*eba13bbcSJohn Baldwin free_ddp_rcv_buffer(struct toepcb *toep, struct ddp_rcv_buffer *drb) 182e682d02eSNavdeep Parhar { 183*eba13bbcSJohn Baldwin t4_free_page_pods(&drb->prsv); 184*eba13bbcSJohn Baldwin contigfree(drb->buf, drb->len, M_CXGBE); 185*eba13bbcSJohn Baldwin free(drb, M_CXGBE); 186*eba13bbcSJohn Baldwin counter_u64_add(toep->ofld_rxq->ddp_buffer_free, 1); 187*eba13bbcSJohn Baldwin free_toepcb(toep); 188*eba13bbcSJohn Baldwin } 189*eba13bbcSJohn Baldwin 190*eba13bbcSJohn Baldwin static void 191*eba13bbcSJohn Baldwin recycle_ddp_rcv_buffer(struct toepcb *toep, struct ddp_rcv_buffer *drb) 192*eba13bbcSJohn Baldwin { 193*eba13bbcSJohn Baldwin DDP_CACHE_LOCK(toep); 194*eba13bbcSJohn Baldwin if (!(toep->ddp.flags & DDP_DEAD) && 195*eba13bbcSJohn Baldwin toep->ddp.cached_count < t4_ddp_rcvbuf_cache) { 196*eba13bbcSJohn Baldwin TAILQ_INSERT_HEAD(&toep->ddp.cached_buffers, drb, link); 197*eba13bbcSJohn Baldwin toep->ddp.cached_count++; 198*eba13bbcSJohn Baldwin DDP_CACHE_UNLOCK(toep); 199*eba13bbcSJohn Baldwin } else { 200*eba13bbcSJohn Baldwin DDP_CACHE_UNLOCK(toep); 201*eba13bbcSJohn Baldwin free_ddp_rcv_buffer(toep, drb); 202*eba13bbcSJohn Baldwin } 203*eba13bbcSJohn Baldwin } 204*eba13bbcSJohn Baldwin 205*eba13bbcSJohn Baldwin static struct ddp_rcv_buffer * 206*eba13bbcSJohn Baldwin alloc_cached_ddp_rcv_buffer(struct toepcb *toep) 207*eba13bbcSJohn Baldwin { 208*eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb; 209*eba13bbcSJohn Baldwin 210*eba13bbcSJohn Baldwin DDP_CACHE_LOCK(toep); 211*eba13bbcSJohn Baldwin if (!TAILQ_EMPTY(&toep->ddp.cached_buffers)) { 212*eba13bbcSJohn Baldwin drb = TAILQ_FIRST(&toep->ddp.cached_buffers); 213*eba13bbcSJohn Baldwin TAILQ_REMOVE(&toep->ddp.cached_buffers, drb, link); 214*eba13bbcSJohn Baldwin toep->ddp.cached_count--; 215*eba13bbcSJohn Baldwin counter_u64_add(toep->ofld_rxq->ddp_buffer_reuse, 1); 216*eba13bbcSJohn Baldwin } else 217*eba13bbcSJohn Baldwin drb = NULL; 218*eba13bbcSJohn Baldwin DDP_CACHE_UNLOCK(toep); 219*eba13bbcSJohn Baldwin return (drb); 220*eba13bbcSJohn Baldwin } 221*eba13bbcSJohn Baldwin 222*eba13bbcSJohn Baldwin static struct ddp_rcv_buffer * 223*eba13bbcSJohn Baldwin alloc_ddp_rcv_buffer(struct toepcb *toep, int how) 224*eba13bbcSJohn Baldwin { 225*eba13bbcSJohn Baldwin struct tom_data *td = toep->td; 226*eba13bbcSJohn Baldwin struct adapter *sc = td_adapter(td); 227*eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb; 228*eba13bbcSJohn Baldwin int error; 229*eba13bbcSJohn Baldwin 230*eba13bbcSJohn Baldwin drb = malloc(sizeof(*drb), M_CXGBE, how | M_ZERO); 231*eba13bbcSJohn Baldwin if (drb == NULL) 232*eba13bbcSJohn Baldwin return (NULL); 233*eba13bbcSJohn Baldwin 234*eba13bbcSJohn Baldwin drb->buf = contigmalloc(t4_ddp_rcvbuf_len, M_CXGBE, how, 0, ~0, 235*eba13bbcSJohn Baldwin t4_ddp_rcvbuf_len, 0); 236*eba13bbcSJohn Baldwin if (drb->buf == NULL) { 237*eba13bbcSJohn Baldwin free(drb, M_CXGBE); 238*eba13bbcSJohn Baldwin return (NULL); 239*eba13bbcSJohn Baldwin } 240*eba13bbcSJohn Baldwin drb->len = t4_ddp_rcvbuf_len; 241*eba13bbcSJohn Baldwin drb->refs = 1; 242*eba13bbcSJohn Baldwin 243*eba13bbcSJohn Baldwin error = t4_alloc_page_pods_for_rcvbuf(&td->pr, drb); 244*eba13bbcSJohn Baldwin if (error != 0) { 245*eba13bbcSJohn Baldwin contigfree(drb->buf, drb->len, M_CXGBE); 246*eba13bbcSJohn Baldwin free(drb, M_CXGBE); 247*eba13bbcSJohn Baldwin return (NULL); 248*eba13bbcSJohn Baldwin } 249*eba13bbcSJohn Baldwin 250*eba13bbcSJohn Baldwin error = t4_write_page_pods_for_rcvbuf(sc, toep->ctrlq, toep->tid, drb); 251*eba13bbcSJohn Baldwin if (error != 0) { 252*eba13bbcSJohn Baldwin t4_free_page_pods(&drb->prsv); 253*eba13bbcSJohn Baldwin contigfree(drb->buf, drb->len, M_CXGBE); 254*eba13bbcSJohn Baldwin free(drb, M_CXGBE); 255*eba13bbcSJohn Baldwin return (NULL); 256*eba13bbcSJohn Baldwin } 257*eba13bbcSJohn Baldwin 258*eba13bbcSJohn Baldwin hold_toepcb(toep); 259*eba13bbcSJohn Baldwin counter_u64_add(toep->ofld_rxq->ddp_buffer_alloc, 1); 260*eba13bbcSJohn Baldwin return (drb); 261*eba13bbcSJohn Baldwin } 262*eba13bbcSJohn Baldwin 263*eba13bbcSJohn Baldwin static void 264*eba13bbcSJohn Baldwin free_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db) 265*eba13bbcSJohn Baldwin { 266*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_RCVBUF) != 0) { 267*eba13bbcSJohn Baldwin if (db->drb != NULL) 268*eba13bbcSJohn Baldwin free_ddp_rcv_buffer(toep, db->drb); 269*eba13bbcSJohn Baldwin #ifdef INVARIANTS 270*eba13bbcSJohn Baldwin db->drb = NULL; 271*eba13bbcSJohn Baldwin #endif 272*eba13bbcSJohn Baldwin return; 273*eba13bbcSJohn Baldwin } 274e682d02eSNavdeep Parhar 275dc964385SJohn Baldwin if (db->job) { 276dc964385SJohn Baldwin /* 277dc964385SJohn Baldwin * XXX: If we are un-offloading the socket then we 278dc964385SJohn Baldwin * should requeue these on the socket somehow. If we 279dc964385SJohn Baldwin * got a FIN from the remote end, then this completes 280dc964385SJohn Baldwin * any remaining requests with an EOF read. 281dc964385SJohn Baldwin */ 282dc964385SJohn Baldwin if (!aio_clear_cancel_function(db->job)) 283dc964385SJohn Baldwin ddp_complete_one(db->job, 0); 28425429e27SJohn Baldwin #ifdef INVARIANTS 28525429e27SJohn Baldwin db->job = NULL; 28625429e27SJohn Baldwin #endif 287dc964385SJohn Baldwin } 288e682d02eSNavdeep Parhar 28925429e27SJohn Baldwin if (db->ps) { 290*eba13bbcSJohn Baldwin free_pageset(toep->td, db->ps); 29125429e27SJohn Baldwin #ifdef INVARIANTS 29225429e27SJohn Baldwin db->ps = NULL; 29325429e27SJohn Baldwin #endif 29425429e27SJohn Baldwin } 295dc964385SJohn Baldwin } 296e682d02eSNavdeep Parhar 297a5a965d7SJohn Baldwin static void 298dc964385SJohn Baldwin ddp_init_toep(struct toepcb *toep) 299dc964385SJohn Baldwin { 300e682d02eSNavdeep Parhar 301125d42feSJohn Baldwin toep->ddp.flags = DDP_OK; 302125d42feSJohn Baldwin toep->ddp.active_id = -1; 303125d42feSJohn Baldwin mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF); 304*eba13bbcSJohn Baldwin mtx_init(&toep->ddp.cache_lock, "t4 ddp cache", NULL, MTX_DEF); 305dc964385SJohn Baldwin } 306dc964385SJohn Baldwin 307dc964385SJohn Baldwin void 308dc964385SJohn Baldwin ddp_uninit_toep(struct toepcb *toep) 309dc964385SJohn Baldwin { 310dc964385SJohn Baldwin 311125d42feSJohn Baldwin mtx_destroy(&toep->ddp.lock); 312*eba13bbcSJohn Baldwin mtx_destroy(&toep->ddp.cache_lock); 313e682d02eSNavdeep Parhar } 314e682d02eSNavdeep Parhar 315e682d02eSNavdeep Parhar void 316e682d02eSNavdeep Parhar release_ddp_resources(struct toepcb *toep) 317e682d02eSNavdeep Parhar { 318*eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb; 319dc964385SJohn Baldwin struct pageset *ps; 320e682d02eSNavdeep Parhar int i; 321e682d02eSNavdeep Parhar 322dc964385SJohn Baldwin DDP_LOCK(toep); 323*eba13bbcSJohn Baldwin DDP_CACHE_LOCK(toep); 32417795d82SNavdeep Parhar toep->ddp.flags |= DDP_DEAD; 325*eba13bbcSJohn Baldwin DDP_CACHE_UNLOCK(toep); 326125d42feSJohn Baldwin for (i = 0; i < nitems(toep->ddp.db); i++) { 327*eba13bbcSJohn Baldwin free_ddp_buffer(toep, &toep->ddp.db[i]); 328e682d02eSNavdeep Parhar } 329*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_AIO) != 0) { 330125d42feSJohn Baldwin while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) { 331125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); 332dc964385SJohn Baldwin free_pageset(toep->td, ps); 333e682d02eSNavdeep Parhar } 334dc964385SJohn Baldwin ddp_complete_all(toep, 0); 335*eba13bbcSJohn Baldwin } 336*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_RCVBUF) != 0) { 337*eba13bbcSJohn Baldwin DDP_CACHE_LOCK(toep); 338*eba13bbcSJohn Baldwin while ((drb = TAILQ_FIRST(&toep->ddp.cached_buffers)) != NULL) { 339*eba13bbcSJohn Baldwin TAILQ_REMOVE(&toep->ddp.cached_buffers, drb, link); 340*eba13bbcSJohn Baldwin free_ddp_rcv_buffer(toep, drb); 341*eba13bbcSJohn Baldwin } 342*eba13bbcSJohn Baldwin DDP_CACHE_UNLOCK(toep); 343*eba13bbcSJohn Baldwin } 344dc964385SJohn Baldwin DDP_UNLOCK(toep); 345dc964385SJohn Baldwin } 346dc964385SJohn Baldwin 347dc964385SJohn Baldwin #ifdef INVARIANTS 348dc964385SJohn Baldwin void 349dc964385SJohn Baldwin ddp_assert_empty(struct toepcb *toep) 350dc964385SJohn Baldwin { 351dc964385SJohn Baldwin int i; 352dc964385SJohn Baldwin 353*eba13bbcSJohn Baldwin MPASS((toep->ddp.flags & (DDP_TASK_ACTIVE | DDP_DEAD)) != DDP_TASK_ACTIVE); 354125d42feSJohn Baldwin for (i = 0; i < nitems(toep->ddp.db); i++) { 355*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_AIO) != 0) { 356125d42feSJohn Baldwin MPASS(toep->ddp.db[i].job == NULL); 357125d42feSJohn Baldwin MPASS(toep->ddp.db[i].ps == NULL); 358*eba13bbcSJohn Baldwin } else 359*eba13bbcSJohn Baldwin MPASS(toep->ddp.db[i].drb == NULL); 360dc964385SJohn Baldwin } 361*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_AIO) != 0) { 362125d42feSJohn Baldwin MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets)); 363125d42feSJohn Baldwin MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq)); 364dc964385SJohn Baldwin } 365*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_RCVBUF) != 0) 366*eba13bbcSJohn Baldwin MPASS(TAILQ_EMPTY(&toep->ddp.cached_buffers)); 367*eba13bbcSJohn Baldwin } 368dc964385SJohn Baldwin #endif 369dc964385SJohn Baldwin 370dc964385SJohn Baldwin static void 371dc964385SJohn Baldwin complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db, 372dc964385SJohn Baldwin unsigned int db_idx) 373dc964385SJohn Baldwin { 374*eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb; 375dc964385SJohn Baldwin unsigned int db_flag; 376dc964385SJohn Baldwin 377125d42feSJohn Baldwin toep->ddp.active_count--; 378125d42feSJohn Baldwin if (toep->ddp.active_id == db_idx) { 379125d42feSJohn Baldwin if (toep->ddp.active_count == 0) { 380*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_AIO) != 0) 381125d42feSJohn Baldwin KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL, 382dc964385SJohn Baldwin ("%s: active_count mismatch", __func__)); 383*eba13bbcSJohn Baldwin else 384*eba13bbcSJohn Baldwin KASSERT(toep->ddp.db[db_idx ^ 1].drb == NULL, 385*eba13bbcSJohn Baldwin ("%s: active_count mismatch", __func__)); 386125d42feSJohn Baldwin toep->ddp.active_id = -1; 387dc964385SJohn Baldwin } else 388125d42feSJohn Baldwin toep->ddp.active_id ^= 1; 3891081d276SJohn Baldwin #ifdef VERBOSE_TRACES 3908674e626SNavdeep Parhar CTR3(KTR_CXGBE, "%s: tid %u, ddp_active_id = %d", __func__, 3918674e626SNavdeep Parhar toep->tid, toep->ddp.active_id); 3921081d276SJohn Baldwin #endif 393dc964385SJohn Baldwin } else { 394125d42feSJohn Baldwin KASSERT(toep->ddp.active_count != 0 && 395125d42feSJohn Baldwin toep->ddp.active_id != -1, 396dc964385SJohn Baldwin ("%s: active count mismatch", __func__)); 397dc964385SJohn Baldwin } 398dc964385SJohn Baldwin 399*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_AIO) != 0) { 400dc964385SJohn Baldwin db->cancel_pending = 0; 401dc964385SJohn Baldwin db->job = NULL; 402dc964385SJohn Baldwin recycle_pageset(toep, db->ps); 403dc964385SJohn Baldwin db->ps = NULL; 404*eba13bbcSJohn Baldwin } else { 405*eba13bbcSJohn Baldwin drb = db->drb; 406*eba13bbcSJohn Baldwin if (atomic_fetchadd_int(&drb->refs, -1) == 1) 407*eba13bbcSJohn Baldwin recycle_ddp_rcv_buffer(toep, drb); 408*eba13bbcSJohn Baldwin db->drb = NULL; 409*eba13bbcSJohn Baldwin db->placed = 0; 410*eba13bbcSJohn Baldwin } 411dc964385SJohn Baldwin 412dc964385SJohn Baldwin db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE; 413125d42feSJohn Baldwin KASSERT(toep->ddp.flags & db_flag, 414dc964385SJohn Baldwin ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x", 415125d42feSJohn Baldwin __func__, toep, toep->ddp.flags)); 416125d42feSJohn Baldwin toep->ddp.flags &= ~db_flag; 417e682d02eSNavdeep Parhar } 418e682d02eSNavdeep Parhar 419*eba13bbcSJohn Baldwin /* Called when m_free drops the last reference. */ 420*eba13bbcSJohn Baldwin static void 421*eba13bbcSJohn Baldwin ddp_rcv_mbuf_done(struct mbuf *m) 422*eba13bbcSJohn Baldwin { 423*eba13bbcSJohn Baldwin struct toepcb *toep = m->m_ext.ext_arg1; 424*eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb = m->m_ext.ext_arg2; 425*eba13bbcSJohn Baldwin 426*eba13bbcSJohn Baldwin recycle_ddp_rcv_buffer(toep, drb); 427*eba13bbcSJohn Baldwin } 428*eba13bbcSJohn Baldwin 429*eba13bbcSJohn Baldwin static void 430*eba13bbcSJohn Baldwin queue_ddp_rcvbuf_mbuf(struct toepcb *toep, u_int db_idx, u_int len) 431*eba13bbcSJohn Baldwin { 432*eba13bbcSJohn Baldwin struct inpcb *inp = toep->inp; 433*eba13bbcSJohn Baldwin struct sockbuf *sb; 434*eba13bbcSJohn Baldwin struct ddp_buffer *db; 435*eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb; 436*eba13bbcSJohn Baldwin struct mbuf *m; 437*eba13bbcSJohn Baldwin 438*eba13bbcSJohn Baldwin m = m_gethdr(M_NOWAIT, MT_DATA); 439*eba13bbcSJohn Baldwin if (m == NULL) { 440*eba13bbcSJohn Baldwin printf("%s: failed to allocate mbuf", __func__); 441*eba13bbcSJohn Baldwin return; 442*eba13bbcSJohn Baldwin } 443*eba13bbcSJohn Baldwin m->m_pkthdr.rcvif = toep->vi->ifp; 444*eba13bbcSJohn Baldwin 445*eba13bbcSJohn Baldwin db = &toep->ddp.db[db_idx]; 446*eba13bbcSJohn Baldwin drb = db->drb; 447*eba13bbcSJohn Baldwin m_extaddref(m, (char *)drb->buf + db->placed, len, &drb->refs, 448*eba13bbcSJohn Baldwin ddp_rcv_mbuf_done, toep, drb); 449*eba13bbcSJohn Baldwin m->m_pkthdr.len = len; 450*eba13bbcSJohn Baldwin m->m_len = len; 451*eba13bbcSJohn Baldwin 452*eba13bbcSJohn Baldwin sb = &inp->inp_socket->so_rcv; 453*eba13bbcSJohn Baldwin SOCKBUF_LOCK_ASSERT(sb); 454*eba13bbcSJohn Baldwin sbappendstream_locked(sb, m, 0); 455*eba13bbcSJohn Baldwin 456*eba13bbcSJohn Baldwin db->placed += len; 457*eba13bbcSJohn Baldwin toep->ofld_rxq->rx_toe_ddp_octets += len; 458*eba13bbcSJohn Baldwin } 459*eba13bbcSJohn Baldwin 460d588c1f9SNavdeep Parhar /* XXX: handle_ddp_data code duplication */ 461d588c1f9SNavdeep Parhar void 462d588c1f9SNavdeep Parhar insert_ddp_data(struct toepcb *toep, uint32_t n) 463d588c1f9SNavdeep Parhar { 464d588c1f9SNavdeep Parhar struct inpcb *inp = toep->inp; 465d588c1f9SNavdeep Parhar struct tcpcb *tp = intotcpcb(inp); 466dc964385SJohn Baldwin struct ddp_buffer *db; 467dc964385SJohn Baldwin struct kaiocb *job; 468dc964385SJohn Baldwin size_t placed; 469dc964385SJohn Baldwin long copied; 47039d5cbdcSNavdeep Parhar unsigned int db_idx; 47139d5cbdcSNavdeep Parhar #ifdef INVARIANTS 47239d5cbdcSNavdeep Parhar unsigned int db_flag; 47339d5cbdcSNavdeep Parhar #endif 474*eba13bbcSJohn Baldwin bool ddp_rcvbuf; 475d588c1f9SNavdeep Parhar 476d588c1f9SNavdeep Parhar INP_WLOCK_ASSERT(inp); 477dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 478d588c1f9SNavdeep Parhar 479*eba13bbcSJohn Baldwin ddp_rcvbuf = (toep->ddp.flags & DDP_RCVBUF) != 0; 480d588c1f9SNavdeep Parhar tp->rcv_nxt += n; 481d588c1f9SNavdeep Parhar #ifndef USE_DDP_RX_FLOW_CONTROL 482d588c1f9SNavdeep Parhar KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__)); 483d588c1f9SNavdeep Parhar tp->rcv_wnd -= n; 484d588c1f9SNavdeep Parhar #endif 485dc964385SJohn Baldwin CTR2(KTR_CXGBE, "%s: placed %u bytes before falling out of DDP", 486dc964385SJohn Baldwin __func__, n); 487125d42feSJohn Baldwin while (toep->ddp.active_count > 0) { 488125d42feSJohn Baldwin MPASS(toep->ddp.active_id != -1); 489125d42feSJohn Baldwin db_idx = toep->ddp.active_id; 49039d5cbdcSNavdeep Parhar #ifdef INVARIANTS 491dc964385SJohn Baldwin db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE; 49239d5cbdcSNavdeep Parhar #endif 493125d42feSJohn Baldwin MPASS((toep->ddp.flags & db_flag) != 0); 494125d42feSJohn Baldwin db = &toep->ddp.db[db_idx]; 495*eba13bbcSJohn Baldwin if (ddp_rcvbuf) { 496*eba13bbcSJohn Baldwin placed = n; 497*eba13bbcSJohn Baldwin if (placed > db->drb->len - db->placed) 498*eba13bbcSJohn Baldwin placed = db->drb->len - db->placed; 499*eba13bbcSJohn Baldwin if (placed != 0) 500*eba13bbcSJohn Baldwin queue_ddp_rcvbuf_mbuf(toep, db_idx, placed); 501*eba13bbcSJohn Baldwin complete_ddp_buffer(toep, db, db_idx); 502*eba13bbcSJohn Baldwin n -= placed; 503*eba13bbcSJohn Baldwin continue; 504*eba13bbcSJohn Baldwin } 505dc964385SJohn Baldwin job = db->job; 506fe0bdd1dSJohn Baldwin copied = job->aio_received; 507dc964385SJohn Baldwin placed = n; 508dc964385SJohn Baldwin if (placed > job->uaiocb.aio_nbytes - copied) 509dc964385SJohn Baldwin placed = job->uaiocb.aio_nbytes - copied; 510c3d4aea6SJohn Baldwin if (placed > 0) { 511b1012d80SJohn Baldwin job->msgrcv = 1; 512c3d4aea6SJohn Baldwin toep->ofld_rxq->rx_aio_ddp_jobs++; 513c3d4aea6SJohn Baldwin } 514c3d4aea6SJohn Baldwin toep->ofld_rxq->rx_aio_ddp_octets += placed; 515dc964385SJohn Baldwin if (!aio_clear_cancel_function(job)) { 516dc964385SJohn Baldwin /* 517dc964385SJohn Baldwin * Update the copied length for when 518dc964385SJohn Baldwin * t4_aio_cancel_active() completes this 519dc964385SJohn Baldwin * request. 520dc964385SJohn Baldwin */ 521fe0bdd1dSJohn Baldwin job->aio_received += placed; 522dc964385SJohn Baldwin } else if (copied + placed != 0) { 523dc964385SJohn Baldwin CTR4(KTR_CXGBE, 524dc964385SJohn Baldwin "%s: completing %p (copied %ld, placed %lu)", 525dc964385SJohn Baldwin __func__, job, copied, placed); 526dc964385SJohn Baldwin /* XXX: This always completes if there is some data. */ 527dc964385SJohn Baldwin aio_complete(job, copied + placed, 0); 528dc964385SJohn Baldwin } else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) { 529125d42feSJohn Baldwin TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list); 530125d42feSJohn Baldwin toep->ddp.waiting_count++; 531dc964385SJohn Baldwin } else 532dc964385SJohn Baldwin aio_cancel(job); 533dc964385SJohn Baldwin n -= placed; 534dc964385SJohn Baldwin complete_ddp_buffer(toep, db, db_idx); 535dc964385SJohn Baldwin } 536dc964385SJohn Baldwin 537dc964385SJohn Baldwin MPASS(n == 0); 538d588c1f9SNavdeep Parhar } 539d588c1f9SNavdeep Parhar 540e682d02eSNavdeep Parhar /* SET_TCB_FIELD sent as a ULP command looks like this */ 541e682d02eSNavdeep Parhar #define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \ 542e682d02eSNavdeep Parhar sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core)) 543e682d02eSNavdeep Parhar 544e682d02eSNavdeep Parhar /* RX_DATA_ACK sent as a ULP command looks like this */ 545e682d02eSNavdeep Parhar #define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \ 546e682d02eSNavdeep Parhar sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core)) 547e682d02eSNavdeep Parhar 548e682d02eSNavdeep Parhar static inline void * 549e682d02eSNavdeep Parhar mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep, 550e682d02eSNavdeep Parhar uint64_t word, uint64_t mask, uint64_t val) 551e682d02eSNavdeep Parhar { 552e682d02eSNavdeep Parhar struct ulptx_idata *ulpsc; 553e682d02eSNavdeep Parhar struct cpl_set_tcb_field_core *req; 554e682d02eSNavdeep Parhar 555e682d02eSNavdeep Parhar ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0)); 556e682d02eSNavdeep Parhar ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16)); 557e682d02eSNavdeep Parhar 558e682d02eSNavdeep Parhar ulpsc = (struct ulptx_idata *)(ulpmc + 1); 559e682d02eSNavdeep Parhar ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 560e682d02eSNavdeep Parhar ulpsc->len = htobe32(sizeof(*req)); 561e682d02eSNavdeep Parhar 562e682d02eSNavdeep Parhar req = (struct cpl_set_tcb_field_core *)(ulpsc + 1); 563e682d02eSNavdeep Parhar OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid)); 564e682d02eSNavdeep Parhar req->reply_ctrl = htobe16(V_NO_REPLY(1) | 565e682d02eSNavdeep Parhar V_QUEUENO(toep->ofld_rxq->iq.abs_id)); 566e682d02eSNavdeep Parhar req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0)); 567e682d02eSNavdeep Parhar req->mask = htobe64(mask); 568e682d02eSNavdeep Parhar req->val = htobe64(val); 569e682d02eSNavdeep Parhar 570e682d02eSNavdeep Parhar ulpsc = (struct ulptx_idata *)(req + 1); 571e682d02eSNavdeep Parhar if (LEN__SET_TCB_FIELD_ULP % 16) { 572e682d02eSNavdeep Parhar ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 573e682d02eSNavdeep Parhar ulpsc->len = htobe32(0); 574e682d02eSNavdeep Parhar return (ulpsc + 1); 575e682d02eSNavdeep Parhar } 576e682d02eSNavdeep Parhar return (ulpsc); 577e682d02eSNavdeep Parhar } 578e682d02eSNavdeep Parhar 579e682d02eSNavdeep Parhar static inline void * 580e682d02eSNavdeep Parhar mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep) 581e682d02eSNavdeep Parhar { 582e682d02eSNavdeep Parhar struct ulptx_idata *ulpsc; 583e682d02eSNavdeep Parhar struct cpl_rx_data_ack_core *req; 584e682d02eSNavdeep Parhar 585e682d02eSNavdeep Parhar ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0)); 586e682d02eSNavdeep Parhar ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16)); 587e682d02eSNavdeep Parhar 588e682d02eSNavdeep Parhar ulpsc = (struct ulptx_idata *)(ulpmc + 1); 589e682d02eSNavdeep Parhar ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 590e682d02eSNavdeep Parhar ulpsc->len = htobe32(sizeof(*req)); 591e682d02eSNavdeep Parhar 592e682d02eSNavdeep Parhar req = (struct cpl_rx_data_ack_core *)(ulpsc + 1); 593e682d02eSNavdeep Parhar OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid)); 594e682d02eSNavdeep Parhar req->credit_dack = htobe32(F_RX_MODULATE_RX); 595e682d02eSNavdeep Parhar 596e682d02eSNavdeep Parhar ulpsc = (struct ulptx_idata *)(req + 1); 597e682d02eSNavdeep Parhar if (LEN__RX_DATA_ACK_ULP % 16) { 598e682d02eSNavdeep Parhar ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP)); 599e682d02eSNavdeep Parhar ulpsc->len = htobe32(0); 600e682d02eSNavdeep Parhar return (ulpsc + 1); 601e682d02eSNavdeep Parhar } 602e682d02eSNavdeep Parhar return (ulpsc); 603e682d02eSNavdeep Parhar } 604e682d02eSNavdeep Parhar 605e682d02eSNavdeep Parhar static struct wrqe * 606e682d02eSNavdeep Parhar mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx, 607*eba13bbcSJohn Baldwin struct ppod_reservation *prsv, int offset, uint32_t len, 608*eba13bbcSJohn Baldwin uint64_t ddp_flags, uint64_t ddp_flags_mask) 609e682d02eSNavdeep Parhar { 610e682d02eSNavdeep Parhar struct wrqe *wr; 611e682d02eSNavdeep Parhar struct work_request_hdr *wrh; 612e682d02eSNavdeep Parhar struct ulp_txpkt *ulpmc; 613*eba13bbcSJohn Baldwin int wrlen; 614e682d02eSNavdeep Parhar 615e682d02eSNavdeep Parhar KASSERT(db_idx == 0 || db_idx == 1, 616e682d02eSNavdeep Parhar ("%s: bad DDP buffer index %d", __func__, db_idx)); 617e682d02eSNavdeep Parhar 618e682d02eSNavdeep Parhar /* 619e682d02eSNavdeep Parhar * We'll send a compound work request that has 3 SET_TCB_FIELDs and an 620e682d02eSNavdeep Parhar * RX_DATA_ACK (with RX_MODULATE to speed up delivery). 621e682d02eSNavdeep Parhar * 622e682d02eSNavdeep Parhar * The work request header is 16B and always ends at a 16B boundary. 623e682d02eSNavdeep Parhar * The ULPTX master commands that follow must all end at 16B boundaries 624e682d02eSNavdeep Parhar * too so we round up the size to 16. 625e682d02eSNavdeep Parhar */ 626*eba13bbcSJohn Baldwin wrlen = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) + 627d14b0ac1SNavdeep Parhar roundup2(LEN__RX_DATA_ACK_ULP, 16); 628e682d02eSNavdeep Parhar 629*eba13bbcSJohn Baldwin wr = alloc_wrqe(wrlen, toep->ctrlq); 630e682d02eSNavdeep Parhar if (wr == NULL) 631e682d02eSNavdeep Parhar return (NULL); 632e682d02eSNavdeep Parhar wrh = wrtod(wr); 633*eba13bbcSJohn Baldwin INIT_ULPTX_WRH(wrh, wrlen, 1, 0); /* atomic */ 634e682d02eSNavdeep Parhar ulpmc = (struct ulp_txpkt *)(wrh + 1); 635e682d02eSNavdeep Parhar 636e682d02eSNavdeep Parhar /* Write the buffer's tag */ 637e682d02eSNavdeep Parhar ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 638e682d02eSNavdeep Parhar W_TCB_RX_DDP_BUF0_TAG + db_idx, 639e682d02eSNavdeep Parhar V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG), 640*eba13bbcSJohn Baldwin V_TCB_RX_DDP_BUF0_TAG(prsv->prsv_tag)); 641e682d02eSNavdeep Parhar 642e682d02eSNavdeep Parhar /* Update the current offset in the DDP buffer and its total length */ 643e682d02eSNavdeep Parhar if (db_idx == 0) 644e682d02eSNavdeep Parhar ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 645e682d02eSNavdeep Parhar W_TCB_RX_DDP_BUF0_OFFSET, 646e682d02eSNavdeep Parhar V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) | 647e682d02eSNavdeep Parhar V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN), 648e682d02eSNavdeep Parhar V_TCB_RX_DDP_BUF0_OFFSET(offset) | 649*eba13bbcSJohn Baldwin V_TCB_RX_DDP_BUF0_LEN(len)); 650e682d02eSNavdeep Parhar else 651e682d02eSNavdeep Parhar ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 652e682d02eSNavdeep Parhar W_TCB_RX_DDP_BUF1_OFFSET, 653e682d02eSNavdeep Parhar V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) | 654e682d02eSNavdeep Parhar V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32), 655e682d02eSNavdeep Parhar V_TCB_RX_DDP_BUF1_OFFSET(offset) | 656*eba13bbcSJohn Baldwin V_TCB_RX_DDP_BUF1_LEN((u64)len << 32)); 657e682d02eSNavdeep Parhar 658e682d02eSNavdeep Parhar /* Update DDP flags */ 659e682d02eSNavdeep Parhar ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS, 660dc964385SJohn Baldwin ddp_flags_mask, ddp_flags); 661e682d02eSNavdeep Parhar 662e682d02eSNavdeep Parhar /* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */ 663e682d02eSNavdeep Parhar ulpmc = mk_rx_data_ack_ulp(ulpmc, toep); 664e682d02eSNavdeep Parhar 665e682d02eSNavdeep Parhar return (wr); 666e682d02eSNavdeep Parhar } 667e682d02eSNavdeep Parhar 668e682d02eSNavdeep Parhar static int 669*eba13bbcSJohn Baldwin handle_ddp_data_aio(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, 670*eba13bbcSJohn Baldwin int len) 671e682d02eSNavdeep Parhar { 672e682d02eSNavdeep Parhar uint32_t report = be32toh(ddp_report); 673dc964385SJohn Baldwin unsigned int db_idx; 674e682d02eSNavdeep Parhar struct inpcb *inp = toep->inp; 675dc964385SJohn Baldwin struct ddp_buffer *db; 676e682d02eSNavdeep Parhar struct tcpcb *tp; 677e682d02eSNavdeep Parhar struct socket *so; 678e682d02eSNavdeep Parhar struct sockbuf *sb; 679dc964385SJohn Baldwin struct kaiocb *job; 680dc964385SJohn Baldwin long copied; 681e682d02eSNavdeep Parhar 682dc964385SJohn Baldwin db_idx = report & F_DDP_BUF_IDX ? 1 : 0; 683e682d02eSNavdeep Parhar 684e682d02eSNavdeep Parhar if (__predict_false(!(report & F_DDP_INV))) 685e682d02eSNavdeep Parhar CXGBE_UNIMPLEMENTED("DDP buffer still valid"); 686e682d02eSNavdeep Parhar 687e682d02eSNavdeep Parhar INP_WLOCK(inp); 688e682d02eSNavdeep Parhar so = inp_inpcbtosocket(inp); 689e682d02eSNavdeep Parhar sb = &so->so_rcv; 690dc964385SJohn Baldwin DDP_LOCK(toep); 691dc964385SJohn Baldwin 692125d42feSJohn Baldwin KASSERT(toep->ddp.active_id == db_idx, 693dc964385SJohn Baldwin ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx, 694125d42feSJohn Baldwin toep->ddp.active_id, toep->tid)); 695125d42feSJohn Baldwin db = &toep->ddp.db[db_idx]; 696dc964385SJohn Baldwin job = db->job; 697dc964385SJohn Baldwin 69853af6903SGleb Smirnoff if (__predict_false(inp->inp_flags & INP_DROPPED)) { 699e682d02eSNavdeep Parhar /* 700dc964385SJohn Baldwin * This can happen due to an administrative tcpdrop(8). 701dc964385SJohn Baldwin * Just fail the request with ECONNRESET. 702e682d02eSNavdeep Parhar */ 703e682d02eSNavdeep Parhar CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x", 704e682d02eSNavdeep Parhar __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags); 705dc964385SJohn Baldwin if (aio_clear_cancel_function(job)) 706dc964385SJohn Baldwin ddp_complete_one(job, ECONNRESET); 707dc964385SJohn Baldwin goto completed; 708e682d02eSNavdeep Parhar } 709e682d02eSNavdeep Parhar 710e682d02eSNavdeep Parhar tp = intotcpcb(inp); 7118fb15ddbSJohn Baldwin 7128fb15ddbSJohn Baldwin /* 7138fb15ddbSJohn Baldwin * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the 7148fb15ddbSJohn Baldwin * sequence number of the next byte to receive. The length of 7158fb15ddbSJohn Baldwin * the data received for this message must be computed by 7168fb15ddbSJohn Baldwin * comparing the new and old values of rcv_nxt. 7178fb15ddbSJohn Baldwin * 7188fb15ddbSJohn Baldwin * For RX_DATA_DDP, len might be non-zero, but it is only the 7198fb15ddbSJohn Baldwin * length of the most recent DMA. It does not include the 7208fb15ddbSJohn Baldwin * total length of the data received since the previous update 7218fb15ddbSJohn Baldwin * for this DDP buffer. rcv_nxt is the sequence number of the 7228fb15ddbSJohn Baldwin * first received byte from the most recent DMA. 7238fb15ddbSJohn Baldwin */ 724e682d02eSNavdeep Parhar len += be32toh(rcv_nxt) - tp->rcv_nxt; 725e682d02eSNavdeep Parhar tp->rcv_nxt += len; 726e682d02eSNavdeep Parhar tp->t_rcvtime = ticks; 727e682d02eSNavdeep Parhar #ifndef USE_DDP_RX_FLOW_CONTROL 728e682d02eSNavdeep Parhar KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__)); 729e682d02eSNavdeep Parhar tp->rcv_wnd -= len; 730e682d02eSNavdeep Parhar #endif 731dc964385SJohn Baldwin #ifdef VERBOSE_TRACES 7328674e626SNavdeep Parhar CTR5(KTR_CXGBE, "%s: tid %u, DDP[%d] placed %d bytes (%#x)", __func__, 7338674e626SNavdeep Parhar toep->tid, db_idx, len, report); 734dc964385SJohn Baldwin #endif 735e682d02eSNavdeep Parhar 73669a08863SJohn Baldwin /* receive buffer autosize */ 737a342904bSNavdeep Parhar MPASS(toep->vnet == so->so_vnet); 738a342904bSNavdeep Parhar CURVNET_SET(toep->vnet); 739dc964385SJohn Baldwin SOCKBUF_LOCK(sb); 74069a08863SJohn Baldwin if (sb->sb_flags & SB_AUTOSIZE && 74169a08863SJohn Baldwin V_tcp_do_autorcvbuf && 74269a08863SJohn Baldwin sb->sb_hiwat < V_tcp_autorcvbuf_max && 74369a08863SJohn Baldwin len > (sbspace(sb) / 8 * 7)) { 744be09e82aSNavdeep Parhar struct adapter *sc = td_adapter(toep->td); 74569a08863SJohn Baldwin unsigned int hiwat = sb->sb_hiwat; 746be09e82aSNavdeep Parhar unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc, 74769a08863SJohn Baldwin V_tcp_autorcvbuf_max); 74869a08863SJohn Baldwin 74943283184SGleb Smirnoff if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 75069a08863SJohn Baldwin sb->sb_flags &= ~SB_AUTOSIZE; 75169a08863SJohn Baldwin } 752dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 753dc964385SJohn Baldwin CURVNET_RESTORE(); 75469a08863SJohn Baldwin 755b1012d80SJohn Baldwin job->msgrcv = 1; 756c3d4aea6SJohn Baldwin toep->ofld_rxq->rx_aio_ddp_jobs++; 757c3d4aea6SJohn Baldwin toep->ofld_rxq->rx_aio_ddp_octets += len; 758dc964385SJohn Baldwin if (db->cancel_pending) { 759dc964385SJohn Baldwin /* 760dc964385SJohn Baldwin * Update the job's length but defer completion to the 761dc964385SJohn Baldwin * TCB_RPL callback. 762dc964385SJohn Baldwin */ 763fe0bdd1dSJohn Baldwin job->aio_received += len; 764dc964385SJohn Baldwin goto out; 765dc964385SJohn Baldwin } else if (!aio_clear_cancel_function(job)) { 766dc964385SJohn Baldwin /* 767dc964385SJohn Baldwin * Update the copied length for when 768dc964385SJohn Baldwin * t4_aio_cancel_active() completes this request. 769dc964385SJohn Baldwin */ 770fe0bdd1dSJohn Baldwin job->aio_received += len; 771dc964385SJohn Baldwin } else { 772fe0bdd1dSJohn Baldwin copied = job->aio_received; 773dc964385SJohn Baldwin #ifdef VERBOSE_TRACES 7748674e626SNavdeep Parhar CTR5(KTR_CXGBE, 7758674e626SNavdeep Parhar "%s: tid %u, completing %p (copied %ld, placed %d)", 7768674e626SNavdeep Parhar __func__, toep->tid, job, copied, len); 777dc964385SJohn Baldwin #endif 778dc964385SJohn Baldwin aio_complete(job, copied + len, 0); 779dc964385SJohn Baldwin t4_rcvd(&toep->td->tod, tp); 780dc964385SJohn Baldwin } 781dc964385SJohn Baldwin 782dc964385SJohn Baldwin completed: 783dc964385SJohn Baldwin complete_ddp_buffer(toep, db, db_idx); 784125d42feSJohn Baldwin if (toep->ddp.waiting_count > 0) 785dc964385SJohn Baldwin ddp_queue_toep(toep); 786dc964385SJohn Baldwin out: 787dc964385SJohn Baldwin DDP_UNLOCK(toep); 788e682d02eSNavdeep Parhar INP_WUNLOCK(inp); 789dc964385SJohn Baldwin 790e682d02eSNavdeep Parhar return (0); 791e682d02eSNavdeep Parhar } 792e682d02eSNavdeep Parhar 793*eba13bbcSJohn Baldwin static bool 794*eba13bbcSJohn Baldwin queue_ddp_rcvbuf(struct toepcb *toep, struct ddp_rcv_buffer *drb) 795*eba13bbcSJohn Baldwin { 796*eba13bbcSJohn Baldwin struct adapter *sc = td_adapter(toep->td); 797*eba13bbcSJohn Baldwin struct ddp_buffer *db; 798*eba13bbcSJohn Baldwin struct wrqe *wr; 799*eba13bbcSJohn Baldwin uint64_t ddp_flags, ddp_flags_mask; 800*eba13bbcSJohn Baldwin int buf_flag, db_idx; 801*eba13bbcSJohn Baldwin 802*eba13bbcSJohn Baldwin DDP_ASSERT_LOCKED(toep); 803*eba13bbcSJohn Baldwin 804*eba13bbcSJohn Baldwin KASSERT((toep->ddp.flags & DDP_DEAD) == 0, ("%s: DDP_DEAD", __func__)); 805*eba13bbcSJohn Baldwin KASSERT(toep->ddp.active_count < nitems(toep->ddp.db), 806*eba13bbcSJohn Baldwin ("%s: no empty DDP buffer slot", __func__)); 807*eba13bbcSJohn Baldwin 808*eba13bbcSJohn Baldwin /* Determine which DDP buffer to use. */ 809*eba13bbcSJohn Baldwin if (toep->ddp.db[0].drb == NULL) { 810*eba13bbcSJohn Baldwin db_idx = 0; 811*eba13bbcSJohn Baldwin } else { 812*eba13bbcSJohn Baldwin MPASS(toep->ddp.db[1].drb == NULL); 813*eba13bbcSJohn Baldwin db_idx = 1; 814*eba13bbcSJohn Baldwin } 815*eba13bbcSJohn Baldwin 816*eba13bbcSJohn Baldwin /* 817*eba13bbcSJohn Baldwin * Permit PSH to trigger a partial completion without 818*eba13bbcSJohn Baldwin * invalidating the rest of the buffer, but disable the PUSH 819*eba13bbcSJohn Baldwin * timer. 820*eba13bbcSJohn Baldwin */ 821*eba13bbcSJohn Baldwin ddp_flags = 0; 822*eba13bbcSJohn Baldwin ddp_flags_mask = 0; 823*eba13bbcSJohn Baldwin if (db_idx == 0) { 824*eba13bbcSJohn Baldwin ddp_flags |= V_TF_DDP_PSH_NO_INVALIDATE0(1) | 825*eba13bbcSJohn Baldwin V_TF_DDP_PUSH_DISABLE_0(0) | V_TF_DDP_PSHF_ENABLE_0(1) | 826*eba13bbcSJohn Baldwin V_TF_DDP_BUF0_VALID(1); 827*eba13bbcSJohn Baldwin ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) | 828*eba13bbcSJohn Baldwin V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) | 829*eba13bbcSJohn Baldwin V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1); 830*eba13bbcSJohn Baldwin buf_flag = DDP_BUF0_ACTIVE; 831*eba13bbcSJohn Baldwin } else { 832*eba13bbcSJohn Baldwin ddp_flags |= V_TF_DDP_PSH_NO_INVALIDATE1(1) | 833*eba13bbcSJohn Baldwin V_TF_DDP_PUSH_DISABLE_1(0) | V_TF_DDP_PSHF_ENABLE_1(1) | 834*eba13bbcSJohn Baldwin V_TF_DDP_BUF1_VALID(1); 835*eba13bbcSJohn Baldwin ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) | 836*eba13bbcSJohn Baldwin V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) | 837*eba13bbcSJohn Baldwin V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1); 838*eba13bbcSJohn Baldwin buf_flag = DDP_BUF1_ACTIVE; 839*eba13bbcSJohn Baldwin } 840*eba13bbcSJohn Baldwin MPASS((toep->ddp.flags & buf_flag) == 0); 841*eba13bbcSJohn Baldwin if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) { 842*eba13bbcSJohn Baldwin MPASS(db_idx == 0); 843*eba13bbcSJohn Baldwin MPASS(toep->ddp.active_id == -1); 844*eba13bbcSJohn Baldwin MPASS(toep->ddp.active_count == 0); 845*eba13bbcSJohn Baldwin ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1); 846*eba13bbcSJohn Baldwin } 847*eba13bbcSJohn Baldwin 848*eba13bbcSJohn Baldwin /* 849*eba13bbcSJohn Baldwin * The TID for this connection should still be valid. If 850*eba13bbcSJohn Baldwin * DDP_DEAD is set, SBS_CANTRCVMORE should be set, so we 851*eba13bbcSJohn Baldwin * shouldn't be this far anyway. 852*eba13bbcSJohn Baldwin */ 853*eba13bbcSJohn Baldwin wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &drb->prsv, 0, drb->len, 854*eba13bbcSJohn Baldwin ddp_flags, ddp_flags_mask); 855*eba13bbcSJohn Baldwin if (wr == NULL) { 856*eba13bbcSJohn Baldwin recycle_ddp_rcv_buffer(toep, drb); 857*eba13bbcSJohn Baldwin printf("%s: mk_update_tcb_for_ddp failed\n", __func__); 858*eba13bbcSJohn Baldwin return (false); 859*eba13bbcSJohn Baldwin } 860*eba13bbcSJohn Baldwin 861*eba13bbcSJohn Baldwin #ifdef VERBOSE_TRACES 862*eba13bbcSJohn Baldwin CTR(KTR_CXGBE, 863*eba13bbcSJohn Baldwin "%s: tid %u, scheduling DDP[%d] (flags %#lx/%#lx)", __func__, 864*eba13bbcSJohn Baldwin toep->tid, db_idx, ddp_flags, ddp_flags_mask); 865*eba13bbcSJohn Baldwin #endif 866*eba13bbcSJohn Baldwin /* 867*eba13bbcSJohn Baldwin * Hold a reference on scheduled buffers that is dropped in 868*eba13bbcSJohn Baldwin * complete_ddp_buffer. 869*eba13bbcSJohn Baldwin */ 870*eba13bbcSJohn Baldwin drb->refs = 1; 871*eba13bbcSJohn Baldwin 872*eba13bbcSJohn Baldwin /* Give the chip the go-ahead. */ 873*eba13bbcSJohn Baldwin t4_wrq_tx(sc, wr); 874*eba13bbcSJohn Baldwin db = &toep->ddp.db[db_idx]; 875*eba13bbcSJohn Baldwin db->drb = drb; 876*eba13bbcSJohn Baldwin toep->ddp.flags |= buf_flag; 877*eba13bbcSJohn Baldwin toep->ddp.active_count++; 878*eba13bbcSJohn Baldwin if (toep->ddp.active_count == 1) { 879*eba13bbcSJohn Baldwin MPASS(toep->ddp.active_id == -1); 880*eba13bbcSJohn Baldwin toep->ddp.active_id = db_idx; 881*eba13bbcSJohn Baldwin CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__, 882*eba13bbcSJohn Baldwin toep->ddp.active_id); 883*eba13bbcSJohn Baldwin } 884*eba13bbcSJohn Baldwin return (true); 885*eba13bbcSJohn Baldwin } 886*eba13bbcSJohn Baldwin 887*eba13bbcSJohn Baldwin static int 888*eba13bbcSJohn Baldwin handle_ddp_data_rcvbuf(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, 889*eba13bbcSJohn Baldwin int len) 890*eba13bbcSJohn Baldwin { 891*eba13bbcSJohn Baldwin uint32_t report = be32toh(ddp_report); 892*eba13bbcSJohn Baldwin struct inpcb *inp = toep->inp; 893*eba13bbcSJohn Baldwin struct tcpcb *tp; 894*eba13bbcSJohn Baldwin struct socket *so; 895*eba13bbcSJohn Baldwin struct sockbuf *sb; 896*eba13bbcSJohn Baldwin struct ddp_buffer *db; 897*eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb; 898*eba13bbcSJohn Baldwin unsigned int db_idx; 899*eba13bbcSJohn Baldwin bool invalidated; 900*eba13bbcSJohn Baldwin 901*eba13bbcSJohn Baldwin db_idx = report & F_DDP_BUF_IDX ? 1 : 0; 902*eba13bbcSJohn Baldwin 903*eba13bbcSJohn Baldwin invalidated = (report & F_DDP_INV) != 0; 904*eba13bbcSJohn Baldwin 905*eba13bbcSJohn Baldwin INP_WLOCK(inp); 906*eba13bbcSJohn Baldwin so = inp_inpcbtosocket(inp); 907*eba13bbcSJohn Baldwin sb = &so->so_rcv; 908*eba13bbcSJohn Baldwin DDP_LOCK(toep); 909*eba13bbcSJohn Baldwin 910*eba13bbcSJohn Baldwin KASSERT(toep->ddp.active_id == db_idx, 911*eba13bbcSJohn Baldwin ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx, 912*eba13bbcSJohn Baldwin toep->ddp.active_id, toep->tid)); 913*eba13bbcSJohn Baldwin db = &toep->ddp.db[db_idx]; 914*eba13bbcSJohn Baldwin 915*eba13bbcSJohn Baldwin if (__predict_false(inp->inp_flags & INP_DROPPED)) { 916*eba13bbcSJohn Baldwin /* 917*eba13bbcSJohn Baldwin * This can happen due to an administrative tcpdrop(8). 918*eba13bbcSJohn Baldwin * Just ignore the received data. 919*eba13bbcSJohn Baldwin */ 920*eba13bbcSJohn Baldwin CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x", 921*eba13bbcSJohn Baldwin __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags); 922*eba13bbcSJohn Baldwin if (invalidated) 923*eba13bbcSJohn Baldwin complete_ddp_buffer(toep, db, db_idx); 924*eba13bbcSJohn Baldwin goto out; 925*eba13bbcSJohn Baldwin } 926*eba13bbcSJohn Baldwin 927*eba13bbcSJohn Baldwin tp = intotcpcb(inp); 928*eba13bbcSJohn Baldwin 929*eba13bbcSJohn Baldwin /* 930*eba13bbcSJohn Baldwin * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the 931*eba13bbcSJohn Baldwin * sequence number of the next byte to receive. The length of 932*eba13bbcSJohn Baldwin * the data received for this message must be computed by 933*eba13bbcSJohn Baldwin * comparing the new and old values of rcv_nxt. 934*eba13bbcSJohn Baldwin * 935*eba13bbcSJohn Baldwin * For RX_DATA_DDP, len might be non-zero, but it is only the 936*eba13bbcSJohn Baldwin * length of the most recent DMA. It does not include the 937*eba13bbcSJohn Baldwin * total length of the data received since the previous update 938*eba13bbcSJohn Baldwin * for this DDP buffer. rcv_nxt is the sequence number of the 939*eba13bbcSJohn Baldwin * first received byte from the most recent DMA. 940*eba13bbcSJohn Baldwin */ 941*eba13bbcSJohn Baldwin len += be32toh(rcv_nxt) - tp->rcv_nxt; 942*eba13bbcSJohn Baldwin tp->rcv_nxt += len; 943*eba13bbcSJohn Baldwin tp->t_rcvtime = ticks; 944*eba13bbcSJohn Baldwin #ifndef USE_DDP_RX_FLOW_CONTROL 945*eba13bbcSJohn Baldwin KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__)); 946*eba13bbcSJohn Baldwin tp->rcv_wnd -= len; 947*eba13bbcSJohn Baldwin #endif 948*eba13bbcSJohn Baldwin #ifdef VERBOSE_TRACES 949*eba13bbcSJohn Baldwin CTR5(KTR_CXGBE, "%s: tid %u, DDP[%d] placed %d bytes (%#x)", __func__, 950*eba13bbcSJohn Baldwin toep->tid, db_idx, len, report); 951*eba13bbcSJohn Baldwin #endif 952*eba13bbcSJohn Baldwin 953*eba13bbcSJohn Baldwin /* receive buffer autosize */ 954*eba13bbcSJohn Baldwin MPASS(toep->vnet == so->so_vnet); 955*eba13bbcSJohn Baldwin CURVNET_SET(toep->vnet); 956*eba13bbcSJohn Baldwin SOCKBUF_LOCK(sb); 957*eba13bbcSJohn Baldwin if (sb->sb_flags & SB_AUTOSIZE && 958*eba13bbcSJohn Baldwin V_tcp_do_autorcvbuf && 959*eba13bbcSJohn Baldwin sb->sb_hiwat < V_tcp_autorcvbuf_max && 960*eba13bbcSJohn Baldwin len > (sbspace(sb) / 8 * 7)) { 961*eba13bbcSJohn Baldwin struct adapter *sc = td_adapter(toep->td); 962*eba13bbcSJohn Baldwin unsigned int hiwat = sb->sb_hiwat; 963*eba13bbcSJohn Baldwin unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc, 964*eba13bbcSJohn Baldwin V_tcp_autorcvbuf_max); 965*eba13bbcSJohn Baldwin 966*eba13bbcSJohn Baldwin if (!sbreserve_locked(so, SO_RCV, newsize, NULL)) 967*eba13bbcSJohn Baldwin sb->sb_flags &= ~SB_AUTOSIZE; 968*eba13bbcSJohn Baldwin } 969*eba13bbcSJohn Baldwin 970*eba13bbcSJohn Baldwin if (len > 0) { 971*eba13bbcSJohn Baldwin queue_ddp_rcvbuf_mbuf(toep, db_idx, len); 972*eba13bbcSJohn Baldwin t4_rcvd_locked(&toep->td->tod, tp); 973*eba13bbcSJohn Baldwin } 974*eba13bbcSJohn Baldwin sorwakeup_locked(so); 975*eba13bbcSJohn Baldwin SOCKBUF_UNLOCK_ASSERT(sb); 976*eba13bbcSJohn Baldwin CURVNET_RESTORE(); 977*eba13bbcSJohn Baldwin 978*eba13bbcSJohn Baldwin if (invalidated) 979*eba13bbcSJohn Baldwin complete_ddp_buffer(toep, db, db_idx); 980*eba13bbcSJohn Baldwin else 981*eba13bbcSJohn Baldwin KASSERT(db->placed < db->drb->len, 982*eba13bbcSJohn Baldwin ("%s: full DDP buffer not invalidated", __func__)); 983*eba13bbcSJohn Baldwin 984*eba13bbcSJohn Baldwin if (toep->ddp.active_count != nitems(toep->ddp.db)) { 985*eba13bbcSJohn Baldwin drb = alloc_cached_ddp_rcv_buffer(toep); 986*eba13bbcSJohn Baldwin if (drb == NULL) 987*eba13bbcSJohn Baldwin drb = alloc_ddp_rcv_buffer(toep, M_NOWAIT); 988*eba13bbcSJohn Baldwin if (drb == NULL) 989*eba13bbcSJohn Baldwin ddp_queue_toep(toep); 990*eba13bbcSJohn Baldwin else { 991*eba13bbcSJohn Baldwin if (!queue_ddp_rcvbuf(toep, drb)) { 992*eba13bbcSJohn Baldwin ddp_queue_toep(toep); 993*eba13bbcSJohn Baldwin } 994*eba13bbcSJohn Baldwin } 995*eba13bbcSJohn Baldwin } 996*eba13bbcSJohn Baldwin out: 997*eba13bbcSJohn Baldwin DDP_UNLOCK(toep); 998*eba13bbcSJohn Baldwin INP_WUNLOCK(inp); 999*eba13bbcSJohn Baldwin 1000*eba13bbcSJohn Baldwin return (0); 1001*eba13bbcSJohn Baldwin } 1002*eba13bbcSJohn Baldwin 1003*eba13bbcSJohn Baldwin static int 1004*eba13bbcSJohn Baldwin handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len) 1005*eba13bbcSJohn Baldwin { 1006*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_RCVBUF) != 0) 1007*eba13bbcSJohn Baldwin return (handle_ddp_data_rcvbuf(toep, ddp_report, rcv_nxt, len)); 1008*eba13bbcSJohn Baldwin else 1009*eba13bbcSJohn Baldwin return (handle_ddp_data_aio(toep, ddp_report, rcv_nxt, len)); 1010*eba13bbcSJohn Baldwin } 1011*eba13bbcSJohn Baldwin 1012b12c0a9eSJohn Baldwin void 1013dc964385SJohn Baldwin handle_ddp_indicate(struct toepcb *toep) 1014b12c0a9eSJohn Baldwin { 1015b12c0a9eSJohn Baldwin 1016dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 1017*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_RCVBUF) != 0) { 1018*eba13bbcSJohn Baldwin /* 1019*eba13bbcSJohn Baldwin * Indicates are not meaningful for RCVBUF since 1020*eba13bbcSJohn Baldwin * buffers are activated when the socket option is 1021*eba13bbcSJohn Baldwin * set. 1022*eba13bbcSJohn Baldwin */ 1023*eba13bbcSJohn Baldwin return; 1024*eba13bbcSJohn Baldwin } 1025*eba13bbcSJohn Baldwin 1026125d42feSJohn Baldwin MPASS(toep->ddp.active_count == 0); 1027125d42feSJohn Baldwin MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0); 1028125d42feSJohn Baldwin if (toep->ddp.waiting_count == 0) { 1029dc964385SJohn Baldwin /* 1030dc964385SJohn Baldwin * The pending requests that triggered the request for an 1031dc964385SJohn Baldwin * an indicate were cancelled. Those cancels should have 1032dc964385SJohn Baldwin * already disabled DDP. Just ignore this as the data is 1033dc964385SJohn Baldwin * going into the socket buffer anyway. 1034dc964385SJohn Baldwin */ 1035dc964385SJohn Baldwin return; 1036dc964385SJohn Baldwin } 1037dc964385SJohn Baldwin CTR3(KTR_CXGBE, "%s: tid %d indicated (%d waiting)", __func__, 1038125d42feSJohn Baldwin toep->tid, toep->ddp.waiting_count); 1039dc964385SJohn Baldwin ddp_queue_toep(toep); 1040dc964385SJohn Baldwin } 1041dc964385SJohn Baldwin 1042017902fcSJohn Baldwin CTASSERT(CPL_COOKIE_DDP0 + 1 == CPL_COOKIE_DDP1); 10434535e804SNavdeep Parhar 10444535e804SNavdeep Parhar static int 10454535e804SNavdeep Parhar do_ddp_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1046dc964385SJohn Baldwin { 10474535e804SNavdeep Parhar struct adapter *sc = iq->adapter; 10484535e804SNavdeep Parhar const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1); 10494535e804SNavdeep Parhar unsigned int tid = GET_TID(cpl); 1050dc964385SJohn Baldwin unsigned int db_idx; 10514535e804SNavdeep Parhar struct toepcb *toep; 10524535e804SNavdeep Parhar struct inpcb *inp; 1053dc964385SJohn Baldwin struct ddp_buffer *db; 1054dc964385SJohn Baldwin struct kaiocb *job; 1055dc964385SJohn Baldwin long copied; 1056dc964385SJohn Baldwin 1057dc964385SJohn Baldwin if (cpl->status != CPL_ERR_NONE) 1058dc964385SJohn Baldwin panic("XXX: tcp_rpl failed: %d", cpl->status); 1059dc964385SJohn Baldwin 10604535e804SNavdeep Parhar toep = lookup_tid(sc, tid); 10614535e804SNavdeep Parhar inp = toep->inp; 1062dc964385SJohn Baldwin switch (cpl->cookie) { 1063017902fcSJohn Baldwin case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP0): 1064017902fcSJohn Baldwin case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP1): 1065dc964385SJohn Baldwin /* 1066dc964385SJohn Baldwin * XXX: This duplicates a lot of code with handle_ddp_data(). 1067dc964385SJohn Baldwin */ 1068*eba13bbcSJohn Baldwin KASSERT((toep->ddp.flags & DDP_AIO) != 0, 1069*eba13bbcSJohn Baldwin ("%s: DDP_RCVBUF", __func__)); 1070017902fcSJohn Baldwin db_idx = G_COOKIE(cpl->cookie) - CPL_COOKIE_DDP0; 10714535e804SNavdeep Parhar MPASS(db_idx < nitems(toep->ddp.db)); 1072dc964385SJohn Baldwin INP_WLOCK(inp); 1073dc964385SJohn Baldwin DDP_LOCK(toep); 1074125d42feSJohn Baldwin db = &toep->ddp.db[db_idx]; 1075dc964385SJohn Baldwin 1076dc964385SJohn Baldwin /* 1077dc964385SJohn Baldwin * handle_ddp_data() should leave the job around until 1078dc964385SJohn Baldwin * this callback runs once a cancel is pending. 1079dc964385SJohn Baldwin */ 1080dc964385SJohn Baldwin MPASS(db != NULL); 1081dc964385SJohn Baldwin MPASS(db->job != NULL); 1082dc964385SJohn Baldwin MPASS(db->cancel_pending); 1083dc964385SJohn Baldwin 1084dc964385SJohn Baldwin /* 1085dc964385SJohn Baldwin * XXX: It's not clear what happens if there is data 1086dc964385SJohn Baldwin * placed when the buffer is invalidated. I suspect we 1087dc964385SJohn Baldwin * need to read the TCB to see how much data was placed. 1088dc964385SJohn Baldwin * 1089dc964385SJohn Baldwin * For now this just pretends like nothing was placed. 1090dc964385SJohn Baldwin * 1091dc964385SJohn Baldwin * XXX: Note that if we did check the PCB we would need to 1092dc964385SJohn Baldwin * also take care of updating the tp, etc. 1093dc964385SJohn Baldwin */ 1094dc964385SJohn Baldwin job = db->job; 1095fe0bdd1dSJohn Baldwin copied = job->aio_received; 1096dc964385SJohn Baldwin if (copied == 0) { 1097dc964385SJohn Baldwin CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job); 1098dc964385SJohn Baldwin aio_cancel(job); 1099dc964385SJohn Baldwin } else { 1100dc964385SJohn Baldwin CTR3(KTR_CXGBE, "%s: completing %p (copied %ld)", 1101dc964385SJohn Baldwin __func__, job, copied); 1102dc964385SJohn Baldwin aio_complete(job, copied, 0); 1103dc964385SJohn Baldwin t4_rcvd(&toep->td->tod, intotcpcb(inp)); 1104dc964385SJohn Baldwin } 1105dc964385SJohn Baldwin 1106dc964385SJohn Baldwin complete_ddp_buffer(toep, db, db_idx); 1107125d42feSJohn Baldwin if (toep->ddp.waiting_count > 0) 1108dc964385SJohn Baldwin ddp_queue_toep(toep); 1109dc964385SJohn Baldwin DDP_UNLOCK(toep); 1110dc964385SJohn Baldwin INP_WUNLOCK(inp); 1111dc964385SJohn Baldwin break; 1112dc964385SJohn Baldwin default: 1113dc964385SJohn Baldwin panic("XXX: unknown tcb_rpl offset %#x, cookie %#x", 1114dc964385SJohn Baldwin G_WORD(cpl->cookie), G_COOKIE(cpl->cookie)); 1115dc964385SJohn Baldwin } 11164535e804SNavdeep Parhar 11174535e804SNavdeep Parhar return (0); 1118dc964385SJohn Baldwin } 1119dc964385SJohn Baldwin 1120dc964385SJohn Baldwin void 1121dc964385SJohn Baldwin handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt) 1122dc964385SJohn Baldwin { 1123*eba13bbcSJohn Baldwin struct socket *so = toep->inp->inp_socket; 1124*eba13bbcSJohn Baldwin struct sockbuf *sb = &so->so_rcv; 1125dc964385SJohn Baldwin struct ddp_buffer *db; 1126dc964385SJohn Baldwin struct kaiocb *job; 1127dc964385SJohn Baldwin long copied; 112839d5cbdcSNavdeep Parhar unsigned int db_idx; 112939d5cbdcSNavdeep Parhar #ifdef INVARIANTS 113039d5cbdcSNavdeep Parhar unsigned int db_flag; 113139d5cbdcSNavdeep Parhar #endif 1132dc964385SJohn Baldwin int len, placed; 1133*eba13bbcSJohn Baldwin bool ddp_rcvbuf; 1134dc964385SJohn Baldwin 1135b12c0a9eSJohn Baldwin INP_WLOCK_ASSERT(toep->inp); 1136dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 1137b12c0a9eSJohn Baldwin 1138*eba13bbcSJohn Baldwin ddp_rcvbuf = (toep->ddp.flags & DDP_RCVBUF) != 0; 1139*eba13bbcSJohn Baldwin 11405dbf8c15SJohn Baldwin /* - 1 is to ignore the byte for FIN */ 11415dbf8c15SJohn Baldwin len = be32toh(rcv_nxt) - tp->rcv_nxt - 1; 1142b12c0a9eSJohn Baldwin tp->rcv_nxt += len; 1143b12c0a9eSJohn Baldwin 1144*eba13bbcSJohn Baldwin CTR(KTR_CXGBE, "%s: tid %d placed %u bytes before FIN", __func__, 1145*eba13bbcSJohn Baldwin toep->tid, len); 1146125d42feSJohn Baldwin while (toep->ddp.active_count > 0) { 1147125d42feSJohn Baldwin MPASS(toep->ddp.active_id != -1); 1148125d42feSJohn Baldwin db_idx = toep->ddp.active_id; 114939d5cbdcSNavdeep Parhar #ifdef INVARIANTS 1150dc964385SJohn Baldwin db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE; 115139d5cbdcSNavdeep Parhar #endif 1152125d42feSJohn Baldwin MPASS((toep->ddp.flags & db_flag) != 0); 1153125d42feSJohn Baldwin db = &toep->ddp.db[db_idx]; 1154*eba13bbcSJohn Baldwin if (ddp_rcvbuf) { 1155*eba13bbcSJohn Baldwin placed = len; 1156*eba13bbcSJohn Baldwin if (placed > db->drb->len - db->placed) 1157*eba13bbcSJohn Baldwin placed = db->drb->len - db->placed; 1158*eba13bbcSJohn Baldwin if (placed != 0) { 1159*eba13bbcSJohn Baldwin SOCKBUF_LOCK(sb); 1160*eba13bbcSJohn Baldwin queue_ddp_rcvbuf_mbuf(toep, db_idx, placed); 1161*eba13bbcSJohn Baldwin sorwakeup_locked(so); 1162*eba13bbcSJohn Baldwin SOCKBUF_UNLOCK_ASSERT(sb); 1163*eba13bbcSJohn Baldwin } 1164*eba13bbcSJohn Baldwin complete_ddp_buffer(toep, db, db_idx); 1165*eba13bbcSJohn Baldwin len -= placed; 1166*eba13bbcSJohn Baldwin continue; 1167*eba13bbcSJohn Baldwin } 1168dc964385SJohn Baldwin job = db->job; 1169fe0bdd1dSJohn Baldwin copied = job->aio_received; 1170dc964385SJohn Baldwin placed = len; 1171dc964385SJohn Baldwin if (placed > job->uaiocb.aio_nbytes - copied) 1172dc964385SJohn Baldwin placed = job->uaiocb.aio_nbytes - copied; 1173c3d4aea6SJohn Baldwin if (placed > 0) { 1174b1012d80SJohn Baldwin job->msgrcv = 1; 1175c3d4aea6SJohn Baldwin toep->ofld_rxq->rx_aio_ddp_jobs++; 1176c3d4aea6SJohn Baldwin } 1177c3d4aea6SJohn Baldwin toep->ofld_rxq->rx_aio_ddp_octets += placed; 1178dc964385SJohn Baldwin if (!aio_clear_cancel_function(job)) { 1179dc964385SJohn Baldwin /* 1180dc964385SJohn Baldwin * Update the copied length for when 1181dc964385SJohn Baldwin * t4_aio_cancel_active() completes this 1182dc964385SJohn Baldwin * request. 1183dc964385SJohn Baldwin */ 1184fe0bdd1dSJohn Baldwin job->aio_received += placed; 1185dc964385SJohn Baldwin } else { 1186dc964385SJohn Baldwin CTR4(KTR_CXGBE, "%s: tid %d completed buf %d len %d", 1187dc964385SJohn Baldwin __func__, toep->tid, db_idx, placed); 1188dc964385SJohn Baldwin aio_complete(job, copied + placed, 0); 1189dc964385SJohn Baldwin } 1190dc964385SJohn Baldwin len -= placed; 1191dc964385SJohn Baldwin complete_ddp_buffer(toep, db, db_idx); 1192dc964385SJohn Baldwin } 1193b12c0a9eSJohn Baldwin 1194dc964385SJohn Baldwin MPASS(len == 0); 1195*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_AIO) != 0) 1196dc964385SJohn Baldwin ddp_complete_all(toep, 0); 1197b12c0a9eSJohn Baldwin } 1198b12c0a9eSJohn Baldwin 1199e682d02eSNavdeep Parhar #define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\ 1200e682d02eSNavdeep Parhar F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\ 1201e682d02eSNavdeep Parhar F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\ 1202e682d02eSNavdeep Parhar F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR) 1203e682d02eSNavdeep Parhar 1204671bf2b8SNavdeep Parhar extern cpl_handler_t t4_cpl_handler[]; 1205671bf2b8SNavdeep Parhar 1206e682d02eSNavdeep Parhar static int 1207e682d02eSNavdeep Parhar do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1208e682d02eSNavdeep Parhar { 1209e682d02eSNavdeep Parhar struct adapter *sc = iq->adapter; 1210e682d02eSNavdeep Parhar const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1); 1211e682d02eSNavdeep Parhar unsigned int tid = GET_TID(cpl); 1212e682d02eSNavdeep Parhar uint32_t vld; 1213e682d02eSNavdeep Parhar struct toepcb *toep = lookup_tid(sc, tid); 1214e682d02eSNavdeep Parhar 1215e682d02eSNavdeep Parhar KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1216e682d02eSNavdeep Parhar KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); 1217c91bcaaaSNavdeep Parhar KASSERT(!(toep->flags & TPF_SYNQE), 1218e682d02eSNavdeep Parhar ("%s: toep %p claims to be a synq entry", __func__, toep)); 1219e682d02eSNavdeep Parhar 1220e682d02eSNavdeep Parhar vld = be32toh(cpl->ddpvld); 1221e682d02eSNavdeep Parhar if (__predict_false(vld & DDP_ERR)) { 1222e682d02eSNavdeep Parhar panic("%s: DDP error 0x%x (tid %d, toep %p)", 1223e682d02eSNavdeep Parhar __func__, vld, tid, toep); 1224e682d02eSNavdeep Parhar } 12259eb533d3SNavdeep Parhar 1226c537e887SNavdeep Parhar if (ulp_mode(toep) == ULP_MODE_ISCSI) { 1227671bf2b8SNavdeep Parhar t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m); 12280fe98277SNavdeep Parhar return (0); 12290fe98277SNavdeep Parhar } 1230e682d02eSNavdeep Parhar 1231e682d02eSNavdeep Parhar handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len)); 1232e682d02eSNavdeep Parhar 1233e682d02eSNavdeep Parhar return (0); 1234e682d02eSNavdeep Parhar } 1235e682d02eSNavdeep Parhar 1236e682d02eSNavdeep Parhar static int 1237e682d02eSNavdeep Parhar do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss, 1238e682d02eSNavdeep Parhar struct mbuf *m) 1239e682d02eSNavdeep Parhar { 1240e682d02eSNavdeep Parhar struct adapter *sc = iq->adapter; 1241e682d02eSNavdeep Parhar const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1); 1242e682d02eSNavdeep Parhar unsigned int tid = GET_TID(cpl); 1243e682d02eSNavdeep Parhar struct toepcb *toep = lookup_tid(sc, tid); 1244e682d02eSNavdeep Parhar 1245e682d02eSNavdeep Parhar KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1246e682d02eSNavdeep Parhar KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); 1247c91bcaaaSNavdeep Parhar KASSERT(!(toep->flags & TPF_SYNQE), 1248e682d02eSNavdeep Parhar ("%s: toep %p claims to be a synq entry", __func__, toep)); 1249e682d02eSNavdeep Parhar 1250e682d02eSNavdeep Parhar handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0); 1251e682d02eSNavdeep Parhar 1252e682d02eSNavdeep Parhar return (0); 1253e682d02eSNavdeep Parhar } 1254e682d02eSNavdeep Parhar 1255a5a965d7SJohn Baldwin static bool 1256a5a965d7SJohn Baldwin set_ddp_ulp_mode(struct toepcb *toep) 1257a5a965d7SJohn Baldwin { 1258a5a965d7SJohn Baldwin struct adapter *sc = toep->vi->adapter; 1259a5a965d7SJohn Baldwin struct wrqe *wr; 1260a5a965d7SJohn Baldwin struct work_request_hdr *wrh; 1261a5a965d7SJohn Baldwin struct ulp_txpkt *ulpmc; 1262a5a965d7SJohn Baldwin int fields, len; 1263a5a965d7SJohn Baldwin 1264a5a965d7SJohn Baldwin if (!sc->tt.ddp) 1265a5a965d7SJohn Baldwin return (false); 1266a5a965d7SJohn Baldwin 1267a5a965d7SJohn Baldwin fields = 0; 1268a5a965d7SJohn Baldwin 1269a5a965d7SJohn Baldwin /* Overlay region including W_TCB_RX_DDP_FLAGS */ 1270a5a965d7SJohn Baldwin fields += 3; 1271a5a965d7SJohn Baldwin 1272a5a965d7SJohn Baldwin /* W_TCB_ULP_TYPE */ 1273a5a965d7SJohn Baldwin fields++; 1274a5a965d7SJohn Baldwin 1275a5a965d7SJohn Baldwin #ifdef USE_DDP_RX_FLOW_CONTROL 1276a5a965d7SJohn Baldwin /* W_TCB_T_FLAGS */ 1277a5a965d7SJohn Baldwin fields++; 1278a5a965d7SJohn Baldwin #endif 1279a5a965d7SJohn Baldwin 1280a5a965d7SJohn Baldwin len = sizeof(*wrh) + fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1281a5a965d7SJohn Baldwin KASSERT(len <= SGE_MAX_WR_LEN, 1282a5a965d7SJohn Baldwin ("%s: WR with %d TCB field updates too large", __func__, fields)); 1283a5a965d7SJohn Baldwin 1284a5a965d7SJohn Baldwin wr = alloc_wrqe(len, toep->ctrlq); 1285a5a965d7SJohn Baldwin if (wr == NULL) 1286a5a965d7SJohn Baldwin return (false); 1287a5a965d7SJohn Baldwin 1288a5a965d7SJohn Baldwin CTR(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); 1289a5a965d7SJohn Baldwin 1290a5a965d7SJohn Baldwin wrh = wrtod(wr); 1291a5a965d7SJohn Baldwin INIT_ULPTX_WRH(wrh, len, 1, 0); /* atomic */ 1292a5a965d7SJohn Baldwin ulpmc = (struct ulp_txpkt *)(wrh + 1); 1293a5a965d7SJohn Baldwin 1294a5a965d7SJohn Baldwin /* 1295a5a965d7SJohn Baldwin * Words 26/27 are zero except for the DDP_OFF flag in 1296a5a965d7SJohn Baldwin * W_TCB_RX_DDP_FLAGS (27). 1297a5a965d7SJohn Baldwin */ 1298a5a965d7SJohn Baldwin ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 26, 1299a5a965d7SJohn Baldwin 0xffffffffffffffff, (uint64_t)V_TF_DDP_OFF(1) << 32); 1300a5a965d7SJohn Baldwin 1301a5a965d7SJohn Baldwin /* Words 28/29 are zero. */ 1302a5a965d7SJohn Baldwin ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 28, 1303a5a965d7SJohn Baldwin 0xffffffffffffffff, 0); 1304a5a965d7SJohn Baldwin 1305a5a965d7SJohn Baldwin /* Words 30/31 are zero. */ 1306a5a965d7SJohn Baldwin ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 30, 1307a5a965d7SJohn Baldwin 0xffffffffffffffff, 0); 1308a5a965d7SJohn Baldwin 1309a5a965d7SJohn Baldwin /* Set the ULP mode to ULP_MODE_TCPDDP. */ 1310a5a965d7SJohn Baldwin toep->params.ulp_mode = ULP_MODE_TCPDDP; 1311a5a965d7SJohn Baldwin ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_ULP_TYPE, 1312a5a965d7SJohn Baldwin V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), 1313a5a965d7SJohn Baldwin V_TCB_ULP_TYPE(ULP_MODE_TCPDDP)); 1314a5a965d7SJohn Baldwin 1315a5a965d7SJohn Baldwin #ifdef USE_DDP_RX_FLOW_CONTROL 1316a5a965d7SJohn Baldwin /* Set TF_RX_FLOW_CONTROL_DDP. */ 1317a5a965d7SJohn Baldwin ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_T_FLAGS, 1318a5a965d7SJohn Baldwin V_TF_RX_FLOW_CONTROL_DDP(1), V_TF_RX_FLOW_CONTROL_DDP(1)); 1319a5a965d7SJohn Baldwin #endif 1320a5a965d7SJohn Baldwin 1321a5a965d7SJohn Baldwin ddp_init_toep(toep); 1322a5a965d7SJohn Baldwin 1323a5a965d7SJohn Baldwin t4_wrq_tx(sc, wr); 1324a5a965d7SJohn Baldwin return (true); 1325a5a965d7SJohn Baldwin } 1326a5a965d7SJohn Baldwin 1327dc964385SJohn Baldwin static void 1328e682d02eSNavdeep Parhar enable_ddp(struct adapter *sc, struct toepcb *toep) 1329e682d02eSNavdeep Parhar { 1330*eba13bbcSJohn Baldwin uint64_t ddp_flags; 1331e682d02eSNavdeep Parhar 1332125d42feSJohn Baldwin KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK, 1333e682d02eSNavdeep Parhar ("%s: toep %p has bad ddp_flags 0x%x", 1334125d42feSJohn Baldwin __func__, toep, toep->ddp.flags)); 1335e682d02eSNavdeep Parhar 1336e682d02eSNavdeep Parhar CTR3(KTR_CXGBE, "%s: tid %u (time %u)", 1337e682d02eSNavdeep Parhar __func__, toep->tid, time_uptime); 1338e682d02eSNavdeep Parhar 1339*eba13bbcSJohn Baldwin ddp_flags = 0; 1340*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_AIO) != 0) 1341*eba13bbcSJohn Baldwin ddp_flags |= V_TF_DDP_BUF0_INDICATE(1) | 1342*eba13bbcSJohn Baldwin V_TF_DDP_BUF1_INDICATE(1); 1343dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 1344125d42feSJohn Baldwin toep->ddp.flags |= DDP_SC_REQ; 1345edf95febSJohn Baldwin t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_RX_DDP_FLAGS, 1346e682d02eSNavdeep Parhar V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) | 1347e682d02eSNavdeep Parhar V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) | 1348*eba13bbcSJohn Baldwin V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1), ddp_flags, 0, 0); 1349edf95febSJohn Baldwin t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS, 1350edf95febSJohn Baldwin V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0); 1351e682d02eSNavdeep Parhar } 1352e682d02eSNavdeep Parhar 1353e682d02eSNavdeep Parhar static int 1354e682d02eSNavdeep Parhar calculate_hcf(int n1, int n2) 1355e682d02eSNavdeep Parhar { 1356e682d02eSNavdeep Parhar int a, b, t; 1357e682d02eSNavdeep Parhar 1358e682d02eSNavdeep Parhar if (n1 <= n2) { 1359e682d02eSNavdeep Parhar a = n1; 1360e682d02eSNavdeep Parhar b = n2; 1361e682d02eSNavdeep Parhar } else { 1362e682d02eSNavdeep Parhar a = n2; 1363e682d02eSNavdeep Parhar b = n1; 1364e682d02eSNavdeep Parhar } 1365e682d02eSNavdeep Parhar 1366e682d02eSNavdeep Parhar while (a != 0) { 1367e682d02eSNavdeep Parhar t = a; 1368e682d02eSNavdeep Parhar a = b % a; 1369e682d02eSNavdeep Parhar b = t; 1370e682d02eSNavdeep Parhar } 1371e682d02eSNavdeep Parhar 1372e682d02eSNavdeep Parhar return (b); 1373e682d02eSNavdeep Parhar } 1374e682d02eSNavdeep Parhar 1375968267fdSNavdeep Parhar static inline int 1376968267fdSNavdeep Parhar pages_to_nppods(int npages, int ddp_page_shift) 1377e682d02eSNavdeep Parhar { 1378dc964385SJohn Baldwin 1379968267fdSNavdeep Parhar MPASS(ddp_page_shift >= PAGE_SHIFT); 1380968267fdSNavdeep Parhar 1381968267fdSNavdeep Parhar return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES)); 1382968267fdSNavdeep Parhar } 1383968267fdSNavdeep Parhar 1384968267fdSNavdeep Parhar static int 1385968267fdSNavdeep Parhar alloc_page_pods(struct ppod_region *pr, u_int nppods, u_int pgsz_idx, 1386968267fdSNavdeep Parhar struct ppod_reservation *prsv) 1387968267fdSNavdeep Parhar { 1388968267fdSNavdeep Parhar vmem_addr_t addr; /* relative to start of region */ 1389968267fdSNavdeep Parhar 1390968267fdSNavdeep Parhar if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT, 1391968267fdSNavdeep Parhar &addr) != 0) 1392968267fdSNavdeep Parhar return (ENOMEM); 1393968267fdSNavdeep Parhar 139427539974SJohn Baldwin #ifdef VERBOSE_TRACES 1395968267fdSNavdeep Parhar CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d", 1396968267fdSNavdeep Parhar __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask, 1397968267fdSNavdeep Parhar nppods, 1 << pr->pr_page_shift[pgsz_idx]); 139827539974SJohn Baldwin #endif 1399968267fdSNavdeep Parhar 1400968267fdSNavdeep Parhar /* 1401968267fdSNavdeep Parhar * The hardware tagmask includes an extra invalid bit but the arena was 1402968267fdSNavdeep Parhar * seeded with valid values only. An allocation out of this arena will 1403968267fdSNavdeep Parhar * fit inside the tagmask but won't have the invalid bit set. 1404968267fdSNavdeep Parhar */ 1405968267fdSNavdeep Parhar MPASS((addr & pr->pr_tag_mask) == addr); 1406968267fdSNavdeep Parhar MPASS((addr & pr->pr_invalid_bit) == 0); 1407968267fdSNavdeep Parhar 1408968267fdSNavdeep Parhar prsv->prsv_pr = pr; 1409968267fdSNavdeep Parhar prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr; 1410968267fdSNavdeep Parhar prsv->prsv_nppods = nppods; 1411968267fdSNavdeep Parhar 1412968267fdSNavdeep Parhar return (0); 1413968267fdSNavdeep Parhar } 1414968267fdSNavdeep Parhar 14152beaefe8SJohn Baldwin static int 14162beaefe8SJohn Baldwin t4_alloc_page_pods_for_vmpages(struct ppod_region *pr, vm_page_t *pages, 14172beaefe8SJohn Baldwin int npages, struct ppod_reservation *prsv) 1418968267fdSNavdeep Parhar { 1419968267fdSNavdeep Parhar int i, hcf, seglen, idx, nppods; 1420e682d02eSNavdeep Parhar 1421e682d02eSNavdeep Parhar /* 1422e682d02eSNavdeep Parhar * The DDP page size is unrelated to the VM page size. We combine 1423e682d02eSNavdeep Parhar * contiguous physical pages into larger segments to get the best DDP 1424e682d02eSNavdeep Parhar * page size possible. This is the largest of the four sizes in 1425e682d02eSNavdeep Parhar * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes in 1426e682d02eSNavdeep Parhar * the page list. 1427e682d02eSNavdeep Parhar */ 1428e682d02eSNavdeep Parhar hcf = 0; 14292beaefe8SJohn Baldwin for (i = 0; i < npages; i++) { 1430e682d02eSNavdeep Parhar seglen = PAGE_SIZE; 14312beaefe8SJohn Baldwin while (i < npages - 1 && 14322beaefe8SJohn Baldwin VM_PAGE_TO_PHYS(pages[i]) + PAGE_SIZE == 14332beaefe8SJohn Baldwin VM_PAGE_TO_PHYS(pages[i + 1])) { 1434e682d02eSNavdeep Parhar seglen += PAGE_SIZE; 1435e682d02eSNavdeep Parhar i++; 1436e682d02eSNavdeep Parhar } 1437e682d02eSNavdeep Parhar 1438e682d02eSNavdeep Parhar hcf = calculate_hcf(hcf, seglen); 1439968267fdSNavdeep Parhar if (hcf < (1 << pr->pr_page_shift[1])) { 1440e682d02eSNavdeep Parhar idx = 0; 1441e682d02eSNavdeep Parhar goto have_pgsz; /* give up, short circuit */ 1442e682d02eSNavdeep Parhar } 1443e682d02eSNavdeep Parhar } 1444e682d02eSNavdeep Parhar 1445968267fdSNavdeep Parhar #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) 1446968267fdSNavdeep Parhar MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */ 1447968267fdSNavdeep Parhar for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { 1448968267fdSNavdeep Parhar if ((hcf & PR_PAGE_MASK(idx)) == 0) 1449e682d02eSNavdeep Parhar break; 1450e682d02eSNavdeep Parhar } 1451968267fdSNavdeep Parhar #undef PR_PAGE_MASK 1452968267fdSNavdeep Parhar 1453e682d02eSNavdeep Parhar have_pgsz: 1454db8bcd1bSNavdeep Parhar MPASS(idx <= M_PPOD_PGSZ); 1455e682d02eSNavdeep Parhar 14562beaefe8SJohn Baldwin nppods = pages_to_nppods(npages, pr->pr_page_shift[idx]); 1457968267fdSNavdeep Parhar if (alloc_page_pods(pr, nppods, idx, prsv) != 0) 14582beaefe8SJohn Baldwin return (ENOMEM); 1459968267fdSNavdeep Parhar MPASS(prsv->prsv_nppods > 0); 1460e682d02eSNavdeep Parhar 14612beaefe8SJohn Baldwin return (0); 14622beaefe8SJohn Baldwin } 14632beaefe8SJohn Baldwin 14642beaefe8SJohn Baldwin int 14652beaefe8SJohn Baldwin t4_alloc_page_pods_for_ps(struct ppod_region *pr, struct pageset *ps) 14662beaefe8SJohn Baldwin { 14672beaefe8SJohn Baldwin struct ppod_reservation *prsv = &ps->prsv; 14682beaefe8SJohn Baldwin 14692beaefe8SJohn Baldwin KASSERT(prsv->prsv_nppods == 0, 14702beaefe8SJohn Baldwin ("%s: page pods already allocated", __func__)); 14712beaefe8SJohn Baldwin 14722beaefe8SJohn Baldwin return (t4_alloc_page_pods_for_vmpages(pr, ps->pages, ps->npages, 14732beaefe8SJohn Baldwin prsv)); 14742beaefe8SJohn Baldwin } 14752beaefe8SJohn Baldwin 14762beaefe8SJohn Baldwin int 14772beaefe8SJohn Baldwin t4_alloc_page_pods_for_bio(struct ppod_region *pr, struct bio *bp, 14782beaefe8SJohn Baldwin struct ppod_reservation *prsv) 14792beaefe8SJohn Baldwin { 14802beaefe8SJohn Baldwin 14812beaefe8SJohn Baldwin MPASS(bp->bio_flags & BIO_UNMAPPED); 14822beaefe8SJohn Baldwin 14832beaefe8SJohn Baldwin return (t4_alloc_page_pods_for_vmpages(pr, bp->bio_ma, bp->bio_ma_n, 14842beaefe8SJohn Baldwin prsv)); 1485e682d02eSNavdeep Parhar } 1486e682d02eSNavdeep Parhar 1487a9feb2cdSNavdeep Parhar int 1488a9feb2cdSNavdeep Parhar t4_alloc_page_pods_for_buf(struct ppod_region *pr, vm_offset_t buf, int len, 1489a9feb2cdSNavdeep Parhar struct ppod_reservation *prsv) 1490a9feb2cdSNavdeep Parhar { 1491a9feb2cdSNavdeep Parhar int hcf, seglen, idx, npages, nppods; 1492a9feb2cdSNavdeep Parhar uintptr_t start_pva, end_pva, pva, p1; 1493a9feb2cdSNavdeep Parhar 1494a9feb2cdSNavdeep Parhar MPASS(buf > 0); 1495a9feb2cdSNavdeep Parhar MPASS(len > 0); 1496a9feb2cdSNavdeep Parhar 1497a9feb2cdSNavdeep Parhar /* 1498a9feb2cdSNavdeep Parhar * The DDP page size is unrelated to the VM page size. We combine 1499a9feb2cdSNavdeep Parhar * contiguous physical pages into larger segments to get the best DDP 1500a9feb2cdSNavdeep Parhar * page size possible. This is the largest of the four sizes in 1501a9feb2cdSNavdeep Parhar * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes 1502a9feb2cdSNavdeep Parhar * in the page list. 1503a9feb2cdSNavdeep Parhar */ 1504a9feb2cdSNavdeep Parhar hcf = 0; 1505a9feb2cdSNavdeep Parhar start_pva = trunc_page(buf); 1506a9feb2cdSNavdeep Parhar end_pva = trunc_page(buf + len - 1); 1507a9feb2cdSNavdeep Parhar pva = start_pva; 1508a9feb2cdSNavdeep Parhar while (pva <= end_pva) { 1509a9feb2cdSNavdeep Parhar seglen = PAGE_SIZE; 1510a9feb2cdSNavdeep Parhar p1 = pmap_kextract(pva); 1511a9feb2cdSNavdeep Parhar pva += PAGE_SIZE; 1512a9feb2cdSNavdeep Parhar while (pva <= end_pva && p1 + seglen == pmap_kextract(pva)) { 1513a9feb2cdSNavdeep Parhar seglen += PAGE_SIZE; 1514a9feb2cdSNavdeep Parhar pva += PAGE_SIZE; 1515a9feb2cdSNavdeep Parhar } 1516a9feb2cdSNavdeep Parhar 1517a9feb2cdSNavdeep Parhar hcf = calculate_hcf(hcf, seglen); 1518a9feb2cdSNavdeep Parhar if (hcf < (1 << pr->pr_page_shift[1])) { 1519a9feb2cdSNavdeep Parhar idx = 0; 1520a9feb2cdSNavdeep Parhar goto have_pgsz; /* give up, short circuit */ 1521a9feb2cdSNavdeep Parhar } 1522a9feb2cdSNavdeep Parhar } 1523a9feb2cdSNavdeep Parhar 1524a9feb2cdSNavdeep Parhar #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) 1525a9feb2cdSNavdeep Parhar MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */ 1526a9feb2cdSNavdeep Parhar for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { 1527a9feb2cdSNavdeep Parhar if ((hcf & PR_PAGE_MASK(idx)) == 0) 1528a9feb2cdSNavdeep Parhar break; 1529a9feb2cdSNavdeep Parhar } 1530a9feb2cdSNavdeep Parhar #undef PR_PAGE_MASK 1531a9feb2cdSNavdeep Parhar 1532a9feb2cdSNavdeep Parhar have_pgsz: 1533a9feb2cdSNavdeep Parhar MPASS(idx <= M_PPOD_PGSZ); 1534a9feb2cdSNavdeep Parhar 1535a9feb2cdSNavdeep Parhar npages = 1; 1536a9feb2cdSNavdeep Parhar npages += (end_pva - start_pva) >> pr->pr_page_shift[idx]; 1537a9feb2cdSNavdeep Parhar nppods = howmany(npages, PPOD_PAGES); 1538a9feb2cdSNavdeep Parhar if (alloc_page_pods(pr, nppods, idx, prsv) != 0) 1539a9feb2cdSNavdeep Parhar return (ENOMEM); 1540a9feb2cdSNavdeep Parhar MPASS(prsv->prsv_nppods > 0); 1541a9feb2cdSNavdeep Parhar 1542a9feb2cdSNavdeep Parhar return (0); 1543a9feb2cdSNavdeep Parhar } 1544a9feb2cdSNavdeep Parhar 1545*eba13bbcSJohn Baldwin static int 1546*eba13bbcSJohn Baldwin t4_alloc_page_pods_for_rcvbuf(struct ppod_region *pr, 1547*eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb) 1548*eba13bbcSJohn Baldwin { 1549*eba13bbcSJohn Baldwin struct ppod_reservation *prsv = &drb->prsv; 1550*eba13bbcSJohn Baldwin 1551*eba13bbcSJohn Baldwin KASSERT(prsv->prsv_nppods == 0, 1552*eba13bbcSJohn Baldwin ("%s: page pods already allocated", __func__)); 1553*eba13bbcSJohn Baldwin 1554*eba13bbcSJohn Baldwin return (t4_alloc_page_pods_for_buf(pr, (vm_offset_t)drb->buf, drb->len, 1555*eba13bbcSJohn Baldwin prsv)); 1556*eba13bbcSJohn Baldwin } 1557*eba13bbcSJohn Baldwin 155846bee804SJohn Baldwin int 155946bee804SJohn Baldwin t4_alloc_page_pods_for_sgl(struct ppod_region *pr, struct ctl_sg_entry *sgl, 156046bee804SJohn Baldwin int entries, struct ppod_reservation *prsv) 156146bee804SJohn Baldwin { 156246bee804SJohn Baldwin int hcf, seglen, idx = 0, npages, nppods, i, len; 156346bee804SJohn Baldwin uintptr_t start_pva, end_pva, pva, p1 ; 156446bee804SJohn Baldwin vm_offset_t buf; 156546bee804SJohn Baldwin struct ctl_sg_entry *sge; 156646bee804SJohn Baldwin 156746bee804SJohn Baldwin MPASS(entries > 0); 156846bee804SJohn Baldwin MPASS(sgl); 156946bee804SJohn Baldwin 157046bee804SJohn Baldwin /* 157146bee804SJohn Baldwin * The DDP page size is unrelated to the VM page size. We combine 157246bee804SJohn Baldwin * contiguous physical pages into larger segments to get the best DDP 157346bee804SJohn Baldwin * page size possible. This is the largest of the four sizes in 157446bee804SJohn Baldwin * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes 157546bee804SJohn Baldwin * in the page list. 157646bee804SJohn Baldwin */ 157746bee804SJohn Baldwin hcf = 0; 157846bee804SJohn Baldwin for (i = entries - 1; i >= 0; i--) { 157946bee804SJohn Baldwin sge = sgl + i; 158046bee804SJohn Baldwin buf = (vm_offset_t)sge->addr; 158146bee804SJohn Baldwin len = sge->len; 158246bee804SJohn Baldwin start_pva = trunc_page(buf); 158346bee804SJohn Baldwin end_pva = trunc_page(buf + len - 1); 158446bee804SJohn Baldwin pva = start_pva; 158546bee804SJohn Baldwin while (pva <= end_pva) { 158646bee804SJohn Baldwin seglen = PAGE_SIZE; 158746bee804SJohn Baldwin p1 = pmap_kextract(pva); 158846bee804SJohn Baldwin pva += PAGE_SIZE; 158946bee804SJohn Baldwin while (pva <= end_pva && p1 + seglen == 159046bee804SJohn Baldwin pmap_kextract(pva)) { 159146bee804SJohn Baldwin seglen += PAGE_SIZE; 159246bee804SJohn Baldwin pva += PAGE_SIZE; 159346bee804SJohn Baldwin } 159446bee804SJohn Baldwin 159546bee804SJohn Baldwin hcf = calculate_hcf(hcf, seglen); 159646bee804SJohn Baldwin if (hcf < (1 << pr->pr_page_shift[1])) { 159746bee804SJohn Baldwin idx = 0; 159846bee804SJohn Baldwin goto have_pgsz; /* give up, short circuit */ 159946bee804SJohn Baldwin } 160046bee804SJohn Baldwin } 160146bee804SJohn Baldwin } 160246bee804SJohn Baldwin #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) 160346bee804SJohn Baldwin MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */ 160446bee804SJohn Baldwin for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { 160546bee804SJohn Baldwin if ((hcf & PR_PAGE_MASK(idx)) == 0) 160646bee804SJohn Baldwin break; 160746bee804SJohn Baldwin } 160846bee804SJohn Baldwin #undef PR_PAGE_MASK 160946bee804SJohn Baldwin 161046bee804SJohn Baldwin have_pgsz: 161146bee804SJohn Baldwin MPASS(idx <= M_PPOD_PGSZ); 161246bee804SJohn Baldwin 161346bee804SJohn Baldwin npages = 0; 161446bee804SJohn Baldwin while (entries--) { 161546bee804SJohn Baldwin npages++; 16168d2b4b2eSJohn Baldwin start_pva = trunc_page((vm_offset_t)sgl->addr); 161746bee804SJohn Baldwin end_pva = trunc_page((vm_offset_t)sgl->addr + sgl->len - 1); 161846bee804SJohn Baldwin npages += (end_pva - start_pva) >> pr->pr_page_shift[idx]; 161946bee804SJohn Baldwin sgl = sgl + 1; 162046bee804SJohn Baldwin } 162146bee804SJohn Baldwin nppods = howmany(npages, PPOD_PAGES); 162246bee804SJohn Baldwin if (alloc_page_pods(pr, nppods, idx, prsv) != 0) 162346bee804SJohn Baldwin return (ENOMEM); 162446bee804SJohn Baldwin MPASS(prsv->prsv_nppods > 0); 162546bee804SJohn Baldwin return (0); 162646bee804SJohn Baldwin } 162746bee804SJohn Baldwin 1628968267fdSNavdeep Parhar void 1629968267fdSNavdeep Parhar t4_free_page_pods(struct ppod_reservation *prsv) 1630968267fdSNavdeep Parhar { 1631968267fdSNavdeep Parhar struct ppod_region *pr = prsv->prsv_pr; 1632968267fdSNavdeep Parhar vmem_addr_t addr; 1633968267fdSNavdeep Parhar 1634968267fdSNavdeep Parhar MPASS(prsv != NULL); 1635968267fdSNavdeep Parhar MPASS(prsv->prsv_nppods != 0); 1636968267fdSNavdeep Parhar 1637968267fdSNavdeep Parhar addr = prsv->prsv_tag & pr->pr_tag_mask; 1638968267fdSNavdeep Parhar MPASS((addr & pr->pr_invalid_bit) == 0); 1639968267fdSNavdeep Parhar 164027539974SJohn Baldwin #ifdef VERBOSE_TRACES 1641968267fdSNavdeep Parhar CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__, 1642968267fdSNavdeep Parhar pr->pr_arena, addr, prsv->prsv_nppods); 164327539974SJohn Baldwin #endif 1644968267fdSNavdeep Parhar 1645968267fdSNavdeep Parhar vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods)); 1646968267fdSNavdeep Parhar prsv->prsv_nppods = 0; 1647968267fdSNavdeep Parhar } 1648968267fdSNavdeep Parhar 1649e682d02eSNavdeep Parhar #define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE) 1650e682d02eSNavdeep Parhar 1651968267fdSNavdeep Parhar int 1652968267fdSNavdeep Parhar t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid, 1653968267fdSNavdeep Parhar struct pageset *ps) 1654e682d02eSNavdeep Parhar { 1655e682d02eSNavdeep Parhar struct wrqe *wr; 1656e682d02eSNavdeep Parhar struct ulp_mem_io *ulpmc; 1657e682d02eSNavdeep Parhar struct ulptx_idata *ulpsc; 1658e682d02eSNavdeep Parhar struct pagepod *ppod; 1659db8bcd1bSNavdeep Parhar int i, j, k, n, chunk, len, ddp_pgsz, idx; 1660db8bcd1bSNavdeep Parhar u_int ppod_addr; 166188c4ff7bSNavdeep Parhar uint32_t cmd; 1662968267fdSNavdeep Parhar struct ppod_reservation *prsv = &ps->prsv; 1663968267fdSNavdeep Parhar struct ppod_region *pr = prsv->prsv_pr; 166487b0e771SJohn Baldwin vm_paddr_t pa; 1665e682d02eSNavdeep Parhar 1666dc964385SJohn Baldwin KASSERT(!(ps->flags & PS_PPODS_WRITTEN), 1667dc964385SJohn Baldwin ("%s: page pods already written", __func__)); 1668968267fdSNavdeep Parhar MPASS(prsv->prsv_nppods > 0); 1669dc964385SJohn Baldwin 167088c4ff7bSNavdeep Parhar cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 167188c4ff7bSNavdeep Parhar if (is_t4(sc)) 167288c4ff7bSNavdeep Parhar cmd |= htobe32(F_ULP_MEMIO_ORDER); 167388c4ff7bSNavdeep Parhar else 167488c4ff7bSNavdeep Parhar cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 1675968267fdSNavdeep Parhar ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 1676968267fdSNavdeep Parhar ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 1677968267fdSNavdeep Parhar for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 1678e682d02eSNavdeep Parhar /* How many page pods are we writing in this cycle */ 1679968267fdSNavdeep Parhar n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 1680e682d02eSNavdeep Parhar chunk = PPOD_SZ(n); 1681d14b0ac1SNavdeep Parhar len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 1682e682d02eSNavdeep Parhar 1683968267fdSNavdeep Parhar wr = alloc_wrqe(len, wrq); 1684e682d02eSNavdeep Parhar if (wr == NULL) 1685e682d02eSNavdeep Parhar return (ENOMEM); /* ok to just bail out */ 1686e682d02eSNavdeep Parhar ulpmc = wrtod(wr); 1687e682d02eSNavdeep Parhar 1688e682d02eSNavdeep Parhar INIT_ULPTX_WR(ulpmc, len, 0, 0); 168988c4ff7bSNavdeep Parhar ulpmc->cmd = cmd; 1690e682d02eSNavdeep Parhar ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 1691e682d02eSNavdeep Parhar ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 1692e682d02eSNavdeep Parhar ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 1693e682d02eSNavdeep Parhar 1694e682d02eSNavdeep Parhar ulpsc = (struct ulptx_idata *)(ulpmc + 1); 1695e682d02eSNavdeep Parhar ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 1696e682d02eSNavdeep Parhar ulpsc->len = htobe32(chunk); 1697e682d02eSNavdeep Parhar 1698e682d02eSNavdeep Parhar ppod = (struct pagepod *)(ulpsc + 1); 1699e682d02eSNavdeep Parhar for (j = 0; j < n; i++, j++, ppod++) { 1700e682d02eSNavdeep Parhar ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 1701968267fdSNavdeep Parhar V_PPOD_TID(tid) | prsv->prsv_tag); 1702dc964385SJohn Baldwin ppod->len_offset = htobe64(V_PPOD_LEN(ps->len) | 1703dc964385SJohn Baldwin V_PPOD_OFST(ps->offset)); 1704e682d02eSNavdeep Parhar ppod->rsvd = 0; 1705e682d02eSNavdeep Parhar idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE); 170657c60f98SNavdeep Parhar for (k = 0; k < nitems(ppod->addr); k++) { 1707dc964385SJohn Baldwin if (idx < ps->npages) { 170887b0e771SJohn Baldwin pa = VM_PAGE_TO_PHYS(ps->pages[idx]); 170987b0e771SJohn Baldwin ppod->addr[k] = htobe64(pa); 1710e682d02eSNavdeep Parhar idx += ddp_pgsz / PAGE_SIZE; 1711e682d02eSNavdeep Parhar } else 1712e682d02eSNavdeep Parhar ppod->addr[k] = 0; 1713e682d02eSNavdeep Parhar #if 0 1714e682d02eSNavdeep Parhar CTR5(KTR_CXGBE, 1715e682d02eSNavdeep Parhar "%s: tid %d ppod[%d]->addr[%d] = %p", 1716bca6e339SJohn Baldwin __func__, tid, i, k, 171744e7472dSJohn Baldwin be64toh(ppod->addr[k])); 1718e682d02eSNavdeep Parhar #endif 1719e682d02eSNavdeep Parhar } 1720e682d02eSNavdeep Parhar 1721e682d02eSNavdeep Parhar } 1722e682d02eSNavdeep Parhar 1723e682d02eSNavdeep Parhar t4_wrq_tx(sc, wr); 1724e682d02eSNavdeep Parhar } 1725dc964385SJohn Baldwin ps->flags |= PS_PPODS_WRITTEN; 1726e682d02eSNavdeep Parhar 1727e682d02eSNavdeep Parhar return (0); 1728e682d02eSNavdeep Parhar } 1729e682d02eSNavdeep Parhar 1730*eba13bbcSJohn Baldwin static int 1731*eba13bbcSJohn Baldwin t4_write_page_pods_for_rcvbuf(struct adapter *sc, struct sge_wrq *wrq, int tid, 1732*eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb) 1733*eba13bbcSJohn Baldwin { 1734*eba13bbcSJohn Baldwin struct wrqe *wr; 1735*eba13bbcSJohn Baldwin struct ulp_mem_io *ulpmc; 1736*eba13bbcSJohn Baldwin struct ulptx_idata *ulpsc; 1737*eba13bbcSJohn Baldwin struct pagepod *ppod; 1738*eba13bbcSJohn Baldwin int i, j, k, n, chunk, len, ddp_pgsz; 1739*eba13bbcSJohn Baldwin u_int ppod_addr, offset; 1740*eba13bbcSJohn Baldwin uint32_t cmd; 1741*eba13bbcSJohn Baldwin struct ppod_reservation *prsv = &drb->prsv; 1742*eba13bbcSJohn Baldwin struct ppod_region *pr = prsv->prsv_pr; 1743*eba13bbcSJohn Baldwin uintptr_t end_pva, pva; 1744*eba13bbcSJohn Baldwin vm_paddr_t pa; 1745*eba13bbcSJohn Baldwin 1746*eba13bbcSJohn Baldwin MPASS(prsv->prsv_nppods > 0); 1747*eba13bbcSJohn Baldwin 1748*eba13bbcSJohn Baldwin cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 1749*eba13bbcSJohn Baldwin if (is_t4(sc)) 1750*eba13bbcSJohn Baldwin cmd |= htobe32(F_ULP_MEMIO_ORDER); 1751*eba13bbcSJohn Baldwin else 1752*eba13bbcSJohn Baldwin cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 1753*eba13bbcSJohn Baldwin ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 1754*eba13bbcSJohn Baldwin offset = (uintptr_t)drb->buf & PAGE_MASK; 1755*eba13bbcSJohn Baldwin ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 1756*eba13bbcSJohn Baldwin pva = trunc_page((uintptr_t)drb->buf); 1757*eba13bbcSJohn Baldwin end_pva = trunc_page((uintptr_t)drb->buf + drb->len - 1); 1758*eba13bbcSJohn Baldwin for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 1759*eba13bbcSJohn Baldwin /* How many page pods are we writing in this cycle */ 1760*eba13bbcSJohn Baldwin n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 1761*eba13bbcSJohn Baldwin MPASS(n > 0); 1762*eba13bbcSJohn Baldwin chunk = PPOD_SZ(n); 1763*eba13bbcSJohn Baldwin len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 1764*eba13bbcSJohn Baldwin 1765*eba13bbcSJohn Baldwin wr = alloc_wrqe(len, wrq); 1766*eba13bbcSJohn Baldwin if (wr == NULL) 1767*eba13bbcSJohn Baldwin return (ENOMEM); /* ok to just bail out */ 1768*eba13bbcSJohn Baldwin ulpmc = wrtod(wr); 1769*eba13bbcSJohn Baldwin 1770*eba13bbcSJohn Baldwin INIT_ULPTX_WR(ulpmc, len, 0, 0); 1771*eba13bbcSJohn Baldwin ulpmc->cmd = cmd; 1772*eba13bbcSJohn Baldwin ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 1773*eba13bbcSJohn Baldwin ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 1774*eba13bbcSJohn Baldwin ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 1775*eba13bbcSJohn Baldwin 1776*eba13bbcSJohn Baldwin ulpsc = (struct ulptx_idata *)(ulpmc + 1); 1777*eba13bbcSJohn Baldwin ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 1778*eba13bbcSJohn Baldwin ulpsc->len = htobe32(chunk); 1779*eba13bbcSJohn Baldwin 1780*eba13bbcSJohn Baldwin ppod = (struct pagepod *)(ulpsc + 1); 1781*eba13bbcSJohn Baldwin for (j = 0; j < n; i++, j++, ppod++) { 1782*eba13bbcSJohn Baldwin ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 1783*eba13bbcSJohn Baldwin V_PPOD_TID(tid) | prsv->prsv_tag); 1784*eba13bbcSJohn Baldwin ppod->len_offset = htobe64(V_PPOD_LEN(drb->len) | 1785*eba13bbcSJohn Baldwin V_PPOD_OFST(offset)); 1786*eba13bbcSJohn Baldwin ppod->rsvd = 0; 1787*eba13bbcSJohn Baldwin 1788*eba13bbcSJohn Baldwin for (k = 0; k < nitems(ppod->addr); k++) { 1789*eba13bbcSJohn Baldwin if (pva > end_pva) 1790*eba13bbcSJohn Baldwin ppod->addr[k] = 0; 1791*eba13bbcSJohn Baldwin else { 1792*eba13bbcSJohn Baldwin pa = pmap_kextract(pva); 1793*eba13bbcSJohn Baldwin ppod->addr[k] = htobe64(pa); 1794*eba13bbcSJohn Baldwin pva += ddp_pgsz; 1795*eba13bbcSJohn Baldwin } 1796*eba13bbcSJohn Baldwin #if 0 1797*eba13bbcSJohn Baldwin CTR5(KTR_CXGBE, 1798*eba13bbcSJohn Baldwin "%s: tid %d ppod[%d]->addr[%d] = %p", 1799*eba13bbcSJohn Baldwin __func__, tid, i, k, 1800*eba13bbcSJohn Baldwin be64toh(ppod->addr[k])); 1801*eba13bbcSJohn Baldwin #endif 1802*eba13bbcSJohn Baldwin } 1803*eba13bbcSJohn Baldwin 1804*eba13bbcSJohn Baldwin /* 1805*eba13bbcSJohn Baldwin * Walk back 1 segment so that the first address in the 1806*eba13bbcSJohn Baldwin * next pod is the same as the last one in the current 1807*eba13bbcSJohn Baldwin * pod. 1808*eba13bbcSJohn Baldwin */ 1809*eba13bbcSJohn Baldwin pva -= ddp_pgsz; 1810*eba13bbcSJohn Baldwin } 1811*eba13bbcSJohn Baldwin 1812*eba13bbcSJohn Baldwin t4_wrq_tx(sc, wr); 1813*eba13bbcSJohn Baldwin } 1814*eba13bbcSJohn Baldwin 1815*eba13bbcSJohn Baldwin MPASS(pva <= end_pva); 1816*eba13bbcSJohn Baldwin 1817*eba13bbcSJohn Baldwin return (0); 1818*eba13bbcSJohn Baldwin } 1819*eba13bbcSJohn Baldwin 18204427ac36SJohn Baldwin static struct mbuf * 18214427ac36SJohn Baldwin alloc_raw_wr_mbuf(int len) 18224427ac36SJohn Baldwin { 18234427ac36SJohn Baldwin struct mbuf *m; 18244427ac36SJohn Baldwin 18254427ac36SJohn Baldwin if (len <= MHLEN) 18264427ac36SJohn Baldwin m = m_gethdr(M_NOWAIT, MT_DATA); 18274427ac36SJohn Baldwin else if (len <= MCLBYTES) 18284427ac36SJohn Baldwin m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 18294427ac36SJohn Baldwin else 18304427ac36SJohn Baldwin m = NULL; 18314427ac36SJohn Baldwin if (m == NULL) 18324427ac36SJohn Baldwin return (NULL); 18334427ac36SJohn Baldwin m->m_pkthdr.len = len; 18344427ac36SJohn Baldwin m->m_len = len; 18354427ac36SJohn Baldwin set_mbuf_raw_wr(m, true); 18364427ac36SJohn Baldwin return (m); 18374427ac36SJohn Baldwin } 18384427ac36SJohn Baldwin 1839a9feb2cdSNavdeep Parhar int 18402beaefe8SJohn Baldwin t4_write_page_pods_for_bio(struct adapter *sc, struct toepcb *toep, 18412beaefe8SJohn Baldwin struct ppod_reservation *prsv, struct bio *bp, struct mbufq *wrq) 18422beaefe8SJohn Baldwin { 18432beaefe8SJohn Baldwin struct ulp_mem_io *ulpmc; 18442beaefe8SJohn Baldwin struct ulptx_idata *ulpsc; 18452beaefe8SJohn Baldwin struct pagepod *ppod; 18462beaefe8SJohn Baldwin int i, j, k, n, chunk, len, ddp_pgsz, idx; 18472beaefe8SJohn Baldwin u_int ppod_addr; 18482beaefe8SJohn Baldwin uint32_t cmd; 18492beaefe8SJohn Baldwin struct ppod_region *pr = prsv->prsv_pr; 18502beaefe8SJohn Baldwin vm_paddr_t pa; 18512beaefe8SJohn Baldwin struct mbuf *m; 18522beaefe8SJohn Baldwin 18532beaefe8SJohn Baldwin MPASS(bp->bio_flags & BIO_UNMAPPED); 18542beaefe8SJohn Baldwin 18552beaefe8SJohn Baldwin cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 18562beaefe8SJohn Baldwin if (is_t4(sc)) 18572beaefe8SJohn Baldwin cmd |= htobe32(F_ULP_MEMIO_ORDER); 18582beaefe8SJohn Baldwin else 18592beaefe8SJohn Baldwin cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 18602beaefe8SJohn Baldwin ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 18612beaefe8SJohn Baldwin ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 18622beaefe8SJohn Baldwin for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 18632beaefe8SJohn Baldwin 18642beaefe8SJohn Baldwin /* How many page pods are we writing in this cycle */ 18652beaefe8SJohn Baldwin n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 18662beaefe8SJohn Baldwin MPASS(n > 0); 18672beaefe8SJohn Baldwin chunk = PPOD_SZ(n); 18682beaefe8SJohn Baldwin len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 18692beaefe8SJohn Baldwin 18702beaefe8SJohn Baldwin m = alloc_raw_wr_mbuf(len); 18712beaefe8SJohn Baldwin if (m == NULL) 18722beaefe8SJohn Baldwin return (ENOMEM); 18732beaefe8SJohn Baldwin 18742beaefe8SJohn Baldwin ulpmc = mtod(m, struct ulp_mem_io *); 18752beaefe8SJohn Baldwin INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); 18762beaefe8SJohn Baldwin ulpmc->cmd = cmd; 18772beaefe8SJohn Baldwin ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 18782beaefe8SJohn Baldwin ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 18792beaefe8SJohn Baldwin ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 18802beaefe8SJohn Baldwin 18812beaefe8SJohn Baldwin ulpsc = (struct ulptx_idata *)(ulpmc + 1); 18822beaefe8SJohn Baldwin ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 18832beaefe8SJohn Baldwin ulpsc->len = htobe32(chunk); 18842beaefe8SJohn Baldwin 18852beaefe8SJohn Baldwin ppod = (struct pagepod *)(ulpsc + 1); 18862beaefe8SJohn Baldwin for (j = 0; j < n; i++, j++, ppod++) { 18872beaefe8SJohn Baldwin ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 18882beaefe8SJohn Baldwin V_PPOD_TID(toep->tid) | 18892beaefe8SJohn Baldwin (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); 18902beaefe8SJohn Baldwin ppod->len_offset = htobe64(V_PPOD_LEN(bp->bio_bcount) | 18912beaefe8SJohn Baldwin V_PPOD_OFST(bp->bio_ma_offset)); 18922beaefe8SJohn Baldwin ppod->rsvd = 0; 18932beaefe8SJohn Baldwin idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE); 18942beaefe8SJohn Baldwin for (k = 0; k < nitems(ppod->addr); k++) { 18952beaefe8SJohn Baldwin if (idx < bp->bio_ma_n) { 18962beaefe8SJohn Baldwin pa = VM_PAGE_TO_PHYS(bp->bio_ma[idx]); 18972beaefe8SJohn Baldwin ppod->addr[k] = htobe64(pa); 18982beaefe8SJohn Baldwin idx += ddp_pgsz / PAGE_SIZE; 18992beaefe8SJohn Baldwin } else 19002beaefe8SJohn Baldwin ppod->addr[k] = 0; 19012beaefe8SJohn Baldwin #if 0 19022beaefe8SJohn Baldwin CTR5(KTR_CXGBE, 19032beaefe8SJohn Baldwin "%s: tid %d ppod[%d]->addr[%d] = %p", 19042beaefe8SJohn Baldwin __func__, toep->tid, i, k, 19052beaefe8SJohn Baldwin be64toh(ppod->addr[k])); 19062beaefe8SJohn Baldwin #endif 19072beaefe8SJohn Baldwin } 19082beaefe8SJohn Baldwin } 19092beaefe8SJohn Baldwin 19102beaefe8SJohn Baldwin mbufq_enqueue(wrq, m); 19112beaefe8SJohn Baldwin } 19122beaefe8SJohn Baldwin 19132beaefe8SJohn Baldwin return (0); 19142beaefe8SJohn Baldwin } 19152beaefe8SJohn Baldwin 19162beaefe8SJohn Baldwin int 19174427ac36SJohn Baldwin t4_write_page_pods_for_buf(struct adapter *sc, struct toepcb *toep, 1918f949967cSJohn Baldwin struct ppod_reservation *prsv, vm_offset_t buf, int buflen, 1919f949967cSJohn Baldwin struct mbufq *wrq) 1920a9feb2cdSNavdeep Parhar { 1921a9feb2cdSNavdeep Parhar struct ulp_mem_io *ulpmc; 1922a9feb2cdSNavdeep Parhar struct ulptx_idata *ulpsc; 1923a9feb2cdSNavdeep Parhar struct pagepod *ppod; 1924a9feb2cdSNavdeep Parhar int i, j, k, n, chunk, len, ddp_pgsz; 1925a9feb2cdSNavdeep Parhar u_int ppod_addr, offset; 1926a9feb2cdSNavdeep Parhar uint32_t cmd; 1927a9feb2cdSNavdeep Parhar struct ppod_region *pr = prsv->prsv_pr; 1928de414339SJohn Baldwin uintptr_t end_pva, pva; 1929de414339SJohn Baldwin vm_paddr_t pa; 19304427ac36SJohn Baldwin struct mbuf *m; 1931a9feb2cdSNavdeep Parhar 1932a9feb2cdSNavdeep Parhar cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 1933a9feb2cdSNavdeep Parhar if (is_t4(sc)) 1934a9feb2cdSNavdeep Parhar cmd |= htobe32(F_ULP_MEMIO_ORDER); 1935a9feb2cdSNavdeep Parhar else 1936a9feb2cdSNavdeep Parhar cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 1937a9feb2cdSNavdeep Parhar ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 1938a9feb2cdSNavdeep Parhar offset = buf & PAGE_MASK; 1939a9feb2cdSNavdeep Parhar ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 1940a9feb2cdSNavdeep Parhar pva = trunc_page(buf); 1941a9feb2cdSNavdeep Parhar end_pva = trunc_page(buf + buflen - 1); 1942a9feb2cdSNavdeep Parhar for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 1943a9feb2cdSNavdeep Parhar 1944a9feb2cdSNavdeep Parhar /* How many page pods are we writing in this cycle */ 1945a9feb2cdSNavdeep Parhar n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 1946a9feb2cdSNavdeep Parhar MPASS(n > 0); 1947a9feb2cdSNavdeep Parhar chunk = PPOD_SZ(n); 1948a9feb2cdSNavdeep Parhar len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 1949a9feb2cdSNavdeep Parhar 19504427ac36SJohn Baldwin m = alloc_raw_wr_mbuf(len); 1951f949967cSJohn Baldwin if (m == NULL) 19524427ac36SJohn Baldwin return (ENOMEM); 19534427ac36SJohn Baldwin ulpmc = mtod(m, struct ulp_mem_io *); 1954a9feb2cdSNavdeep Parhar 19554427ac36SJohn Baldwin INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); 1956a9feb2cdSNavdeep Parhar ulpmc->cmd = cmd; 1957a9feb2cdSNavdeep Parhar ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 1958a9feb2cdSNavdeep Parhar ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 1959a9feb2cdSNavdeep Parhar ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 1960a9feb2cdSNavdeep Parhar 1961a9feb2cdSNavdeep Parhar ulpsc = (struct ulptx_idata *)(ulpmc + 1); 1962a9feb2cdSNavdeep Parhar ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 1963a9feb2cdSNavdeep Parhar ulpsc->len = htobe32(chunk); 1964a9feb2cdSNavdeep Parhar 1965a9feb2cdSNavdeep Parhar ppod = (struct pagepod *)(ulpsc + 1); 1966a9feb2cdSNavdeep Parhar for (j = 0; j < n; i++, j++, ppod++) { 1967a9feb2cdSNavdeep Parhar ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 19684427ac36SJohn Baldwin V_PPOD_TID(toep->tid) | 1969a9feb2cdSNavdeep Parhar (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); 1970a9feb2cdSNavdeep Parhar ppod->len_offset = htobe64(V_PPOD_LEN(buflen) | 1971a9feb2cdSNavdeep Parhar V_PPOD_OFST(offset)); 1972a9feb2cdSNavdeep Parhar ppod->rsvd = 0; 1973a9feb2cdSNavdeep Parhar 1974a9feb2cdSNavdeep Parhar for (k = 0; k < nitems(ppod->addr); k++) { 1975a9feb2cdSNavdeep Parhar if (pva > end_pva) 1976a9feb2cdSNavdeep Parhar ppod->addr[k] = 0; 1977a9feb2cdSNavdeep Parhar else { 1978a9feb2cdSNavdeep Parhar pa = pmap_kextract(pva); 1979a9feb2cdSNavdeep Parhar ppod->addr[k] = htobe64(pa); 1980a9feb2cdSNavdeep Parhar pva += ddp_pgsz; 1981a9feb2cdSNavdeep Parhar } 1982a9feb2cdSNavdeep Parhar #if 0 1983a9feb2cdSNavdeep Parhar CTR5(KTR_CXGBE, 1984a9feb2cdSNavdeep Parhar "%s: tid %d ppod[%d]->addr[%d] = %p", 19854427ac36SJohn Baldwin __func__, toep->tid, i, k, 198644e7472dSJohn Baldwin be64toh(ppod->addr[k])); 1987a9feb2cdSNavdeep Parhar #endif 1988a9feb2cdSNavdeep Parhar } 1989a9feb2cdSNavdeep Parhar 1990a9feb2cdSNavdeep Parhar /* 1991a9feb2cdSNavdeep Parhar * Walk back 1 segment so that the first address in the 1992a9feb2cdSNavdeep Parhar * next pod is the same as the last one in the current 1993a9feb2cdSNavdeep Parhar * pod. 1994a9feb2cdSNavdeep Parhar */ 1995a9feb2cdSNavdeep Parhar pva -= ddp_pgsz; 1996a9feb2cdSNavdeep Parhar } 1997a9feb2cdSNavdeep Parhar 1998f949967cSJohn Baldwin mbufq_enqueue(wrq, m); 1999a9feb2cdSNavdeep Parhar } 2000a9feb2cdSNavdeep Parhar 2001a9feb2cdSNavdeep Parhar MPASS(pva <= end_pva); 2002a9feb2cdSNavdeep Parhar 2003a9feb2cdSNavdeep Parhar return (0); 2004a9feb2cdSNavdeep Parhar } 2005a9feb2cdSNavdeep Parhar 200646bee804SJohn Baldwin int 200746bee804SJohn Baldwin t4_write_page_pods_for_sgl(struct adapter *sc, struct toepcb *toep, 200846bee804SJohn Baldwin struct ppod_reservation *prsv, struct ctl_sg_entry *sgl, int entries, 2009f949967cSJohn Baldwin int xferlen, struct mbufq *wrq) 201046bee804SJohn Baldwin { 201146bee804SJohn Baldwin struct ulp_mem_io *ulpmc; 201246bee804SJohn Baldwin struct ulptx_idata *ulpsc; 201346bee804SJohn Baldwin struct pagepod *ppod; 201446bee804SJohn Baldwin int i, j, k, n, chunk, len, ddp_pgsz; 201546bee804SJohn Baldwin u_int ppod_addr, offset, sg_offset = 0; 201646bee804SJohn Baldwin uint32_t cmd; 201746bee804SJohn Baldwin struct ppod_region *pr = prsv->prsv_pr; 2018de414339SJohn Baldwin uintptr_t pva; 2019de414339SJohn Baldwin vm_paddr_t pa; 202046bee804SJohn Baldwin struct mbuf *m; 202146bee804SJohn Baldwin 202246bee804SJohn Baldwin MPASS(sgl != NULL); 202346bee804SJohn Baldwin MPASS(entries > 0); 202446bee804SJohn Baldwin cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 202546bee804SJohn Baldwin if (is_t4(sc)) 202646bee804SJohn Baldwin cmd |= htobe32(F_ULP_MEMIO_ORDER); 202746bee804SJohn Baldwin else 202846bee804SJohn Baldwin cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 202946bee804SJohn Baldwin ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 203046bee804SJohn Baldwin offset = (vm_offset_t)sgl->addr & PAGE_MASK; 203146bee804SJohn Baldwin ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 20328d2b4b2eSJohn Baldwin pva = trunc_page((vm_offset_t)sgl->addr); 203346bee804SJohn Baldwin for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 203446bee804SJohn Baldwin 203546bee804SJohn Baldwin /* How many page pods are we writing in this cycle */ 203646bee804SJohn Baldwin n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 203746bee804SJohn Baldwin MPASS(n > 0); 203846bee804SJohn Baldwin chunk = PPOD_SZ(n); 203946bee804SJohn Baldwin len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 204046bee804SJohn Baldwin 204146bee804SJohn Baldwin m = alloc_raw_wr_mbuf(len); 2042f949967cSJohn Baldwin if (m == NULL) 204346bee804SJohn Baldwin return (ENOMEM); 204446bee804SJohn Baldwin ulpmc = mtod(m, struct ulp_mem_io *); 204546bee804SJohn Baldwin 204646bee804SJohn Baldwin INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); 204746bee804SJohn Baldwin ulpmc->cmd = cmd; 204846bee804SJohn Baldwin ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 204946bee804SJohn Baldwin ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 205046bee804SJohn Baldwin ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 205146bee804SJohn Baldwin 205246bee804SJohn Baldwin ulpsc = (struct ulptx_idata *)(ulpmc + 1); 205346bee804SJohn Baldwin ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 205446bee804SJohn Baldwin ulpsc->len = htobe32(chunk); 205546bee804SJohn Baldwin 205646bee804SJohn Baldwin ppod = (struct pagepod *)(ulpsc + 1); 205746bee804SJohn Baldwin for (j = 0; j < n; i++, j++, ppod++) { 205846bee804SJohn Baldwin ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 205946bee804SJohn Baldwin V_PPOD_TID(toep->tid) | 206046bee804SJohn Baldwin (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); 206146bee804SJohn Baldwin ppod->len_offset = htobe64(V_PPOD_LEN(xferlen) | 206246bee804SJohn Baldwin V_PPOD_OFST(offset)); 206346bee804SJohn Baldwin ppod->rsvd = 0; 206446bee804SJohn Baldwin 206546bee804SJohn Baldwin for (k = 0; k < nitems(ppod->addr); k++) { 206646bee804SJohn Baldwin if (entries != 0) { 206746bee804SJohn Baldwin pa = pmap_kextract(pva + sg_offset); 206846bee804SJohn Baldwin ppod->addr[k] = htobe64(pa); 206946bee804SJohn Baldwin } else 207046bee804SJohn Baldwin ppod->addr[k] = 0; 207146bee804SJohn Baldwin 207246bee804SJohn Baldwin #if 0 207346bee804SJohn Baldwin CTR5(KTR_CXGBE, 207446bee804SJohn Baldwin "%s: tid %d ppod[%d]->addr[%d] = %p", 207546bee804SJohn Baldwin __func__, toep->tid, i, k, 207644e7472dSJohn Baldwin be64toh(ppod->addr[k])); 207746bee804SJohn Baldwin #endif 207846bee804SJohn Baldwin 207946bee804SJohn Baldwin /* 208046bee804SJohn Baldwin * If this is the last entry in a pod, 208146bee804SJohn Baldwin * reuse the same entry for first address 208246bee804SJohn Baldwin * in the next pod. 208346bee804SJohn Baldwin */ 208446bee804SJohn Baldwin if (k + 1 == nitems(ppod->addr)) 208546bee804SJohn Baldwin break; 208646bee804SJohn Baldwin 208746bee804SJohn Baldwin /* 208846bee804SJohn Baldwin * Don't move to the next DDP page if the 208946bee804SJohn Baldwin * sgl is already finished. 209046bee804SJohn Baldwin */ 209146bee804SJohn Baldwin if (entries == 0) 209246bee804SJohn Baldwin continue; 209346bee804SJohn Baldwin 209446bee804SJohn Baldwin sg_offset += ddp_pgsz; 209546bee804SJohn Baldwin if (sg_offset == sgl->len) { 209646bee804SJohn Baldwin /* 209746bee804SJohn Baldwin * This sgl entry is done. Go 209846bee804SJohn Baldwin * to the next. 209946bee804SJohn Baldwin */ 210046bee804SJohn Baldwin entries--; 210146bee804SJohn Baldwin sgl++; 210246bee804SJohn Baldwin sg_offset = 0; 210346bee804SJohn Baldwin if (entries != 0) 210446bee804SJohn Baldwin pva = trunc_page( 210546bee804SJohn Baldwin (vm_offset_t)sgl->addr); 210646bee804SJohn Baldwin } 210746bee804SJohn Baldwin } 210846bee804SJohn Baldwin } 210946bee804SJohn Baldwin 2110f949967cSJohn Baldwin mbufq_enqueue(wrq, m); 211146bee804SJohn Baldwin } 211246bee804SJohn Baldwin 211346bee804SJohn Baldwin return (0); 211446bee804SJohn Baldwin } 211546bee804SJohn Baldwin 2116dc964385SJohn Baldwin /* 2117eeacb3b0SMark Johnston * Prepare a pageset for DDP. This sets up page pods. 2118dc964385SJohn Baldwin */ 2119e682d02eSNavdeep Parhar static int 2120dc964385SJohn Baldwin prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps) 2121e682d02eSNavdeep Parhar { 2122dc964385SJohn Baldwin struct tom_data *td = sc->tom_softc; 2123e682d02eSNavdeep Parhar 2124968267fdSNavdeep Parhar if (ps->prsv.prsv_nppods == 0 && 21252beaefe8SJohn Baldwin t4_alloc_page_pods_for_ps(&td->pr, ps) != 0) { 2126e682d02eSNavdeep Parhar return (0); 2127e682d02eSNavdeep Parhar } 2128dc964385SJohn Baldwin if (!(ps->flags & PS_PPODS_WRITTEN) && 2129968267fdSNavdeep Parhar t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) { 2130dc964385SJohn Baldwin return (0); 2131dc964385SJohn Baldwin } 2132dc964385SJohn Baldwin 2133dc964385SJohn Baldwin return (1); 2134dc964385SJohn Baldwin } 2135e682d02eSNavdeep Parhar 2136968267fdSNavdeep Parhar int 2137968267fdSNavdeep Parhar t4_init_ppod_region(struct ppod_region *pr, struct t4_range *r, u_int psz, 2138968267fdSNavdeep Parhar const char *name) 2139e682d02eSNavdeep Parhar { 2140515b36c5SNavdeep Parhar int i; 2141515b36c5SNavdeep Parhar 2142968267fdSNavdeep Parhar MPASS(pr != NULL); 2143968267fdSNavdeep Parhar MPASS(r->size > 0); 2144515b36c5SNavdeep Parhar 2145968267fdSNavdeep Parhar pr->pr_start = r->start; 2146968267fdSNavdeep Parhar pr->pr_len = r->size; 2147968267fdSNavdeep Parhar pr->pr_page_shift[0] = 12 + G_HPZ0(psz); 2148968267fdSNavdeep Parhar pr->pr_page_shift[1] = 12 + G_HPZ1(psz); 2149968267fdSNavdeep Parhar pr->pr_page_shift[2] = 12 + G_HPZ2(psz); 2150968267fdSNavdeep Parhar pr->pr_page_shift[3] = 12 + G_HPZ3(psz); 2151968267fdSNavdeep Parhar 2152968267fdSNavdeep Parhar /* The SGL -> page pod algorithm requires the sizes to be in order. */ 2153968267fdSNavdeep Parhar for (i = 1; i < nitems(pr->pr_page_shift); i++) { 2154968267fdSNavdeep Parhar if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1]) 2155968267fdSNavdeep Parhar return (ENXIO); 2156515b36c5SNavdeep Parhar } 2157e682d02eSNavdeep Parhar 2158968267fdSNavdeep Parhar pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG); 2159968267fdSNavdeep Parhar pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask; 2160968267fdSNavdeep Parhar if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0) 2161968267fdSNavdeep Parhar return (ENXIO); 2162968267fdSNavdeep Parhar pr->pr_alias_shift = fls(pr->pr_tag_mask); 2163968267fdSNavdeep Parhar pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1); 2164968267fdSNavdeep Parhar 2165968267fdSNavdeep Parhar pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0, 2166968267fdSNavdeep Parhar M_FIRSTFIT | M_NOWAIT); 2167968267fdSNavdeep Parhar if (pr->pr_arena == NULL) 2168968267fdSNavdeep Parhar return (ENOMEM); 2169968267fdSNavdeep Parhar 2170968267fdSNavdeep Parhar return (0); 2171e682d02eSNavdeep Parhar } 2172e682d02eSNavdeep Parhar 2173e682d02eSNavdeep Parhar void 2174968267fdSNavdeep Parhar t4_free_ppod_region(struct ppod_region *pr) 2175e682d02eSNavdeep Parhar { 2176e682d02eSNavdeep Parhar 2177968267fdSNavdeep Parhar MPASS(pr != NULL); 2178968267fdSNavdeep Parhar 2179968267fdSNavdeep Parhar if (pr->pr_arena) 2180968267fdSNavdeep Parhar vmem_destroy(pr->pr_arena); 2181968267fdSNavdeep Parhar bzero(pr, sizeof(*pr)); 2182e682d02eSNavdeep Parhar } 2183e682d02eSNavdeep Parhar 2184e682d02eSNavdeep Parhar static int 2185dc964385SJohn Baldwin pscmp(struct pageset *ps, struct vmspace *vm, vm_offset_t start, int npages, 2186dc964385SJohn Baldwin int pgoff, int len) 2187e682d02eSNavdeep Parhar { 2188e682d02eSNavdeep Parhar 218991a65e2fSJohn Baldwin if (ps->start != start || ps->npages != npages || 219091a65e2fSJohn Baldwin ps->offset != pgoff || ps->len != len) 2191dc964385SJohn Baldwin return (1); 2192dc964385SJohn Baldwin 2193dc964385SJohn Baldwin return (ps->vm != vm || ps->vm_timestamp != vm->vm_map.timestamp); 2194e682d02eSNavdeep Parhar } 2195e682d02eSNavdeep Parhar 2196dc964385SJohn Baldwin static int 2197dc964385SJohn Baldwin hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps) 2198688dba74SNavdeep Parhar { 2199dc964385SJohn Baldwin struct vmspace *vm; 2200dc964385SJohn Baldwin vm_map_t map; 2201dc964385SJohn Baldwin vm_offset_t start, end, pgoff; 2202dc964385SJohn Baldwin struct pageset *ps; 2203dc964385SJohn Baldwin int n; 2204688dba74SNavdeep Parhar 2205dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 2206688dba74SNavdeep Parhar 2207dc964385SJohn Baldwin /* 2208dc964385SJohn Baldwin * The AIO subsystem will cancel and drain all requests before 2209dc964385SJohn Baldwin * permitting a process to exit or exec, so p_vmspace should 2210dc964385SJohn Baldwin * be stable here. 2211dc964385SJohn Baldwin */ 2212dc964385SJohn Baldwin vm = job->userproc->p_vmspace; 2213dc964385SJohn Baldwin map = &vm->vm_map; 2214dc964385SJohn Baldwin start = (uintptr_t)job->uaiocb.aio_buf; 2215dc964385SJohn Baldwin pgoff = start & PAGE_MASK; 2216dc964385SJohn Baldwin end = round_page(start + job->uaiocb.aio_nbytes); 2217dc964385SJohn Baldwin start = trunc_page(start); 2218dc964385SJohn Baldwin 2219dc964385SJohn Baldwin if (end - start > MAX_DDP_BUFFER_SIZE) { 2220dc964385SJohn Baldwin /* 2221dc964385SJohn Baldwin * Truncate the request to a short read. 2222dc964385SJohn Baldwin * Alternatively, we could DDP in chunks to the larger 2223dc964385SJohn Baldwin * buffer, but that would be quite a bit more work. 2224dc964385SJohn Baldwin * 2225dc964385SJohn Baldwin * When truncating, round the request down to avoid 2226dc964385SJohn Baldwin * crossing a cache line on the final transaction. 2227dc964385SJohn Baldwin */ 2228dc964385SJohn Baldwin end = rounddown2(start + MAX_DDP_BUFFER_SIZE, CACHE_LINE_SIZE); 2229dc964385SJohn Baldwin #ifdef VERBOSE_TRACES 2230dc964385SJohn Baldwin CTR4(KTR_CXGBE, "%s: tid %d, truncating size from %lu to %lu", 2231dc964385SJohn Baldwin __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes, 2232dc964385SJohn Baldwin (unsigned long)(end - (start + pgoff))); 2233dc964385SJohn Baldwin job->uaiocb.aio_nbytes = end - (start + pgoff); 2234dc964385SJohn Baldwin #endif 2235dc964385SJohn Baldwin end = round_page(end); 2236688dba74SNavdeep Parhar } 2237688dba74SNavdeep Parhar 2238dc964385SJohn Baldwin n = atop(end - start); 2239688dba74SNavdeep Parhar 2240dc964385SJohn Baldwin /* 2241dc964385SJohn Baldwin * Try to reuse a cached pageset. 2242dc964385SJohn Baldwin */ 2243125d42feSJohn Baldwin TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) { 2244dc964385SJohn Baldwin if (pscmp(ps, vm, start, n, pgoff, 2245dc964385SJohn Baldwin job->uaiocb.aio_nbytes) == 0) { 2246125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); 2247125d42feSJohn Baldwin toep->ddp.cached_count--; 2248dc964385SJohn Baldwin *pps = ps; 2249dc964385SJohn Baldwin return (0); 2250dc964385SJohn Baldwin } 2251688dba74SNavdeep Parhar } 2252688dba74SNavdeep Parhar 2253e682d02eSNavdeep Parhar /* 2254dc964385SJohn Baldwin * If there are too many cached pagesets to create a new one, 2255dc964385SJohn Baldwin * free a pageset before creating a new one. 2256e682d02eSNavdeep Parhar */ 2257125d42feSJohn Baldwin KASSERT(toep->ddp.active_count + toep->ddp.cached_count <= 2258125d42feSJohn Baldwin nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__)); 2259125d42feSJohn Baldwin if (toep->ddp.active_count + toep->ddp.cached_count == 2260125d42feSJohn Baldwin nitems(toep->ddp.db)) { 2261125d42feSJohn Baldwin KASSERT(toep->ddp.cached_count > 0, 2262dc964385SJohn Baldwin ("no cached pageset to free")); 2263125d42feSJohn Baldwin ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq); 2264125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); 2265125d42feSJohn Baldwin toep->ddp.cached_count--; 2266dc964385SJohn Baldwin free_pageset(toep->td, ps); 2267dc964385SJohn Baldwin } 2268dc964385SJohn Baldwin DDP_UNLOCK(toep); 2269e682d02eSNavdeep Parhar 2270dc964385SJohn Baldwin /* Create a new pageset. */ 2271dc964385SJohn Baldwin ps = malloc(sizeof(*ps) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK | 2272dc964385SJohn Baldwin M_ZERO); 2273dc964385SJohn Baldwin ps->pages = (vm_page_t *)(ps + 1); 2274dc964385SJohn Baldwin ps->vm_timestamp = map->timestamp; 2275dc964385SJohn Baldwin ps->npages = vm_fault_quick_hold_pages(map, start, end - start, 2276dc964385SJohn Baldwin VM_PROT_WRITE, ps->pages, n); 2277e682d02eSNavdeep Parhar 2278dc964385SJohn Baldwin DDP_LOCK(toep); 2279dc964385SJohn Baldwin if (ps->npages < 0) { 2280dc964385SJohn Baldwin free(ps, M_CXGBE); 2281dc964385SJohn Baldwin return (EFAULT); 2282e682d02eSNavdeep Parhar } 2283e682d02eSNavdeep Parhar 2284dc964385SJohn Baldwin KASSERT(ps->npages == n, ("hold_aio: page count mismatch: %d vs %d", 2285dc964385SJohn Baldwin ps->npages, n)); 2286dc964385SJohn Baldwin 2287dc964385SJohn Baldwin ps->offset = pgoff; 2288dc964385SJohn Baldwin ps->len = job->uaiocb.aio_nbytes; 2289f7db0c95SMark Johnston refcount_acquire(&vm->vm_refcnt); 2290dc964385SJohn Baldwin ps->vm = vm; 229191a65e2fSJohn Baldwin ps->start = start; 2292dc964385SJohn Baldwin 2293dc964385SJohn Baldwin CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d", 2294dc964385SJohn Baldwin __func__, toep->tid, ps, job, ps->npages); 2295dc964385SJohn Baldwin *pps = ps; 2296e682d02eSNavdeep Parhar return (0); 2297e682d02eSNavdeep Parhar } 2298e682d02eSNavdeep Parhar 2299dc964385SJohn Baldwin static void 2300dc964385SJohn Baldwin ddp_complete_all(struct toepcb *toep, int error) 2301e682d02eSNavdeep Parhar { 2302dc964385SJohn Baldwin struct kaiocb *job; 2303e682d02eSNavdeep Parhar 2304dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 2305*eba13bbcSJohn Baldwin KASSERT((toep->ddp.flags & DDP_AIO) != 0, ("%s: DDP_RCVBUF", __func__)); 2306125d42feSJohn Baldwin while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) { 2307125d42feSJohn Baldwin job = TAILQ_FIRST(&toep->ddp.aiojobq); 2308125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 2309125d42feSJohn Baldwin toep->ddp.waiting_count--; 2310dc964385SJohn Baldwin if (aio_clear_cancel_function(job)) 2311dc964385SJohn Baldwin ddp_complete_one(job, error); 2312dc964385SJohn Baldwin } 2313dc964385SJohn Baldwin } 2314dc964385SJohn Baldwin 2315dc964385SJohn Baldwin static void 2316dc964385SJohn Baldwin aio_ddp_cancel_one(struct kaiocb *job) 2317dc964385SJohn Baldwin { 2318dc964385SJohn Baldwin long copied; 2319dc964385SJohn Baldwin 2320dc964385SJohn Baldwin /* 2321dc964385SJohn Baldwin * If this job had copied data out of the socket buffer before 2322dc964385SJohn Baldwin * it was cancelled, report it as a short read rather than an 2323dc964385SJohn Baldwin * error. 2324dc964385SJohn Baldwin */ 2325fe0bdd1dSJohn Baldwin copied = job->aio_received; 2326dc964385SJohn Baldwin if (copied != 0) 2327dc964385SJohn Baldwin aio_complete(job, copied, 0); 2328e682d02eSNavdeep Parhar else 2329dc964385SJohn Baldwin aio_cancel(job); 2330e682d02eSNavdeep Parhar } 2331e682d02eSNavdeep Parhar 2332dc964385SJohn Baldwin /* 2333dc964385SJohn Baldwin * Called when the main loop wants to requeue a job to retry it later. 2334dc964385SJohn Baldwin * Deals with the race of the job being cancelled while it was being 2335dc964385SJohn Baldwin * examined. 2336dc964385SJohn Baldwin */ 2337dc964385SJohn Baldwin static void 2338dc964385SJohn Baldwin aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job) 2339dc964385SJohn Baldwin { 2340dc964385SJohn Baldwin 2341dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 2342125d42feSJohn Baldwin if (!(toep->ddp.flags & DDP_DEAD) && 2343dc964385SJohn Baldwin aio_set_cancel_function(job, t4_aio_cancel_queued)) { 2344125d42feSJohn Baldwin TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list); 2345125d42feSJohn Baldwin toep->ddp.waiting_count++; 2346dc964385SJohn Baldwin } else 2347dc964385SJohn Baldwin aio_ddp_cancel_one(job); 2348e682d02eSNavdeep Parhar } 2349e682d02eSNavdeep Parhar 2350dc964385SJohn Baldwin static void 2351dc964385SJohn Baldwin aio_ddp_requeue(struct toepcb *toep) 2352dc964385SJohn Baldwin { 2353dc964385SJohn Baldwin struct adapter *sc = td_adapter(toep->td); 2354dc964385SJohn Baldwin struct socket *so; 2355dc964385SJohn Baldwin struct sockbuf *sb; 2356dc964385SJohn Baldwin struct inpcb *inp; 2357dc964385SJohn Baldwin struct kaiocb *job; 2358dc964385SJohn Baldwin struct ddp_buffer *db; 2359dc964385SJohn Baldwin size_t copied, offset, resid; 2360dc964385SJohn Baldwin struct pageset *ps; 2361dc964385SJohn Baldwin struct mbuf *m; 2362dc964385SJohn Baldwin uint64_t ddp_flags, ddp_flags_mask; 2363dc964385SJohn Baldwin struct wrqe *wr; 2364dc964385SJohn Baldwin int buf_flag, db_idx, error; 2365dc964385SJohn Baldwin 2366dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 2367dc964385SJohn Baldwin 2368e682d02eSNavdeep Parhar restart: 2369125d42feSJohn Baldwin if (toep->ddp.flags & DDP_DEAD) { 2370125d42feSJohn Baldwin MPASS(toep->ddp.waiting_count == 0); 2371125d42feSJohn Baldwin MPASS(toep->ddp.active_count == 0); 2372dc964385SJohn Baldwin return; 2373e682d02eSNavdeep Parhar } 2374e682d02eSNavdeep Parhar 2375125d42feSJohn Baldwin if (toep->ddp.waiting_count == 0 || 2376125d42feSJohn Baldwin toep->ddp.active_count == nitems(toep->ddp.db)) { 2377dc964385SJohn Baldwin return; 2378dc964385SJohn Baldwin } 2379dc964385SJohn Baldwin 2380125d42feSJohn Baldwin job = TAILQ_FIRST(&toep->ddp.aiojobq); 2381dc964385SJohn Baldwin so = job->fd_file->f_data; 2382dc964385SJohn Baldwin sb = &so->so_rcv; 2383dc964385SJohn Baldwin SOCKBUF_LOCK(sb); 2384dc964385SJohn Baldwin 2385dc964385SJohn Baldwin /* We will never get anything unless we are or were connected. */ 2386dc964385SJohn Baldwin if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { 2387dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2388dc964385SJohn Baldwin ddp_complete_all(toep, ENOTCONN); 2389dc964385SJohn Baldwin return; 2390dc964385SJohn Baldwin } 2391dc964385SJohn Baldwin 2392125d42feSJohn Baldwin KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0, 2393dc964385SJohn Baldwin ("%s: pending sockbuf data and DDP is active", __func__)); 2394dc964385SJohn Baldwin 2395e682d02eSNavdeep Parhar /* Abort if socket has reported problems. */ 2396dc964385SJohn Baldwin /* XXX: Wait for any queued DDP's to finish and/or flush them? */ 2397dc964385SJohn Baldwin if (so->so_error && sbavail(sb) == 0) { 2398125d42feSJohn Baldwin toep->ddp.waiting_count--; 2399125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 2400dc964385SJohn Baldwin if (!aio_clear_cancel_function(job)) { 2401dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2402dc964385SJohn Baldwin goto restart; 2403dc964385SJohn Baldwin } 2404dc964385SJohn Baldwin 2405dc964385SJohn Baldwin /* 2406dc964385SJohn Baldwin * If this job has previously copied some data, report 2407dc964385SJohn Baldwin * a short read and leave the error to be reported by 2408dc964385SJohn Baldwin * a future request. 2409dc964385SJohn Baldwin */ 2410fe0bdd1dSJohn Baldwin copied = job->aio_received; 2411dc964385SJohn Baldwin if (copied != 0) { 2412dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2413dc964385SJohn Baldwin aio_complete(job, copied, 0); 2414dc964385SJohn Baldwin goto restart; 2415dc964385SJohn Baldwin } 2416e682d02eSNavdeep Parhar error = so->so_error; 2417e682d02eSNavdeep Parhar so->so_error = 0; 2418dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2419dc964385SJohn Baldwin aio_complete(job, -1, error); 2420dc964385SJohn Baldwin goto restart; 2421e682d02eSNavdeep Parhar } 2422e682d02eSNavdeep Parhar 2423e682d02eSNavdeep Parhar /* 2424dc964385SJohn Baldwin * Door is closed. If there is pending data in the socket buffer, 2425dc964385SJohn Baldwin * deliver it. If there are pending DDP requests, wait for those 2426dc964385SJohn Baldwin * to complete. Once they have completed, return EOF reads. 2427e682d02eSNavdeep Parhar */ 2428dc964385SJohn Baldwin if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) { 2429dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2430125d42feSJohn Baldwin if (toep->ddp.active_count != 0) 2431dc964385SJohn Baldwin return; 2432dc964385SJohn Baldwin ddp_complete_all(toep, 0); 2433dc964385SJohn Baldwin return; 2434e682d02eSNavdeep Parhar } 2435dc964385SJohn Baldwin 2436dc964385SJohn Baldwin /* 2437dc964385SJohn Baldwin * If DDP is not enabled and there is no pending socket buffer 2438dc964385SJohn Baldwin * data, try to enable DDP. 2439dc964385SJohn Baldwin */ 2440125d42feSJohn Baldwin if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) { 2441dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2442dc964385SJohn Baldwin 2443dc964385SJohn Baldwin /* 2444dc964385SJohn Baldwin * Wait for the card to ACK that DDP is enabled before 2445dc964385SJohn Baldwin * queueing any buffers. Currently this waits for an 2446dc964385SJohn Baldwin * indicate to arrive. This could use a TCB_SET_FIELD_RPL 2447dc964385SJohn Baldwin * message to know that DDP was enabled instead of waiting 2448dc964385SJohn Baldwin * for the indicate which would avoid copying the indicate 2449dc964385SJohn Baldwin * if no data is pending. 2450dc964385SJohn Baldwin * 2451dc964385SJohn Baldwin * XXX: Might want to limit the indicate size to the size 2452dc964385SJohn Baldwin * of the first queued request. 2453dc964385SJohn Baldwin */ 2454125d42feSJohn Baldwin if ((toep->ddp.flags & DDP_SC_REQ) == 0) 2455dc964385SJohn Baldwin enable_ddp(sc, toep); 2456dc964385SJohn Baldwin return; 2457e682d02eSNavdeep Parhar } 2458dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2459dc964385SJohn Baldwin 2460dc964385SJohn Baldwin /* 2461dc964385SJohn Baldwin * If another thread is queueing a buffer for DDP, let it 2462dc964385SJohn Baldwin * drain any work and return. 2463dc964385SJohn Baldwin */ 2464125d42feSJohn Baldwin if (toep->ddp.queueing != NULL) 2465dc964385SJohn Baldwin return; 2466dc964385SJohn Baldwin 2467dc964385SJohn Baldwin /* Take the next job to prep it for DDP. */ 2468125d42feSJohn Baldwin toep->ddp.waiting_count--; 2469125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 2470dc964385SJohn Baldwin if (!aio_clear_cancel_function(job)) 2471e682d02eSNavdeep Parhar goto restart; 2472125d42feSJohn Baldwin toep->ddp.queueing = job; 2473e682d02eSNavdeep Parhar 2474dc964385SJohn Baldwin /* NB: This drops DDP_LOCK while it holds the backing VM pages. */ 2475dc964385SJohn Baldwin error = hold_aio(toep, job, &ps); 2476dc964385SJohn Baldwin if (error != 0) { 2477dc964385SJohn Baldwin ddp_complete_one(job, error); 2478125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2479e682d02eSNavdeep Parhar goto restart; 2480dc964385SJohn Baldwin } 2481e682d02eSNavdeep Parhar 2482dc964385SJohn Baldwin SOCKBUF_LOCK(sb); 2483dc964385SJohn Baldwin if (so->so_error && sbavail(sb) == 0) { 2484fe0bdd1dSJohn Baldwin copied = job->aio_received; 2485dc964385SJohn Baldwin if (copied != 0) { 2486dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2487dc964385SJohn Baldwin recycle_pageset(toep, ps); 2488dc964385SJohn Baldwin aio_complete(job, copied, 0); 2489125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2490dc964385SJohn Baldwin goto restart; 2491dc964385SJohn Baldwin } 2492e682d02eSNavdeep Parhar 2493dc964385SJohn Baldwin error = so->so_error; 2494dc964385SJohn Baldwin so->so_error = 0; 2495dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2496dc964385SJohn Baldwin recycle_pageset(toep, ps); 2497dc964385SJohn Baldwin aio_complete(job, -1, error); 2498125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2499dc964385SJohn Baldwin goto restart; 2500e682d02eSNavdeep Parhar } 2501e682d02eSNavdeep Parhar 2502dc964385SJohn Baldwin if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) { 2503dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2504dc964385SJohn Baldwin recycle_pageset(toep, ps); 2505125d42feSJohn Baldwin if (toep->ddp.active_count != 0) { 2506dc964385SJohn Baldwin /* 2507dc964385SJohn Baldwin * The door is closed, but there are still pending 2508dc964385SJohn Baldwin * DDP buffers. Requeue. These jobs will all be 2509dc964385SJohn Baldwin * completed once those buffers drain. 2510dc964385SJohn Baldwin */ 2511dc964385SJohn Baldwin aio_ddp_requeue_one(toep, job); 2512125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2513dc964385SJohn Baldwin return; 2514e682d02eSNavdeep Parhar } 2515dc964385SJohn Baldwin ddp_complete_one(job, 0); 2516dc964385SJohn Baldwin ddp_complete_all(toep, 0); 2517125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2518dc964385SJohn Baldwin return; 2519e682d02eSNavdeep Parhar } 2520dc964385SJohn Baldwin 2521dc964385SJohn Baldwin sbcopy: 2522dc964385SJohn Baldwin /* 2523dc964385SJohn Baldwin * If the toep is dead, there shouldn't be any data in the socket 2524dc964385SJohn Baldwin * buffer, so the above case should have handled this. 2525dc964385SJohn Baldwin */ 2526125d42feSJohn Baldwin MPASS(!(toep->ddp.flags & DDP_DEAD)); 2527dc964385SJohn Baldwin 2528dc964385SJohn Baldwin /* 2529dc964385SJohn Baldwin * If there is pending data in the socket buffer (either 2530dc964385SJohn Baldwin * from before the requests were queued or a DDP indicate), 2531dc964385SJohn Baldwin * copy those mbufs out directly. 2532dc964385SJohn Baldwin */ 2533dc964385SJohn Baldwin copied = 0; 2534fe0bdd1dSJohn Baldwin offset = ps->offset + job->aio_received; 2535fe0bdd1dSJohn Baldwin MPASS(job->aio_received <= job->uaiocb.aio_nbytes); 2536fe0bdd1dSJohn Baldwin resid = job->uaiocb.aio_nbytes - job->aio_received; 2537dc964385SJohn Baldwin m = sb->sb_mb; 2538125d42feSJohn Baldwin KASSERT(m == NULL || toep->ddp.active_count == 0, 2539dc964385SJohn Baldwin ("%s: sockbuf data with active DDP", __func__)); 2540dc964385SJohn Baldwin while (m != NULL && resid > 0) { 2541dc964385SJohn Baldwin struct iovec iov[1]; 2542dc964385SJohn Baldwin struct uio uio; 254339d5cbdcSNavdeep Parhar #ifdef INVARIANTS 2544dc964385SJohn Baldwin int error; 254539d5cbdcSNavdeep Parhar #endif 2546dc964385SJohn Baldwin 2547dc964385SJohn Baldwin iov[0].iov_base = mtod(m, void *); 2548dc964385SJohn Baldwin iov[0].iov_len = m->m_len; 2549dc964385SJohn Baldwin if (iov[0].iov_len > resid) 2550dc964385SJohn Baldwin iov[0].iov_len = resid; 2551dc964385SJohn Baldwin uio.uio_iov = iov; 2552dc964385SJohn Baldwin uio.uio_iovcnt = 1; 2553dc964385SJohn Baldwin uio.uio_offset = 0; 2554dc964385SJohn Baldwin uio.uio_resid = iov[0].iov_len; 2555dc964385SJohn Baldwin uio.uio_segflg = UIO_SYSSPACE; 2556dc964385SJohn Baldwin uio.uio_rw = UIO_WRITE; 255739d5cbdcSNavdeep Parhar #ifdef INVARIANTS 2558dc964385SJohn Baldwin error = uiomove_fromphys(ps->pages, offset + copied, 2559dc964385SJohn Baldwin uio.uio_resid, &uio); 256039d5cbdcSNavdeep Parhar #else 256139d5cbdcSNavdeep Parhar uiomove_fromphys(ps->pages, offset + copied, uio.uio_resid, &uio); 256239d5cbdcSNavdeep Parhar #endif 2563dc964385SJohn Baldwin MPASS(error == 0 && uio.uio_resid == 0); 2564dc964385SJohn Baldwin copied += uio.uio_offset; 2565dc964385SJohn Baldwin resid -= uio.uio_offset; 2566dc964385SJohn Baldwin m = m->m_next; 2567dc964385SJohn Baldwin } 2568dc964385SJohn Baldwin if (copied != 0) { 2569dc964385SJohn Baldwin sbdrop_locked(sb, copied); 2570fe0bdd1dSJohn Baldwin job->aio_received += copied; 2571b1012d80SJohn Baldwin job->msgrcv = 1; 2572fe0bdd1dSJohn Baldwin copied = job->aio_received; 2573dc964385SJohn Baldwin inp = sotoinpcb(so); 2574dc964385SJohn Baldwin if (!INP_TRY_WLOCK(inp)) { 2575dc964385SJohn Baldwin /* 2576dc964385SJohn Baldwin * The reference on the socket file descriptor in 2577dc964385SJohn Baldwin * the AIO job should keep 'sb' and 'inp' stable. 2578dc964385SJohn Baldwin * Our caller has a reference on the 'toep' that 2579dc964385SJohn Baldwin * keeps it stable. 2580dc964385SJohn Baldwin */ 2581dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2582dc964385SJohn Baldwin DDP_UNLOCK(toep); 2583dc964385SJohn Baldwin INP_WLOCK(inp); 2584dc964385SJohn Baldwin DDP_LOCK(toep); 2585dc964385SJohn Baldwin SOCKBUF_LOCK(sb); 2586dc964385SJohn Baldwin 2587dc964385SJohn Baldwin /* 2588dc964385SJohn Baldwin * If the socket has been closed, we should detect 2589dc964385SJohn Baldwin * that and complete this request if needed on 2590dc964385SJohn Baldwin * the next trip around the loop. 2591dc964385SJohn Baldwin */ 2592dc964385SJohn Baldwin } 2593dc964385SJohn Baldwin t4_rcvd_locked(&toep->td->tod, intotcpcb(inp)); 2594dc964385SJohn Baldwin INP_WUNLOCK(inp); 2595125d42feSJohn Baldwin if (resid == 0 || toep->ddp.flags & DDP_DEAD) { 2596dc964385SJohn Baldwin /* 2597dc964385SJohn Baldwin * We filled the entire buffer with socket 2598dc964385SJohn Baldwin * data, DDP is not being used, or the socket 2599dc964385SJohn Baldwin * is being shut down, so complete the 2600dc964385SJohn Baldwin * request. 2601dc964385SJohn Baldwin */ 2602dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2603dc964385SJohn Baldwin recycle_pageset(toep, ps); 2604dc964385SJohn Baldwin aio_complete(job, copied, 0); 2605125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2606dc964385SJohn Baldwin goto restart; 2607dc964385SJohn Baldwin } 2608dc964385SJohn Baldwin 2609dc964385SJohn Baldwin /* 2610dc964385SJohn Baldwin * If DDP is not enabled, requeue this request and restart. 2611dc964385SJohn Baldwin * This will either enable DDP or wait for more data to 2612dc964385SJohn Baldwin * arrive on the socket buffer. 2613dc964385SJohn Baldwin */ 2614125d42feSJohn Baldwin if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) { 2615dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2616dc964385SJohn Baldwin recycle_pageset(toep, ps); 2617dc964385SJohn Baldwin aio_ddp_requeue_one(toep, job); 2618125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2619dc964385SJohn Baldwin goto restart; 2620dc964385SJohn Baldwin } 2621dc964385SJohn Baldwin 2622dc964385SJohn Baldwin /* 2623dc964385SJohn Baldwin * An indicate might have arrived and been added to 2624dc964385SJohn Baldwin * the socket buffer while it was unlocked after the 2625dc964385SJohn Baldwin * copy to lock the INP. If so, restart the copy. 2626dc964385SJohn Baldwin */ 2627dc964385SJohn Baldwin if (sbavail(sb) != 0) 2628dc964385SJohn Baldwin goto sbcopy; 2629dc964385SJohn Baldwin } 2630dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2631dc964385SJohn Baldwin 2632dc964385SJohn Baldwin if (prep_pageset(sc, toep, ps) == 0) { 2633dc964385SJohn Baldwin recycle_pageset(toep, ps); 2634dc964385SJohn Baldwin aio_ddp_requeue_one(toep, job); 2635125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2636dc964385SJohn Baldwin 2637dc964385SJohn Baldwin /* 2638dc964385SJohn Baldwin * XXX: Need to retry this later. Mostly need a trigger 2639dc964385SJohn Baldwin * when page pods are freed up. 2640dc964385SJohn Baldwin */ 2641dc964385SJohn Baldwin printf("%s: prep_pageset failed\n", __func__); 2642dc964385SJohn Baldwin return; 2643dc964385SJohn Baldwin } 2644dc964385SJohn Baldwin 2645dc964385SJohn Baldwin /* Determine which DDP buffer to use. */ 2646125d42feSJohn Baldwin if (toep->ddp.db[0].job == NULL) { 2647dc964385SJohn Baldwin db_idx = 0; 2648e682d02eSNavdeep Parhar } else { 2649125d42feSJohn Baldwin MPASS(toep->ddp.db[1].job == NULL); 2650dc964385SJohn Baldwin db_idx = 1; 2651e682d02eSNavdeep Parhar } 2652e682d02eSNavdeep Parhar 2653dc964385SJohn Baldwin ddp_flags = 0; 2654dc964385SJohn Baldwin ddp_flags_mask = 0; 2655dc964385SJohn Baldwin if (db_idx == 0) { 2656dc964385SJohn Baldwin ddp_flags |= V_TF_DDP_BUF0_VALID(1); 2657dc964385SJohn Baldwin if (so->so_state & SS_NBIO) 2658dc964385SJohn Baldwin ddp_flags |= V_TF_DDP_BUF0_FLUSH(1); 2659dc964385SJohn Baldwin ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) | 2660dc964385SJohn Baldwin V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) | 2661dc964385SJohn Baldwin V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1); 2662dc964385SJohn Baldwin buf_flag = DDP_BUF0_ACTIVE; 2663dc964385SJohn Baldwin } else { 2664dc964385SJohn Baldwin ddp_flags |= V_TF_DDP_BUF1_VALID(1); 2665dc964385SJohn Baldwin if (so->so_state & SS_NBIO) 2666dc964385SJohn Baldwin ddp_flags |= V_TF_DDP_BUF1_FLUSH(1); 2667dc964385SJohn Baldwin ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) | 2668dc964385SJohn Baldwin V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) | 2669dc964385SJohn Baldwin V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1); 2670dc964385SJohn Baldwin buf_flag = DDP_BUF1_ACTIVE; 2671e682d02eSNavdeep Parhar } 2672125d42feSJohn Baldwin MPASS((toep->ddp.flags & buf_flag) == 0); 2673125d42feSJohn Baldwin if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) { 2674dc964385SJohn Baldwin MPASS(db_idx == 0); 2675125d42feSJohn Baldwin MPASS(toep->ddp.active_id == -1); 2676125d42feSJohn Baldwin MPASS(toep->ddp.active_count == 0); 2677dc964385SJohn Baldwin ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1); 2678e682d02eSNavdeep Parhar } 2679e682d02eSNavdeep Parhar 2680e682d02eSNavdeep Parhar /* 2681dc964385SJohn Baldwin * The TID for this connection should still be valid. If DDP_DEAD 2682dc964385SJohn Baldwin * is set, SBS_CANTRCVMORE should be set, so we shouldn't be 2683dc964385SJohn Baldwin * this far anyway. Even if the socket is closing on the other 2684dc964385SJohn Baldwin * end, the AIO job holds a reference on this end of the socket 2685dc964385SJohn Baldwin * which will keep it open and keep the TCP PCB attached until 2686dc964385SJohn Baldwin * after the job is completed. 2687e682d02eSNavdeep Parhar */ 2688*eba13bbcSJohn Baldwin wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &ps->prsv, ps->len, 2689*eba13bbcSJohn Baldwin job->aio_received, ddp_flags, ddp_flags_mask); 2690dc964385SJohn Baldwin if (wr == NULL) { 2691dc964385SJohn Baldwin recycle_pageset(toep, ps); 2692dc964385SJohn Baldwin aio_ddp_requeue_one(toep, job); 2693125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2694dc964385SJohn Baldwin 2695dc964385SJohn Baldwin /* 2696dc964385SJohn Baldwin * XXX: Need a way to kick a retry here. 2697dc964385SJohn Baldwin * 2698dc964385SJohn Baldwin * XXX: We know the fixed size needed and could 2699dc964385SJohn Baldwin * preallocate this using a blocking request at the 2700dc964385SJohn Baldwin * start of the task to avoid having to handle this 2701dc964385SJohn Baldwin * edge case. 2702dc964385SJohn Baldwin */ 2703dc964385SJohn Baldwin printf("%s: mk_update_tcb_for_ddp failed\n", __func__); 2704dc964385SJohn Baldwin return; 2705dc964385SJohn Baldwin } 2706dc964385SJohn Baldwin 2707dc964385SJohn Baldwin if (!aio_set_cancel_function(job, t4_aio_cancel_active)) { 2708dc964385SJohn Baldwin free_wrqe(wr); 2709dc964385SJohn Baldwin recycle_pageset(toep, ps); 2710dc964385SJohn Baldwin aio_ddp_cancel_one(job); 2711125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2712e682d02eSNavdeep Parhar goto restart; 2713e682d02eSNavdeep Parhar } 2714e682d02eSNavdeep Parhar 2715dc964385SJohn Baldwin #ifdef VERBOSE_TRACES 27168674e626SNavdeep Parhar CTR6(KTR_CXGBE, 27178674e626SNavdeep Parhar "%s: tid %u, scheduling %p for DDP[%d] (flags %#lx/%#lx)", __func__, 27188674e626SNavdeep Parhar toep->tid, job, db_idx, ddp_flags, ddp_flags_mask); 2719dc964385SJohn Baldwin #endif 2720dc964385SJohn Baldwin /* Give the chip the go-ahead. */ 2721dc964385SJohn Baldwin t4_wrq_tx(sc, wr); 2722125d42feSJohn Baldwin db = &toep->ddp.db[db_idx]; 2723dc964385SJohn Baldwin db->cancel_pending = 0; 2724dc964385SJohn Baldwin db->job = job; 2725dc964385SJohn Baldwin db->ps = ps; 2726125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2727125d42feSJohn Baldwin toep->ddp.flags |= buf_flag; 2728125d42feSJohn Baldwin toep->ddp.active_count++; 2729125d42feSJohn Baldwin if (toep->ddp.active_count == 1) { 2730125d42feSJohn Baldwin MPASS(toep->ddp.active_id == -1); 2731125d42feSJohn Baldwin toep->ddp.active_id = db_idx; 2732dc964385SJohn Baldwin CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__, 2733125d42feSJohn Baldwin toep->ddp.active_id); 2734dc964385SJohn Baldwin } 2735dc964385SJohn Baldwin goto restart; 2736dc964385SJohn Baldwin } 2737dc964385SJohn Baldwin 2738dc964385SJohn Baldwin void 2739dc964385SJohn Baldwin ddp_queue_toep(struct toepcb *toep) 2740dc964385SJohn Baldwin { 2741dc964385SJohn Baldwin 2742dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 2743125d42feSJohn Baldwin if (toep->ddp.flags & DDP_TASK_ACTIVE) 2744dc964385SJohn Baldwin return; 2745125d42feSJohn Baldwin toep->ddp.flags |= DDP_TASK_ACTIVE; 2746dc964385SJohn Baldwin hold_toepcb(toep); 2747125d42feSJohn Baldwin soaio_enqueue(&toep->ddp.requeue_task); 2748dc964385SJohn Baldwin } 2749dc964385SJohn Baldwin 2750dc964385SJohn Baldwin static void 2751dc964385SJohn Baldwin aio_ddp_requeue_task(void *context, int pending) 2752dc964385SJohn Baldwin { 2753dc964385SJohn Baldwin struct toepcb *toep = context; 2754dc964385SJohn Baldwin 2755dc964385SJohn Baldwin DDP_LOCK(toep); 2756dc964385SJohn Baldwin aio_ddp_requeue(toep); 2757125d42feSJohn Baldwin toep->ddp.flags &= ~DDP_TASK_ACTIVE; 2758dc964385SJohn Baldwin DDP_UNLOCK(toep); 2759dc964385SJohn Baldwin 2760dc964385SJohn Baldwin free_toepcb(toep); 2761dc964385SJohn Baldwin } 2762dc964385SJohn Baldwin 2763dc964385SJohn Baldwin static void 2764dc964385SJohn Baldwin t4_aio_cancel_active(struct kaiocb *job) 2765dc964385SJohn Baldwin { 2766dc964385SJohn Baldwin struct socket *so = job->fd_file->f_data; 2767e1401f75SGleb Smirnoff struct tcpcb *tp = sototcpcb(so); 2768dc964385SJohn Baldwin struct toepcb *toep = tp->t_toe; 2769dc964385SJohn Baldwin struct adapter *sc = td_adapter(toep->td); 2770dc964385SJohn Baldwin uint64_t valid_flag; 2771dc964385SJohn Baldwin int i; 2772dc964385SJohn Baldwin 2773dc964385SJohn Baldwin DDP_LOCK(toep); 2774dc964385SJohn Baldwin if (aio_cancel_cleared(job)) { 2775dc964385SJohn Baldwin DDP_UNLOCK(toep); 2776dc964385SJohn Baldwin aio_ddp_cancel_one(job); 2777dc964385SJohn Baldwin return; 2778dc964385SJohn Baldwin } 2779dc964385SJohn Baldwin 2780125d42feSJohn Baldwin for (i = 0; i < nitems(toep->ddp.db); i++) { 2781125d42feSJohn Baldwin if (toep->ddp.db[i].job == job) { 2782dc964385SJohn Baldwin /* Should only ever get one cancel request for a job. */ 2783125d42feSJohn Baldwin MPASS(toep->ddp.db[i].cancel_pending == 0); 2784dc964385SJohn Baldwin 2785dc964385SJohn Baldwin /* 2786dc964385SJohn Baldwin * Invalidate this buffer. It will be 2787dc964385SJohn Baldwin * cancelled or partially completed once the 2788dc964385SJohn Baldwin * card ACKs the invalidate. 2789dc964385SJohn Baldwin */ 2790dc964385SJohn Baldwin valid_flag = i == 0 ? V_TF_DDP_BUF0_VALID(1) : 2791dc964385SJohn Baldwin V_TF_DDP_BUF1_VALID(1); 2792edf95febSJohn Baldwin t4_set_tcb_field(sc, toep->ctrlq, toep, 2793671bf2b8SNavdeep Parhar W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1, 2794017902fcSJohn Baldwin CPL_COOKIE_DDP0 + i); 2795125d42feSJohn Baldwin toep->ddp.db[i].cancel_pending = 1; 2796dc964385SJohn Baldwin CTR2(KTR_CXGBE, "%s: request %p marked pending", 2797dc964385SJohn Baldwin __func__, job); 2798dc964385SJohn Baldwin break; 2799dc964385SJohn Baldwin } 2800dc964385SJohn Baldwin } 2801dc964385SJohn Baldwin DDP_UNLOCK(toep); 2802dc964385SJohn Baldwin } 2803dc964385SJohn Baldwin 2804dc964385SJohn Baldwin static void 2805dc964385SJohn Baldwin t4_aio_cancel_queued(struct kaiocb *job) 2806dc964385SJohn Baldwin { 2807dc964385SJohn Baldwin struct socket *so = job->fd_file->f_data; 2808e1401f75SGleb Smirnoff struct tcpcb *tp = sototcpcb(so); 2809dc964385SJohn Baldwin struct toepcb *toep = tp->t_toe; 2810dc964385SJohn Baldwin 2811dc964385SJohn Baldwin DDP_LOCK(toep); 2812dc964385SJohn Baldwin if (!aio_cancel_cleared(job)) { 2813125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 2814125d42feSJohn Baldwin toep->ddp.waiting_count--; 2815125d42feSJohn Baldwin if (toep->ddp.waiting_count == 0) 2816dc964385SJohn Baldwin ddp_queue_toep(toep); 2817dc964385SJohn Baldwin } 2818dc964385SJohn Baldwin CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job); 2819dc964385SJohn Baldwin DDP_UNLOCK(toep); 2820dc964385SJohn Baldwin 2821dc964385SJohn Baldwin aio_ddp_cancel_one(job); 2822dc964385SJohn Baldwin } 2823dc964385SJohn Baldwin 2824dc964385SJohn Baldwin int 2825dc964385SJohn Baldwin t4_aio_queue_ddp(struct socket *so, struct kaiocb *job) 2826dc964385SJohn Baldwin { 2827a5a965d7SJohn Baldwin struct inpcb *inp = sotoinpcb(so); 2828a5a965d7SJohn Baldwin struct tcpcb *tp = intotcpcb(inp); 2829dc964385SJohn Baldwin struct toepcb *toep = tp->t_toe; 2830dc964385SJohn Baldwin 2831dc964385SJohn Baldwin /* Ignore writes. */ 2832dc964385SJohn Baldwin if (job->uaiocb.aio_lio_opcode != LIO_READ) 2833dc964385SJohn Baldwin return (EOPNOTSUPP); 2834dc964385SJohn Baldwin 2835a5a965d7SJohn Baldwin INP_WLOCK(inp); 2836a5a965d7SJohn Baldwin if (__predict_false(ulp_mode(toep) == ULP_MODE_NONE)) { 2837a5a965d7SJohn Baldwin if (!set_ddp_ulp_mode(toep)) { 2838a5a965d7SJohn Baldwin INP_WUNLOCK(inp); 2839a5a965d7SJohn Baldwin return (EOPNOTSUPP); 2840a5a965d7SJohn Baldwin } 2841a5a965d7SJohn Baldwin } 2842a5a965d7SJohn Baldwin INP_WUNLOCK(inp); 2843a5a965d7SJohn Baldwin 2844dc964385SJohn Baldwin DDP_LOCK(toep); 2845dc964385SJohn Baldwin 2846dc964385SJohn Baldwin /* 2847*eba13bbcSJohn Baldwin * If DDP is being used for all normal receive, don't use it 2848*eba13bbcSJohn Baldwin * for AIO. 2849*eba13bbcSJohn Baldwin */ 2850*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_RCVBUF) != 0) { 2851*eba13bbcSJohn Baldwin DDP_UNLOCK(toep); 2852*eba13bbcSJohn Baldwin return (EOPNOTSUPP); 2853*eba13bbcSJohn Baldwin } 2854*eba13bbcSJohn Baldwin 2855*eba13bbcSJohn Baldwin /* 2856dc964385SJohn Baldwin * XXX: Think about possibly returning errors for ENOTCONN, 2857dc964385SJohn Baldwin * etc. Perhaps the caller would only queue the request 2858dc964385SJohn Baldwin * if it failed with EOPNOTSUPP? 2859dc964385SJohn Baldwin */ 2860dc964385SJohn Baldwin 2861dc964385SJohn Baldwin #ifdef VERBOSE_TRACES 28628674e626SNavdeep Parhar CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid); 2863dc964385SJohn Baldwin #endif 2864dc964385SJohn Baldwin if (!aio_set_cancel_function(job, t4_aio_cancel_queued)) 2865dc964385SJohn Baldwin panic("new job was cancelled"); 2866125d42feSJohn Baldwin TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list); 2867125d42feSJohn Baldwin toep->ddp.waiting_count++; 2868*eba13bbcSJohn Baldwin 2869*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_AIO) == 0) { 2870*eba13bbcSJohn Baldwin toep->ddp.flags |= DDP_AIO; 2871*eba13bbcSJohn Baldwin TAILQ_INIT(&toep->ddp.cached_pagesets); 2872*eba13bbcSJohn Baldwin TAILQ_INIT(&toep->ddp.aiojobq); 2873*eba13bbcSJohn Baldwin TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task, 2874*eba13bbcSJohn Baldwin toep); 2875*eba13bbcSJohn Baldwin } 2876dc964385SJohn Baldwin 2877dc964385SJohn Baldwin /* 2878dc964385SJohn Baldwin * Try to handle this request synchronously. If this has 2879dc964385SJohn Baldwin * to block because the task is running, it will just bail 2880dc964385SJohn Baldwin * and let the task handle it instead. 2881dc964385SJohn Baldwin */ 2882dc964385SJohn Baldwin aio_ddp_requeue(toep); 2883dc964385SJohn Baldwin DDP_UNLOCK(toep); 2884dc964385SJohn Baldwin return (0); 2885dc964385SJohn Baldwin } 2886dc964385SJohn Baldwin 2887*eba13bbcSJohn Baldwin static void 2888*eba13bbcSJohn Baldwin ddp_rcvbuf_requeue(struct toepcb *toep) 2889*eba13bbcSJohn Baldwin { 2890*eba13bbcSJohn Baldwin struct socket *so; 2891*eba13bbcSJohn Baldwin struct sockbuf *sb; 2892*eba13bbcSJohn Baldwin struct inpcb *inp; 2893*eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb; 2894*eba13bbcSJohn Baldwin 2895*eba13bbcSJohn Baldwin DDP_ASSERT_LOCKED(toep); 2896*eba13bbcSJohn Baldwin restart: 2897*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_DEAD) != 0) { 2898*eba13bbcSJohn Baldwin MPASS(toep->ddp.active_count == 0); 2899*eba13bbcSJohn Baldwin return; 2900*eba13bbcSJohn Baldwin } 2901*eba13bbcSJohn Baldwin 2902*eba13bbcSJohn Baldwin /* If both buffers are active, nothing to do. */ 2903*eba13bbcSJohn Baldwin if (toep->ddp.active_count == nitems(toep->ddp.db)) { 2904*eba13bbcSJohn Baldwin return; 2905*eba13bbcSJohn Baldwin } 2906*eba13bbcSJohn Baldwin 2907*eba13bbcSJohn Baldwin inp = toep->inp; 2908*eba13bbcSJohn Baldwin so = inp->inp_socket; 2909*eba13bbcSJohn Baldwin sb = &so->so_rcv; 2910*eba13bbcSJohn Baldwin 2911*eba13bbcSJohn Baldwin drb = alloc_cached_ddp_rcv_buffer(toep); 2912*eba13bbcSJohn Baldwin DDP_UNLOCK(toep); 2913*eba13bbcSJohn Baldwin 2914*eba13bbcSJohn Baldwin if (drb == NULL) { 2915*eba13bbcSJohn Baldwin drb = alloc_ddp_rcv_buffer(toep, M_WAITOK); 2916*eba13bbcSJohn Baldwin if (drb == NULL) { 2917*eba13bbcSJohn Baldwin printf("%s: failed to allocate buffer\n", __func__); 2918*eba13bbcSJohn Baldwin DDP_LOCK(toep); 2919*eba13bbcSJohn Baldwin return; 2920*eba13bbcSJohn Baldwin } 2921*eba13bbcSJohn Baldwin } 2922*eba13bbcSJohn Baldwin 2923*eba13bbcSJohn Baldwin DDP_LOCK(toep); 2924*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_DEAD) != 0 || 2925*eba13bbcSJohn Baldwin toep->ddp.active_count == nitems(toep->ddp.db)) { 2926*eba13bbcSJohn Baldwin recycle_ddp_rcv_buffer(toep, drb); 2927*eba13bbcSJohn Baldwin return; 2928*eba13bbcSJohn Baldwin } 2929*eba13bbcSJohn Baldwin 2930*eba13bbcSJohn Baldwin /* We will never get anything unless we are or were connected. */ 2931*eba13bbcSJohn Baldwin SOCKBUF_LOCK(sb); 2932*eba13bbcSJohn Baldwin if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { 2933*eba13bbcSJohn Baldwin SOCKBUF_UNLOCK(sb); 2934*eba13bbcSJohn Baldwin recycle_ddp_rcv_buffer(toep, drb); 2935*eba13bbcSJohn Baldwin return; 2936*eba13bbcSJohn Baldwin } 2937*eba13bbcSJohn Baldwin 2938*eba13bbcSJohn Baldwin /* Abort if socket has reported problems or is closed. */ 2939*eba13bbcSJohn Baldwin if (so->so_error != 0 || (sb->sb_state & SBS_CANTRCVMORE) != 0) { 2940*eba13bbcSJohn Baldwin SOCKBUF_UNLOCK(sb); 2941*eba13bbcSJohn Baldwin recycle_ddp_rcv_buffer(toep, drb); 2942*eba13bbcSJohn Baldwin return; 2943*eba13bbcSJohn Baldwin } 2944*eba13bbcSJohn Baldwin SOCKBUF_UNLOCK(sb); 2945*eba13bbcSJohn Baldwin 2946*eba13bbcSJohn Baldwin if (!queue_ddp_rcvbuf(toep, drb)) { 2947*eba13bbcSJohn Baldwin /* 2948*eba13bbcSJohn Baldwin * XXX: Need a way to kick a retry here. 2949*eba13bbcSJohn Baldwin * 2950*eba13bbcSJohn Baldwin * XXX: We know the fixed size needed and could 2951*eba13bbcSJohn Baldwin * preallocate the work request using a blocking 2952*eba13bbcSJohn Baldwin * request at the start of the task to avoid having to 2953*eba13bbcSJohn Baldwin * handle this edge case. 2954*eba13bbcSJohn Baldwin */ 2955*eba13bbcSJohn Baldwin return; 2956*eba13bbcSJohn Baldwin } 2957*eba13bbcSJohn Baldwin goto restart; 2958*eba13bbcSJohn Baldwin } 2959*eba13bbcSJohn Baldwin 2960*eba13bbcSJohn Baldwin static void 2961*eba13bbcSJohn Baldwin ddp_rcvbuf_requeue_task(void *context, int pending) 2962*eba13bbcSJohn Baldwin { 2963*eba13bbcSJohn Baldwin struct toepcb *toep = context; 2964*eba13bbcSJohn Baldwin 2965*eba13bbcSJohn Baldwin DDP_LOCK(toep); 2966*eba13bbcSJohn Baldwin ddp_rcvbuf_requeue(toep); 2967*eba13bbcSJohn Baldwin toep->ddp.flags &= ~DDP_TASK_ACTIVE; 2968*eba13bbcSJohn Baldwin DDP_UNLOCK(toep); 2969*eba13bbcSJohn Baldwin 2970*eba13bbcSJohn Baldwin free_toepcb(toep); 2971*eba13bbcSJohn Baldwin } 2972*eba13bbcSJohn Baldwin 2973*eba13bbcSJohn Baldwin int 2974*eba13bbcSJohn Baldwin t4_enable_ddp_rcv(struct socket *so, struct toepcb *toep) 2975*eba13bbcSJohn Baldwin { 2976*eba13bbcSJohn Baldwin struct inpcb *inp = sotoinpcb(so); 2977*eba13bbcSJohn Baldwin struct adapter *sc = td_adapter(toep->td); 2978*eba13bbcSJohn Baldwin 2979*eba13bbcSJohn Baldwin INP_WLOCK(inp); 2980*eba13bbcSJohn Baldwin switch (ulp_mode(toep)) { 2981*eba13bbcSJohn Baldwin case ULP_MODE_TCPDDP: 2982*eba13bbcSJohn Baldwin break; 2983*eba13bbcSJohn Baldwin case ULP_MODE_NONE: 2984*eba13bbcSJohn Baldwin if (set_ddp_ulp_mode(toep)) 2985*eba13bbcSJohn Baldwin break; 2986*eba13bbcSJohn Baldwin /* FALLTHROUGH */ 2987*eba13bbcSJohn Baldwin default: 2988*eba13bbcSJohn Baldwin INP_WUNLOCK(inp); 2989*eba13bbcSJohn Baldwin return (EOPNOTSUPP); 2990*eba13bbcSJohn Baldwin } 2991*eba13bbcSJohn Baldwin INP_WUNLOCK(inp); 2992*eba13bbcSJohn Baldwin 2993*eba13bbcSJohn Baldwin DDP_LOCK(toep); 2994*eba13bbcSJohn Baldwin 2995*eba13bbcSJohn Baldwin /* 2996*eba13bbcSJohn Baldwin * If DDP is being used for AIO already, don't use it for 2997*eba13bbcSJohn Baldwin * normal receive. 2998*eba13bbcSJohn Baldwin */ 2999*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_AIO) != 0) { 3000*eba13bbcSJohn Baldwin DDP_UNLOCK(toep); 3001*eba13bbcSJohn Baldwin return (EOPNOTSUPP); 3002*eba13bbcSJohn Baldwin } 3003*eba13bbcSJohn Baldwin 3004*eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_RCVBUF) != 0) { 3005*eba13bbcSJohn Baldwin DDP_UNLOCK(toep); 3006*eba13bbcSJohn Baldwin return (EBUSY); 3007*eba13bbcSJohn Baldwin } 3008*eba13bbcSJohn Baldwin 3009*eba13bbcSJohn Baldwin toep->ddp.flags |= DDP_RCVBUF; 3010*eba13bbcSJohn Baldwin TAILQ_INIT(&toep->ddp.cached_buffers); 3011*eba13bbcSJohn Baldwin enable_ddp(sc, toep); 3012*eba13bbcSJohn Baldwin TASK_INIT(&toep->ddp.requeue_task, 0, ddp_rcvbuf_requeue_task, toep); 3013*eba13bbcSJohn Baldwin ddp_queue_toep(toep); 3014*eba13bbcSJohn Baldwin DDP_UNLOCK(toep); 3015*eba13bbcSJohn Baldwin return (0); 3016*eba13bbcSJohn Baldwin } 3017*eba13bbcSJohn Baldwin 30189689995dSJohn Baldwin void 3019dc964385SJohn Baldwin t4_ddp_mod_load(void) 3020dc964385SJohn Baldwin { 3021*eba13bbcSJohn Baldwin if (t4_ddp_rcvbuf_len < PAGE_SIZE) 3022*eba13bbcSJohn Baldwin t4_ddp_rcvbuf_len = PAGE_SIZE; 3023*eba13bbcSJohn Baldwin if (t4_ddp_rcvbuf_len > MAX_DDP_BUFFER_SIZE) 3024*eba13bbcSJohn Baldwin t4_ddp_rcvbuf_len = MAX_DDP_BUFFER_SIZE; 3025*eba13bbcSJohn Baldwin if (!powerof2(t4_ddp_rcvbuf_len)) 3026*eba13bbcSJohn Baldwin t4_ddp_rcvbuf_len = 1 << fls(t4_ddp_rcvbuf_len); 3027dc964385SJohn Baldwin 30284535e804SNavdeep Parhar t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl, 30294535e804SNavdeep Parhar CPL_COOKIE_DDP0); 30304535e804SNavdeep Parhar t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl, 30314535e804SNavdeep Parhar CPL_COOKIE_DDP1); 3032671bf2b8SNavdeep Parhar t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp); 3033671bf2b8SNavdeep Parhar t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete); 3034dc964385SJohn Baldwin TAILQ_INIT(&ddp_orphan_pagesets); 3035dc964385SJohn Baldwin mtx_init(&ddp_orphan_pagesets_lock, "ddp orphans", NULL, MTX_DEF); 3036dc964385SJohn Baldwin TASK_INIT(&ddp_orphan_task, 0, ddp_free_orphan_pagesets, NULL); 3037dc964385SJohn Baldwin } 3038dc964385SJohn Baldwin 3039dc964385SJohn Baldwin void 3040dc964385SJohn Baldwin t4_ddp_mod_unload(void) 3041dc964385SJohn Baldwin { 3042dc964385SJohn Baldwin 3043dc964385SJohn Baldwin taskqueue_drain(taskqueue_thread, &ddp_orphan_task); 3044dc964385SJohn Baldwin MPASS(TAILQ_EMPTY(&ddp_orphan_pagesets)); 3045dc964385SJohn Baldwin mtx_destroy(&ddp_orphan_pagesets_lock); 3046d6ddb084SNavdeep Parhar t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP0); 3047d6ddb084SNavdeep Parhar t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP1); 3048671bf2b8SNavdeep Parhar t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL); 3049671bf2b8SNavdeep Parhar t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL); 3050dc964385SJohn Baldwin } 3051e682d02eSNavdeep Parhar #endif 3052