/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/aio.h>
#include <sys/bio.h>
#include <sys/file.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/tcp_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/toecore.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

#include <cam/scsi/scsi_all.h>
#include <cam/ctl/ctl_io.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * received by the AIO job so far.
 */
#define aio_received backend3

static void aio_ddp_requeue_task(void *context, int pending);
static void ddp_complete_all(struct toepcb *toep, int error);
static void t4_aio_cancel_active(struct kaiocb *job);
static void t4_aio_cancel_queued(struct kaiocb *job);
static int t4_alloc_page_pods_for_rcvbuf(struct ppod_region *pr,
    struct ddp_rcv_buffer *drb);
static int t4_write_page_pods_for_rcvbuf(struct adapter *sc,
    struct sge_wrq *wrq, int tid, struct ddp_rcv_buffer *drb);

static TAILQ_HEAD(, pageset) ddp_orphan_pagesets;
static struct mtx ddp_orphan_pagesets_lock;
static struct task ddp_orphan_task;

#define MAX_DDP_BUFFER_SIZE	(M_TCB_RX_DDP_BUF0_LEN)

/*
 * A page set holds information about a user buffer used for AIO DDP.
 * The page set holds resources such as the VM pages backing the
 * buffer (either held or wired) and the page pods associated with the
 * buffer.  Recently used page sets are cached to allow for efficient
 * reuse of buffers (avoiding the need to re-fault in pages, hold
 * them, etc.).  Note that cached page sets keep the backing pages
 * wired.  The number of wired pages is capped by only allowing for
 * two wired pagesets per connection.  This is not a perfect cap, but
 * is a trade-off for performance.
 *
 * If an application ping-pongs two buffers for a connection via
 * aio_read(2) then those buffers should remain wired and expensive VM
 * fault lookups should be avoided after each buffer has been used
 * once.  If an application uses more than two buffers then this will
 * fall back to doing expensive VM fault lookups for each operation.
 */
static void
free_pageset(struct tom_data *td, struct pageset *ps)
{
	vm_page_t p;
	int i;

	if (ps->prsv.prsv_nppods > 0)
		t4_free_page_pods(&ps->prsv);

	for (i = 0; i < ps->npages; i++) {
		p = ps->pages[i];
		vm_page_unwire(p, PQ_INACTIVE);
	}
	mtx_lock(&ddp_orphan_pagesets_lock);
	TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link);
	taskqueue_enqueue(taskqueue_thread, &ddp_orphan_task);
	mtx_unlock(&ddp_orphan_pagesets_lock);
}

static void
ddp_free_orphan_pagesets(void *context, int pending)
{
	struct pageset *ps;

	mtx_lock(&ddp_orphan_pagesets_lock);
	while (!TAILQ_EMPTY(&ddp_orphan_pagesets)) {
		ps = TAILQ_FIRST(&ddp_orphan_pagesets);
		TAILQ_REMOVE(&ddp_orphan_pagesets, ps, link);
		mtx_unlock(&ddp_orphan_pagesets_lock);
		if (ps->vm)
			vmspace_free(ps->vm);
		free(ps, M_CXGBE);
		mtx_lock(&ddp_orphan_pagesets_lock);
	}
	mtx_unlock(&ddp_orphan_pagesets_lock);
}

static void
recycle_pageset(struct toepcb *toep, struct pageset *ps)
{

	DDP_ASSERT_LOCKED(toep);
	if (!(toep->ddp.flags & DDP_DEAD)) {
		KASSERT(toep->ddp.cached_count + toep->ddp.active_count <
		    nitems(toep->ddp.db), ("too many wired pagesets"));
		TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link);
		toep->ddp.cached_count++;
	} else
		free_pageset(toep->td, ps);
}
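
/*
 * Illustration (added commentary, not from the original author): with
 * nitems(toep->ddp.db) == 2, at most two pagesets stay wired per
 * connection, split between active DDP buffers and this cache.  An
 * application that ping-pongs two aio_read(2) buffers should find its
 * pageset already wired here on every request after the first use of
 * each buffer, e.g.:
 *
 *	aio_read(&job_a);	// pageset A faulted in, wired, cached
 *	aio_read(&job_b);	// pageset B faulted in, wired, cached
 *	aio_read(&job_a);	// reuses cached pageset A, no VM faults
 */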

static void
ddp_complete_one(struct kaiocb *job, int error)
{
	long copied;

	/*
	 * If this job had copied data out of the socket buffer before
	 * it was cancelled, report it as a short read rather than an
	 * error.
	 */
	copied = job->aio_received;
	if (copied != 0 || error == 0)
		aio_complete(job, copied, 0);
	else
		aio_complete(job, -1, error);
}

static void
free_ddp_rcv_buffer(struct toepcb *toep, struct ddp_rcv_buffer *drb)
{
	t4_free_page_pods(&drb->prsv);
	free(drb->buf, M_CXGBE);
	free(drb, M_CXGBE);
	counter_u64_add(toep->ofld_rxq->ddp_buffer_free, 1);
	free_toepcb(toep);
}

static void
recycle_ddp_rcv_buffer(struct toepcb *toep, struct ddp_rcv_buffer *drb)
{
	DDP_CACHE_LOCK(toep);
	if (!(toep->ddp.flags & DDP_DEAD) &&
	    toep->ddp.cached_count < t4_ddp_rcvbuf_cache) {
		TAILQ_INSERT_HEAD(&toep->ddp.cached_buffers, drb, link);
		toep->ddp.cached_count++;
		DDP_CACHE_UNLOCK(toep);
	} else {
		DDP_CACHE_UNLOCK(toep);
		free_ddp_rcv_buffer(toep, drb);
	}
}

static struct ddp_rcv_buffer *
alloc_cached_ddp_rcv_buffer(struct toepcb *toep)
{
	struct ddp_rcv_buffer *drb;

	DDP_CACHE_LOCK(toep);
	if (!TAILQ_EMPTY(&toep->ddp.cached_buffers)) {
		drb = TAILQ_FIRST(&toep->ddp.cached_buffers);
		TAILQ_REMOVE(&toep->ddp.cached_buffers, drb, link);
		toep->ddp.cached_count--;
		counter_u64_add(toep->ofld_rxq->ddp_buffer_reuse, 1);
	} else
		drb = NULL;
	DDP_CACHE_UNLOCK(toep);
	return (drb);
}
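
/*
 * Added commentary: allocate a fresh receive buffer when the cache
 * above is empty.  This sets up a physically contiguous buffer of
 * t4_ddp_rcvbuf_len bytes and allocates and writes its page pods to
 * the card before the buffer is handed out.  Callers try
 * alloc_cached_ddp_rcv_buffer() first and fall back to this routine
 * (see handle_ddp_data_rcvbuf() below).
 */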
static struct ddp_rcv_buffer *
alloc_ddp_rcv_buffer(struct toepcb *toep, int how)
{
	struct tom_data *td = toep->td;
	struct adapter *sc = td_adapter(td);
	struct ddp_rcv_buffer *drb;
	int error;

	drb = malloc(sizeof(*drb), M_CXGBE, how | M_ZERO);
	if (drb == NULL)
		return (NULL);

	drb->buf = contigmalloc(t4_ddp_rcvbuf_len, M_CXGBE, how, 0, ~0,
	    t4_ddp_rcvbuf_len, 0);
	if (drb->buf == NULL) {
		free(drb, M_CXGBE);
		return (NULL);
	}
	drb->len = t4_ddp_rcvbuf_len;
	drb->refs = 1;

	error = t4_alloc_page_pods_for_rcvbuf(&td->pr, drb);
	if (error != 0) {
		free(drb->buf, M_CXGBE);
		free(drb, M_CXGBE);
		return (NULL);
	}

	error = t4_write_page_pods_for_rcvbuf(sc, toep->ctrlq, toep->tid, drb);
	if (error != 0) {
		t4_free_page_pods(&drb->prsv);
		free(drb->buf, M_CXGBE);
		free(drb, M_CXGBE);
		return (NULL);
	}

	hold_toepcb(toep);
	counter_u64_add(toep->ofld_rxq->ddp_buffer_alloc, 1);
	return (drb);
}

static void
free_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db)
{
	if ((toep->ddp.flags & DDP_RCVBUF) != 0) {
		if (db->drb != NULL)
			free_ddp_rcv_buffer(toep, db->drb);
#ifdef INVARIANTS
		db->drb = NULL;
#endif
		return;
	}

	if (db->job) {
		/*
		 * XXX: If we are un-offloading the socket then we
		 * should requeue these on the socket somehow.  If we
		 * got a FIN from the remote end, then this completes
		 * any remaining requests with an EOF read.
		 */
		if (!aio_clear_cancel_function(db->job))
			ddp_complete_one(db->job, 0);
#ifdef INVARIANTS
		db->job = NULL;
#endif
	}

	if (db->ps) {
		free_pageset(toep->td, db->ps);
#ifdef INVARIANTS
		db->ps = NULL;
#endif
	}
}

static void
ddp_init_toep(struct toepcb *toep)
{

	toep->ddp.flags = DDP_OK;
	toep->ddp.active_id = -1;
	mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF);
	mtx_init(&toep->ddp.cache_lock, "t4 ddp cache", NULL, MTX_DEF);
}

void
ddp_uninit_toep(struct toepcb *toep)
{

	mtx_destroy(&toep->ddp.lock);
	mtx_destroy(&toep->ddp.cache_lock);
}
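
/*
 * Added commentary: DDP_DEAD is set below while holding both the main
 * DDP lock and the cache lock so that concurrent recycle_pageset()
 * and recycle_ddp_rcv_buffer() calls stop caching buffers while the
 * cached lists are drained.
 */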
void
release_ddp_resources(struct toepcb *toep)
{
	struct ddp_rcv_buffer *drb;
	struct pageset *ps;
	int i;

	DDP_LOCK(toep);
	DDP_CACHE_LOCK(toep);
	toep->ddp.flags |= DDP_DEAD;
	DDP_CACHE_UNLOCK(toep);
	for (i = 0; i < nitems(toep->ddp.db); i++) {
		free_ddp_buffer(toep, &toep->ddp.db[i]);
	}
	if ((toep->ddp.flags & DDP_AIO) != 0) {
		while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) {
			TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
			free_pageset(toep->td, ps);
		}
		ddp_complete_all(toep, 0);
	}
	if ((toep->ddp.flags & DDP_RCVBUF) != 0) {
		DDP_CACHE_LOCK(toep);
		while ((drb = TAILQ_FIRST(&toep->ddp.cached_buffers)) != NULL) {
			TAILQ_REMOVE(&toep->ddp.cached_buffers, drb, link);
			free_ddp_rcv_buffer(toep, drb);
		}
		DDP_CACHE_UNLOCK(toep);
	}
	DDP_UNLOCK(toep);
}

#ifdef INVARIANTS
void
ddp_assert_empty(struct toepcb *toep)
{
	int i;

	MPASS((toep->ddp.flags & (DDP_TASK_ACTIVE | DDP_DEAD)) != DDP_TASK_ACTIVE);
	for (i = 0; i < nitems(toep->ddp.db); i++) {
		if ((toep->ddp.flags & DDP_AIO) != 0) {
			MPASS(toep->ddp.db[i].job == NULL);
			MPASS(toep->ddp.db[i].ps == NULL);
		} else
			MPASS(toep->ddp.db[i].drb == NULL);
	}
	if ((toep->ddp.flags & DDP_AIO) != 0) {
		MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets));
		MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq));
	}
	if ((toep->ddp.flags & DDP_RCVBUF) != 0)
		MPASS(TAILQ_EMPTY(&toep->ddp.cached_buffers));
}
#endif

static void
complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db,
    unsigned int db_idx)
{
	struct ddp_rcv_buffer *drb;
	unsigned int db_flag;

	toep->ddp.active_count--;
	if (toep->ddp.active_id == db_idx) {
		if (toep->ddp.active_count == 0) {
			if ((toep->ddp.flags & DDP_AIO) != 0)
				KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL,
				    ("%s: active_count mismatch", __func__));
			else
				KASSERT(toep->ddp.db[db_idx ^ 1].drb == NULL,
				    ("%s: active_count mismatch", __func__));
			toep->ddp.active_id = -1;
		} else
			toep->ddp.active_id ^= 1;
#ifdef VERBOSE_TRACES
		CTR3(KTR_CXGBE, "%s: tid %u, ddp_active_id = %d", __func__,
		    toep->tid, toep->ddp.active_id);
#endif
	} else {
		KASSERT(toep->ddp.active_count != 0 &&
		    toep->ddp.active_id != -1,
		    ("%s: active count mismatch", __func__));
	}

	if ((toep->ddp.flags & DDP_AIO) != 0) {
		db->cancel_pending = 0;
		db->job = NULL;
		recycle_pageset(toep, db->ps);
		db->ps = NULL;
	} else {
		drb = db->drb;
		if (atomic_fetchadd_int(&drb->refs, -1) == 1)
			recycle_ddp_rcv_buffer(toep, drb);
		db->drb = NULL;
		db->placed = 0;
	}

	db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
	KASSERT(toep->ddp.flags & db_flag,
	    ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x",
	    __func__, toep, toep->ddp.flags));
	toep->ddp.flags &= ~db_flag;
}
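
/*
 * Worked example (added commentary): with both hardware buffers
 * active and toep->ddp.active_id == 0, completing buffer 0 leaves
 * buffer 1 as the oldest outstanding buffer, so active_id ^= 1
 * selects it.  Completing the last active buffer instead resets
 * active_id to -1 and active_count to 0.
 */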

/* Called when m_free drops the last reference. */
static void
ddp_rcv_mbuf_done(struct mbuf *m)
{
	struct toepcb *toep = m->m_ext.ext_arg1;
	struct ddp_rcv_buffer *drb = m->m_ext.ext_arg2;

	recycle_ddp_rcv_buffer(toep, drb);
}

static void
queue_ddp_rcvbuf_mbuf(struct toepcb *toep, u_int db_idx, u_int len)
{
	struct inpcb *inp = toep->inp;
	struct sockbuf *sb;
	struct ddp_buffer *db;
	struct ddp_rcv_buffer *drb;
	struct mbuf *m;

	m = m_gethdr(M_NOWAIT, MT_DATA);
	if (m == NULL) {
		printf("%s: failed to allocate mbuf", __func__);
		return;
	}
	m->m_pkthdr.rcvif = toep->vi->ifp;

	db = &toep->ddp.db[db_idx];
	drb = db->drb;
	m_extaddref(m, (char *)drb->buf + db->placed, len, &drb->refs,
	    ddp_rcv_mbuf_done, toep, drb);
	m->m_pkthdr.len = len;
	m->m_len = len;

	sb = &inp->inp_socket->so_rcv;
	SOCKBUF_LOCK_ASSERT(sb);
	sbappendstream_locked(sb, m, 0);

	db->placed += len;
	toep->ofld_rxq->rx_toe_ddp_octets += len;
}
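
/*
 * Note on buffer lifetime (added commentary): queue_ddp_rcvbuf_mbuf()
 * does not copy the DDP payload; the mbuf points into drb->buf as
 * external storage.  m_extaddref() takes a reference on drb->refs and
 * ddp_rcv_mbuf_done() runs only when m_free() drops the last
 * reference, so one receive buffer may back several mbufs queued to
 * the socket buffer before it is recycled.
 */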

/* XXX: handle_ddp_data code duplication */
void
insert_ddp_data(struct toepcb *toep, uint32_t n)
{
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct ddp_buffer *db;
	struct kaiocb *job;
	size_t placed;
	long copied;
	unsigned int db_idx;
#ifdef INVARIANTS
	unsigned int db_flag;
#endif
	bool ddp_rcvbuf;

	INP_WLOCK_ASSERT(inp);
	DDP_ASSERT_LOCKED(toep);

	ddp_rcvbuf = (toep->ddp.flags & DDP_RCVBUF) != 0;
	tp->rcv_nxt += n;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
	tp->rcv_wnd -= n;
#endif
	CTR2(KTR_CXGBE, "%s: placed %u bytes before falling out of DDP",
	    __func__, n);
	while (toep->ddp.active_count > 0) {
		MPASS(toep->ddp.active_id != -1);
		db_idx = toep->ddp.active_id;
#ifdef INVARIANTS
		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
#endif
		MPASS((toep->ddp.flags & db_flag) != 0);
		db = &toep->ddp.db[db_idx];
		if (ddp_rcvbuf) {
			placed = n;
			if (placed > db->drb->len - db->placed)
				placed = db->drb->len - db->placed;
			if (placed != 0)
				queue_ddp_rcvbuf_mbuf(toep, db_idx, placed);
			complete_ddp_buffer(toep, db, db_idx);
			n -= placed;
			continue;
		}
		job = db->job;
		copied = job->aio_received;
		placed = n;
		if (placed > job->uaiocb.aio_nbytes - copied)
			placed = job->uaiocb.aio_nbytes - copied;
		if (placed > 0) {
			job->msgrcv = 1;
			toep->ofld_rxq->rx_aio_ddp_jobs++;
		}
		toep->ofld_rxq->rx_aio_ddp_octets += placed;
		if (!aio_clear_cancel_function(job)) {
			/*
			 * Update the copied length for when
			 * t4_aio_cancel_active() completes this
			 * request.
			 */
			job->aio_received += placed;
		} else if (copied + placed != 0) {
			CTR4(KTR_CXGBE,
			    "%s: completing %p (copied %ld, placed %lu)",
			    __func__, job, copied, placed);
			/* XXX: This always completes if there is some data. */
			aio_complete(job, copied + placed, 0);
		} else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
			TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
			toep->ddp.waiting_count++;
		} else
			aio_cancel(job);
		n -= placed;
		complete_ddp_buffer(toep, db, db_idx);
	}

	MPASS(n == 0);
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

/* RX_DATA_ACK sent as a ULP command looks like this */
#define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core))

static inline void *
mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
{
	struct ulptx_idata *ulpsc;
	struct cpl_rx_data_ack_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_rx_data_ack_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid));
	req->credit_dack = htobe32(F_RX_MODULATE_RX);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__RX_DATA_ACK_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}
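
/*
 * Example of the 16-byte alignment rule (added commentary): each
 * ULPTX sub-command must end on a 16-byte boundary.  If
 * LEN__RX_DATA_ACK_ULP is not a multiple of 16, mk_rx_data_ack_ulp()
 * above appends a ULP_TX_SC_NOOP immediate as padding, which is why
 * callers size the work request with roundup2(LEN__RX_DATA_ACK_ULP,
 * 16).
 */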

static struct wrqe *
mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
    struct ppod_reservation *prsv, int offset, uint32_t len,
    uint64_t ddp_flags, uint64_t ddp_flags_mask)
{
	struct wrqe *wr;
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int wrlen;

	KASSERT(db_idx == 0 || db_idx == 1,
	    ("%s: bad DDP buffer index %d", __func__, db_idx));

	/*
	 * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
	 * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
	 *
	 * The work request header is 16B and always ends at a 16B boundary.
	 * The ULPTX master commands that follow must all end at 16B boundaries
	 * too so we round up the size to 16.
	 */
	wrlen = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__RX_DATA_ACK_ULP, 16);

	wr = alloc_wrqe(wrlen, toep->ctrlq);
	if (wr == NULL)
		return (NULL);
	wrh = wrtod(wr);
	INIT_ULPTX_WRH(wrh, wrlen, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);

	/* Write the buffer's tag */
	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
	    W_TCB_RX_DDP_BUF0_TAG + db_idx,
	    V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
	    V_TCB_RX_DDP_BUF0_TAG(prsv->prsv_tag));

	/* Update the current offset in the DDP buffer and its total length */
	if (db_idx == 0)
		ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
		    W_TCB_RX_DDP_BUF0_OFFSET,
		    V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
		    V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
		    V_TCB_RX_DDP_BUF0_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF0_LEN(len));
	else
		ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid,
		    W_TCB_RX_DDP_BUF1_OFFSET,
		    V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
		    V_TCB_RX_DDP_BUF1_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)len << 32));

	/* Update DDP flags */
	ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_RX_DDP_FLAGS,
	    ddp_flags_mask, ddp_flags);

	/* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
	ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);

	return (wr);
}

static int
handle_ddp_data_aio(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt,
    int len)
{
	uint32_t report = be32toh(ddp_report);
	unsigned int db_idx;
	struct inpcb *inp = toep->inp;
	struct ddp_buffer *db;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct kaiocb *job;
	long copied;

	db_idx = report & F_DDP_BUF_IDX ? 1 : 0;

	if (__predict_false(!(report & F_DDP_INV)))
		CXGBE_UNIMPLEMENTED("DDP buffer still valid");

	INP_WLOCK(inp);
	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	DDP_LOCK(toep);

	KASSERT(toep->ddp.active_id == db_idx,
	    ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx,
	    toep->ddp.active_id, toep->tid));
	db = &toep->ddp.db[db_idx];
	job = db->job;

	if (__predict_false(inp->inp_flags & INP_DROPPED)) {
		/*
		 * This can happen due to an administrative tcpdrop(8).
		 * Just fail the request with ECONNRESET.
		 */
		CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
		    __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
		if (aio_clear_cancel_function(job))
			ddp_complete_one(job, ECONNRESET);
		goto completed;
	}

	tp = intotcpcb(inp);

	/*
	 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
	 * sequence number of the next byte to receive.  The length of
	 * the data received for this message must be computed by
	 * comparing the new and old values of rcv_nxt.
	 *
	 * For RX_DATA_DDP, len might be non-zero, but it is only the
	 * length of the most recent DMA.  It does not include the
	 * total length of the data received since the previous update
	 * for this DDP buffer.  rcv_nxt is the sequence number of the
	 * first received byte from the most recent DMA.
	 */
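	/*
	 * Illustrative arithmetic (added commentary): if tp->rcv_nxt
	 * is 1000 and a report arrives with rcv_nxt 5096 and len 0,
	 * then 5096 - 1000 = 4096 bytes were placed since the last
	 * update and tp->rcv_nxt advances to 5096 below.
	 */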
	len += be32toh(rcv_nxt) - tp->rcv_nxt;
	tp->rcv_nxt += len;
	tp->t_rcvtime = ticks;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
#endif
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %u, DDP[%d] placed %d bytes (%#x)", __func__,
	    toep->tid, db_idx, len, report);
#endif

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(sb);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		struct adapter *sc = td_adapter(toep->td);
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
	}
	SOCKBUF_UNLOCK(sb);
	CURVNET_RESTORE();

	job->msgrcv = 1;
	toep->ofld_rxq->rx_aio_ddp_jobs++;
	toep->ofld_rxq->rx_aio_ddp_octets += len;
	if (db->cancel_pending) {
		/*
		 * Update the job's length but defer completion to the
		 * TCB_RPL callback.
		 */
		job->aio_received += len;
		goto out;
	} else if (!aio_clear_cancel_function(job)) {
		/*
		 * Update the copied length for when
		 * t4_aio_cancel_active() completes this request.
		 */
		job->aio_received += len;
	} else {
		copied = job->aio_received;
#ifdef VERBOSE_TRACES
		CTR5(KTR_CXGBE,
		    "%s: tid %u, completing %p (copied %ld, placed %d)",
		    __func__, toep->tid, job, copied, len);
#endif
		aio_complete(job, copied + len, 0);
		t4_rcvd(&toep->td->tod, tp);
	}

completed:
	complete_ddp_buffer(toep, db, db_idx);
	if (toep->ddp.waiting_count > 0)
		ddp_queue_toep(toep);
out:
	DDP_UNLOCK(toep);
	INP_WUNLOCK(inp);

	return (0);
}

static bool
queue_ddp_rcvbuf(struct toepcb *toep, struct ddp_rcv_buffer *drb)
{
	struct adapter *sc = td_adapter(toep->td);
	struct ddp_buffer *db;
	struct wrqe *wr;
	uint64_t ddp_flags, ddp_flags_mask;
	int buf_flag, db_idx;

	DDP_ASSERT_LOCKED(toep);

	KASSERT((toep->ddp.flags & DDP_DEAD) == 0, ("%s: DDP_DEAD", __func__));
	KASSERT(toep->ddp.active_count < nitems(toep->ddp.db),
	    ("%s: no empty DDP buffer slot", __func__));

	/* Determine which DDP buffer to use. */
	if (toep->ddp.db[0].drb == NULL) {
		db_idx = 0;
	} else {
		MPASS(toep->ddp.db[1].drb == NULL);
		db_idx = 1;
	}

	/*
	 * Permit PSH to trigger a partial completion without
	 * invalidating the rest of the buffer, but disable the PUSH
	 * timer.
	 */
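	/*
	 * Added commentary: ddp_flags_mask selects which RX_DDP_FLAGS
	 * bits the SET_TCB_FIELD update may change and ddp_flags gives
	 * their new values.  For example, for buffer 0 the mask
	 * includes V_TF_DDP_PUSH_DISABLE_0 with a value of 0 (clearing
	 * it), while both mask and value include V_TF_DDP_BUF0_VALID
	 * (setting it).
	 */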
	ddp_flags = 0;
	ddp_flags_mask = 0;
	if (db_idx == 0) {
		ddp_flags |= V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PUSH_DISABLE_0(0) | V_TF_DDP_PSHF_ENABLE_0(1) |
		    V_TF_DDP_BUF0_VALID(1);
		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) |
		    V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1);
		buf_flag = DDP_BUF0_ACTIVE;
	} else {
		ddp_flags |= V_TF_DDP_PSH_NO_INVALIDATE1(1) |
		    V_TF_DDP_PUSH_DISABLE_1(0) | V_TF_DDP_PSHF_ENABLE_1(1) |
		    V_TF_DDP_BUF1_VALID(1);
		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) |
		    V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) |
		    V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1);
		buf_flag = DDP_BUF1_ACTIVE;
	}
	MPASS((toep->ddp.flags & buf_flag) == 0);
	if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) {
		MPASS(db_idx == 0);
		MPASS(toep->ddp.active_id == -1);
		MPASS(toep->ddp.active_count == 0);
		ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1);
	}

	/*
	 * The TID for this connection should still be valid.  If
	 * DDP_DEAD is set, SBS_CANTRCVMORE should be set, so we
	 * shouldn't be this far anyway.
	 */
	wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &drb->prsv, 0, drb->len,
	    ddp_flags, ddp_flags_mask);
	if (wr == NULL) {
		recycle_ddp_rcv_buffer(toep, drb);
		printf("%s: mk_update_tcb_for_ddp failed\n", __func__);
		return (false);
	}

#ifdef VERBOSE_TRACES
	CTR(KTR_CXGBE,
	    "%s: tid %u, scheduling DDP[%d] (flags %#lx/%#lx)", __func__,
	    toep->tid, db_idx, ddp_flags, ddp_flags_mask);
#endif
	/*
	 * Hold a reference on scheduled buffers that is dropped in
	 * complete_ddp_buffer.
	 */
	drb->refs = 1;

	/* Give the chip the go-ahead. */
	t4_wrq_tx(sc, wr);
	db = &toep->ddp.db[db_idx];
	db->drb = drb;
	toep->ddp.flags |= buf_flag;
	toep->ddp.active_count++;
	if (toep->ddp.active_count == 1) {
		MPASS(toep->ddp.active_id == -1);
		toep->ddp.active_id = db_idx;
		CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
		    toep->ddp.active_id);
	}
	return (true);
}
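
/*
 * Sketch of the RCVBUF lifecycle (added commentary):
 * queue_ddp_rcvbuf() above programs one of the two TCB buffer slots
 * and marks it active; handle_ddp_data_rcvbuf() below consumes
 * placement reports, appends the placed bytes to the socket buffer as
 * zero-copy mbufs, and schedules a replacement buffer so the card
 * always has somewhere to place incoming payload.
 */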

static int
handle_ddp_data_rcvbuf(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt,
    int len)
{
	uint32_t report = be32toh(ddp_report);
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct ddp_buffer *db;
	struct ddp_rcv_buffer *drb;
	unsigned int db_idx;
	bool invalidated;

	db_idx = report & F_DDP_BUF_IDX ? 1 : 0;

	invalidated = (report & F_DDP_INV) != 0;

	INP_WLOCK(inp);
	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	DDP_LOCK(toep);

	KASSERT(toep->ddp.active_id == db_idx,
	    ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx,
	    toep->ddp.active_id, toep->tid));
	db = &toep->ddp.db[db_idx];

	if (__predict_false(inp->inp_flags & INP_DROPPED)) {
		/*
		 * This can happen due to an administrative tcpdrop(8).
		 * Just ignore the received data.
		 */
		CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
		    __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
		if (invalidated)
			complete_ddp_buffer(toep, db, db_idx);
		goto out;
	}

	tp = intotcpcb(inp);

	/*
	 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
	 * sequence number of the next byte to receive.  The length of
	 * the data received for this message must be computed by
	 * comparing the new and old values of rcv_nxt.
	 *
	 * For RX_DATA_DDP, len might be non-zero, but it is only the
	 * length of the most recent DMA.  It does not include the
	 * total length of the data received since the previous update
	 * for this DDP buffer.  rcv_nxt is the sequence number of the
	 * first received byte from the most recent DMA.
	 */
	len += be32toh(rcv_nxt) - tp->rcv_nxt;
	tp->rcv_nxt += len;
	tp->t_rcvtime = ticks;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
#endif
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %u, DDP[%d] placed %d bytes (%#x)", __func__,
	    toep->tid, db_idx, len, report);
#endif

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(sb);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		struct adapter *sc = td_adapter(toep->td);
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
	}

	if (len > 0) {
		queue_ddp_rcvbuf_mbuf(toep, db_idx, len);
		t4_rcvd_locked(&toep->td->tod, tp);
	}
	sorwakeup_locked(so);
	SOCKBUF_UNLOCK_ASSERT(sb);
	CURVNET_RESTORE();

	if (invalidated)
		complete_ddp_buffer(toep, db, db_idx);
	else
		KASSERT(db->placed < db->drb->len,
		    ("%s: full DDP buffer not invalidated", __func__));

	if (toep->ddp.active_count != nitems(toep->ddp.db)) {
		drb = alloc_cached_ddp_rcv_buffer(toep);
		if (drb == NULL)
			drb = alloc_ddp_rcv_buffer(toep, M_NOWAIT);
		if (drb == NULL)
			ddp_queue_toep(toep);
		else {
			if (!queue_ddp_rcvbuf(toep, drb)) {
				ddp_queue_toep(toep);
			}
		}
	}
out:
	DDP_UNLOCK(toep);
	INP_WUNLOCK(inp);

	return (0);
}

static int
handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
{
	if ((toep->ddp.flags & DDP_RCVBUF) != 0)
		return (handle_ddp_data_rcvbuf(toep, ddp_report, rcv_nxt, len));
	else
		return (handle_ddp_data_aio(toep, ddp_report, rcv_nxt, len));
}
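
/*
 * Added commentary: a connection runs in one of two DDP modes.
 * DDP_AIO places payload directly into user buffers posted via
 * aio_read(2); DDP_RCVBUF places payload into driver-owned buffers
 * that reach the application through the socket buffer as zero-copy
 * mbufs.  handle_ddp_data() above dispatches completions accordingly.
 */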
void
handle_ddp_indicate(struct toepcb *toep)
{

	DDP_ASSERT_LOCKED(toep);
	if ((toep->ddp.flags & DDP_RCVBUF) != 0) {
		/*
		 * Indicates are not meaningful for RCVBUF since
		 * buffers are activated when the socket option is
		 * set.
		 */
		return;
	}

	MPASS(toep->ddp.active_count == 0);
	MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0);
	if (toep->ddp.waiting_count == 0) {
		/*
		 * The pending requests that triggered the request for
		 * an indicate were cancelled.  Those cancels should
		 * have already disabled DDP.  Just ignore this as the
		 * data is going into the socket buffer anyway.
		 */
		return;
	}
	CTR3(KTR_CXGBE, "%s: tid %d indicated (%d waiting)", __func__,
	    toep->tid, toep->ddp.waiting_count);
	ddp_queue_toep(toep);
}

CTASSERT(CPL_COOKIE_DDP0 + 1 == CPL_COOKIE_DDP1);

static int
do_ddp_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	unsigned int db_idx;
	struct toepcb *toep;
	struct inpcb *inp;
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;

	if (cpl->status != CPL_ERR_NONE)
		panic("XXX: tcp_rpl failed: %d", cpl->status);

	toep = lookup_tid(sc, tid);
	inp = toep->inp;
	switch (cpl->cookie) {
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP0):
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP1):
		/*
		 * XXX: This duplicates a lot of code with handle_ddp_data().
		 */
		KASSERT((toep->ddp.flags & DDP_AIO) != 0,
		    ("%s: DDP_RCVBUF", __func__));
		db_idx = G_COOKIE(cpl->cookie) - CPL_COOKIE_DDP0;
		MPASS(db_idx < nitems(toep->ddp.db));
		INP_WLOCK(inp);
		DDP_LOCK(toep);
		db = &toep->ddp.db[db_idx];

		/*
		 * handle_ddp_data() should leave the job around until
		 * this callback runs once a cancel is pending.
		 */
		MPASS(db != NULL);
		MPASS(db->job != NULL);
		MPASS(db->cancel_pending);

		/*
		 * XXX: It's not clear what happens if there is data
		 * placed when the buffer is invalidated.  I suspect we
		 * need to read the TCB to see how much data was placed.
		 *
		 * For now this just pretends like nothing was placed.
		 *
		 * XXX: Note that if we did check the PCB we would need to
		 * also take care of updating the tp, etc.
		 */
		job = db->job;
		copied = job->aio_received;
		if (copied == 0) {
			CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
			aio_cancel(job);
		} else {
			CTR3(KTR_CXGBE, "%s: completing %p (copied %ld)",
			    __func__, job, copied);
			aio_complete(job, copied, 0);
			t4_rcvd(&toep->td->tod, intotcpcb(inp));
		}

		complete_ddp_buffer(toep, db, db_idx);
		if (toep->ddp.waiting_count > 0)
			ddp_queue_toep(toep);
		DDP_UNLOCK(toep);
		INP_WUNLOCK(inp);
		break;
	default:
		panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
		    G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
	}

	return (0);
}

void
handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt)
{
	struct socket *so = toep->inp->inp_socket;
	struct sockbuf *sb = &so->so_rcv;
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;
	unsigned int db_idx;
#ifdef INVARIANTS
	unsigned int db_flag;
#endif
	int len, placed;
	bool ddp_rcvbuf;

	INP_WLOCK_ASSERT(toep->inp);
	DDP_ASSERT_LOCKED(toep);

	ddp_rcvbuf = (toep->ddp.flags & DDP_RCVBUF) != 0;

	/* - 1 is to ignore the byte for FIN */
	len = be32toh(rcv_nxt) - tp->rcv_nxt - 1;
	tp->rcv_nxt += len;

	CTR(KTR_CXGBE, "%s: tid %d placed %u bytes before FIN", __func__,
	    toep->tid, len);
	while (toep->ddp.active_count > 0) {
		MPASS(toep->ddp.active_id != -1);
		db_idx = toep->ddp.active_id;
#ifdef INVARIANTS
db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE; 112039d5cbdcSNavdeep Parhar #endif 1121125d42feSJohn Baldwin MPASS((toep->ddp.flags & db_flag) != 0); 1122125d42feSJohn Baldwin db = &toep->ddp.db[db_idx]; 1123eba13bbcSJohn Baldwin if (ddp_rcvbuf) { 1124eba13bbcSJohn Baldwin placed = len; 1125eba13bbcSJohn Baldwin if (placed > db->drb->len - db->placed) 1126eba13bbcSJohn Baldwin placed = db->drb->len - db->placed; 1127eba13bbcSJohn Baldwin if (placed != 0) { 1128eba13bbcSJohn Baldwin SOCKBUF_LOCK(sb); 1129eba13bbcSJohn Baldwin queue_ddp_rcvbuf_mbuf(toep, db_idx, placed); 1130eba13bbcSJohn Baldwin sorwakeup_locked(so); 1131eba13bbcSJohn Baldwin SOCKBUF_UNLOCK_ASSERT(sb); 1132eba13bbcSJohn Baldwin } 1133eba13bbcSJohn Baldwin complete_ddp_buffer(toep, db, db_idx); 1134eba13bbcSJohn Baldwin len -= placed; 1135eba13bbcSJohn Baldwin continue; 1136eba13bbcSJohn Baldwin } 1137dc964385SJohn Baldwin job = db->job; 1138fe0bdd1dSJohn Baldwin copied = job->aio_received; 1139dc964385SJohn Baldwin placed = len; 1140dc964385SJohn Baldwin if (placed > job->uaiocb.aio_nbytes - copied) 1141dc964385SJohn Baldwin placed = job->uaiocb.aio_nbytes - copied; 1142c3d4aea6SJohn Baldwin if (placed > 0) { 1143b1012d80SJohn Baldwin job->msgrcv = 1; 1144c3d4aea6SJohn Baldwin toep->ofld_rxq->rx_aio_ddp_jobs++; 1145c3d4aea6SJohn Baldwin } 1146c3d4aea6SJohn Baldwin toep->ofld_rxq->rx_aio_ddp_octets += placed; 1147dc964385SJohn Baldwin if (!aio_clear_cancel_function(job)) { 1148dc964385SJohn Baldwin /* 1149dc964385SJohn Baldwin * Update the copied length for when 1150dc964385SJohn Baldwin * t4_aio_cancel_active() completes this 1151dc964385SJohn Baldwin * request. 1152dc964385SJohn Baldwin */ 1153fe0bdd1dSJohn Baldwin job->aio_received += placed; 1154dc964385SJohn Baldwin } else { 1155dc964385SJohn Baldwin CTR4(KTR_CXGBE, "%s: tid %d completed buf %d len %d", 1156dc964385SJohn Baldwin __func__, toep->tid, db_idx, placed); 1157dc964385SJohn Baldwin aio_complete(job, copied + placed, 0); 1158dc964385SJohn Baldwin } 1159dc964385SJohn Baldwin len -= placed; 1160dc964385SJohn Baldwin complete_ddp_buffer(toep, db, db_idx); 1161dc964385SJohn Baldwin } 1162b12c0a9eSJohn Baldwin 1163dc964385SJohn Baldwin MPASS(len == 0); 1164eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_AIO) != 0) 1165dc964385SJohn Baldwin ddp_complete_all(toep, 0); 1166b12c0a9eSJohn Baldwin } 1167b12c0a9eSJohn Baldwin 1168e682d02eSNavdeep Parhar #define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\ 1169e682d02eSNavdeep Parhar F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\ 1170e682d02eSNavdeep Parhar F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\ 1171e682d02eSNavdeep Parhar F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR) 1172e682d02eSNavdeep Parhar 1173671bf2b8SNavdeep Parhar extern cpl_handler_t t4_cpl_handler[]; 1174671bf2b8SNavdeep Parhar 1175e682d02eSNavdeep Parhar static int 1176e682d02eSNavdeep Parhar do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1177e682d02eSNavdeep Parhar { 1178e682d02eSNavdeep Parhar struct adapter *sc = iq->adapter; 1179e682d02eSNavdeep Parhar const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1); 1180e682d02eSNavdeep Parhar unsigned int tid = GET_TID(cpl); 1181e682d02eSNavdeep Parhar uint32_t vld; 1182e682d02eSNavdeep Parhar struct toepcb *toep = lookup_tid(sc, tid); 1183e682d02eSNavdeep Parhar 1184e682d02eSNavdeep Parhar KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1185e682d02eSNavdeep Parhar 
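	/*
	 * A CPL_RX_DATA_DDP means the chip has already placed the
	 * payload directly into a posted buffer; only the report
	 * carried in the CPL itself (ddpvld, seq, len) is processed
	 * here, there is no mbuf payload to consume.
	 */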
KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); 1186c91bcaaaSNavdeep Parhar KASSERT(!(toep->flags & TPF_SYNQE), 1187e682d02eSNavdeep Parhar ("%s: toep %p claims to be a synq entry", __func__, toep)); 1188e682d02eSNavdeep Parhar 1189e682d02eSNavdeep Parhar vld = be32toh(cpl->ddpvld); 1190e682d02eSNavdeep Parhar if (__predict_false(vld & DDP_ERR)) { 1191e682d02eSNavdeep Parhar panic("%s: DDP error 0x%x (tid %d, toep %p)", 1192e682d02eSNavdeep Parhar __func__, vld, tid, toep); 1193e682d02eSNavdeep Parhar } 11949eb533d3SNavdeep Parhar 1195c537e887SNavdeep Parhar if (ulp_mode(toep) == ULP_MODE_ISCSI) { 1196671bf2b8SNavdeep Parhar t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m); 11970fe98277SNavdeep Parhar return (0); 11980fe98277SNavdeep Parhar } 1199e682d02eSNavdeep Parhar 1200e682d02eSNavdeep Parhar handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len)); 1201e682d02eSNavdeep Parhar 1202e682d02eSNavdeep Parhar return (0); 1203e682d02eSNavdeep Parhar } 1204e682d02eSNavdeep Parhar 1205e682d02eSNavdeep Parhar static int 1206e682d02eSNavdeep Parhar do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss, 1207e682d02eSNavdeep Parhar struct mbuf *m) 1208e682d02eSNavdeep Parhar { 1209e682d02eSNavdeep Parhar struct adapter *sc = iq->adapter; 1210e682d02eSNavdeep Parhar const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1); 1211e682d02eSNavdeep Parhar unsigned int tid = GET_TID(cpl); 1212e682d02eSNavdeep Parhar struct toepcb *toep = lookup_tid(sc, tid); 1213e682d02eSNavdeep Parhar 1214e682d02eSNavdeep Parhar KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__)); 1215e682d02eSNavdeep Parhar KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__)); 1216c91bcaaaSNavdeep Parhar KASSERT(!(toep->flags & TPF_SYNQE), 1217e682d02eSNavdeep Parhar ("%s: toep %p claims to be a synq entry", __func__, toep)); 1218e682d02eSNavdeep Parhar 1219e682d02eSNavdeep Parhar handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0); 1220e682d02eSNavdeep Parhar 1221e682d02eSNavdeep Parhar return (0); 1222e682d02eSNavdeep Parhar } 1223e682d02eSNavdeep Parhar 1224a5a965d7SJohn Baldwin static bool 1225a5a965d7SJohn Baldwin set_ddp_ulp_mode(struct toepcb *toep) 1226a5a965d7SJohn Baldwin { 1227a5a965d7SJohn Baldwin struct adapter *sc = toep->vi->adapter; 1228a5a965d7SJohn Baldwin struct wrqe *wr; 1229a5a965d7SJohn Baldwin struct work_request_hdr *wrh; 1230a5a965d7SJohn Baldwin struct ulp_txpkt *ulpmc; 1231a5a965d7SJohn Baldwin int fields, len; 1232a5a965d7SJohn Baldwin 1233a5a965d7SJohn Baldwin if (!sc->tt.ddp) 1234a5a965d7SJohn Baldwin return (false); 1235a5a965d7SJohn Baldwin 1236a5a965d7SJohn Baldwin fields = 0; 1237a5a965d7SJohn Baldwin 1238a5a965d7SJohn Baldwin /* Overlay region including W_TCB_RX_DDP_FLAGS */ 1239a5a965d7SJohn Baldwin fields += 3; 1240a5a965d7SJohn Baldwin 1241a5a965d7SJohn Baldwin /* W_TCB_ULP_TYPE */ 1242a5a965d7SJohn Baldwin fields++; 1243a5a965d7SJohn Baldwin 1244a5a965d7SJohn Baldwin #ifdef USE_DDP_RX_FLOW_CONTROL 1245a5a965d7SJohn Baldwin /* W_TCB_T_FLAGS */ 1246a5a965d7SJohn Baldwin fields++; 1247a5a965d7SJohn Baldwin #endif 1248a5a965d7SJohn Baldwin 1249a5a965d7SJohn Baldwin len = sizeof(*wrh) + fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16); 1250a5a965d7SJohn Baldwin KASSERT(len <= SGE_MAX_WR_LEN, 1251a5a965d7SJohn Baldwin ("%s: WR with %d TCB field updates too large", __func__, fields)); 1252a5a965d7SJohn Baldwin 1253a5a965d7SJohn Baldwin wr = alloc_wrqe(len, toep->ctrlq); 1254a5a965d7SJohn Baldwin if (wr == NULL) 
1255a5a965d7SJohn Baldwin return (false); 1256a5a965d7SJohn Baldwin 1257a5a965d7SJohn Baldwin CTR(KTR_CXGBE, "%s: tid %u", __func__, toep->tid); 1258a5a965d7SJohn Baldwin 1259a5a965d7SJohn Baldwin wrh = wrtod(wr); 1260a5a965d7SJohn Baldwin INIT_ULPTX_WRH(wrh, len, 1, 0); /* atomic */ 1261a5a965d7SJohn Baldwin ulpmc = (struct ulp_txpkt *)(wrh + 1); 1262a5a965d7SJohn Baldwin 1263a5a965d7SJohn Baldwin /* 1264a5a965d7SJohn Baldwin * Words 26/27 are zero except for the DDP_OFF flag in 1265a5a965d7SJohn Baldwin * W_TCB_RX_DDP_FLAGS (27). 1266a5a965d7SJohn Baldwin */ 126764a00f87SNavdeep Parhar ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 26, 1268a5a965d7SJohn Baldwin 0xffffffffffffffff, (uint64_t)V_TF_DDP_OFF(1) << 32); 1269a5a965d7SJohn Baldwin 1270a5a965d7SJohn Baldwin /* Words 28/29 are zero. */ 127164a00f87SNavdeep Parhar ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 28, 1272a5a965d7SJohn Baldwin 0xffffffffffffffff, 0); 1273a5a965d7SJohn Baldwin 1274a5a965d7SJohn Baldwin /* Words 30/31 are zero. */ 127564a00f87SNavdeep Parhar ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, 30, 1276a5a965d7SJohn Baldwin 0xffffffffffffffff, 0); 1277a5a965d7SJohn Baldwin 1278a5a965d7SJohn Baldwin /* Set the ULP mode to ULP_MODE_TCPDDP. */ 1279a5a965d7SJohn Baldwin toep->params.ulp_mode = ULP_MODE_TCPDDP; 128064a00f87SNavdeep Parhar ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_ULP_TYPE, 128164a00f87SNavdeep Parhar V_TCB_ULP_TYPE(M_TCB_ULP_TYPE), V_TCB_ULP_TYPE(ULP_MODE_TCPDDP)); 1282a5a965d7SJohn Baldwin 1283a5a965d7SJohn Baldwin #ifdef USE_DDP_RX_FLOW_CONTROL 1284a5a965d7SJohn Baldwin /* Set TF_RX_FLOW_CONTROL_DDP. */ 128564a00f87SNavdeep Parhar ulpmc = mk_set_tcb_field_ulp(sc, ulpmc, toep->tid, W_TCB_T_FLAGS, 1286a5a965d7SJohn Baldwin V_TF_RX_FLOW_CONTROL_DDP(1), V_TF_RX_FLOW_CONTROL_DDP(1)); 1287a5a965d7SJohn Baldwin #endif 1288a5a965d7SJohn Baldwin 1289a5a965d7SJohn Baldwin ddp_init_toep(toep); 1290a5a965d7SJohn Baldwin 1291a5a965d7SJohn Baldwin t4_wrq_tx(sc, wr); 1292a5a965d7SJohn Baldwin return (true); 1293a5a965d7SJohn Baldwin } 1294a5a965d7SJohn Baldwin 1295dc964385SJohn Baldwin static void 1296e682d02eSNavdeep Parhar enable_ddp(struct adapter *sc, struct toepcb *toep) 1297e682d02eSNavdeep Parhar { 1298eba13bbcSJohn Baldwin uint64_t ddp_flags; 1299e682d02eSNavdeep Parhar 1300125d42feSJohn Baldwin KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK, 1301e682d02eSNavdeep Parhar ("%s: toep %p has bad ddp_flags 0x%x", 1302125d42feSJohn Baldwin __func__, toep, toep->ddp.flags)); 1303e682d02eSNavdeep Parhar 1304e682d02eSNavdeep Parhar CTR3(KTR_CXGBE, "%s: tid %u (time %u)", 1305e682d02eSNavdeep Parhar __func__, toep->tid, time_uptime); 1306e682d02eSNavdeep Parhar 1307eba13bbcSJohn Baldwin ddp_flags = 0; 1308eba13bbcSJohn Baldwin if ((toep->ddp.flags & DDP_AIO) != 0) 1309eba13bbcSJohn Baldwin ddp_flags |= V_TF_DDP_BUF0_INDICATE(1) | 1310eba13bbcSJohn Baldwin V_TF_DDP_BUF1_INDICATE(1); 1311dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 1312125d42feSJohn Baldwin toep->ddp.flags |= DDP_SC_REQ; 1313edf95febSJohn Baldwin t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_RX_DDP_FLAGS, 1314e682d02eSNavdeep Parhar V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) | 1315e682d02eSNavdeep Parhar V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) | 1316eba13bbcSJohn Baldwin V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1), ddp_flags, 0, 0); 1317edf95febSJohn Baldwin t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS, 1318edf95febSJohn Baldwin V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0); 
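	/*
	 * The first field write clears TF_DDP_OFF (turning DDP on) and
	 * the buffer-valid bits, leaving the indicate bits set only for
	 * the AIO flavour; the second clears TF_RCV_COALESCE_ENABLE, so
	 * receive coalescing is switched off while DDP is in use.
	 */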
1319e682d02eSNavdeep Parhar } 1320e682d02eSNavdeep Parhar 1321e682d02eSNavdeep Parhar static int 1322e682d02eSNavdeep Parhar calculate_hcf(int n1, int n2) 1323e682d02eSNavdeep Parhar { 1324e682d02eSNavdeep Parhar int a, b, t; 1325e682d02eSNavdeep Parhar 1326e682d02eSNavdeep Parhar if (n1 <= n2) { 1327e682d02eSNavdeep Parhar a = n1; 1328e682d02eSNavdeep Parhar b = n2; 1329e682d02eSNavdeep Parhar } else { 1330e682d02eSNavdeep Parhar a = n2; 1331e682d02eSNavdeep Parhar b = n1; 1332e682d02eSNavdeep Parhar } 1333e682d02eSNavdeep Parhar 1334e682d02eSNavdeep Parhar while (a != 0) { 1335e682d02eSNavdeep Parhar t = a; 1336e682d02eSNavdeep Parhar a = b % a; 1337e682d02eSNavdeep Parhar b = t; 1338e682d02eSNavdeep Parhar } 1339e682d02eSNavdeep Parhar 1340e682d02eSNavdeep Parhar return (b); 1341e682d02eSNavdeep Parhar } 1342e682d02eSNavdeep Parhar 1343968267fdSNavdeep Parhar static inline int 1344968267fdSNavdeep Parhar pages_to_nppods(int npages, int ddp_page_shift) 1345e682d02eSNavdeep Parhar { 1346dc964385SJohn Baldwin 1347968267fdSNavdeep Parhar MPASS(ddp_page_shift >= PAGE_SHIFT); 1348968267fdSNavdeep Parhar 1349968267fdSNavdeep Parhar return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES)); 1350968267fdSNavdeep Parhar } 1351968267fdSNavdeep Parhar 1352968267fdSNavdeep Parhar static int 1353968267fdSNavdeep Parhar alloc_page_pods(struct ppod_region *pr, u_int nppods, u_int pgsz_idx, 1354968267fdSNavdeep Parhar struct ppod_reservation *prsv) 1355968267fdSNavdeep Parhar { 1356968267fdSNavdeep Parhar vmem_addr_t addr; /* relative to start of region */ 1357968267fdSNavdeep Parhar 1358968267fdSNavdeep Parhar if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT, 1359968267fdSNavdeep Parhar &addr) != 0) 1360968267fdSNavdeep Parhar return (ENOMEM); 1361968267fdSNavdeep Parhar 136227539974SJohn Baldwin #ifdef VERBOSE_TRACES 1363968267fdSNavdeep Parhar CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d", 1364968267fdSNavdeep Parhar __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask, 1365968267fdSNavdeep Parhar nppods, 1 << pr->pr_page_shift[pgsz_idx]); 136627539974SJohn Baldwin #endif 1367968267fdSNavdeep Parhar 1368968267fdSNavdeep Parhar /* 1369968267fdSNavdeep Parhar * The hardware tagmask includes an extra invalid bit but the arena was 1370968267fdSNavdeep Parhar * seeded with valid values only. An allocation out of this arena will 1371968267fdSNavdeep Parhar * fit inside the tagmask but won't have the invalid bit set. 1372968267fdSNavdeep Parhar */ 1373968267fdSNavdeep Parhar MPASS((addr & pr->pr_tag_mask) == addr); 1374968267fdSNavdeep Parhar MPASS((addr & pr->pr_invalid_bit) == 0); 1375968267fdSNavdeep Parhar 1376968267fdSNavdeep Parhar prsv->prsv_pr = pr; 1377968267fdSNavdeep Parhar prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr; 1378968267fdSNavdeep Parhar prsv->prsv_nppods = nppods; 1379968267fdSNavdeep Parhar 1380968267fdSNavdeep Parhar return (0); 1381968267fdSNavdeep Parhar } 1382968267fdSNavdeep Parhar 13832beaefe8SJohn Baldwin static int 13842beaefe8SJohn Baldwin t4_alloc_page_pods_for_vmpages(struct ppod_region *pr, vm_page_t *pages, 13852beaefe8SJohn Baldwin int npages, struct ppod_reservation *prsv) 1386968267fdSNavdeep Parhar { 1387968267fdSNavdeep Parhar int i, hcf, seglen, idx, nppods; 1388e682d02eSNavdeep Parhar 1389e682d02eSNavdeep Parhar /* 1390e682d02eSNavdeep Parhar * The DDP page size is unrelated to the VM page size. 
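 * (Each of the four hardware sizes is 4KB << HPZ[i], per the psz
 * value decoded in t4_init_ppod_region(), so a system with 4KB VM
 * pages may still use much larger DDP pages.)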
We combine 1391e682d02eSNavdeep Parhar * contiguous physical pages into larger segments to get the best DDP 1392e682d02eSNavdeep Parhar * page size possible. This is the largest of the four sizes in 1393e682d02eSNavdeep Parhar * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes in 1394e682d02eSNavdeep Parhar * the page list. 1395e682d02eSNavdeep Parhar */ 1396e682d02eSNavdeep Parhar hcf = 0; 13972beaefe8SJohn Baldwin for (i = 0; i < npages; i++) { 1398e682d02eSNavdeep Parhar seglen = PAGE_SIZE; 13992beaefe8SJohn Baldwin while (i < npages - 1 && 14002beaefe8SJohn Baldwin VM_PAGE_TO_PHYS(pages[i]) + PAGE_SIZE == 14012beaefe8SJohn Baldwin VM_PAGE_TO_PHYS(pages[i + 1])) { 1402e682d02eSNavdeep Parhar seglen += PAGE_SIZE; 1403e682d02eSNavdeep Parhar i++; 1404e682d02eSNavdeep Parhar } 1405e682d02eSNavdeep Parhar 1406e682d02eSNavdeep Parhar hcf = calculate_hcf(hcf, seglen); 1407968267fdSNavdeep Parhar if (hcf < (1 << pr->pr_page_shift[1])) { 1408e682d02eSNavdeep Parhar idx = 0; 1409e682d02eSNavdeep Parhar goto have_pgsz; /* give up, short circuit */ 1410e682d02eSNavdeep Parhar } 1411e682d02eSNavdeep Parhar } 1412e682d02eSNavdeep Parhar 1413968267fdSNavdeep Parhar #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) 1414968267fdSNavdeep Parhar MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */ 1415968267fdSNavdeep Parhar for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { 1416968267fdSNavdeep Parhar if ((hcf & PR_PAGE_MASK(idx)) == 0) 1417e682d02eSNavdeep Parhar break; 1418e682d02eSNavdeep Parhar } 1419968267fdSNavdeep Parhar #undef PR_PAGE_MASK 1420968267fdSNavdeep Parhar 1421e682d02eSNavdeep Parhar have_pgsz: 1422db8bcd1bSNavdeep Parhar MPASS(idx <= M_PPOD_PGSZ); 1423e682d02eSNavdeep Parhar 14242beaefe8SJohn Baldwin nppods = pages_to_nppods(npages, pr->pr_page_shift[idx]); 1425968267fdSNavdeep Parhar if (alloc_page_pods(pr, nppods, idx, prsv) != 0) 14262beaefe8SJohn Baldwin return (ENOMEM); 1427968267fdSNavdeep Parhar MPASS(prsv->prsv_nppods > 0); 1428e682d02eSNavdeep Parhar 14292beaefe8SJohn Baldwin return (0); 14302beaefe8SJohn Baldwin } 14312beaefe8SJohn Baldwin 14322beaefe8SJohn Baldwin int 14332beaefe8SJohn Baldwin t4_alloc_page_pods_for_ps(struct ppod_region *pr, struct pageset *ps) 14342beaefe8SJohn Baldwin { 14352beaefe8SJohn Baldwin struct ppod_reservation *prsv = &ps->prsv; 14362beaefe8SJohn Baldwin 14372beaefe8SJohn Baldwin KASSERT(prsv->prsv_nppods == 0, 14382beaefe8SJohn Baldwin ("%s: page pods already allocated", __func__)); 14392beaefe8SJohn Baldwin 14402beaefe8SJohn Baldwin return (t4_alloc_page_pods_for_vmpages(pr, ps->pages, ps->npages, 14412beaefe8SJohn Baldwin prsv)); 14422beaefe8SJohn Baldwin } 14432beaefe8SJohn Baldwin 14442beaefe8SJohn Baldwin int 14452beaefe8SJohn Baldwin t4_alloc_page_pods_for_bio(struct ppod_region *pr, struct bio *bp, 14462beaefe8SJohn Baldwin struct ppod_reservation *prsv) 14472beaefe8SJohn Baldwin { 14482beaefe8SJohn Baldwin 14492beaefe8SJohn Baldwin MPASS(bp->bio_flags & BIO_UNMAPPED); 14502beaefe8SJohn Baldwin 14512beaefe8SJohn Baldwin return (t4_alloc_page_pods_for_vmpages(pr, bp->bio_ma, bp->bio_ma_n, 14522beaefe8SJohn Baldwin prsv)); 1453e682d02eSNavdeep Parhar } 1454e682d02eSNavdeep Parhar 1455a9feb2cdSNavdeep Parhar int 1456a9feb2cdSNavdeep Parhar t4_alloc_page_pods_for_buf(struct ppod_region *pr, vm_offset_t buf, int len, 1457a9feb2cdSNavdeep Parhar struct ppod_reservation *prsv) 1458a9feb2cdSNavdeep Parhar { 1459a9feb2cdSNavdeep Parhar int hcf, seglen, idx, npages, nppods; 
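	/*
	 * This variant walks a KVA range, discovering physically
	 * contiguous runs via pmap_kextract().  As a worked example of
	 * the sizing logic below: runs of 64K, 16K, and 24K have an
	 * HCF of 8K, so the largest configured DDP page size that
	 * divides 8K evenly would be selected.
	 */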
1460a9feb2cdSNavdeep Parhar uintptr_t start_pva, end_pva, pva, p1; 1461a9feb2cdSNavdeep Parhar 1462a9feb2cdSNavdeep Parhar MPASS(buf > 0); 1463a9feb2cdSNavdeep Parhar MPASS(len > 0); 1464a9feb2cdSNavdeep Parhar 1465a9feb2cdSNavdeep Parhar /* 1466a9feb2cdSNavdeep Parhar * The DDP page size is unrelated to the VM page size. We combine 1467a9feb2cdSNavdeep Parhar * contiguous physical pages into larger segments to get the best DDP 1468a9feb2cdSNavdeep Parhar * page size possible. This is the largest of the four sizes in 1469a9feb2cdSNavdeep Parhar * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes 1470a9feb2cdSNavdeep Parhar * in the page list. 1471a9feb2cdSNavdeep Parhar */ 1472a9feb2cdSNavdeep Parhar hcf = 0; 1473a9feb2cdSNavdeep Parhar start_pva = trunc_page(buf); 1474a9feb2cdSNavdeep Parhar end_pva = trunc_page(buf + len - 1); 1475a9feb2cdSNavdeep Parhar pva = start_pva; 1476a9feb2cdSNavdeep Parhar while (pva <= end_pva) { 1477a9feb2cdSNavdeep Parhar seglen = PAGE_SIZE; 1478a9feb2cdSNavdeep Parhar p1 = pmap_kextract(pva); 1479a9feb2cdSNavdeep Parhar pva += PAGE_SIZE; 1480a9feb2cdSNavdeep Parhar while (pva <= end_pva && p1 + seglen == pmap_kextract(pva)) { 1481a9feb2cdSNavdeep Parhar seglen += PAGE_SIZE; 1482a9feb2cdSNavdeep Parhar pva += PAGE_SIZE; 1483a9feb2cdSNavdeep Parhar } 1484a9feb2cdSNavdeep Parhar 1485a9feb2cdSNavdeep Parhar hcf = calculate_hcf(hcf, seglen); 1486a9feb2cdSNavdeep Parhar if (hcf < (1 << pr->pr_page_shift[1])) { 1487a9feb2cdSNavdeep Parhar idx = 0; 1488a9feb2cdSNavdeep Parhar goto have_pgsz; /* give up, short circuit */ 1489a9feb2cdSNavdeep Parhar } 1490a9feb2cdSNavdeep Parhar } 1491a9feb2cdSNavdeep Parhar 1492a9feb2cdSNavdeep Parhar #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) 1493a9feb2cdSNavdeep Parhar MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */ 1494a9feb2cdSNavdeep Parhar for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { 1495a9feb2cdSNavdeep Parhar if ((hcf & PR_PAGE_MASK(idx)) == 0) 1496a9feb2cdSNavdeep Parhar break; 1497a9feb2cdSNavdeep Parhar } 1498a9feb2cdSNavdeep Parhar #undef PR_PAGE_MASK 1499a9feb2cdSNavdeep Parhar 1500a9feb2cdSNavdeep Parhar have_pgsz: 1501a9feb2cdSNavdeep Parhar MPASS(idx <= M_PPOD_PGSZ); 1502a9feb2cdSNavdeep Parhar 1503a9feb2cdSNavdeep Parhar npages = 1; 1504a9feb2cdSNavdeep Parhar npages += (end_pva - start_pva) >> pr->pr_page_shift[idx]; 1505a9feb2cdSNavdeep Parhar nppods = howmany(npages, PPOD_PAGES); 1506a9feb2cdSNavdeep Parhar if (alloc_page_pods(pr, nppods, idx, prsv) != 0) 1507a9feb2cdSNavdeep Parhar return (ENOMEM); 1508a9feb2cdSNavdeep Parhar MPASS(prsv->prsv_nppods > 0); 1509a9feb2cdSNavdeep Parhar 1510a9feb2cdSNavdeep Parhar return (0); 1511a9feb2cdSNavdeep Parhar } 1512a9feb2cdSNavdeep Parhar 1513eba13bbcSJohn Baldwin static int 1514eba13bbcSJohn Baldwin t4_alloc_page_pods_for_rcvbuf(struct ppod_region *pr, 1515eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb) 1516eba13bbcSJohn Baldwin { 1517eba13bbcSJohn Baldwin struct ppod_reservation *prsv = &drb->prsv; 1518eba13bbcSJohn Baldwin 1519eba13bbcSJohn Baldwin KASSERT(prsv->prsv_nppods == 0, 1520eba13bbcSJohn Baldwin ("%s: page pods already allocated", __func__)); 1521eba13bbcSJohn Baldwin 1522eba13bbcSJohn Baldwin return (t4_alloc_page_pods_for_buf(pr, (vm_offset_t)drb->buf, drb->len, 1523eba13bbcSJohn Baldwin prsv)); 1524eba13bbcSJohn Baldwin } 1525eba13bbcSJohn Baldwin 152646bee804SJohn Baldwin int 152746bee804SJohn Baldwin t4_alloc_page_pods_for_sgl(struct ppod_region *pr, struct ctl_sg_entry *sgl, 
152846bee804SJohn Baldwin int entries, struct ppod_reservation *prsv) 152946bee804SJohn Baldwin { 153046bee804SJohn Baldwin int hcf, seglen, idx = 0, npages, nppods, i, len; 153146bee804SJohn Baldwin uintptr_t start_pva, end_pva, pva, p1 ; 153246bee804SJohn Baldwin vm_offset_t buf; 153346bee804SJohn Baldwin struct ctl_sg_entry *sge; 153446bee804SJohn Baldwin 153546bee804SJohn Baldwin MPASS(entries > 0); 153646bee804SJohn Baldwin MPASS(sgl); 153746bee804SJohn Baldwin 153846bee804SJohn Baldwin /* 153946bee804SJohn Baldwin * The DDP page size is unrelated to the VM page size. We combine 154046bee804SJohn Baldwin * contiguous physical pages into larger segments to get the best DDP 154146bee804SJohn Baldwin * page size possible. This is the largest of the four sizes in 154246bee804SJohn Baldwin * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes 154346bee804SJohn Baldwin * in the page list. 154446bee804SJohn Baldwin */ 154546bee804SJohn Baldwin hcf = 0; 154646bee804SJohn Baldwin for (i = entries - 1; i >= 0; i--) { 154746bee804SJohn Baldwin sge = sgl + i; 154846bee804SJohn Baldwin buf = (vm_offset_t)sge->addr; 154946bee804SJohn Baldwin len = sge->len; 155046bee804SJohn Baldwin start_pva = trunc_page(buf); 155146bee804SJohn Baldwin end_pva = trunc_page(buf + len - 1); 155246bee804SJohn Baldwin pva = start_pva; 155346bee804SJohn Baldwin while (pva <= end_pva) { 155446bee804SJohn Baldwin seglen = PAGE_SIZE; 155546bee804SJohn Baldwin p1 = pmap_kextract(pva); 155646bee804SJohn Baldwin pva += PAGE_SIZE; 155746bee804SJohn Baldwin while (pva <= end_pva && p1 + seglen == 155846bee804SJohn Baldwin pmap_kextract(pva)) { 155946bee804SJohn Baldwin seglen += PAGE_SIZE; 156046bee804SJohn Baldwin pva += PAGE_SIZE; 156146bee804SJohn Baldwin } 156246bee804SJohn Baldwin 156346bee804SJohn Baldwin hcf = calculate_hcf(hcf, seglen); 156446bee804SJohn Baldwin if (hcf < (1 << pr->pr_page_shift[1])) { 156546bee804SJohn Baldwin idx = 0; 156646bee804SJohn Baldwin goto have_pgsz; /* give up, short circuit */ 156746bee804SJohn Baldwin } 156846bee804SJohn Baldwin } 156946bee804SJohn Baldwin } 157046bee804SJohn Baldwin #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) 157146bee804SJohn Baldwin MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */ 157246bee804SJohn Baldwin for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { 157346bee804SJohn Baldwin if ((hcf & PR_PAGE_MASK(idx)) == 0) 157446bee804SJohn Baldwin break; 157546bee804SJohn Baldwin } 157646bee804SJohn Baldwin #undef PR_PAGE_MASK 157746bee804SJohn Baldwin 157846bee804SJohn Baldwin have_pgsz: 157946bee804SJohn Baldwin MPASS(idx <= M_PPOD_PGSZ); 158046bee804SJohn Baldwin 158146bee804SJohn Baldwin npages = 0; 158246bee804SJohn Baldwin while (entries--) { 158346bee804SJohn Baldwin npages++; 15848d2b4b2eSJohn Baldwin start_pva = trunc_page((vm_offset_t)sgl->addr); 158546bee804SJohn Baldwin end_pva = trunc_page((vm_offset_t)sgl->addr + sgl->len - 1); 158646bee804SJohn Baldwin npages += (end_pva - start_pva) >> pr->pr_page_shift[idx]; 158746bee804SJohn Baldwin sgl = sgl + 1; 158846bee804SJohn Baldwin } 158946bee804SJohn Baldwin nppods = howmany(npages, PPOD_PAGES); 159046bee804SJohn Baldwin if (alloc_page_pods(pr, nppods, idx, prsv) != 0) 159146bee804SJohn Baldwin return (ENOMEM); 159246bee804SJohn Baldwin MPASS(prsv->prsv_nppods > 0); 159346bee804SJohn Baldwin return (0); 159446bee804SJohn Baldwin } 159546bee804SJohn Baldwin 1596968267fdSNavdeep Parhar void 1597968267fdSNavdeep Parhar t4_free_page_pods(struct 
ppod_reservation *prsv) 1598968267fdSNavdeep Parhar { 1599968267fdSNavdeep Parhar struct ppod_region *pr = prsv->prsv_pr; 1600968267fdSNavdeep Parhar vmem_addr_t addr; 1601968267fdSNavdeep Parhar 1602968267fdSNavdeep Parhar MPASS(prsv != NULL); 1603968267fdSNavdeep Parhar MPASS(prsv->prsv_nppods != 0); 1604968267fdSNavdeep Parhar 1605968267fdSNavdeep Parhar addr = prsv->prsv_tag & pr->pr_tag_mask; 1606968267fdSNavdeep Parhar MPASS((addr & pr->pr_invalid_bit) == 0); 1607968267fdSNavdeep Parhar 160827539974SJohn Baldwin #ifdef VERBOSE_TRACES 1609968267fdSNavdeep Parhar CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__, 1610968267fdSNavdeep Parhar pr->pr_arena, addr, prsv->prsv_nppods); 161127539974SJohn Baldwin #endif 1612968267fdSNavdeep Parhar 1613968267fdSNavdeep Parhar vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods)); 1614968267fdSNavdeep Parhar prsv->prsv_nppods = 0; 1615968267fdSNavdeep Parhar } 1616968267fdSNavdeep Parhar 1617e682d02eSNavdeep Parhar #define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE) 1618e682d02eSNavdeep Parhar 1619968267fdSNavdeep Parhar int 1620968267fdSNavdeep Parhar t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid, 1621968267fdSNavdeep Parhar struct pageset *ps) 1622e682d02eSNavdeep Parhar { 1623e682d02eSNavdeep Parhar struct wrqe *wr; 1624e682d02eSNavdeep Parhar struct ulp_mem_io *ulpmc; 1625e682d02eSNavdeep Parhar struct ulptx_idata *ulpsc; 1626e682d02eSNavdeep Parhar struct pagepod *ppod; 1627db8bcd1bSNavdeep Parhar int i, j, k, n, chunk, len, ddp_pgsz, idx; 1628db8bcd1bSNavdeep Parhar u_int ppod_addr; 162988c4ff7bSNavdeep Parhar uint32_t cmd; 1630968267fdSNavdeep Parhar struct ppod_reservation *prsv = &ps->prsv; 1631968267fdSNavdeep Parhar struct ppod_region *pr = prsv->prsv_pr; 163287b0e771SJohn Baldwin vm_paddr_t pa; 1633e682d02eSNavdeep Parhar 1634dc964385SJohn Baldwin KASSERT(!(ps->flags & PS_PPODS_WRITTEN), 1635dc964385SJohn Baldwin ("%s: page pods already written", __func__)); 1636968267fdSNavdeep Parhar MPASS(prsv->prsv_nppods > 0); 1637dc964385SJohn Baldwin 163888c4ff7bSNavdeep Parhar cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 163988c4ff7bSNavdeep Parhar if (is_t4(sc)) 164088c4ff7bSNavdeep Parhar cmd |= htobe32(F_ULP_MEMIO_ORDER); 164188c4ff7bSNavdeep Parhar else 164288c4ff7bSNavdeep Parhar cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 1643968267fdSNavdeep Parhar ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 1644968267fdSNavdeep Parhar ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 1645968267fdSNavdeep Parhar for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 1646e682d02eSNavdeep Parhar /* How many page pods are we writing in this cycle */ 1647968267fdSNavdeep Parhar n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 1648e682d02eSNavdeep Parhar chunk = PPOD_SZ(n); 1649d14b0ac1SNavdeep Parhar len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 1650e682d02eSNavdeep Parhar 1651968267fdSNavdeep Parhar wr = alloc_wrqe(len, wrq); 1652e682d02eSNavdeep Parhar if (wr == NULL) 1653e682d02eSNavdeep Parhar return (ENOMEM); /* ok to just bail out */ 1654e682d02eSNavdeep Parhar ulpmc = wrtod(wr); 1655e682d02eSNavdeep Parhar 1656e682d02eSNavdeep Parhar INIT_ULPTX_WR(ulpmc, len, 0, 0); 165788c4ff7bSNavdeep Parhar ulpmc->cmd = cmd; 1658e682d02eSNavdeep Parhar ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 1659e682d02eSNavdeep Parhar ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 1660e682d02eSNavdeep Parhar ulpmc->lock_addr = 
htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 1661e682d02eSNavdeep Parhar 1662e682d02eSNavdeep Parhar ulpsc = (struct ulptx_idata *)(ulpmc + 1); 1663e682d02eSNavdeep Parhar ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 1664e682d02eSNavdeep Parhar ulpsc->len = htobe32(chunk); 1665e682d02eSNavdeep Parhar 1666e682d02eSNavdeep Parhar ppod = (struct pagepod *)(ulpsc + 1); 1667e682d02eSNavdeep Parhar for (j = 0; j < n; i++, j++, ppod++) { 1668e682d02eSNavdeep Parhar ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 1669968267fdSNavdeep Parhar V_PPOD_TID(tid) | prsv->prsv_tag); 1670dc964385SJohn Baldwin ppod->len_offset = htobe64(V_PPOD_LEN(ps->len) | 1671dc964385SJohn Baldwin V_PPOD_OFST(ps->offset)); 1672e682d02eSNavdeep Parhar ppod->rsvd = 0; 1673e682d02eSNavdeep Parhar idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE); 167457c60f98SNavdeep Parhar for (k = 0; k < nitems(ppod->addr); k++) { 1675dc964385SJohn Baldwin if (idx < ps->npages) { 167687b0e771SJohn Baldwin pa = VM_PAGE_TO_PHYS(ps->pages[idx]); 167787b0e771SJohn Baldwin ppod->addr[k] = htobe64(pa); 1678e682d02eSNavdeep Parhar idx += ddp_pgsz / PAGE_SIZE; 1679e682d02eSNavdeep Parhar } else 1680e682d02eSNavdeep Parhar ppod->addr[k] = 0; 1681e682d02eSNavdeep Parhar #if 0 1682e682d02eSNavdeep Parhar CTR5(KTR_CXGBE, 1683e682d02eSNavdeep Parhar "%s: tid %d ppod[%d]->addr[%d] = %p", 1684bca6e339SJohn Baldwin __func__, tid, i, k, 168544e7472dSJohn Baldwin be64toh(ppod->addr[k])); 1686e682d02eSNavdeep Parhar #endif 1687e682d02eSNavdeep Parhar } 1688e682d02eSNavdeep Parhar 1689e682d02eSNavdeep Parhar } 1690e682d02eSNavdeep Parhar 1691e682d02eSNavdeep Parhar t4_wrq_tx(sc, wr); 1692e682d02eSNavdeep Parhar } 1693dc964385SJohn Baldwin ps->flags |= PS_PPODS_WRITTEN; 1694e682d02eSNavdeep Parhar 1695e682d02eSNavdeep Parhar return (0); 1696e682d02eSNavdeep Parhar } 1697e682d02eSNavdeep Parhar 1698eba13bbcSJohn Baldwin static int 1699eba13bbcSJohn Baldwin t4_write_page_pods_for_rcvbuf(struct adapter *sc, struct sge_wrq *wrq, int tid, 1700eba13bbcSJohn Baldwin struct ddp_rcv_buffer *drb) 1701eba13bbcSJohn Baldwin { 1702eba13bbcSJohn Baldwin struct wrqe *wr; 1703eba13bbcSJohn Baldwin struct ulp_mem_io *ulpmc; 1704eba13bbcSJohn Baldwin struct ulptx_idata *ulpsc; 1705eba13bbcSJohn Baldwin struct pagepod *ppod; 1706eba13bbcSJohn Baldwin int i, j, k, n, chunk, len, ddp_pgsz; 1707eba13bbcSJohn Baldwin u_int ppod_addr, offset; 1708eba13bbcSJohn Baldwin uint32_t cmd; 1709eba13bbcSJohn Baldwin struct ppod_reservation *prsv = &drb->prsv; 1710eba13bbcSJohn Baldwin struct ppod_region *pr = prsv->prsv_pr; 1711eba13bbcSJohn Baldwin uintptr_t end_pva, pva; 1712eba13bbcSJohn Baldwin vm_paddr_t pa; 1713eba13bbcSJohn Baldwin 1714eba13bbcSJohn Baldwin MPASS(prsv->prsv_nppods > 0); 1715eba13bbcSJohn Baldwin 1716eba13bbcSJohn Baldwin cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 1717eba13bbcSJohn Baldwin if (is_t4(sc)) 1718eba13bbcSJohn Baldwin cmd |= htobe32(F_ULP_MEMIO_ORDER); 1719eba13bbcSJohn Baldwin else 1720eba13bbcSJohn Baldwin cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 1721eba13bbcSJohn Baldwin ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 1722eba13bbcSJohn Baldwin offset = (uintptr_t)drb->buf & PAGE_MASK; 1723eba13bbcSJohn Baldwin ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 1724eba13bbcSJohn Baldwin pva = trunc_page((uintptr_t)drb->buf); 1725eba13bbcSJohn Baldwin end_pva = trunc_page((uintptr_t)drb->buf + drb->len - 1); 1726eba13bbcSJohn Baldwin for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 1727eba13bbcSJohn Baldwin /* 
How many page pods are we writing in this cycle */ 1728eba13bbcSJohn Baldwin n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 1729eba13bbcSJohn Baldwin MPASS(n > 0); 1730eba13bbcSJohn Baldwin chunk = PPOD_SZ(n); 1731eba13bbcSJohn Baldwin len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 1732eba13bbcSJohn Baldwin 1733eba13bbcSJohn Baldwin wr = alloc_wrqe(len, wrq); 1734eba13bbcSJohn Baldwin if (wr == NULL) 1735eba13bbcSJohn Baldwin return (ENOMEM); /* ok to just bail out */ 1736eba13bbcSJohn Baldwin ulpmc = wrtod(wr); 1737eba13bbcSJohn Baldwin 1738eba13bbcSJohn Baldwin INIT_ULPTX_WR(ulpmc, len, 0, 0); 1739eba13bbcSJohn Baldwin ulpmc->cmd = cmd; 1740eba13bbcSJohn Baldwin ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 1741eba13bbcSJohn Baldwin ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 1742eba13bbcSJohn Baldwin ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 1743eba13bbcSJohn Baldwin 1744eba13bbcSJohn Baldwin ulpsc = (struct ulptx_idata *)(ulpmc + 1); 1745eba13bbcSJohn Baldwin ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 1746eba13bbcSJohn Baldwin ulpsc->len = htobe32(chunk); 1747eba13bbcSJohn Baldwin 1748eba13bbcSJohn Baldwin ppod = (struct pagepod *)(ulpsc + 1); 1749eba13bbcSJohn Baldwin for (j = 0; j < n; i++, j++, ppod++) { 1750eba13bbcSJohn Baldwin ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 1751eba13bbcSJohn Baldwin V_PPOD_TID(tid) | prsv->prsv_tag); 1752eba13bbcSJohn Baldwin ppod->len_offset = htobe64(V_PPOD_LEN(drb->len) | 1753eba13bbcSJohn Baldwin V_PPOD_OFST(offset)); 1754eba13bbcSJohn Baldwin ppod->rsvd = 0; 1755eba13bbcSJohn Baldwin 1756eba13bbcSJohn Baldwin for (k = 0; k < nitems(ppod->addr); k++) { 1757eba13bbcSJohn Baldwin if (pva > end_pva) 1758eba13bbcSJohn Baldwin ppod->addr[k] = 0; 1759eba13bbcSJohn Baldwin else { 1760eba13bbcSJohn Baldwin pa = pmap_kextract(pva); 1761eba13bbcSJohn Baldwin ppod->addr[k] = htobe64(pa); 1762eba13bbcSJohn Baldwin pva += ddp_pgsz; 1763eba13bbcSJohn Baldwin } 1764eba13bbcSJohn Baldwin #if 0 1765eba13bbcSJohn Baldwin CTR5(KTR_CXGBE, 1766eba13bbcSJohn Baldwin "%s: tid %d ppod[%d]->addr[%d] = %p", 1767eba13bbcSJohn Baldwin __func__, tid, i, k, 1768eba13bbcSJohn Baldwin be64toh(ppod->addr[k])); 1769eba13bbcSJohn Baldwin #endif 1770eba13bbcSJohn Baldwin } 1771eba13bbcSJohn Baldwin 1772eba13bbcSJohn Baldwin /* 1773eba13bbcSJohn Baldwin * Walk back 1 segment so that the first address in the 1774eba13bbcSJohn Baldwin * next pod is the same as the last one in the current 1775eba13bbcSJohn Baldwin * pod. 
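 * (A pod carries nitems(ppod->addr), i.e. PPOD_PAGES + 1,
 * addresses for its PPOD_PAGES pages, so adjacent pods are
 * expected to share the boundary address.)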
1776eba13bbcSJohn Baldwin */ 1777eba13bbcSJohn Baldwin pva -= ddp_pgsz; 1778eba13bbcSJohn Baldwin } 1779eba13bbcSJohn Baldwin 1780eba13bbcSJohn Baldwin t4_wrq_tx(sc, wr); 1781eba13bbcSJohn Baldwin } 1782eba13bbcSJohn Baldwin 1783eba13bbcSJohn Baldwin MPASS(pva <= end_pva); 1784eba13bbcSJohn Baldwin 1785eba13bbcSJohn Baldwin return (0); 1786eba13bbcSJohn Baldwin } 1787eba13bbcSJohn Baldwin 17884427ac36SJohn Baldwin static struct mbuf * 17894427ac36SJohn Baldwin alloc_raw_wr_mbuf(int len) 17904427ac36SJohn Baldwin { 17914427ac36SJohn Baldwin struct mbuf *m; 17924427ac36SJohn Baldwin 17934427ac36SJohn Baldwin if (len <= MHLEN) 17944427ac36SJohn Baldwin m = m_gethdr(M_NOWAIT, MT_DATA); 17954427ac36SJohn Baldwin else if (len <= MCLBYTES) 17964427ac36SJohn Baldwin m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 17974427ac36SJohn Baldwin else 17984427ac36SJohn Baldwin m = NULL; 17994427ac36SJohn Baldwin if (m == NULL) 18004427ac36SJohn Baldwin return (NULL); 18014427ac36SJohn Baldwin m->m_pkthdr.len = len; 18024427ac36SJohn Baldwin m->m_len = len; 18034427ac36SJohn Baldwin set_mbuf_raw_wr(m, true); 18044427ac36SJohn Baldwin return (m); 18054427ac36SJohn Baldwin } 18064427ac36SJohn Baldwin 1807a9feb2cdSNavdeep Parhar int 18082beaefe8SJohn Baldwin t4_write_page_pods_for_bio(struct adapter *sc, struct toepcb *toep, 18092beaefe8SJohn Baldwin struct ppod_reservation *prsv, struct bio *bp, struct mbufq *wrq) 18102beaefe8SJohn Baldwin { 18112beaefe8SJohn Baldwin struct ulp_mem_io *ulpmc; 18122beaefe8SJohn Baldwin struct ulptx_idata *ulpsc; 18132beaefe8SJohn Baldwin struct pagepod *ppod; 18142beaefe8SJohn Baldwin int i, j, k, n, chunk, len, ddp_pgsz, idx; 18152beaefe8SJohn Baldwin u_int ppod_addr; 18162beaefe8SJohn Baldwin uint32_t cmd; 18172beaefe8SJohn Baldwin struct ppod_region *pr = prsv->prsv_pr; 18182beaefe8SJohn Baldwin vm_paddr_t pa; 18192beaefe8SJohn Baldwin struct mbuf *m; 18202beaefe8SJohn Baldwin 18212beaefe8SJohn Baldwin MPASS(bp->bio_flags & BIO_UNMAPPED); 18222beaefe8SJohn Baldwin 18232beaefe8SJohn Baldwin cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 18242beaefe8SJohn Baldwin if (is_t4(sc)) 18252beaefe8SJohn Baldwin cmd |= htobe32(F_ULP_MEMIO_ORDER); 18262beaefe8SJohn Baldwin else 18272beaefe8SJohn Baldwin cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 18282beaefe8SJohn Baldwin ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 18292beaefe8SJohn Baldwin ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 18302beaefe8SJohn Baldwin for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 18312beaefe8SJohn Baldwin 18322beaefe8SJohn Baldwin /* How many page pods are we writing in this cycle */ 18332beaefe8SJohn Baldwin n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 18342beaefe8SJohn Baldwin MPASS(n > 0); 18352beaefe8SJohn Baldwin chunk = PPOD_SZ(n); 18362beaefe8SJohn Baldwin len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 18372beaefe8SJohn Baldwin 18382beaefe8SJohn Baldwin m = alloc_raw_wr_mbuf(len); 18392beaefe8SJohn Baldwin if (m == NULL) 18402beaefe8SJohn Baldwin return (ENOMEM); 18412beaefe8SJohn Baldwin 18422beaefe8SJohn Baldwin ulpmc = mtod(m, struct ulp_mem_io *); 18432beaefe8SJohn Baldwin INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); 18442beaefe8SJohn Baldwin ulpmc->cmd = cmd; 18452beaefe8SJohn Baldwin ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 18462beaefe8SJohn Baldwin ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 18472beaefe8SJohn Baldwin ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 18482beaefe8SJohn Baldwin 
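		/*
		 * The pods for this chunk follow as immediate data in a
		 * ULP_TX_SC_IMM sub-command; NUM_ULP_TX_SC_IMM_PPODS
		 * caps how many pods fit in a single raw work request.
		 */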
18492beaefe8SJohn Baldwin ulpsc = (struct ulptx_idata *)(ulpmc + 1); 18502beaefe8SJohn Baldwin ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 18512beaefe8SJohn Baldwin ulpsc->len = htobe32(chunk); 18522beaefe8SJohn Baldwin 18532beaefe8SJohn Baldwin ppod = (struct pagepod *)(ulpsc + 1); 18542beaefe8SJohn Baldwin for (j = 0; j < n; i++, j++, ppod++) { 18552beaefe8SJohn Baldwin ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 18562beaefe8SJohn Baldwin V_PPOD_TID(toep->tid) | 18572beaefe8SJohn Baldwin (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); 18582beaefe8SJohn Baldwin ppod->len_offset = htobe64(V_PPOD_LEN(bp->bio_bcount) | 18592beaefe8SJohn Baldwin V_PPOD_OFST(bp->bio_ma_offset)); 18602beaefe8SJohn Baldwin ppod->rsvd = 0; 18612beaefe8SJohn Baldwin idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE); 18622beaefe8SJohn Baldwin for (k = 0; k < nitems(ppod->addr); k++) { 18632beaefe8SJohn Baldwin if (idx < bp->bio_ma_n) { 18642beaefe8SJohn Baldwin pa = VM_PAGE_TO_PHYS(bp->bio_ma[idx]); 18652beaefe8SJohn Baldwin ppod->addr[k] = htobe64(pa); 18662beaefe8SJohn Baldwin idx += ddp_pgsz / PAGE_SIZE; 18672beaefe8SJohn Baldwin } else 18682beaefe8SJohn Baldwin ppod->addr[k] = 0; 18692beaefe8SJohn Baldwin #if 0 18702beaefe8SJohn Baldwin CTR5(KTR_CXGBE, 18712beaefe8SJohn Baldwin "%s: tid %d ppod[%d]->addr[%d] = %p", 18722beaefe8SJohn Baldwin __func__, toep->tid, i, k, 18732beaefe8SJohn Baldwin be64toh(ppod->addr[k])); 18742beaefe8SJohn Baldwin #endif 18752beaefe8SJohn Baldwin } 18762beaefe8SJohn Baldwin } 18772beaefe8SJohn Baldwin 18782beaefe8SJohn Baldwin mbufq_enqueue(wrq, m); 18792beaefe8SJohn Baldwin } 18802beaefe8SJohn Baldwin 18812beaefe8SJohn Baldwin return (0); 18822beaefe8SJohn Baldwin } 18832beaefe8SJohn Baldwin 18842beaefe8SJohn Baldwin int 18854427ac36SJohn Baldwin t4_write_page_pods_for_buf(struct adapter *sc, struct toepcb *toep, 1886f949967cSJohn Baldwin struct ppod_reservation *prsv, vm_offset_t buf, int buflen, 1887f949967cSJohn Baldwin struct mbufq *wrq) 1888a9feb2cdSNavdeep Parhar { 1889a9feb2cdSNavdeep Parhar struct ulp_mem_io *ulpmc; 1890a9feb2cdSNavdeep Parhar struct ulptx_idata *ulpsc; 1891a9feb2cdSNavdeep Parhar struct pagepod *ppod; 1892a9feb2cdSNavdeep Parhar int i, j, k, n, chunk, len, ddp_pgsz; 1893a9feb2cdSNavdeep Parhar u_int ppod_addr, offset; 1894a9feb2cdSNavdeep Parhar uint32_t cmd; 1895a9feb2cdSNavdeep Parhar struct ppod_region *pr = prsv->prsv_pr; 1896de414339SJohn Baldwin uintptr_t end_pva, pva; 1897de414339SJohn Baldwin vm_paddr_t pa; 18984427ac36SJohn Baldwin struct mbuf *m; 1899a9feb2cdSNavdeep Parhar 1900a9feb2cdSNavdeep Parhar cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 1901a9feb2cdSNavdeep Parhar if (is_t4(sc)) 1902a9feb2cdSNavdeep Parhar cmd |= htobe32(F_ULP_MEMIO_ORDER); 1903a9feb2cdSNavdeep Parhar else 1904a9feb2cdSNavdeep Parhar cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 1905a9feb2cdSNavdeep Parhar ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 1906a9feb2cdSNavdeep Parhar offset = buf & PAGE_MASK; 1907a9feb2cdSNavdeep Parhar ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 1908a9feb2cdSNavdeep Parhar pva = trunc_page(buf); 1909a9feb2cdSNavdeep Parhar end_pva = trunc_page(buf + buflen - 1); 1910a9feb2cdSNavdeep Parhar for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 1911a9feb2cdSNavdeep Parhar 1912a9feb2cdSNavdeep Parhar /* How many page pods are we writing in this cycle */ 1913a9feb2cdSNavdeep Parhar n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 1914a9feb2cdSNavdeep Parhar MPASS(n > 0); 1915a9feb2cdSNavdeep 
Parhar chunk = PPOD_SZ(n); 1916a9feb2cdSNavdeep Parhar len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 1917a9feb2cdSNavdeep Parhar 19184427ac36SJohn Baldwin m = alloc_raw_wr_mbuf(len); 1919f949967cSJohn Baldwin if (m == NULL) 19204427ac36SJohn Baldwin return (ENOMEM); 19214427ac36SJohn Baldwin ulpmc = mtod(m, struct ulp_mem_io *); 1922a9feb2cdSNavdeep Parhar 19234427ac36SJohn Baldwin INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); 1924a9feb2cdSNavdeep Parhar ulpmc->cmd = cmd; 1925a9feb2cdSNavdeep Parhar ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 1926a9feb2cdSNavdeep Parhar ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 1927a9feb2cdSNavdeep Parhar ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 1928a9feb2cdSNavdeep Parhar 1929a9feb2cdSNavdeep Parhar ulpsc = (struct ulptx_idata *)(ulpmc + 1); 1930a9feb2cdSNavdeep Parhar ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 1931a9feb2cdSNavdeep Parhar ulpsc->len = htobe32(chunk); 1932a9feb2cdSNavdeep Parhar 1933a9feb2cdSNavdeep Parhar ppod = (struct pagepod *)(ulpsc + 1); 1934a9feb2cdSNavdeep Parhar for (j = 0; j < n; i++, j++, ppod++) { 1935a9feb2cdSNavdeep Parhar ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 19364427ac36SJohn Baldwin V_PPOD_TID(toep->tid) | 1937a9feb2cdSNavdeep Parhar (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); 1938a9feb2cdSNavdeep Parhar ppod->len_offset = htobe64(V_PPOD_LEN(buflen) | 1939a9feb2cdSNavdeep Parhar V_PPOD_OFST(offset)); 1940a9feb2cdSNavdeep Parhar ppod->rsvd = 0; 1941a9feb2cdSNavdeep Parhar 1942a9feb2cdSNavdeep Parhar for (k = 0; k < nitems(ppod->addr); k++) { 1943a9feb2cdSNavdeep Parhar if (pva > end_pva) 1944a9feb2cdSNavdeep Parhar ppod->addr[k] = 0; 1945a9feb2cdSNavdeep Parhar else { 1946a9feb2cdSNavdeep Parhar pa = pmap_kextract(pva); 1947a9feb2cdSNavdeep Parhar ppod->addr[k] = htobe64(pa); 1948a9feb2cdSNavdeep Parhar pva += ddp_pgsz; 1949a9feb2cdSNavdeep Parhar } 1950a9feb2cdSNavdeep Parhar #if 0 1951a9feb2cdSNavdeep Parhar CTR5(KTR_CXGBE, 1952a9feb2cdSNavdeep Parhar "%s: tid %d ppod[%d]->addr[%d] = %p", 19534427ac36SJohn Baldwin __func__, toep->tid, i, k, 195444e7472dSJohn Baldwin be64toh(ppod->addr[k])); 1955a9feb2cdSNavdeep Parhar #endif 1956a9feb2cdSNavdeep Parhar } 1957a9feb2cdSNavdeep Parhar 1958a9feb2cdSNavdeep Parhar /* 1959a9feb2cdSNavdeep Parhar * Walk back 1 segment so that the first address in the 1960a9feb2cdSNavdeep Parhar * next pod is the same as the last one in the current 1961a9feb2cdSNavdeep Parhar * pod. 
1962a9feb2cdSNavdeep Parhar */ 1963a9feb2cdSNavdeep Parhar pva -= ddp_pgsz; 1964a9feb2cdSNavdeep Parhar } 1965a9feb2cdSNavdeep Parhar 1966f949967cSJohn Baldwin mbufq_enqueue(wrq, m); 1967a9feb2cdSNavdeep Parhar } 1968a9feb2cdSNavdeep Parhar 1969a9feb2cdSNavdeep Parhar MPASS(pva <= end_pva); 1970a9feb2cdSNavdeep Parhar 1971a9feb2cdSNavdeep Parhar return (0); 1972a9feb2cdSNavdeep Parhar } 1973a9feb2cdSNavdeep Parhar 197446bee804SJohn Baldwin int 197546bee804SJohn Baldwin t4_write_page_pods_for_sgl(struct adapter *sc, struct toepcb *toep, 197646bee804SJohn Baldwin struct ppod_reservation *prsv, struct ctl_sg_entry *sgl, int entries, 1977f949967cSJohn Baldwin int xferlen, struct mbufq *wrq) 197846bee804SJohn Baldwin { 197946bee804SJohn Baldwin struct ulp_mem_io *ulpmc; 198046bee804SJohn Baldwin struct ulptx_idata *ulpsc; 198146bee804SJohn Baldwin struct pagepod *ppod; 198246bee804SJohn Baldwin int i, j, k, n, chunk, len, ddp_pgsz; 198346bee804SJohn Baldwin u_int ppod_addr, offset, sg_offset = 0; 198446bee804SJohn Baldwin uint32_t cmd; 198546bee804SJohn Baldwin struct ppod_region *pr = prsv->prsv_pr; 1986de414339SJohn Baldwin uintptr_t pva; 1987de414339SJohn Baldwin vm_paddr_t pa; 198846bee804SJohn Baldwin struct mbuf *m; 198946bee804SJohn Baldwin 199046bee804SJohn Baldwin MPASS(sgl != NULL); 199146bee804SJohn Baldwin MPASS(entries > 0); 199246bee804SJohn Baldwin cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 199346bee804SJohn Baldwin if (is_t4(sc)) 199446bee804SJohn Baldwin cmd |= htobe32(F_ULP_MEMIO_ORDER); 199546bee804SJohn Baldwin else 199646bee804SJohn Baldwin cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 199746bee804SJohn Baldwin ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 199846bee804SJohn Baldwin offset = (vm_offset_t)sgl->addr & PAGE_MASK; 199946bee804SJohn Baldwin ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 20008d2b4b2eSJohn Baldwin pva = trunc_page((vm_offset_t)sgl->addr); 200146bee804SJohn Baldwin for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 200246bee804SJohn Baldwin 200346bee804SJohn Baldwin /* How many page pods are we writing in this cycle */ 200446bee804SJohn Baldwin n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 200546bee804SJohn Baldwin MPASS(n > 0); 200646bee804SJohn Baldwin chunk = PPOD_SZ(n); 200746bee804SJohn Baldwin len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 200846bee804SJohn Baldwin 200946bee804SJohn Baldwin m = alloc_raw_wr_mbuf(len); 2010f949967cSJohn Baldwin if (m == NULL) 201146bee804SJohn Baldwin return (ENOMEM); 201246bee804SJohn Baldwin ulpmc = mtod(m, struct ulp_mem_io *); 201346bee804SJohn Baldwin 201446bee804SJohn Baldwin INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); 201546bee804SJohn Baldwin ulpmc->cmd = cmd; 201646bee804SJohn Baldwin ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 201746bee804SJohn Baldwin ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 201846bee804SJohn Baldwin ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 201946bee804SJohn Baldwin 202046bee804SJohn Baldwin ulpsc = (struct ulptx_idata *)(ulpmc + 1); 202146bee804SJohn Baldwin ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 202246bee804SJohn Baldwin ulpsc->len = htobe32(chunk); 202346bee804SJohn Baldwin 202446bee804SJohn Baldwin ppod = (struct pagepod *)(ulpsc + 1); 202546bee804SJohn Baldwin for (j = 0; j < n; i++, j++, ppod++) { 202646bee804SJohn Baldwin ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 202746bee804SJohn Baldwin V_PPOD_TID(toep->tid) | 202846bee804SJohn Baldwin 
(prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); 202946bee804SJohn Baldwin ppod->len_offset = htobe64(V_PPOD_LEN(xferlen) | 203046bee804SJohn Baldwin V_PPOD_OFST(offset)); 203146bee804SJohn Baldwin ppod->rsvd = 0; 203246bee804SJohn Baldwin 203346bee804SJohn Baldwin for (k = 0; k < nitems(ppod->addr); k++) { 203446bee804SJohn Baldwin if (entries != 0) { 203546bee804SJohn Baldwin pa = pmap_kextract(pva + sg_offset); 203646bee804SJohn Baldwin ppod->addr[k] = htobe64(pa); 203746bee804SJohn Baldwin } else 203846bee804SJohn Baldwin ppod->addr[k] = 0; 203946bee804SJohn Baldwin 204046bee804SJohn Baldwin #if 0 204146bee804SJohn Baldwin CTR5(KTR_CXGBE, 204246bee804SJohn Baldwin "%s: tid %d ppod[%d]->addr[%d] = %p", 204346bee804SJohn Baldwin __func__, toep->tid, i, k, 204444e7472dSJohn Baldwin be64toh(ppod->addr[k])); 204546bee804SJohn Baldwin #endif 204646bee804SJohn Baldwin 204746bee804SJohn Baldwin /* 204846bee804SJohn Baldwin * If this is the last entry in a pod, 204946bee804SJohn Baldwin * reuse the same entry for first address 205046bee804SJohn Baldwin * in the next pod. 205146bee804SJohn Baldwin */ 205246bee804SJohn Baldwin if (k + 1 == nitems(ppod->addr)) 205346bee804SJohn Baldwin break; 205446bee804SJohn Baldwin 205546bee804SJohn Baldwin /* 205646bee804SJohn Baldwin * Don't move to the next DDP page if the 205746bee804SJohn Baldwin * sgl is already finished. 205846bee804SJohn Baldwin */ 205946bee804SJohn Baldwin if (entries == 0) 206046bee804SJohn Baldwin continue; 206146bee804SJohn Baldwin 206246bee804SJohn Baldwin sg_offset += ddp_pgsz; 206346bee804SJohn Baldwin if (sg_offset == sgl->len) { 206446bee804SJohn Baldwin /* 206546bee804SJohn Baldwin * This sgl entry is done. Go 206646bee804SJohn Baldwin * to the next. 206746bee804SJohn Baldwin */ 206846bee804SJohn Baldwin entries--; 206946bee804SJohn Baldwin sgl++; 207046bee804SJohn Baldwin sg_offset = 0; 207146bee804SJohn Baldwin if (entries != 0) 207246bee804SJohn Baldwin pva = trunc_page( 207346bee804SJohn Baldwin (vm_offset_t)sgl->addr); 207446bee804SJohn Baldwin } 207546bee804SJohn Baldwin } 207646bee804SJohn Baldwin } 207746bee804SJohn Baldwin 2078f949967cSJohn Baldwin mbufq_enqueue(wrq, m); 207946bee804SJohn Baldwin } 208046bee804SJohn Baldwin 208146bee804SJohn Baldwin return (0); 208246bee804SJohn Baldwin } 208346bee804SJohn Baldwin 2084dc964385SJohn Baldwin /* 2085eeacb3b0SMark Johnston * Prepare a pageset for DDP. This sets up page pods. 
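 * Pods are carved from the region's vmem arena on first use and
 * written to adapter memory once per pageset (tracked via
 * PS_PPODS_WRITTEN).  Returns 1 on success and 0 on failure.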
2086dc964385SJohn Baldwin */ 2087e682d02eSNavdeep Parhar static int 2088dc964385SJohn Baldwin prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps) 2089e682d02eSNavdeep Parhar { 2090dc964385SJohn Baldwin struct tom_data *td = sc->tom_softc; 2091e682d02eSNavdeep Parhar 2092968267fdSNavdeep Parhar if (ps->prsv.prsv_nppods == 0 && 20932beaefe8SJohn Baldwin t4_alloc_page_pods_for_ps(&td->pr, ps) != 0) { 2094e682d02eSNavdeep Parhar return (0); 2095e682d02eSNavdeep Parhar } 2096dc964385SJohn Baldwin if (!(ps->flags & PS_PPODS_WRITTEN) && 2097968267fdSNavdeep Parhar t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) { 2098dc964385SJohn Baldwin return (0); 2099dc964385SJohn Baldwin } 2100dc964385SJohn Baldwin 2101dc964385SJohn Baldwin return (1); 2102dc964385SJohn Baldwin } 2103e682d02eSNavdeep Parhar 2104968267fdSNavdeep Parhar int 2105968267fdSNavdeep Parhar t4_init_ppod_region(struct ppod_region *pr, struct t4_range *r, u_int psz, 2106968267fdSNavdeep Parhar const char *name) 2107e682d02eSNavdeep Parhar { 2108515b36c5SNavdeep Parhar int i; 2109515b36c5SNavdeep Parhar 2110968267fdSNavdeep Parhar MPASS(pr != NULL); 2111968267fdSNavdeep Parhar MPASS(r->size > 0); 2112515b36c5SNavdeep Parhar 2113968267fdSNavdeep Parhar pr->pr_start = r->start; 2114968267fdSNavdeep Parhar pr->pr_len = r->size; 2115968267fdSNavdeep Parhar pr->pr_page_shift[0] = 12 + G_HPZ0(psz); 2116968267fdSNavdeep Parhar pr->pr_page_shift[1] = 12 + G_HPZ1(psz); 2117968267fdSNavdeep Parhar pr->pr_page_shift[2] = 12 + G_HPZ2(psz); 2118968267fdSNavdeep Parhar pr->pr_page_shift[3] = 12 + G_HPZ3(psz); 2119968267fdSNavdeep Parhar 2120968267fdSNavdeep Parhar /* The SGL -> page pod algorithm requires the sizes to be in order. */ 2121968267fdSNavdeep Parhar for (i = 1; i < nitems(pr->pr_page_shift); i++) { 2122968267fdSNavdeep Parhar if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1]) 2123968267fdSNavdeep Parhar return (ENXIO); 2124515b36c5SNavdeep Parhar } 2125e682d02eSNavdeep Parhar 2126968267fdSNavdeep Parhar pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG); 2127968267fdSNavdeep Parhar pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask; 2128968267fdSNavdeep Parhar if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0) 2129968267fdSNavdeep Parhar return (ENXIO); 2130968267fdSNavdeep Parhar pr->pr_alias_shift = fls(pr->pr_tag_mask); 2131968267fdSNavdeep Parhar pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1); 2132968267fdSNavdeep Parhar 2133968267fdSNavdeep Parhar pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0, 2134968267fdSNavdeep Parhar M_FIRSTFIT | M_NOWAIT); 2135968267fdSNavdeep Parhar if (pr->pr_arena == NULL) 2136968267fdSNavdeep Parhar return (ENOMEM); 2137968267fdSNavdeep Parhar 2138968267fdSNavdeep Parhar return (0); 2139e682d02eSNavdeep Parhar } 2140e682d02eSNavdeep Parhar 2141e682d02eSNavdeep Parhar void 2142968267fdSNavdeep Parhar t4_free_ppod_region(struct ppod_region *pr) 2143e682d02eSNavdeep Parhar { 2144e682d02eSNavdeep Parhar 2145968267fdSNavdeep Parhar MPASS(pr != NULL); 2146968267fdSNavdeep Parhar 2147968267fdSNavdeep Parhar if (pr->pr_arena) 2148968267fdSNavdeep Parhar vmem_destroy(pr->pr_arena); 2149968267fdSNavdeep Parhar bzero(pr, sizeof(*pr)); 2150e682d02eSNavdeep Parhar } 2151e682d02eSNavdeep Parhar 2152e682d02eSNavdeep Parhar static int 2153dc964385SJohn Baldwin pscmp(struct pageset *ps, struct vmspace *vm, vm_offset_t start, int npages, 2154dc964385SJohn Baldwin int pgoff, int len) 2155e682d02eSNavdeep Parhar { 2156e682d02eSNavdeep 
Parhar 215791a65e2fSJohn Baldwin if (ps->start != start || ps->npages != npages || 215891a65e2fSJohn Baldwin ps->offset != pgoff || ps->len != len) 2159dc964385SJohn Baldwin return (1); 2160dc964385SJohn Baldwin 2161dc964385SJohn Baldwin return (ps->vm != vm || ps->vm_timestamp != vm->vm_map.timestamp); 2162e682d02eSNavdeep Parhar } 2163e682d02eSNavdeep Parhar 2164dc964385SJohn Baldwin static int 2165dc964385SJohn Baldwin hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps) 2166688dba74SNavdeep Parhar { 2167dc964385SJohn Baldwin struct vmspace *vm; 2168dc964385SJohn Baldwin vm_map_t map; 2169dc964385SJohn Baldwin vm_offset_t start, end, pgoff; 2170dc964385SJohn Baldwin struct pageset *ps; 2171dc964385SJohn Baldwin int n; 2172688dba74SNavdeep Parhar 2173dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 2174688dba74SNavdeep Parhar 2175dc964385SJohn Baldwin /* 2176dc964385SJohn Baldwin * The AIO subsystem will cancel and drain all requests before 2177dc964385SJohn Baldwin * permitting a process to exit or exec, so p_vmspace should 2178dc964385SJohn Baldwin * be stable here. 2179dc964385SJohn Baldwin */ 2180dc964385SJohn Baldwin vm = job->userproc->p_vmspace; 2181dc964385SJohn Baldwin map = &vm->vm_map; 2182dc964385SJohn Baldwin start = (uintptr_t)job->uaiocb.aio_buf; 2183dc964385SJohn Baldwin pgoff = start & PAGE_MASK; 2184dc964385SJohn Baldwin end = round_page(start + job->uaiocb.aio_nbytes); 2185dc964385SJohn Baldwin start = trunc_page(start); 2186dc964385SJohn Baldwin 2187dc964385SJohn Baldwin if (end - start > MAX_DDP_BUFFER_SIZE) { 2188dc964385SJohn Baldwin /* 2189dc964385SJohn Baldwin * Truncate the request to a short read. 2190dc964385SJohn Baldwin * Alternatively, we could DDP in chunks to the larger 2191dc964385SJohn Baldwin * buffer, but that would be quite a bit more work. 2192dc964385SJohn Baldwin * 2193dc964385SJohn Baldwin * When truncating, round the request down to avoid 2194dc964385SJohn Baldwin * crossing a cache line on the final transaction. 2195dc964385SJohn Baldwin */ 2196dc964385SJohn Baldwin end = rounddown2(start + MAX_DDP_BUFFER_SIZE, CACHE_LINE_SIZE); 2197dc964385SJohn Baldwin #ifdef VERBOSE_TRACES 2198dc964385SJohn Baldwin CTR4(KTR_CXGBE, "%s: tid %d, truncating size from %lu to %lu", 2199dc964385SJohn Baldwin __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes, 2200dc964385SJohn Baldwin (unsigned long)(end - (start + pgoff))); 2201dc964385SJohn Baldwin job->uaiocb.aio_nbytes = end - (start + pgoff); 2202dc964385SJohn Baldwin #endif 2203dc964385SJohn Baldwin end = round_page(end); 2204688dba74SNavdeep Parhar } 2205688dba74SNavdeep Parhar 2206dc964385SJohn Baldwin n = atop(end - start); 2207688dba74SNavdeep Parhar 2208dc964385SJohn Baldwin /* 2209dc964385SJohn Baldwin * Try to reuse a cached pageset. 2210dc964385SJohn Baldwin */ 2211125d42feSJohn Baldwin TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) { 2212dc964385SJohn Baldwin if (pscmp(ps, vm, start, n, pgoff, 2213dc964385SJohn Baldwin job->uaiocb.aio_nbytes) == 0) { 2214125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); 2215125d42feSJohn Baldwin toep->ddp.cached_count--; 2216dc964385SJohn Baldwin *pps = ps; 2217dc964385SJohn Baldwin return (0); 2218dc964385SJohn Baldwin } 2219688dba74SNavdeep Parhar } 2220688dba74SNavdeep Parhar 2221e682d02eSNavdeep Parhar /* 2222dc964385SJohn Baldwin * If there are too many cached pagesets to create a new one, 2223dc964385SJohn Baldwin * free a pageset before creating a new one. 
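 * The victim is the pageset at the tail of ddp.cached_pagesets.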
2224e682d02eSNavdeep Parhar 	 */
2225125d42feSJohn Baldwin 	KASSERT(toep->ddp.active_count + toep->ddp.cached_count <=
2226125d42feSJohn Baldwin 	    nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__));
2227125d42feSJohn Baldwin 	if (toep->ddp.active_count + toep->ddp.cached_count ==
2228125d42feSJohn Baldwin 	    nitems(toep->ddp.db)) {
2229125d42feSJohn Baldwin 		KASSERT(toep->ddp.cached_count > 0,
2230dc964385SJohn Baldwin 		    ("no cached pageset to free"));
2231125d42feSJohn Baldwin 		ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq);
2232125d42feSJohn Baldwin 		TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
2233125d42feSJohn Baldwin 		toep->ddp.cached_count--;
2234dc964385SJohn Baldwin 		free_pageset(toep->td, ps);
2235dc964385SJohn Baldwin 	}
2236dc964385SJohn Baldwin 	DDP_UNLOCK(toep);
2237e682d02eSNavdeep Parhar
2238dc964385SJohn Baldwin 	/* Create a new pageset. */
2239dc964385SJohn Baldwin 	ps = malloc(sizeof(*ps) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
2240dc964385SJohn Baldwin 	    M_ZERO);
2241dc964385SJohn Baldwin 	ps->pages = (vm_page_t *)(ps + 1);
2242dc964385SJohn Baldwin 	ps->vm_timestamp = map->timestamp;
2243dc964385SJohn Baldwin 	ps->npages = vm_fault_quick_hold_pages(map, start, end - start,
2244dc964385SJohn Baldwin 	    VM_PROT_WRITE, ps->pages, n);
2245e682d02eSNavdeep Parhar
2246dc964385SJohn Baldwin 	DDP_LOCK(toep);
2247dc964385SJohn Baldwin 	if (ps->npages < 0) {
2248dc964385SJohn Baldwin 		free(ps, M_CXGBE);
2249dc964385SJohn Baldwin 		return (EFAULT);
2250e682d02eSNavdeep Parhar 	}
2251e682d02eSNavdeep Parhar
2252dc964385SJohn Baldwin 	KASSERT(ps->npages == n, ("hold_aio: page count mismatch: %d vs %d",
2253dc964385SJohn Baldwin 	    ps->npages, n));
2254dc964385SJohn Baldwin
2255dc964385SJohn Baldwin 	ps->offset = pgoff;
2256dc964385SJohn Baldwin 	ps->len = job->uaiocb.aio_nbytes;
2257f7db0c95SMark Johnston 	refcount_acquire(&vm->vm_refcnt);
2258dc964385SJohn Baldwin 	ps->vm = vm;
225991a65e2fSJohn Baldwin 	ps->start = start;
2260dc964385SJohn Baldwin
2261dc964385SJohn Baldwin 	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
2262dc964385SJohn Baldwin 	    __func__, toep->tid, ps, job, ps->npages);
2263dc964385SJohn Baldwin 	*pps = ps;
2264e682d02eSNavdeep Parhar 	return (0);
2265e682d02eSNavdeep Parhar }
2266e682d02eSNavdeep Parhar
2267dc964385SJohn Baldwin static void
2268dc964385SJohn Baldwin ddp_complete_all(struct toepcb *toep, int error)
2269e682d02eSNavdeep Parhar {
2270dc964385SJohn Baldwin 	struct kaiocb *job;
2271e682d02eSNavdeep Parhar
2272dc964385SJohn Baldwin 	DDP_ASSERT_LOCKED(toep);
2273eba13bbcSJohn Baldwin 	KASSERT((toep->ddp.flags & DDP_AIO) != 0, ("%s: DDP_RCVBUF", __func__));
2274125d42feSJohn Baldwin 	while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) {
2275125d42feSJohn Baldwin 		job = TAILQ_FIRST(&toep->ddp.aiojobq);
2276125d42feSJohn Baldwin 		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2277125d42feSJohn Baldwin 		toep->ddp.waiting_count--;
2278dc964385SJohn Baldwin 		if (aio_clear_cancel_function(job))
2279dc964385SJohn Baldwin 			ddp_complete_one(job, error);
2280dc964385SJohn Baldwin 	}
2281dc964385SJohn Baldwin }
2282dc964385SJohn Baldwin
2283dc964385SJohn Baldwin static void
2284dc964385SJohn Baldwin aio_ddp_cancel_one(struct kaiocb *job)
2285dc964385SJohn Baldwin {
2286dc964385SJohn Baldwin 	long copied;
2287dc964385SJohn Baldwin
2288dc964385SJohn Baldwin 	/*
2289dc964385SJohn Baldwin 	 * If this job had copied data out of the socket buffer before
2290dc964385SJohn Baldwin 	 * it was cancelled, report it as a short read rather than an
2291dc964385SJohn Baldwin 	 * error.
2292dc964385SJohn Baldwin 	 */
2293fe0bdd1dSJohn Baldwin 	copied = job->aio_received;
2294dc964385SJohn Baldwin 	if (copied != 0)
2295dc964385SJohn Baldwin 		aio_complete(job, copied, 0);
2296e682d02eSNavdeep Parhar 	else
2297dc964385SJohn Baldwin 		aio_cancel(job);
2298e682d02eSNavdeep Parhar }
2299e682d02eSNavdeep Parhar
2300dc964385SJohn Baldwin /*
2301dc964385SJohn Baldwin  * Called when the main loop wants to requeue a job to retry it later.
2302dc964385SJohn Baldwin  * Deals with the race of the job being cancelled while it was being
2303dc964385SJohn Baldwin  * examined.
2304dc964385SJohn Baldwin  */
2305dc964385SJohn Baldwin static void
2306dc964385SJohn Baldwin aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job)
2307dc964385SJohn Baldwin {
2308dc964385SJohn Baldwin
2309dc964385SJohn Baldwin 	DDP_ASSERT_LOCKED(toep);
2310125d42feSJohn Baldwin 	if (!(toep->ddp.flags & DDP_DEAD) &&
2311dc964385SJohn Baldwin 	    aio_set_cancel_function(job, t4_aio_cancel_queued)) {
2312125d42feSJohn Baldwin 		TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
2313125d42feSJohn Baldwin 		toep->ddp.waiting_count++;
2314dc964385SJohn Baldwin 	} else
2315dc964385SJohn Baldwin 		aio_ddp_cancel_one(job);
2316e682d02eSNavdeep Parhar }
2317e682d02eSNavdeep Parhar
2318dc964385SJohn Baldwin static void
2319dc964385SJohn Baldwin aio_ddp_requeue(struct toepcb *toep)
2320dc964385SJohn Baldwin {
2321dc964385SJohn Baldwin 	struct adapter *sc = td_adapter(toep->td);
2322dc964385SJohn Baldwin 	struct socket *so;
2323dc964385SJohn Baldwin 	struct sockbuf *sb;
2324dc964385SJohn Baldwin 	struct inpcb *inp;
2325dc964385SJohn Baldwin 	struct kaiocb *job;
2326dc964385SJohn Baldwin 	struct ddp_buffer *db;
2327dc964385SJohn Baldwin 	size_t copied, offset, resid;
2328dc964385SJohn Baldwin 	struct pageset *ps;
2329dc964385SJohn Baldwin 	struct mbuf *m;
2330dc964385SJohn Baldwin 	uint64_t ddp_flags, ddp_flags_mask;
2331dc964385SJohn Baldwin 	struct wrqe *wr;
2332dc964385SJohn Baldwin 	int buf_flag, db_idx, error;
2333dc964385SJohn Baldwin
2334dc964385SJohn Baldwin 	DDP_ASSERT_LOCKED(toep);
2335dc964385SJohn Baldwin
2336e682d02eSNavdeep Parhar restart:
2337125d42feSJohn Baldwin 	if (toep->ddp.flags & DDP_DEAD) {
2338125d42feSJohn Baldwin 		MPASS(toep->ddp.waiting_count == 0);
2339125d42feSJohn Baldwin 		MPASS(toep->ddp.active_count == 0);
2340dc964385SJohn Baldwin 		return;
2341e682d02eSNavdeep Parhar 	}
2342e682d02eSNavdeep Parhar
2343125d42feSJohn Baldwin 	if (toep->ddp.waiting_count == 0 ||
2344125d42feSJohn Baldwin 	    toep->ddp.active_count == nitems(toep->ddp.db)) {
2345dc964385SJohn Baldwin 		return;
2346dc964385SJohn Baldwin 	}
2347dc964385SJohn Baldwin
2348125d42feSJohn Baldwin 	job = TAILQ_FIRST(&toep->ddp.aiojobq);
2349dc964385SJohn Baldwin 	so = job->fd_file->f_data;
2350dc964385SJohn Baldwin 	sb = &so->so_rcv;
2351dc964385SJohn Baldwin 	SOCKBUF_LOCK(sb);
2352dc964385SJohn Baldwin
2353dc964385SJohn Baldwin 	/* We will never get anything unless we are or were connected. */
2354dc964385SJohn Baldwin 	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
2355dc964385SJohn Baldwin 		SOCKBUF_UNLOCK(sb);
2356dc964385SJohn Baldwin 		ddp_complete_all(toep, ENOTCONN);
2357dc964385SJohn Baldwin 		return;
2358dc964385SJohn Baldwin 	}
2359dc964385SJohn Baldwin
2360125d42feSJohn Baldwin 	KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0,
2361dc964385SJohn Baldwin 	    ("%s: pending sockbuf data and DDP is active", __func__));
2362dc964385SJohn Baldwin
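	/*
	 * The assertion above captures the key invariant of this loop:
	 * payload is either staged in the socket buffer (pre-DDP data
	 * and indicates) or placed directly by active DDP buffers,
	 * never both at the same time.
	 */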
2363e682d02eSNavdeep Parhar 	/* Abort if socket has reported problems. */
2364dc964385SJohn Baldwin 	/* XXX: Wait for any queued DDP's to finish and/or flush them? */
2365dc964385SJohn Baldwin 	if (so->so_error && sbavail(sb) == 0) {
2366125d42feSJohn Baldwin 		toep->ddp.waiting_count--;
2367125d42feSJohn Baldwin 		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2368dc964385SJohn Baldwin 		if (!aio_clear_cancel_function(job)) {
2369dc964385SJohn Baldwin 			SOCKBUF_UNLOCK(sb);
2370dc964385SJohn Baldwin 			goto restart;
2371dc964385SJohn Baldwin 		}
2372dc964385SJohn Baldwin
2373dc964385SJohn Baldwin 		/*
2374dc964385SJohn Baldwin 		 * If this job has previously copied some data, report
2375dc964385SJohn Baldwin 		 * a short read and leave the error to be reported by
2376dc964385SJohn Baldwin 		 * a future request.
2377dc964385SJohn Baldwin 		 */
2378fe0bdd1dSJohn Baldwin 		copied = job->aio_received;
2379dc964385SJohn Baldwin 		if (copied != 0) {
2380dc964385SJohn Baldwin 			SOCKBUF_UNLOCK(sb);
2381dc964385SJohn Baldwin 			aio_complete(job, copied, 0);
2382dc964385SJohn Baldwin 			goto restart;
2383dc964385SJohn Baldwin 		}
2384e682d02eSNavdeep Parhar 		error = so->so_error;
2385e682d02eSNavdeep Parhar 		so->so_error = 0;
2386dc964385SJohn Baldwin 		SOCKBUF_UNLOCK(sb);
2387dc964385SJohn Baldwin 		aio_complete(job, -1, error);
2388dc964385SJohn Baldwin 		goto restart;
2389e682d02eSNavdeep Parhar 	}
2390e682d02eSNavdeep Parhar
2391e682d02eSNavdeep Parhar 	/*
2392dc964385SJohn Baldwin 	 * Door is closed.  If there is pending data in the socket buffer,
2393dc964385SJohn Baldwin 	 * deliver it.  If there are pending DDP requests, wait for those
2394dc964385SJohn Baldwin 	 * to complete.  Once they have completed, return EOF reads.
2395e682d02eSNavdeep Parhar 	 */
2396dc964385SJohn Baldwin 	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
2397dc964385SJohn Baldwin 		SOCKBUF_UNLOCK(sb);
2398125d42feSJohn Baldwin 		if (toep->ddp.active_count != 0)
2399dc964385SJohn Baldwin 			return;
2400dc964385SJohn Baldwin 		ddp_complete_all(toep, 0);
2401dc964385SJohn Baldwin 		return;
2402e682d02eSNavdeep Parhar 	}
2403dc964385SJohn Baldwin
2404dc964385SJohn Baldwin 	/*
2405dc964385SJohn Baldwin 	 * If DDP is not enabled and there is no pending socket buffer
2406dc964385SJohn Baldwin 	 * data, try to enable DDP.
2407dc964385SJohn Baldwin 	 */
2408125d42feSJohn Baldwin 	if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) {
2409dc964385SJohn Baldwin 		SOCKBUF_UNLOCK(sb);
2410dc964385SJohn Baldwin
2411dc964385SJohn Baldwin 		/*
2412dc964385SJohn Baldwin 		 * Wait for the card to ACK that DDP is enabled before
2413dc964385SJohn Baldwin 		 * queueing any buffers.  Currently this waits for an
2414dc964385SJohn Baldwin 		 * indicate to arrive.  This could use a TCB_SET_FIELD_RPL
2415dc964385SJohn Baldwin 		 * message to know that DDP was enabled instead of waiting
2416dc964385SJohn Baldwin 		 * for the indicate which would avoid copying the indicate
2417dc964385SJohn Baldwin 		 * if no data is pending.
2418dc964385SJohn Baldwin 		 *
2419dc964385SJohn Baldwin 		 * XXX: Might want to limit the indicate size to the size
2420dc964385SJohn Baldwin 		 * of the first queued request.
2421dc964385SJohn Baldwin 		 */
2422125d42feSJohn Baldwin 		if ((toep->ddp.flags & DDP_SC_REQ) == 0)
2423dc964385SJohn Baldwin 			enable_ddp(sc, toep);
2424dc964385SJohn Baldwin 		return;
2425e682d02eSNavdeep Parhar 	}
2426dc964385SJohn Baldwin 	SOCKBUF_UNLOCK(sb);
2427dc964385SJohn Baldwin
2428dc964385SJohn Baldwin 	/*
2429dc964385SJohn Baldwin 	 * If another thread is queueing a buffer for DDP, let it
2430dc964385SJohn Baldwin 	 * drain any work and return.
2431dc964385SJohn Baldwin 	 */
2432125d42feSJohn Baldwin 	if (toep->ddp.queueing != NULL)
2433dc964385SJohn Baldwin 		return;
2434dc964385SJohn Baldwin
2435dc964385SJohn Baldwin 	/* Take the next job to prep it for DDP. */
2436125d42feSJohn Baldwin 	toep->ddp.waiting_count--;
2437125d42feSJohn Baldwin 	TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2438dc964385SJohn Baldwin 	if (!aio_clear_cancel_function(job))
2439e682d02eSNavdeep Parhar 		goto restart;
2440125d42feSJohn Baldwin 	toep->ddp.queueing = job;
2441e682d02eSNavdeep Parhar
2442dc964385SJohn Baldwin 	/* NB: This drops DDP_LOCK while it holds the backing VM pages. */
2443dc964385SJohn Baldwin 	error = hold_aio(toep, job, &ps);
2444dc964385SJohn Baldwin 	if (error != 0) {
2445dc964385SJohn Baldwin 		ddp_complete_one(job, error);
2446125d42feSJohn Baldwin 		toep->ddp.queueing = NULL;
2447e682d02eSNavdeep Parhar 		goto restart;
2448dc964385SJohn Baldwin 	}
2449e682d02eSNavdeep Parhar
2450dc964385SJohn Baldwin 	SOCKBUF_LOCK(sb);
2451dc964385SJohn Baldwin 	if (so->so_error && sbavail(sb) == 0) {
2452fe0bdd1dSJohn Baldwin 		copied = job->aio_received;
2453dc964385SJohn Baldwin 		if (copied != 0) {
2454dc964385SJohn Baldwin 			SOCKBUF_UNLOCK(sb);
2455dc964385SJohn Baldwin 			recycle_pageset(toep, ps);
2456dc964385SJohn Baldwin 			aio_complete(job, copied, 0);
2457125d42feSJohn Baldwin 			toep->ddp.queueing = NULL;
2458dc964385SJohn Baldwin 			goto restart;
2459dc964385SJohn Baldwin 		}
2460e682d02eSNavdeep Parhar
2461dc964385SJohn Baldwin 		error = so->so_error;
2462dc964385SJohn Baldwin 		so->so_error = 0;
2463dc964385SJohn Baldwin 		SOCKBUF_UNLOCK(sb);
2464dc964385SJohn Baldwin 		recycle_pageset(toep, ps);
2465dc964385SJohn Baldwin 		aio_complete(job, -1, error);
2466125d42feSJohn Baldwin 		toep->ddp.queueing = NULL;
2467dc964385SJohn Baldwin 		goto restart;
2468e682d02eSNavdeep Parhar 	}
2469e682d02eSNavdeep Parhar
2470dc964385SJohn Baldwin 	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
2471dc964385SJohn Baldwin 		SOCKBUF_UNLOCK(sb);
2472dc964385SJohn Baldwin 		recycle_pageset(toep, ps);
2473125d42feSJohn Baldwin 		if (toep->ddp.active_count != 0) {
2474dc964385SJohn Baldwin 			/*
2475dc964385SJohn Baldwin 			 * The door is closed, but there are still pending
2476dc964385SJohn Baldwin 			 * DDP buffers.  Requeue.  These jobs will all be
2477dc964385SJohn Baldwin 			 * completed once those buffers drain.
2478dc964385SJohn Baldwin 			 */
2479dc964385SJohn Baldwin 			aio_ddp_requeue_one(toep, job);
2480125d42feSJohn Baldwin 			toep->ddp.queueing = NULL;
2481dc964385SJohn Baldwin 			return;
2482e682d02eSNavdeep Parhar 		}
2483dc964385SJohn Baldwin 		ddp_complete_one(job, 0);
2484dc964385SJohn Baldwin 		ddp_complete_all(toep, 0);
2485125d42feSJohn Baldwin 		toep->ddp.queueing = NULL;
2486dc964385SJohn Baldwin 		return;
2487e682d02eSNavdeep Parhar 	}
2488dc964385SJohn Baldwin
2489dc964385SJohn Baldwin sbcopy:
2490dc964385SJohn Baldwin 	/*
2491dc964385SJohn Baldwin 	 * If the toep is dead, there shouldn't be any data in the socket
2492dc964385SJohn Baldwin 	 * buffer, so the above case should have handled this.
2493dc964385SJohn Baldwin 	 */
2494125d42feSJohn Baldwin 	MPASS(!(toep->ddp.flags & DDP_DEAD));
2495dc964385SJohn Baldwin
2496dc964385SJohn Baldwin 	/*
2497dc964385SJohn Baldwin 	 * If there is pending data in the socket buffer (either
2498dc964385SJohn Baldwin 	 * from before the requests were queued or a DDP indicate),
2499dc964385SJohn Baldwin 	 * copy those mbufs out directly.
2500dc964385SJohn Baldwin 	 */
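	/*
	 * The loop below walks the mbuf chain, copying up to 'resid'
	 * bytes into the pageset's wired pages via uiomove_fromphys()
	 * starting at the job's current completion offset, and then
	 * drops the copied data from the socket buffer.
	 */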
2501dc964385SJohn Baldwin 	copied = 0;
2502fe0bdd1dSJohn Baldwin 	offset = ps->offset + job->aio_received;
2503fe0bdd1dSJohn Baldwin 	MPASS(job->aio_received <= job->uaiocb.aio_nbytes);
2504fe0bdd1dSJohn Baldwin 	resid = job->uaiocb.aio_nbytes - job->aio_received;
2505dc964385SJohn Baldwin 	m = sb->sb_mb;
2506125d42feSJohn Baldwin 	KASSERT(m == NULL || toep->ddp.active_count == 0,
2507dc964385SJohn Baldwin 	    ("%s: sockbuf data with active DDP", __func__));
2508dc964385SJohn Baldwin 	while (m != NULL && resid > 0) {
2509dc964385SJohn Baldwin 		struct iovec iov[1];
2510dc964385SJohn Baldwin 		struct uio uio;
251139d5cbdcSNavdeep Parhar #ifdef INVARIANTS
2512dc964385SJohn Baldwin 		int error;
251339d5cbdcSNavdeep Parhar #endif
2514dc964385SJohn Baldwin
2515dc964385SJohn Baldwin 		iov[0].iov_base = mtod(m, void *);
2516dc964385SJohn Baldwin 		iov[0].iov_len = m->m_len;
2517dc964385SJohn Baldwin 		if (iov[0].iov_len > resid)
2518dc964385SJohn Baldwin 			iov[0].iov_len = resid;
2519dc964385SJohn Baldwin 		uio.uio_iov = iov;
2520dc964385SJohn Baldwin 		uio.uio_iovcnt = 1;
2521dc964385SJohn Baldwin 		uio.uio_offset = 0;
2522dc964385SJohn Baldwin 		uio.uio_resid = iov[0].iov_len;
2523dc964385SJohn Baldwin 		uio.uio_segflg = UIO_SYSSPACE;
2524dc964385SJohn Baldwin 		uio.uio_rw = UIO_WRITE;
252539d5cbdcSNavdeep Parhar #ifdef INVARIANTS
2526dc964385SJohn Baldwin 		error = uiomove_fromphys(ps->pages, offset + copied,
2527dc964385SJohn Baldwin 		    uio.uio_resid, &uio);
252839d5cbdcSNavdeep Parhar #else
252939d5cbdcSNavdeep Parhar 		uiomove_fromphys(ps->pages, offset + copied, uio.uio_resid, &uio);
253039d5cbdcSNavdeep Parhar #endif
2531dc964385SJohn Baldwin 		MPASS(error == 0 && uio.uio_resid == 0);
2532dc964385SJohn Baldwin 		copied += uio.uio_offset;
2533dc964385SJohn Baldwin 		resid -= uio.uio_offset;
2534dc964385SJohn Baldwin 		m = m->m_next;
2535dc964385SJohn Baldwin 	}
2536dc964385SJohn Baldwin 	if (copied != 0) {
2537dc964385SJohn Baldwin 		sbdrop_locked(sb, copied);
2538fe0bdd1dSJohn Baldwin 		job->aio_received += copied;
2539b1012d80SJohn Baldwin 		job->msgrcv = 1;
2540fe0bdd1dSJohn Baldwin 		copied = job->aio_received;
2541dc964385SJohn Baldwin 		inp = sotoinpcb(so);
2542dc964385SJohn Baldwin 		if (!INP_TRY_WLOCK(inp)) {
2543dc964385SJohn Baldwin 			/*
2544dc964385SJohn Baldwin 			 * The reference on the socket file descriptor in
2545dc964385SJohn Baldwin 			 * the AIO job should keep 'sb' and 'inp' stable.
2546dc964385SJohn Baldwin 			 * Our caller has a reference on the 'toep' that
2547dc964385SJohn Baldwin 			 * keeps it stable.
2548dc964385SJohn Baldwin 			 */
2549dc964385SJohn Baldwin 			SOCKBUF_UNLOCK(sb);
2550dc964385SJohn Baldwin 			DDP_UNLOCK(toep);
2551dc964385SJohn Baldwin 			INP_WLOCK(inp);
2552dc964385SJohn Baldwin 			DDP_LOCK(toep);
2553dc964385SJohn Baldwin 			SOCKBUF_LOCK(sb);
2554dc964385SJohn Baldwin
2555dc964385SJohn Baldwin 			/*
2556dc964385SJohn Baldwin 			 * If the socket has been closed, we should detect
2557dc964385SJohn Baldwin 			 * that and complete this request if needed on
2558dc964385SJohn Baldwin 			 * the next trip around the loop.
2559dc964385SJohn Baldwin 			 */
2560dc964385SJohn Baldwin 		}
2561dc964385SJohn Baldwin 		t4_rcvd_locked(&toep->td->tod, intotcpcb(inp));
2562dc964385SJohn Baldwin 		INP_WUNLOCK(inp);
2563125d42feSJohn Baldwin 		if (resid == 0 || toep->ddp.flags & DDP_DEAD) {
2564dc964385SJohn Baldwin 			/*
2565dc964385SJohn Baldwin 			 * We filled the entire buffer with socket
2566dc964385SJohn Baldwin 			 * data, DDP is not being used, or the socket
2567dc964385SJohn Baldwin 			 * is being shut down, so complete the
2568dc964385SJohn Baldwin 			 * request.
2569dc964385SJohn Baldwin 			 */
2570dc964385SJohn Baldwin 			SOCKBUF_UNLOCK(sb);
2571dc964385SJohn Baldwin 			recycle_pageset(toep, ps);
2572dc964385SJohn Baldwin 			aio_complete(job, copied, 0);
2573125d42feSJohn Baldwin 			toep->ddp.queueing = NULL;
2574dc964385SJohn Baldwin 			goto restart;
2575dc964385SJohn Baldwin 		}
2576dc964385SJohn Baldwin
2577dc964385SJohn Baldwin 		/*
2578dc964385SJohn Baldwin 		 * If DDP is not enabled, requeue this request and restart.
2579dc964385SJohn Baldwin 		 * This will either enable DDP or wait for more data to
2580dc964385SJohn Baldwin 		 * arrive on the socket buffer.
2581dc964385SJohn Baldwin 		 */
2582125d42feSJohn Baldwin 		if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) {
2583dc964385SJohn Baldwin 			SOCKBUF_UNLOCK(sb);
2584dc964385SJohn Baldwin 			recycle_pageset(toep, ps);
2585dc964385SJohn Baldwin 			aio_ddp_requeue_one(toep, job);
2586125d42feSJohn Baldwin 			toep->ddp.queueing = NULL;
2587dc964385SJohn Baldwin 			goto restart;
2588dc964385SJohn Baldwin 		}
2589dc964385SJohn Baldwin
2590dc964385SJohn Baldwin 		/*
2591dc964385SJohn Baldwin 		 * An indicate might have arrived and been added to
2592dc964385SJohn Baldwin 		 * the socket buffer while it was unlocked after the
2593dc964385SJohn Baldwin 		 * copy to lock the INP.  If so, restart the copy.
2594dc964385SJohn Baldwin 		 */
2595dc964385SJohn Baldwin 		if (sbavail(sb) != 0)
2596dc964385SJohn Baldwin 			goto sbcopy;
2597dc964385SJohn Baldwin 	}
2598dc964385SJohn Baldwin 	SOCKBUF_UNLOCK(sb);
2599dc964385SJohn Baldwin
2600dc964385SJohn Baldwin 	if (prep_pageset(sc, toep, ps) == 0) {
2601dc964385SJohn Baldwin 		recycle_pageset(toep, ps);
2602dc964385SJohn Baldwin 		aio_ddp_requeue_one(toep, job);
2603125d42feSJohn Baldwin 		toep->ddp.queueing = NULL;
2604dc964385SJohn Baldwin
2605dc964385SJohn Baldwin 		/*
2606dc964385SJohn Baldwin 		 * XXX: Need to retry this later.  Mostly need a trigger
2607dc964385SJohn Baldwin 		 * when page pods are freed up.
2608dc964385SJohn Baldwin 		 */
2609dc964385SJohn Baldwin 		printf("%s: prep_pageset failed\n", __func__);
2610dc964385SJohn Baldwin 		return;
2611dc964385SJohn Baldwin 	}
2612dc964385SJohn Baldwin
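	/*
	 * The hardware provides two DDP buffer slots per connection
	 * (db[0] and db[1]).  Slot 0 is used when it is free; otherwise
	 * the job goes to slot 1, so at most two jobs can have buffers
	 * posted to the chip at any time.
	 */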
2613dc964385SJohn Baldwin 	/* Determine which DDP buffer to use. */
2614125d42feSJohn Baldwin 	if (toep->ddp.db[0].job == NULL) {
2615dc964385SJohn Baldwin 		db_idx = 0;
2616e682d02eSNavdeep Parhar 	} else {
2617125d42feSJohn Baldwin 		MPASS(toep->ddp.db[1].job == NULL);
2618dc964385SJohn Baldwin 		db_idx = 1;
2619e682d02eSNavdeep Parhar 	}
2620e682d02eSNavdeep Parhar
2621dc964385SJohn Baldwin 	ddp_flags = 0;
2622dc964385SJohn Baldwin 	ddp_flags_mask = 0;
2623dc964385SJohn Baldwin 	if (db_idx == 0) {
2624dc964385SJohn Baldwin 		ddp_flags |= V_TF_DDP_BUF0_VALID(1);
2625dc964385SJohn Baldwin 		if (so->so_state & SS_NBIO)
2626dc964385SJohn Baldwin 			ddp_flags |= V_TF_DDP_BUF0_FLUSH(1);
2627dc964385SJohn Baldwin 		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) |
2628dc964385SJohn Baldwin 		    V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) |
2629dc964385SJohn Baldwin 		    V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1);
2630dc964385SJohn Baldwin 		buf_flag = DDP_BUF0_ACTIVE;
2631dc964385SJohn Baldwin 	} else {
2632dc964385SJohn Baldwin 		ddp_flags |= V_TF_DDP_BUF1_VALID(1);
2633dc964385SJohn Baldwin 		if (so->so_state & SS_NBIO)
2634dc964385SJohn Baldwin 			ddp_flags |= V_TF_DDP_BUF1_FLUSH(1);
2635dc964385SJohn Baldwin 		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) |
2636dc964385SJohn Baldwin 		    V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) |
2637dc964385SJohn Baldwin 		    V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1);
2638dc964385SJohn Baldwin 		buf_flag = DDP_BUF1_ACTIVE;
2639e682d02eSNavdeep Parhar 	}
2640125d42feSJohn Baldwin 	MPASS((toep->ddp.flags & buf_flag) == 0);
2641125d42feSJohn Baldwin 	if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) {
2642dc964385SJohn Baldwin 		MPASS(db_idx == 0);
2643125d42feSJohn Baldwin 		MPASS(toep->ddp.active_id == -1);
2644125d42feSJohn Baldwin 		MPASS(toep->ddp.active_count == 0);
2645dc964385SJohn Baldwin 		ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1);
2646e682d02eSNavdeep Parhar 	}
2647e682d02eSNavdeep Parhar
2648e682d02eSNavdeep Parhar 	/*
2649dc964385SJohn Baldwin 	 * The TID for this connection should still be valid.  If DDP_DEAD
2650dc964385SJohn Baldwin 	 * is set, SBS_CANTRCVMORE should be set, so we shouldn't be
2651dc964385SJohn Baldwin 	 * this far anyway.  Even if the socket is closing on the other
2652dc964385SJohn Baldwin 	 * end, the AIO job holds a reference on this end of the socket
2653dc964385SJohn Baldwin 	 * which will keep it open and keep the TCP PCB attached until
2654dc964385SJohn Baldwin 	 * after the job is completed.
2655e682d02eSNavdeep Parhar 	 */
2656eba13bbcSJohn Baldwin 	wr = mk_update_tcb_for_ddp(sc, toep, db_idx, &ps->prsv, ps->len,
2657eba13bbcSJohn Baldwin 	    job->aio_received, ddp_flags, ddp_flags_mask);
2658dc964385SJohn Baldwin 	if (wr == NULL) {
2659dc964385SJohn Baldwin 		recycle_pageset(toep, ps);
2660dc964385SJohn Baldwin 		aio_ddp_requeue_one(toep, job);
2661125d42feSJohn Baldwin 		toep->ddp.queueing = NULL;
2662dc964385SJohn Baldwin
2663dc964385SJohn Baldwin 		/*
2664dc964385SJohn Baldwin 		 * XXX: Need a way to kick a retry here.
2665dc964385SJohn Baldwin 		 *
2666dc964385SJohn Baldwin 		 * XXX: We know the fixed size needed and could
2667dc964385SJohn Baldwin 		 * preallocate this using a blocking request at the
2668dc964385SJohn Baldwin 		 * start of the task to avoid having to handle this
2669dc964385SJohn Baldwin 		 * edge case.
2670dc964385SJohn Baldwin 		 */
2671dc964385SJohn Baldwin 		printf("%s: mk_update_tcb_for_ddp failed\n", __func__);
2672dc964385SJohn Baldwin 		return;
2673dc964385SJohn Baldwin 	}
2674dc964385SJohn Baldwin
2675dc964385SJohn Baldwin 	if (!aio_set_cancel_function(job, t4_aio_cancel_active)) {
2676dc964385SJohn Baldwin 		free_wrqe(wr);
2677dc964385SJohn Baldwin 		recycle_pageset(toep, ps);
2678dc964385SJohn Baldwin 		aio_ddp_cancel_one(job);
2679125d42feSJohn Baldwin 		toep->ddp.queueing = NULL;
2680e682d02eSNavdeep Parhar 		goto restart;
2681e682d02eSNavdeep Parhar 	}
2682e682d02eSNavdeep Parhar
2683dc964385SJohn Baldwin #ifdef VERBOSE_TRACES
26848674e626SNavdeep Parhar 	CTR6(KTR_CXGBE,
26858674e626SNavdeep Parhar 	    "%s: tid %u, scheduling %p for DDP[%d] (flags %#lx/%#lx)", __func__,
26868674e626SNavdeep Parhar 	    toep->tid, job, db_idx, ddp_flags, ddp_flags_mask);
2687dc964385SJohn Baldwin #endif
2688dc964385SJohn Baldwin 	/* Give the chip the go-ahead. */
2689dc964385SJohn Baldwin 	t4_wrq_tx(sc, wr);
2690125d42feSJohn Baldwin 	db = &toep->ddp.db[db_idx];
2691dc964385SJohn Baldwin 	db->cancel_pending = 0;
2692dc964385SJohn Baldwin 	db->job = job;
2693dc964385SJohn Baldwin 	db->ps = ps;
2694125d42feSJohn Baldwin 	toep->ddp.queueing = NULL;
2695125d42feSJohn Baldwin 	toep->ddp.flags |= buf_flag;
2696125d42feSJohn Baldwin 	toep->ddp.active_count++;
2697125d42feSJohn Baldwin 	if (toep->ddp.active_count == 1) {
2698125d42feSJohn Baldwin 		MPASS(toep->ddp.active_id == -1);
2699125d42feSJohn Baldwin 		toep->ddp.active_id = db_idx;
2700dc964385SJohn Baldwin 		CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
2701125d42feSJohn Baldwin 		    toep->ddp.active_id);
2702dc964385SJohn Baldwin 	}
2703dc964385SJohn Baldwin 	goto restart;
2704dc964385SJohn Baldwin }
2705dc964385SJohn Baldwin
2706dc964385SJohn Baldwin void
2707dc964385SJohn Baldwin ddp_queue_toep(struct toepcb *toep)
2708dc964385SJohn Baldwin {
2709dc964385SJohn Baldwin
2710dc964385SJohn Baldwin 	DDP_ASSERT_LOCKED(toep);
2711125d42feSJohn Baldwin 	if (toep->ddp.flags & DDP_TASK_ACTIVE)
2712dc964385SJohn Baldwin 		return;
2713125d42feSJohn Baldwin 	toep->ddp.flags |= DDP_TASK_ACTIVE;
2714dc964385SJohn Baldwin 	hold_toepcb(toep);
2715125d42feSJohn Baldwin 	soaio_enqueue(&toep->ddp.requeue_task);
2716dc964385SJohn Baldwin }
2717dc964385SJohn Baldwin
2718dc964385SJohn Baldwin static void
2719dc964385SJohn Baldwin aio_ddp_requeue_task(void *context, int pending)
2720dc964385SJohn Baldwin {
2721dc964385SJohn Baldwin 	struct toepcb *toep = context;
2722dc964385SJohn Baldwin
2723dc964385SJohn Baldwin 	DDP_LOCK(toep);
2724dc964385SJohn Baldwin 	aio_ddp_requeue(toep);
2725125d42feSJohn Baldwin 	toep->ddp.flags &= ~DDP_TASK_ACTIVE;
2726dc964385SJohn Baldwin 	DDP_UNLOCK(toep);
2727dc964385SJohn Baldwin
2728dc964385SJohn Baldwin 	free_toepcb(toep);
2729dc964385SJohn Baldwin }
2730dc964385SJohn Baldwin
2731dc964385SJohn Baldwin static void
2732dc964385SJohn Baldwin t4_aio_cancel_active(struct kaiocb *job)
2733dc964385SJohn Baldwin {
2734dc964385SJohn Baldwin 	struct socket *so = job->fd_file->f_data;
2735e1401f75SGleb Smirnoff 	struct tcpcb *tp = sototcpcb(so);
2736dc964385SJohn Baldwin 	struct toepcb *toep = tp->t_toe;
2737dc964385SJohn Baldwin 	struct adapter *sc = td_adapter(toep->td);
2738dc964385SJohn Baldwin 	uint64_t valid_flag;
2739dc964385SJohn Baldwin 	int i;
2740dc964385SJohn Baldwin
2741dc964385SJohn Baldwin 	DDP_LOCK(toep);
2742dc964385SJohn Baldwin 	if (aio_cancel_cleared(job)) {
2743dc964385SJohn Baldwin 		DDP_UNLOCK(toep);
2744dc964385SJohn Baldwin 		aio_ddp_cancel_one(job);
2745dc964385SJohn Baldwin 		return;
2746dc964385SJohn Baldwin 	}
2747dc964385SJohn Baldwin
2748125d42feSJohn Baldwin 	for (i = 0; i < nitems(toep->ddp.db); i++) {
2749125d42feSJohn Baldwin 		if (toep->ddp.db[i].job == job) {
2750dc964385SJohn Baldwin 			/* Should only ever get one cancel request for a job. */
2751125d42feSJohn Baldwin 			MPASS(toep->ddp.db[i].cancel_pending == 0);
2752dc964385SJohn Baldwin
2753dc964385SJohn Baldwin 			/*
2754dc964385SJohn Baldwin 			 * Invalidate this buffer.  It will be
2755dc964385SJohn Baldwin 			 * cancelled or partially completed once the
2756dc964385SJohn Baldwin 			 * card ACKs the invalidate.
2757dc964385SJohn Baldwin 			 */
2758dc964385SJohn Baldwin 			valid_flag = i == 0 ? V_TF_DDP_BUF0_VALID(1) :
2759dc964385SJohn Baldwin 			    V_TF_DDP_BUF1_VALID(1);
2760edf95febSJohn Baldwin 			t4_set_tcb_field(sc, toep->ctrlq, toep,
2761671bf2b8SNavdeep Parhar 			    W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1,
2762017902fcSJohn Baldwin 			    CPL_COOKIE_DDP0 + i);
2763125d42feSJohn Baldwin 			toep->ddp.db[i].cancel_pending = 1;
2764dc964385SJohn Baldwin 			CTR2(KTR_CXGBE, "%s: request %p marked pending",
2765dc964385SJohn Baldwin 			    __func__, job);
2766dc964385SJohn Baldwin 			break;
2767dc964385SJohn Baldwin 		}
2768dc964385SJohn Baldwin 	}
2769dc964385SJohn Baldwin 	DDP_UNLOCK(toep);
2770dc964385SJohn Baldwin }
2771dc964385SJohn Baldwin
2772dc964385SJohn Baldwin static void
2773dc964385SJohn Baldwin t4_aio_cancel_queued(struct kaiocb *job)
2774dc964385SJohn Baldwin {
2775dc964385SJohn Baldwin 	struct socket *so = job->fd_file->f_data;
2776e1401f75SGleb Smirnoff 	struct tcpcb *tp = sototcpcb(so);
2777dc964385SJohn Baldwin 	struct toepcb *toep = tp->t_toe;
2778dc964385SJohn Baldwin
2779dc964385SJohn Baldwin 	DDP_LOCK(toep);
2780dc964385SJohn Baldwin 	if (!aio_cancel_cleared(job)) {
2781125d42feSJohn Baldwin 		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
2782125d42feSJohn Baldwin 		toep->ddp.waiting_count--;
2783125d42feSJohn Baldwin 		if (toep->ddp.waiting_count == 0)
2784dc964385SJohn Baldwin 			ddp_queue_toep(toep);
2785dc964385SJohn Baldwin 	}
2786dc964385SJohn Baldwin 	CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job);
2787dc964385SJohn Baldwin 	DDP_UNLOCK(toep);
2788dc964385SJohn Baldwin
2789dc964385SJohn Baldwin 	aio_ddp_cancel_one(job);
2790dc964385SJohn Baldwin }
2791dc964385SJohn Baldwin
2792dc964385SJohn Baldwin int
2793dc964385SJohn Baldwin t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
2794dc964385SJohn Baldwin {
2795a5a965d7SJohn Baldwin 	struct inpcb *inp = sotoinpcb(so);
2796a5a965d7SJohn Baldwin 	struct tcpcb *tp = intotcpcb(inp);
2797dc964385SJohn Baldwin 	struct toepcb *toep = tp->t_toe;
2798dc964385SJohn Baldwin
2799dc964385SJohn Baldwin 	/* Ignore writes. */
2800dc964385SJohn Baldwin 	if (job->uaiocb.aio_lio_opcode != LIO_READ)
2801dc964385SJohn Baldwin 		return (EOPNOTSUPP);
2802dc964385SJohn Baldwin
2803a5a965d7SJohn Baldwin 	INP_WLOCK(inp);
2804a5a965d7SJohn Baldwin 	if (__predict_false(ulp_mode(toep) == ULP_MODE_NONE)) {
2805a5a965d7SJohn Baldwin 		if (!set_ddp_ulp_mode(toep)) {
2806a5a965d7SJohn Baldwin 			INP_WUNLOCK(inp);
2807a5a965d7SJohn Baldwin 			return (EOPNOTSUPP);
2808a5a965d7SJohn Baldwin 		}
2809a5a965d7SJohn Baldwin 	}
2810a5a965d7SJohn Baldwin 	INP_WUNLOCK(inp);
2811a5a965d7SJohn Baldwin
2812dc964385SJohn Baldwin 	DDP_LOCK(toep);
2813dc964385SJohn Baldwin
2814dc964385SJohn Baldwin 	/*
2815eba13bbcSJohn Baldwin 	 * If DDP is being used for all normal receive, don't use it
2816eba13bbcSJohn Baldwin 	 * for AIO.
2817eba13bbcSJohn Baldwin 	 */
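	/*
	 * The two modes are mutually exclusive on a connection: this
	 * function rejects jobs once DDP_RCVBUF is set, and
	 * t4_enable_ddp_rcv() likewise fails once DDP_AIO is set, so
	 * whichever consumer claims the connection first wins.
	 */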
2818eba13bbcSJohn Baldwin 	if ((toep->ddp.flags & DDP_RCVBUF) != 0) {
2819eba13bbcSJohn Baldwin 		DDP_UNLOCK(toep);
2820eba13bbcSJohn Baldwin 		return (EOPNOTSUPP);
2821eba13bbcSJohn Baldwin 	}
2822eba13bbcSJohn Baldwin
2823eba13bbcSJohn Baldwin 	/*
2824dc964385SJohn Baldwin 	 * XXX: Think about possibly returning errors for ENOTCONN,
2825dc964385SJohn Baldwin 	 * etc.  Perhaps the caller would only queue the request
2826dc964385SJohn Baldwin 	 * if it failed with EOPNOTSUPP?
2827dc964385SJohn Baldwin 	 */
2828dc964385SJohn Baldwin
2829dc964385SJohn Baldwin #ifdef VERBOSE_TRACES
28308674e626SNavdeep Parhar 	CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid);
2831dc964385SJohn Baldwin #endif
2832dc964385SJohn Baldwin 	if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
2833dc964385SJohn Baldwin 		panic("new job was cancelled");
2834eba13bbcSJohn Baldwin
2835eba13bbcSJohn Baldwin 	if ((toep->ddp.flags & DDP_AIO) == 0) {
2836eba13bbcSJohn Baldwin 		toep->ddp.flags |= DDP_AIO;
2837eba13bbcSJohn Baldwin 		TAILQ_INIT(&toep->ddp.cached_pagesets);
2838eba13bbcSJohn Baldwin 		TAILQ_INIT(&toep->ddp.aiojobq);
2839eba13bbcSJohn Baldwin 		TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task,
2840eba13bbcSJohn Baldwin 		    toep);
2841eba13bbcSJohn Baldwin 	}
2842125d42feSJohn Baldwin 	TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list);
2843125d42feSJohn Baldwin 	toep->ddp.waiting_count++;
2844dc964385SJohn Baldwin
2845dc964385SJohn Baldwin 	/*
2846dc964385SJohn Baldwin 	 * Try to handle this request synchronously.  If this has
2847dc964385SJohn Baldwin 	 * to block because the task is running, it will just bail
2848dc964385SJohn Baldwin 	 * and let the task handle it instead.
2849dc964385SJohn Baldwin 	 */
2850dc964385SJohn Baldwin 	aio_ddp_requeue(toep);
2851dc964385SJohn Baldwin 	DDP_UNLOCK(toep);
2852dc964385SJohn Baldwin 	return (0);
2853dc964385SJohn Baldwin }
2854dc964385SJohn Baldwin
2855eba13bbcSJohn Baldwin static void
2856eba13bbcSJohn Baldwin ddp_rcvbuf_requeue(struct toepcb *toep)
2857eba13bbcSJohn Baldwin {
2858eba13bbcSJohn Baldwin 	struct socket *so;
2859eba13bbcSJohn Baldwin 	struct sockbuf *sb;
2860eba13bbcSJohn Baldwin 	struct inpcb *inp;
2861eba13bbcSJohn Baldwin 	struct ddp_rcv_buffer *drb;
2862eba13bbcSJohn Baldwin
2863eba13bbcSJohn Baldwin 	DDP_ASSERT_LOCKED(toep);
2864eba13bbcSJohn Baldwin restart:
2865eba13bbcSJohn Baldwin 	if ((toep->ddp.flags & DDP_DEAD) != 0) {
2866eba13bbcSJohn Baldwin 		MPASS(toep->ddp.active_count == 0);
2867eba13bbcSJohn Baldwin 		return;
2868eba13bbcSJohn Baldwin 	}
2869eba13bbcSJohn Baldwin
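	/*
	 * Unlike the AIO path, receive-buffer DDP tries to keep both
	 * hardware buffer slots filled with driver-owned buffers; the
	 * restart loop below queues buffers until both slots are
	 * active or one of the bail-out conditions is hit.
	 */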
2870eba13bbcSJohn Baldwin 	/* If both buffers are active, nothing to do. */
2871eba13bbcSJohn Baldwin 	if (toep->ddp.active_count == nitems(toep->ddp.db)) {
2872eba13bbcSJohn Baldwin 		return;
2873eba13bbcSJohn Baldwin 	}
2874eba13bbcSJohn Baldwin
2875eba13bbcSJohn Baldwin 	inp = toep->inp;
2876eba13bbcSJohn Baldwin 	so = inp->inp_socket;
2877eba13bbcSJohn Baldwin 	sb = &so->so_rcv;
2878eba13bbcSJohn Baldwin
2879eba13bbcSJohn Baldwin 	drb = alloc_cached_ddp_rcv_buffer(toep);
2880eba13bbcSJohn Baldwin 	DDP_UNLOCK(toep);
2881eba13bbcSJohn Baldwin
2882eba13bbcSJohn Baldwin 	if (drb == NULL) {
2883eba13bbcSJohn Baldwin 		drb = alloc_ddp_rcv_buffer(toep, M_WAITOK);
2884eba13bbcSJohn Baldwin 		if (drb == NULL) {
2885eba13bbcSJohn Baldwin 			printf("%s: failed to allocate buffer\n", __func__);
2886eba13bbcSJohn Baldwin 			DDP_LOCK(toep);
2887eba13bbcSJohn Baldwin 			return;
2888eba13bbcSJohn Baldwin 		}
2889eba13bbcSJohn Baldwin 	}
2890eba13bbcSJohn Baldwin
2891eba13bbcSJohn Baldwin 	DDP_LOCK(toep);
2892eba13bbcSJohn Baldwin 	if ((toep->ddp.flags & DDP_DEAD) != 0 ||
2893eba13bbcSJohn Baldwin 	    toep->ddp.active_count == nitems(toep->ddp.db)) {
2894eba13bbcSJohn Baldwin 		recycle_ddp_rcv_buffer(toep, drb);
2895eba13bbcSJohn Baldwin 		return;
2896eba13bbcSJohn Baldwin 	}
2897eba13bbcSJohn Baldwin
2898eba13bbcSJohn Baldwin 	/* We will never get anything unless we are or were connected. */
2899eba13bbcSJohn Baldwin 	SOCKBUF_LOCK(sb);
2900eba13bbcSJohn Baldwin 	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
2901eba13bbcSJohn Baldwin 		SOCKBUF_UNLOCK(sb);
2902eba13bbcSJohn Baldwin 		recycle_ddp_rcv_buffer(toep, drb);
2903eba13bbcSJohn Baldwin 		return;
2904eba13bbcSJohn Baldwin 	}
2905eba13bbcSJohn Baldwin
2906eba13bbcSJohn Baldwin 	/* Abort if socket has reported problems or is closed. */
2907eba13bbcSJohn Baldwin 	if (so->so_error != 0 || (sb->sb_state & SBS_CANTRCVMORE) != 0) {
2908eba13bbcSJohn Baldwin 		SOCKBUF_UNLOCK(sb);
2909eba13bbcSJohn Baldwin 		recycle_ddp_rcv_buffer(toep, drb);
2910eba13bbcSJohn Baldwin 		return;
2911eba13bbcSJohn Baldwin 	}
2912eba13bbcSJohn Baldwin 	SOCKBUF_UNLOCK(sb);
2913eba13bbcSJohn Baldwin
2914eba13bbcSJohn Baldwin 	if (!queue_ddp_rcvbuf(toep, drb)) {
2915eba13bbcSJohn Baldwin 		/*
2916eba13bbcSJohn Baldwin 		 * XXX: Need a way to kick a retry here.
2917eba13bbcSJohn Baldwin 		 *
2918eba13bbcSJohn Baldwin 		 * XXX: We know the fixed size needed and could
2919eba13bbcSJohn Baldwin 		 * preallocate the work request using a blocking
2920eba13bbcSJohn Baldwin 		 * request at the start of the task to avoid having to
2921eba13bbcSJohn Baldwin 		 * handle this edge case.
2922eba13bbcSJohn Baldwin 		 */
2923eba13bbcSJohn Baldwin 		return;
2924eba13bbcSJohn Baldwin 	}
2925eba13bbcSJohn Baldwin 	goto restart;
2926eba13bbcSJohn Baldwin }
2927eba13bbcSJohn Baldwin
2928eba13bbcSJohn Baldwin static void
2929eba13bbcSJohn Baldwin ddp_rcvbuf_requeue_task(void *context, int pending)
2930eba13bbcSJohn Baldwin {
2931eba13bbcSJohn Baldwin 	struct toepcb *toep = context;
2932eba13bbcSJohn Baldwin
2933eba13bbcSJohn Baldwin 	DDP_LOCK(toep);
2934eba13bbcSJohn Baldwin 	ddp_rcvbuf_requeue(toep);
2935eba13bbcSJohn Baldwin 	toep->ddp.flags &= ~DDP_TASK_ACTIVE;
2936eba13bbcSJohn Baldwin 	DDP_UNLOCK(toep);
2937eba13bbcSJohn Baldwin
2938eba13bbcSJohn Baldwin 	free_toepcb(toep);
2939eba13bbcSJohn Baldwin }
2940eba13bbcSJohn Baldwin
2941eba13bbcSJohn Baldwin int
2942eba13bbcSJohn Baldwin t4_enable_ddp_rcv(struct socket *so, struct toepcb *toep)
2943eba13bbcSJohn Baldwin {
2944eba13bbcSJohn Baldwin 	struct inpcb *inp = sotoinpcb(so);
2945eba13bbcSJohn Baldwin 	struct adapter *sc = td_adapter(toep->td);
2946eba13bbcSJohn Baldwin
2947eba13bbcSJohn Baldwin 	INP_WLOCK(inp);
2948eba13bbcSJohn Baldwin 	switch (ulp_mode(toep)) {
2949eba13bbcSJohn Baldwin 	case ULP_MODE_TCPDDP:
2950eba13bbcSJohn Baldwin 		break;
2951eba13bbcSJohn Baldwin 	case ULP_MODE_NONE:
2952eba13bbcSJohn Baldwin 		if (set_ddp_ulp_mode(toep))
2953eba13bbcSJohn Baldwin 			break;
2954eba13bbcSJohn Baldwin 		/* FALLTHROUGH */
2955eba13bbcSJohn Baldwin 	default:
2956eba13bbcSJohn Baldwin 		INP_WUNLOCK(inp);
2957eba13bbcSJohn Baldwin 		return (EOPNOTSUPP);
2958eba13bbcSJohn Baldwin 	}
2959eba13bbcSJohn Baldwin 	INP_WUNLOCK(inp);
2960eba13bbcSJohn Baldwin
2961eba13bbcSJohn Baldwin 	DDP_LOCK(toep);
2962eba13bbcSJohn Baldwin
2963eba13bbcSJohn Baldwin 	/*
2964eba13bbcSJohn Baldwin 	 * If DDP is being used for AIO already, don't use it for
2965eba13bbcSJohn Baldwin 	 * normal receive.
2966eba13bbcSJohn Baldwin 	 */
2967eba13bbcSJohn Baldwin 	if ((toep->ddp.flags & DDP_AIO) != 0) {
2968eba13bbcSJohn Baldwin 		DDP_UNLOCK(toep);
2969eba13bbcSJohn Baldwin 		return (EOPNOTSUPP);
2970eba13bbcSJohn Baldwin 	}
2971eba13bbcSJohn Baldwin
2972eba13bbcSJohn Baldwin 	if ((toep->ddp.flags & DDP_RCVBUF) != 0) {
2973eba13bbcSJohn Baldwin 		DDP_UNLOCK(toep);
2974eba13bbcSJohn Baldwin 		return (EBUSY);
2975eba13bbcSJohn Baldwin 	}
2976eba13bbcSJohn Baldwin
2977eba13bbcSJohn Baldwin 	toep->ddp.flags |= DDP_RCVBUF;
2978eba13bbcSJohn Baldwin 	TAILQ_INIT(&toep->ddp.cached_buffers);
2979eba13bbcSJohn Baldwin 	enable_ddp(sc, toep);
2980eba13bbcSJohn Baldwin 	TASK_INIT(&toep->ddp.requeue_task, 0, ddp_rcvbuf_requeue_task, toep);
2981eba13bbcSJohn Baldwin 	ddp_queue_toep(toep);
2982eba13bbcSJohn Baldwin 	DDP_UNLOCK(toep);
2983eba13bbcSJohn Baldwin 	return (0);
2984eba13bbcSJohn Baldwin }
2985eba13bbcSJohn Baldwin
29869689995dSJohn Baldwin void
2987dc964385SJohn Baldwin t4_ddp_mod_load(void)
2988dc964385SJohn Baldwin {
2989eba13bbcSJohn Baldwin 	if (t4_ddp_rcvbuf_len < PAGE_SIZE)
2990eba13bbcSJohn Baldwin 		t4_ddp_rcvbuf_len = PAGE_SIZE;
2991eba13bbcSJohn Baldwin 	if (t4_ddp_rcvbuf_len > MAX_DDP_BUFFER_SIZE)
2992eba13bbcSJohn Baldwin 		t4_ddp_rcvbuf_len = MAX_DDP_BUFFER_SIZE;
2993eba13bbcSJohn Baldwin 	if (!powerof2(t4_ddp_rcvbuf_len))
2994eba13bbcSJohn Baldwin 		t4_ddp_rcvbuf_len = 1 << fls(t4_ddp_rcvbuf_len);
2995dc964385SJohn Baldwin
29964535e804SNavdeep Parhar 	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
29974535e804SNavdeep Parhar 	    CPL_COOKIE_DDP0);
29984535e804SNavdeep Parhar 	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
29994535e804SNavdeep Parhar 	    CPL_COOKIE_DDP1);
3000671bf2b8SNavdeep Parhar 	t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp);
3001671bf2b8SNavdeep Parhar 	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
3002dc964385SJohn Baldwin 	TAILQ_INIT(&ddp_orphan_pagesets);
3003dc964385SJohn Baldwin 	mtx_init(&ddp_orphan_pagesets_lock, "ddp orphans", NULL, MTX_DEF);
3004dc964385SJohn Baldwin 	TASK_INIT(&ddp_orphan_task, 0, ddp_free_orphan_pagesets, NULL);
3005dc964385SJohn Baldwin }
3006dc964385SJohn Baldwin
3007dc964385SJohn Baldwin void
3008dc964385SJohn Baldwin t4_ddp_mod_unload(void)
3009dc964385SJohn Baldwin {
3010dc964385SJohn Baldwin
3011dc964385SJohn Baldwin 	taskqueue_drain(taskqueue_thread, &ddp_orphan_task);
3012dc964385SJohn Baldwin 	MPASS(TAILQ_EMPTY(&ddp_orphan_pagesets));
3013dc964385SJohn Baldwin 	mtx_destroy(&ddp_orphan_pagesets_lock);
3014d6ddb084SNavdeep Parhar 	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP0);
3015d6ddb084SNavdeep Parhar 	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP1);
3016671bf2b8SNavdeep Parhar 	t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL);
3017671bf2b8SNavdeep Parhar 	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL);
3018dc964385SJohn Baldwin }
3019e682d02eSNavdeep Parhar #endif