/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/aio.h>
#include <sys/bio.h>
#include <sys/file.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/tcp_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/toecore.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

#include <cam/scsi/scsi_all.h>
#include <cam/ctl/ctl_io.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * received by the AIO job so far.
 */
#define aio_received	backend3

static void aio_ddp_requeue_task(void *context, int pending);
static void ddp_complete_all(struct toepcb *toep, int error);
static void t4_aio_cancel_active(struct kaiocb *job);
static void t4_aio_cancel_queued(struct kaiocb *job);

static TAILQ_HEAD(, pageset) ddp_orphan_pagesets;
static struct mtx ddp_orphan_pagesets_lock;
static struct task ddp_orphan_task;

#define MAX_DDP_BUFFER_SIZE	(M_TCB_RX_DDP_BUF0_LEN)

/*
 * A page set holds information about a buffer used for DDP.  The page
 * set holds resources such as the VM pages backing the buffer (either
 * held or wired) and the page pods associated with the buffer.
 * Recently used page sets are cached to allow for efficient reuse of
 * buffers (avoiding the need to re-fault in pages, hold them, etc.).
 * Note that cached page sets keep the backing pages wired.  The
 * number of wired pages is capped by only allowing for two wired
 * pagesets per connection.  This is not a perfect cap, but is a
 * trade-off for performance.
 *
 * If an application ping-pongs two buffers for a connection via
 * aio_read(2) then those buffers should remain wired and expensive VM
 * fault lookups should be avoided after each buffer has been used
 * once.  If an application uses more than two buffers then this will
 * fall back to doing expensive VM fault lookups for each operation.
 */
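/*
 * Release a pageset's page pods and unwire its pages.  The final
 * vmspace_free() and free() are deferred to the ddp_orphan_task rather
 * than done inline, presumably because this can be called from contexts
 * (e.g. with the DDP lock held) where performing them directly would
 * not be safe.
 */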
static void
free_pageset(struct tom_data *td, struct pageset *ps)
{
	vm_page_t p;
	int i;

	if (ps->prsv.prsv_nppods > 0)
		t4_free_page_pods(&ps->prsv);

	for (i = 0; i < ps->npages; i++) {
		p = ps->pages[i];
		vm_page_unwire(p, PQ_INACTIVE);
	}
	mtx_lock(&ddp_orphan_pagesets_lock);
	TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link);
	taskqueue_enqueue(taskqueue_thread, &ddp_orphan_task);
	mtx_unlock(&ddp_orphan_pagesets_lock);
}

static void
ddp_free_orphan_pagesets(void *context, int pending)
{
	struct pageset *ps;

	mtx_lock(&ddp_orphan_pagesets_lock);
	while (!TAILQ_EMPTY(&ddp_orphan_pagesets)) {
		ps = TAILQ_FIRST(&ddp_orphan_pagesets);
		TAILQ_REMOVE(&ddp_orphan_pagesets, ps, link);
		mtx_unlock(&ddp_orphan_pagesets_lock);
		if (ps->vm)
			vmspace_free(ps->vm);
		free(ps, M_CXGBE);
		mtx_lock(&ddp_orphan_pagesets_lock);
	}
	mtx_unlock(&ddp_orphan_pagesets_lock);
}

static void
recycle_pageset(struct toepcb *toep, struct pageset *ps)
{

	DDP_ASSERT_LOCKED(toep);
	if (!(toep->ddp.flags & DDP_DEAD)) {
		KASSERT(toep->ddp.cached_count + toep->ddp.active_count <
		    nitems(toep->ddp.db), ("too many wired pagesets"));
		TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link);
		toep->ddp.cached_count++;
	} else
		free_pageset(toep->td, ps);
}

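/*
 * Complete a single AIO request.  An error is reported only if the job
 * received no data; otherwise the transfer completes as a short read.
 */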
static void
ddp_complete_one(struct kaiocb *job, int error)
{
	long copied;

	/*
	 * If this job had copied data out of the socket buffer before
	 * it was cancelled, report it as a short read rather than an
	 * error.
	 */
	copied = job->aio_received;
	if (copied != 0 || error == 0)
		aio_complete(job, copied, 0);
	else
		aio_complete(job, -1, error);
}

static void
free_ddp_buffer(struct tom_data *td, struct ddp_buffer *db)
{

	if (db->job) {
		/*
		 * XXX: If we are un-offloading the socket then we
		 * should requeue these on the socket somehow.  If we
		 * got a FIN from the remote end, then this completes
		 * any remaining requests with an EOF read.
		 */
		if (!aio_clear_cancel_function(db->job))
			ddp_complete_one(db->job, 0);
	}

	if (db->ps)
		free_pageset(td, db->ps);
}

static void
ddp_init_toep(struct toepcb *toep)
{

	TAILQ_INIT(&toep->ddp.aiojobq);
	TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task, toep);
	toep->ddp.flags = DDP_OK;
	toep->ddp.active_id = -1;
	mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF);
}

void
ddp_uninit_toep(struct toepcb *toep)
{

	mtx_destroy(&toep->ddp.lock);
}

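/*
 * Release all DDP state attached to a connection: both hardware buffer
 * slots, every cached pageset, and any queued AIO requests.
 */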
void
release_ddp_resources(struct toepcb *toep)
{
	struct pageset *ps;
	int i;

	DDP_LOCK(toep);
	toep->ddp.flags |= DDP_DEAD;
	for (i = 0; i < nitems(toep->ddp.db); i++) {
		free_ddp_buffer(toep->td, &toep->ddp.db[i]);
	}
	while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) {
		TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
		free_pageset(toep->td, ps);
	}
	ddp_complete_all(toep, 0);
	DDP_UNLOCK(toep);
}

#ifdef INVARIANTS
void
ddp_assert_empty(struct toepcb *toep)
{
	int i;

	MPASS(!(toep->ddp.flags & DDP_TASK_ACTIVE));
	for (i = 0; i < nitems(toep->ddp.db); i++) {
		MPASS(toep->ddp.db[i].job == NULL);
		MPASS(toep->ddp.db[i].ps == NULL);
	}
	MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets));
	MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq));
}
#endif

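/*
 * Retire a hardware DDP buffer slot: fix up the active buffer
 * accounting, detach the job, recycle the pageset, and clear the
 * slot's active flag.
 */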
static void
complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db,
    unsigned int db_idx)
{
	unsigned int db_flag;

	toep->ddp.active_count--;
	if (toep->ddp.active_id == db_idx) {
		if (toep->ddp.active_count == 0) {
			KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL,
			    ("%s: active_count mismatch", __func__));
			toep->ddp.active_id = -1;
		} else
			toep->ddp.active_id ^= 1;
#ifdef VERBOSE_TRACES
		CTR3(KTR_CXGBE, "%s: tid %u, ddp_active_id = %d", __func__,
		    toep->tid, toep->ddp.active_id);
#endif
	} else {
		KASSERT(toep->ddp.active_count != 0 &&
		    toep->ddp.active_id != -1,
		    ("%s: active count mismatch", __func__));
	}

	db->cancel_pending = 0;
	db->job = NULL;
	recycle_pageset(toep, db->ps);
	db->ps = NULL;

	db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
	KASSERT(toep->ddp.flags & db_flag,
	    ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x",
	    __func__, toep, toep->ddp.flags));
	toep->ddp.flags &= ~db_flag;
}

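/*
 * Distribute data placed by the hardware before the connection fell
 * out of DDP among the active buffers' AIO jobs, completing jobs that
 * received data where possible.
 */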
/* XXX: handle_ddp_data code duplication */
void
insert_ddp_data(struct toepcb *toep, uint32_t n)
{
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct ddp_buffer *db;
	struct kaiocb *job;
	size_t placed;
	long copied;
	unsigned int db_idx;
#ifdef INVARIANTS
	unsigned int db_flag;
#endif

	INP_WLOCK_ASSERT(inp);
	DDP_ASSERT_LOCKED(toep);

	tp->rcv_nxt += n;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
	tp->rcv_wnd -= n;
#endif
	CTR2(KTR_CXGBE, "%s: placed %u bytes before falling out of DDP",
	    __func__, n);
	while (toep->ddp.active_count > 0) {
		MPASS(toep->ddp.active_id != -1);
		db_idx = toep->ddp.active_id;
#ifdef INVARIANTS
		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
#endif
		MPASS((toep->ddp.flags & db_flag) != 0);
		db = &toep->ddp.db[db_idx];
		job = db->job;
		copied = job->aio_received;
		placed = n;
		if (placed > job->uaiocb.aio_nbytes - copied)
			placed = job->uaiocb.aio_nbytes - copied;
		if (placed > 0) {
			job->msgrcv = 1;
			toep->ofld_rxq->rx_aio_ddp_jobs++;
		}
		toep->ofld_rxq->rx_aio_ddp_octets += placed;
		if (!aio_clear_cancel_function(job)) {
			/*
			 * Update the copied length for when
			 * t4_aio_cancel_active() completes this
			 * request.
			 */
			job->aio_received += placed;
		} else if (copied + placed != 0) {
			CTR4(KTR_CXGBE,
			    "%s: completing %p (copied %ld, placed %lu)",
			    __func__, job, copied, placed);
			/* XXX: This always completes if there is some data. */
			aio_complete(job, copied + placed, 0);
		} else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
			TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
			toep->ddp.waiting_count++;
		} else
			aio_cancel(job);
		n -= placed;
		complete_ddp_buffer(toep, db, db_idx);
	}

	MPASS(n == 0);
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

/* RX_DATA_ACK sent as a ULP command looks like this */
#define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core))

static inline void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
    uint64_t word, uint64_t mask, uint64_t val)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

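/*
 * Like mk_set_tcb_field_ulp(), this pads with a NOOP sub-command when
 * needed so that the next ULP_TX command starts at a 16B boundary.
 */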
static inline void *
mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
{
	struct ulptx_idata *ulpsc;
	struct cpl_rx_data_ack_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_rx_data_ack_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid));
	req->credit_dack = htobe32(F_RX_MODULATE_RX);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__RX_DATA_ACK_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

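/*
 * Build the compound work request that programs a prepared pageset
 * into hardware DDP buffer slot 'db_idx' of this connection's TCB.
 */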
static struct wrqe *
mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
    struct pageset *ps, int offset, uint64_t ddp_flags, uint64_t ddp_flags_mask)
{
	struct wrqe *wr;
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int len;

	KASSERT(db_idx == 0 || db_idx == 1,
	    ("%s: bad DDP buffer index %d", __func__, db_idx));

	/*
	 * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
	 * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
	 *
	 * The work request header is 16B and always ends at a 16B boundary.
	 * The ULPTX master commands that follow must all end at 16B boundaries
	 * too so we round up the size to 16.
	 */
	len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__RX_DATA_ACK_ULP, 16);

	wr = alloc_wrqe(len, toep->ctrlq);
	if (wr == NULL)
		return (NULL);
	wrh = wrtod(wr);
	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);

	/* Write the buffer's tag */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
	    W_TCB_RX_DDP_BUF0_TAG + db_idx,
	    V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
	    V_TCB_RX_DDP_BUF0_TAG(ps->prsv.prsv_tag));

	/* Update the current offset in the DDP buffer and its total length */
	if (db_idx == 0)
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF0_OFFSET,
		    V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
		    V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
		    V_TCB_RX_DDP_BUF0_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF0_LEN(ps->len));
	else
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF1_OFFSET,
		    V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
		    V_TCB_RX_DDP_BUF1_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)ps->len << 32));

	/* Update DDP flags */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS,
	    ddp_flags_mask, ddp_flags);

	/* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
	ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);

	return (wr);
}

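/*
 * Common handling for CPL_RX_DATA_DDP and CPL_RX_DDP_COMPLETE: account
 * for the data placed into the buffer named by the DDP report and
 * complete or update the associated AIO job.
 */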
static int
handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
{
	uint32_t report = be32toh(ddp_report);
	unsigned int db_idx;
	struct inpcb *inp = toep->inp;
	struct ddp_buffer *db;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct kaiocb *job;
	long copied;

	db_idx = report & F_DDP_BUF_IDX ? 1 : 0;

	if (__predict_false(!(report & F_DDP_INV)))
		CXGBE_UNIMPLEMENTED("DDP buffer still valid");

	INP_WLOCK(inp);
	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	DDP_LOCK(toep);

	KASSERT(toep->ddp.active_id == db_idx,
	    ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx,
	    toep->ddp.active_id, toep->tid));
	db = &toep->ddp.db[db_idx];
	job = db->job;

	if (__predict_false(inp->inp_flags & INP_DROPPED)) {
		/*
		 * This can happen due to an administrative tcpdrop(8).
		 * Just fail the request with ECONNRESET.
		 */
		CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
		    __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
		if (aio_clear_cancel_function(job))
			ddp_complete_one(job, ECONNRESET);
		goto completed;
	}

	tp = intotcpcb(inp);

	/*
	 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
	 * sequence number of the next byte to receive.  The length of
	 * the data received for this message must be computed by
	 * comparing the new and old values of rcv_nxt.
	 *
	 * For RX_DATA_DDP, len might be non-zero, but it is only the
	 * length of the most recent DMA.  It does not include the
	 * total length of the data received since the previous update
	 * for this DDP buffer.  rcv_nxt is the sequence number of the
	 * first received byte from the most recent DMA.
	 */
	len += be32toh(rcv_nxt) - tp->rcv_nxt;
	tp->rcv_nxt += len;
	tp->t_rcvtime = ticks;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
#endif
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %u, DDP[%d] placed %d bytes (%#x)", __func__,
	    toep->tid, db_idx, len, report);
#endif

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(sb);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		struct adapter *sc = td_adapter(toep->td);
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(so, SO_RCV, newsize, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
	}
	SOCKBUF_UNLOCK(sb);
	CURVNET_RESTORE();

	job->msgrcv = 1;
	toep->ofld_rxq->rx_aio_ddp_jobs++;
	toep->ofld_rxq->rx_aio_ddp_octets += len;
	if (db->cancel_pending) {
		/*
		 * Update the job's length but defer completion to the
		 * TCB_RPL callback.
		 */
		job->aio_received += len;
		goto out;
	} else if (!aio_clear_cancel_function(job)) {
		/*
		 * Update the copied length for when
		 * t4_aio_cancel_active() completes this request.
		 */
		job->aio_received += len;
	} else {
		copied = job->aio_received;
#ifdef VERBOSE_TRACES
		CTR5(KTR_CXGBE,
		    "%s: tid %u, completing %p (copied %ld, placed %d)",
		    __func__, toep->tid, job, copied, len);
#endif
		aio_complete(job, copied + len, 0);
		t4_rcvd(&toep->td->tod, tp);
	}

completed:
	complete_ddp_buffer(toep, db, db_idx);
	if (toep->ddp.waiting_count > 0)
		ddp_queue_toep(toep);
out:
	DDP_UNLOCK(toep);
	INP_WUNLOCK(inp);

	return (0);
}

void
handle_ddp_indicate(struct toepcb *toep)
{

	DDP_ASSERT_LOCKED(toep);
	MPASS(toep->ddp.active_count == 0);
	MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0);
	if (toep->ddp.waiting_count == 0) {
		/*
		 * The pending requests that triggered the request for
		 * an indicate were cancelled.  Those cancels should
		 * have already disabled DDP.  Just ignore this as the
		 * data is going into the socket buffer anyway.
		 */
		return;
	}
	CTR3(KTR_CXGBE, "%s: tid %d indicated (%d waiting)", __func__,
	    toep->tid, toep->ddp.waiting_count);
	ddp_queue_toep(toep);
}

CTASSERT(CPL_COOKIE_DDP0 + 1 == CPL_COOKIE_DDP1);

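/*
 * SET_TCB_FIELD replies with a DDP cookie arrive here once a buffer
 * invalidation has taken effect; a pending cancellation (indicated by
 * db->cancel_pending) is finally resolved at this point.
 */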
static int
do_ddp_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	unsigned int db_idx;
	struct toepcb *toep;
	struct inpcb *inp;
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;

	if (cpl->status != CPL_ERR_NONE)
		panic("XXX: tcp_rpl failed: %d", cpl->status);

	toep = lookup_tid(sc, tid);
	inp = toep->inp;
	switch (cpl->cookie) {
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP0):
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(CPL_COOKIE_DDP1):
		/*
		 * XXX: This duplicates a lot of code with handle_ddp_data().
		 */
		db_idx = G_COOKIE(cpl->cookie) - CPL_COOKIE_DDP0;
		MPASS(db_idx < nitems(toep->ddp.db));
		INP_WLOCK(inp);
		DDP_LOCK(toep);
		db = &toep->ddp.db[db_idx];

		/*
		 * handle_ddp_data() should leave the job around until
		 * this callback runs once a cancel is pending.
		 */
		MPASS(db != NULL);
		MPASS(db->job != NULL);
		MPASS(db->cancel_pending);

		/*
		 * XXX: It's not clear what happens if there is data
		 * placed when the buffer is invalidated.  I suspect we
		 * need to read the TCB to see how much data was placed.
		 *
		 * For now this just pretends like nothing was placed.
		 *
		 * XXX: Note that if we did check the PCB we would need to
		 * also take care of updating the tp, etc.
		 */
		job = db->job;
		copied = job->aio_received;
		if (copied == 0) {
			CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
			aio_cancel(job);
		} else {
			CTR3(KTR_CXGBE, "%s: completing %p (copied %ld)",
			    __func__, job, copied);
			aio_complete(job, copied, 0);
			t4_rcvd(&toep->td->tod, intotcpcb(inp));
		}

		complete_ddp_buffer(toep, db, db_idx);
		if (toep->ddp.waiting_count > 0)
			ddp_queue_toep(toep);
		DDP_UNLOCK(toep);
		INP_WUNLOCK(inp);
		break;
	default:
		panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
		    G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
	}

	return (0);
}

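/*
 * A FIN was received with DDP buffers still active.  rcv_nxt from the
 * CPL counts the FIN, so one byte is excluded when computing how much
 * data was placed before distributing it to the active jobs.
 */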
void
handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt)
{
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;
	unsigned int db_idx;
#ifdef INVARIANTS
	unsigned int db_flag;
#endif
	int len, placed;

	INP_WLOCK_ASSERT(toep->inp);
	DDP_ASSERT_LOCKED(toep);

	/* - 1 is to ignore the byte for FIN */
	len = be32toh(rcv_nxt) - tp->rcv_nxt - 1;
	tp->rcv_nxt += len;

	while (toep->ddp.active_count > 0) {
		MPASS(toep->ddp.active_id != -1);
		db_idx = toep->ddp.active_id;
#ifdef INVARIANTS
		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
#endif
		MPASS((toep->ddp.flags & db_flag) != 0);
		db = &toep->ddp.db[db_idx];
		job = db->job;
		copied = job->aio_received;
		placed = len;
		if (placed > job->uaiocb.aio_nbytes - copied)
			placed = job->uaiocb.aio_nbytes - copied;
		if (placed > 0) {
			job->msgrcv = 1;
			toep->ofld_rxq->rx_aio_ddp_jobs++;
		}
		toep->ofld_rxq->rx_aio_ddp_octets += placed;
		if (!aio_clear_cancel_function(job)) {
			/*
			 * Update the copied length for when
			 * t4_aio_cancel_active() completes this
			 * request.
			 */
			job->aio_received += placed;
		} else {
			CTR4(KTR_CXGBE, "%s: tid %d completed buf %d len %d",
			    __func__, toep->tid, db_idx, placed);
			aio_complete(job, copied + placed, 0);
		}
		len -= placed;
		complete_ddp_buffer(toep, db, db_idx);
	}

	MPASS(len == 0);
	ddp_complete_all(toep, 0);
}

#define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\
    F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\
    F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\
    F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR)

extern cpl_handler_t t4_cpl_handler[];

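/*
 * CPL_RX_DATA_DDP is shared with the iSCSI ULP; iSCSI completions are
 * forwarded to the CPL_RX_ISCSI_DDP handler after the common checks.
 */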
static int
do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	uint32_t vld;
	struct toepcb *toep = lookup_tid(sc, tid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	vld = be32toh(cpl->ddpvld);
	if (__predict_false(vld & DDP_ERR)) {
		panic("%s: DDP error 0x%x (tid %d, toep %p)",
		    __func__, vld, tid, toep);
	}

	if (ulp_mode(toep) == ULP_MODE_ISCSI) {
		t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
		return (0);
	}

	handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len));

	return (0);
}

static int
do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0);

	return (0);
}

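/*
 * Switch an established connection to ULP_MODE_TCPDDP: the TCB words
 * used by DDP are cleared (with DDP kept off via DDP_OFF), the ULP
 * type is updated, and the software DDP state is initialized.  Returns
 * false if DDP is disabled or the work request cannot be allocated.
 */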
static bool
set_ddp_ulp_mode(struct toepcb *toep)
{
	struct adapter *sc = toep->vi->adapter;
	struct wrqe *wr;
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int fields, len;

	if (!sc->tt.ddp)
		return (false);

	fields = 0;

	/* Overlay region including W_TCB_RX_DDP_FLAGS */
	fields += 3;

	/* W_TCB_ULP_TYPE */
	fields++;

#ifdef USE_DDP_RX_FLOW_CONTROL
	/* W_TCB_T_FLAGS */
	fields++;
#endif

	len = sizeof(*wrh) + fields * roundup2(LEN__SET_TCB_FIELD_ULP, 16);
	KASSERT(len <= SGE_MAX_WR_LEN,
	    ("%s: WR with %d TCB field updates too large", __func__, fields));

	wr = alloc_wrqe(len, toep->ctrlq);
	if (wr == NULL)
		return (false);

	CTR(KTR_CXGBE, "%s: tid %u", __func__, toep->tid);

	wrh = wrtod(wr);
	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);

	/*
	 * Words 26/27 are zero except for the DDP_OFF flag in
	 * W_TCB_RX_DDP_FLAGS (27).
	 */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 26,
	    0xffffffffffffffff, (uint64_t)V_TF_DDP_OFF(1) << 32);

	/* Words 28/29 are zero. */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 28,
	    0xffffffffffffffff, 0);

	/* Words 30/31 are zero. */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, 30,
	    0xffffffffffffffff, 0);

	/* Set the ULP mode to ULP_MODE_TCPDDP. */
	toep->params.ulp_mode = ULP_MODE_TCPDDP;
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_ULP_TYPE,
	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE),
	    V_TCB_ULP_TYPE(ULP_MODE_TCPDDP));

#ifdef USE_DDP_RX_FLOW_CONTROL
	/* Set TF_RX_FLOW_CONTROL_DDP. */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_T_FLAGS,
	    V_TF_RX_FLOW_CONTROL_DDP(1), V_TF_RX_FLOW_CONTROL_DDP(1));
#endif

	ddp_init_toep(toep);

	t4_wrq_tx(sc, wr);
	return (true);
}

static void
enable_ddp(struct adapter *sc, struct toepcb *toep)
{

	KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
	    ("%s: toep %p has bad ddp_flags 0x%x",
	    __func__, toep, toep->ddp.flags));

	CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
	    __func__, toep->tid, time_uptime);

	DDP_ASSERT_LOCKED(toep);
	toep->ddp.flags |= DDP_SC_REQ;
	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_RX_DDP_FLAGS,
	    V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1),
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1), 0, 0);
	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
	    V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0);
}

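/*
 * Euclid's algorithm: returns the highest common factor (GCD) of n1
 * and n2.  Used to find the largest DDP page size that evenly divides
 * every physically contiguous segment of a buffer.
 */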
static int
calculate_hcf(int n1, int n2)
{
	int a, b, t;

	if (n1 <= n2) {
		a = n1;
		b = n2;
	} else {
		a = n2;
		b = n1;
	}

	while (a != 0) {
		t = a;
		a = b % a;
		b = t;
	}

	return (b);
}

static inline int
pages_to_nppods(int npages, int ddp_page_shift)
{

	MPASS(ddp_page_shift >= PAGE_SHIFT);

	return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES));
}

static int
alloc_page_pods(struct ppod_region *pr, u_int nppods, u_int pgsz_idx,
    struct ppod_reservation *prsv)
{
	vmem_addr_t addr;	/* relative to start of region */

	if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT,
	    &addr) != 0)
		return (ENOMEM);

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d",
	    __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask,
	    nppods, 1 << pr->pr_page_shift[pgsz_idx]);
#endif

	/*
	 * The hardware tagmask includes an extra invalid bit but the arena was
	 * seeded with valid values only.  An allocation out of this arena will
	 * fit inside the tagmask but won't have the invalid bit set.
	 */
	MPASS((addr & pr->pr_tag_mask) == addr);
	MPASS((addr & pr->pr_invalid_bit) == 0);

	prsv->prsv_pr = pr;
	prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr;
	prsv->prsv_nppods = nppods;

	return (0);
}

static int
t4_alloc_page_pods_for_vmpages(struct ppod_region *pr, vm_page_t *pages,
    int npages, struct ppod_reservation *prsv)
{
	int i, hcf, seglen, idx, nppods;

	/*
	 * The DDP page size is unrelated to the VM page size.  We combine
	 * contiguous physical pages into larger segments to get the best DDP
	 * page size possible.  This is the largest of the four sizes in
	 * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes in
	 * the page list.
	 */
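	/*
	 * Example: a buffer made up of three physically contiguous 16KB
	 * runs has an HCF of 16KB, so a 16KB DDP page size is usable if
	 * the region offers one; a single 4KB run anywhere in the list
	 * drags the HCF (and thus the DDP page size) down to 4KB.
	 */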
	hcf = 0;
	for (i = 0; i < npages; i++) {
		seglen = PAGE_SIZE;
		while (i < npages - 1 &&
		    VM_PAGE_TO_PHYS(pages[i]) + PAGE_SIZE ==
		    VM_PAGE_TO_PHYS(pages[i + 1])) {
			seglen += PAGE_SIZE;
			i++;
		}

		hcf = calculate_hcf(hcf, seglen);
		if (hcf < (1 << pr->pr_page_shift[1])) {
			idx = 0;
			goto have_pgsz;	/* give up, short circuit */
		}
	}

#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
		if ((hcf & PR_PAGE_MASK(idx)) == 0)
			break;
	}
#undef PR_PAGE_MASK

have_pgsz:
	MPASS(idx <= M_PPOD_PGSZ);

	nppods = pages_to_nppods(npages, pr->pr_page_shift[idx]);
	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
		return (ENOMEM);
	MPASS(prsv->prsv_nppods > 0);

	return (0);
}

int
t4_alloc_page_pods_for_ps(struct ppod_region *pr, struct pageset *ps)
{
	struct ppod_reservation *prsv = &ps->prsv;

	KASSERT(prsv->prsv_nppods == 0,
	    ("%s: page pods already allocated", __func__));

	return (t4_alloc_page_pods_for_vmpages(pr, ps->pages, ps->npages,
	    prsv));
}

int
t4_alloc_page_pods_for_bio(struct ppod_region *pr, struct bio *bp,
    struct ppod_reservation *prsv)
{

	MPASS(bp->bio_flags & BIO_UNMAPPED);

	return (t4_alloc_page_pods_for_vmpages(pr, bp->bio_ma, bp->bio_ma_n,
	    prsv));
}

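/*
 * Like t4_alloc_page_pods_for_ps() but for a KVA buffer; the physical
 * segments are discovered by walking the mapping with pmap_kextract().
 */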
int
t4_alloc_page_pods_for_buf(struct ppod_region *pr, vm_offset_t buf, int len,
    struct ppod_reservation *prsv)
{
	int hcf, seglen, idx, npages, nppods;
	uintptr_t start_pva, end_pva, pva, p1;

	MPASS(buf > 0);
	MPASS(len > 0);

	/*
	 * The DDP page size is unrelated to the VM page size.  We combine
	 * contiguous physical pages into larger segments to get the best DDP
	 * page size possible.  This is the largest of the four sizes in
	 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes
	 * in the page list.
	 */
	hcf = 0;
	start_pva = trunc_page(buf);
	end_pva = trunc_page(buf + len - 1);
	pva = start_pva;
	while (pva <= end_pva) {
		seglen = PAGE_SIZE;
		p1 = pmap_kextract(pva);
		pva += PAGE_SIZE;
		while (pva <= end_pva && p1 + seglen == pmap_kextract(pva)) {
			seglen += PAGE_SIZE;
			pva += PAGE_SIZE;
		}

		hcf = calculate_hcf(hcf, seglen);
		if (hcf < (1 << pr->pr_page_shift[1])) {
			idx = 0;
			goto have_pgsz;	/* give up, short circuit */
		}
	}

#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
		if ((hcf & PR_PAGE_MASK(idx)) == 0)
			break;
	}
#undef PR_PAGE_MASK

have_pgsz:
	MPASS(idx <= M_PPOD_PGSZ);

	npages = 1;
	npages += (end_pva - start_pva) >> pr->pr_page_shift[idx];
	nppods = howmany(npages, PPOD_PAGES);
	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
		return (ENOMEM);
	MPASS(prsv->prsv_nppods > 0);

	return (0);
}

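/*
 * Variant for a CTL scatter/gather list: the HCF is computed across
 * the physical segments of every entry before sizing the reservation.
 */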
111746bee804SJohn Baldwin */ 111846bee804SJohn Baldwin hcf = 0; 111946bee804SJohn Baldwin for (i = entries - 1; i >= 0; i--) { 112046bee804SJohn Baldwin sge = sgl + i; 112146bee804SJohn Baldwin buf = (vm_offset_t)sge->addr; 112246bee804SJohn Baldwin len = sge->len; 112346bee804SJohn Baldwin start_pva = trunc_page(buf); 112446bee804SJohn Baldwin end_pva = trunc_page(buf + len - 1); 112546bee804SJohn Baldwin pva = start_pva; 112646bee804SJohn Baldwin while (pva <= end_pva) { 112746bee804SJohn Baldwin seglen = PAGE_SIZE; 112846bee804SJohn Baldwin p1 = pmap_kextract(pva); 112946bee804SJohn Baldwin pva += PAGE_SIZE; 113046bee804SJohn Baldwin while (pva <= end_pva && p1 + seglen == 113146bee804SJohn Baldwin pmap_kextract(pva)) { 113246bee804SJohn Baldwin seglen += PAGE_SIZE; 113346bee804SJohn Baldwin pva += PAGE_SIZE; 113446bee804SJohn Baldwin } 113546bee804SJohn Baldwin 113646bee804SJohn Baldwin hcf = calculate_hcf(hcf, seglen); 113746bee804SJohn Baldwin if (hcf < (1 << pr->pr_page_shift[1])) { 113846bee804SJohn Baldwin idx = 0; 113946bee804SJohn Baldwin goto have_pgsz; /* give up, short circuit */ 114046bee804SJohn Baldwin } 114146bee804SJohn Baldwin } 114246bee804SJohn Baldwin } 114346bee804SJohn Baldwin #define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1) 114446bee804SJohn Baldwin MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */ 114546bee804SJohn Baldwin for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) { 114646bee804SJohn Baldwin if ((hcf & PR_PAGE_MASK(idx)) == 0) 114746bee804SJohn Baldwin break; 114846bee804SJohn Baldwin } 114946bee804SJohn Baldwin #undef PR_PAGE_MASK 115046bee804SJohn Baldwin 115146bee804SJohn Baldwin have_pgsz: 115246bee804SJohn Baldwin MPASS(idx <= M_PPOD_PGSZ); 115346bee804SJohn Baldwin 115446bee804SJohn Baldwin npages = 0; 115546bee804SJohn Baldwin while (entries--) { 115646bee804SJohn Baldwin npages++; 11578d2b4b2eSJohn Baldwin start_pva = trunc_page((vm_offset_t)sgl->addr); 115846bee804SJohn Baldwin end_pva = trunc_page((vm_offset_t)sgl->addr + sgl->len - 1); 115946bee804SJohn Baldwin npages += (end_pva - start_pva) >> pr->pr_page_shift[idx]; 116046bee804SJohn Baldwin sgl = sgl + 1; 116146bee804SJohn Baldwin } 116246bee804SJohn Baldwin nppods = howmany(npages, PPOD_PAGES); 116346bee804SJohn Baldwin if (alloc_page_pods(pr, nppods, idx, prsv) != 0) 116446bee804SJohn Baldwin return (ENOMEM); 116546bee804SJohn Baldwin MPASS(prsv->prsv_nppods > 0); 116646bee804SJohn Baldwin return (0); 116746bee804SJohn Baldwin } 116846bee804SJohn Baldwin 1169968267fdSNavdeep Parhar void 1170968267fdSNavdeep Parhar t4_free_page_pods(struct ppod_reservation *prsv) 1171968267fdSNavdeep Parhar { 1172968267fdSNavdeep Parhar struct ppod_region *pr = prsv->prsv_pr; 1173968267fdSNavdeep Parhar vmem_addr_t addr; 1174968267fdSNavdeep Parhar 1175968267fdSNavdeep Parhar MPASS(prsv != NULL); 1176968267fdSNavdeep Parhar MPASS(prsv->prsv_nppods != 0); 1177968267fdSNavdeep Parhar 1178968267fdSNavdeep Parhar addr = prsv->prsv_tag & pr->pr_tag_mask; 1179968267fdSNavdeep Parhar MPASS((addr & pr->pr_invalid_bit) == 0); 1180968267fdSNavdeep Parhar 118127539974SJohn Baldwin #ifdef VERBOSE_TRACES 1182968267fdSNavdeep Parhar CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__, 1183968267fdSNavdeep Parhar pr->pr_arena, addr, prsv->prsv_nppods); 118427539974SJohn Baldwin #endif 1185968267fdSNavdeep Parhar 1186968267fdSNavdeep Parhar vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods)); 1187968267fdSNavdeep Parhar prsv->prsv_nppods = 0; 
1188968267fdSNavdeep Parhar } 1189968267fdSNavdeep Parhar 1190e682d02eSNavdeep Parhar #define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE) 1191e682d02eSNavdeep Parhar 1192968267fdSNavdeep Parhar int 1193968267fdSNavdeep Parhar t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid, 1194968267fdSNavdeep Parhar struct pageset *ps) 1195e682d02eSNavdeep Parhar { 1196e682d02eSNavdeep Parhar struct wrqe *wr; 1197e682d02eSNavdeep Parhar struct ulp_mem_io *ulpmc; 1198e682d02eSNavdeep Parhar struct ulptx_idata *ulpsc; 1199e682d02eSNavdeep Parhar struct pagepod *ppod; 1200db8bcd1bSNavdeep Parhar int i, j, k, n, chunk, len, ddp_pgsz, idx; 1201db8bcd1bSNavdeep Parhar u_int ppod_addr; 120288c4ff7bSNavdeep Parhar uint32_t cmd; 1203968267fdSNavdeep Parhar struct ppod_reservation *prsv = &ps->prsv; 1204968267fdSNavdeep Parhar struct ppod_region *pr = prsv->prsv_pr; 120587b0e771SJohn Baldwin vm_paddr_t pa; 1206e682d02eSNavdeep Parhar 1207dc964385SJohn Baldwin KASSERT(!(ps->flags & PS_PPODS_WRITTEN), 1208dc964385SJohn Baldwin ("%s: page pods already written", __func__)); 1209968267fdSNavdeep Parhar MPASS(prsv->prsv_nppods > 0); 1210dc964385SJohn Baldwin 121188c4ff7bSNavdeep Parhar cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 121288c4ff7bSNavdeep Parhar if (is_t4(sc)) 121388c4ff7bSNavdeep Parhar cmd |= htobe32(F_ULP_MEMIO_ORDER); 121488c4ff7bSNavdeep Parhar else 121588c4ff7bSNavdeep Parhar cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 1216968267fdSNavdeep Parhar ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 1217968267fdSNavdeep Parhar ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 1218968267fdSNavdeep Parhar for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 1219e682d02eSNavdeep Parhar 1220e682d02eSNavdeep Parhar /* How many page pods are we writing in this cycle */ 1221968267fdSNavdeep Parhar n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 1222e682d02eSNavdeep Parhar chunk = PPOD_SZ(n); 1223d14b0ac1SNavdeep Parhar len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 1224e682d02eSNavdeep Parhar 1225968267fdSNavdeep Parhar wr = alloc_wrqe(len, wrq); 1226e682d02eSNavdeep Parhar if (wr == NULL) 1227e682d02eSNavdeep Parhar return (ENOMEM); /* ok to just bail out */ 1228e682d02eSNavdeep Parhar ulpmc = wrtod(wr); 1229e682d02eSNavdeep Parhar 1230e682d02eSNavdeep Parhar INIT_ULPTX_WR(ulpmc, len, 0, 0); 123188c4ff7bSNavdeep Parhar ulpmc->cmd = cmd; 1232e682d02eSNavdeep Parhar ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 1233e682d02eSNavdeep Parhar ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 1234e682d02eSNavdeep Parhar ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 1235e682d02eSNavdeep Parhar 1236e682d02eSNavdeep Parhar ulpsc = (struct ulptx_idata *)(ulpmc + 1); 1237e682d02eSNavdeep Parhar ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 1238e682d02eSNavdeep Parhar ulpsc->len = htobe32(chunk); 1239e682d02eSNavdeep Parhar 1240e682d02eSNavdeep Parhar ppod = (struct pagepod *)(ulpsc + 1); 1241e682d02eSNavdeep Parhar for (j = 0; j < n; i++, j++, ppod++) { 1242e682d02eSNavdeep Parhar ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 1243968267fdSNavdeep Parhar V_PPOD_TID(tid) | prsv->prsv_tag); 1244dc964385SJohn Baldwin ppod->len_offset = htobe64(V_PPOD_LEN(ps->len) | 1245dc964385SJohn Baldwin V_PPOD_OFST(ps->offset)); 1246e682d02eSNavdeep Parhar ppod->rsvd = 0; 1247e682d02eSNavdeep Parhar idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE); 124857c60f98SNavdeep Parhar for (k = 0; k < nitems(ppod->addr); k++) { 
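/*
 * Fill each address slot in this pod with the physical address
 * of the DDP page it maps; slots past the end of the pageset
 * are zeroed.
 */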
1249dc964385SJohn Baldwin if (idx < ps->npages) { 125087b0e771SJohn Baldwin pa = VM_PAGE_TO_PHYS(ps->pages[idx]); 125187b0e771SJohn Baldwin ppod->addr[k] = htobe64(pa); 1252e682d02eSNavdeep Parhar idx += ddp_pgsz / PAGE_SIZE; 1253e682d02eSNavdeep Parhar } else 1254e682d02eSNavdeep Parhar ppod->addr[k] = 0; 1255e682d02eSNavdeep Parhar #if 0 1256e682d02eSNavdeep Parhar CTR5(KTR_CXGBE, 1257e682d02eSNavdeep Parhar "%s: tid %d ppod[%d]->addr[%d] = %p", 1258bca6e339SJohn Baldwin __func__, tid, i, k, 125944e7472dSJohn Baldwin be64toh(ppod->addr[k])); 1260e682d02eSNavdeep Parhar #endif 1261e682d02eSNavdeep Parhar } 1262e682d02eSNavdeep Parhar 1263e682d02eSNavdeep Parhar } 1264e682d02eSNavdeep Parhar 1265e682d02eSNavdeep Parhar t4_wrq_tx(sc, wr); 1266e682d02eSNavdeep Parhar } 1267dc964385SJohn Baldwin ps->flags |= PS_PPODS_WRITTEN; 1268e682d02eSNavdeep Parhar 1269e682d02eSNavdeep Parhar return (0); 1270e682d02eSNavdeep Parhar } 1271e682d02eSNavdeep Parhar 12724427ac36SJohn Baldwin static struct mbuf * 12734427ac36SJohn Baldwin alloc_raw_wr_mbuf(int len) 12744427ac36SJohn Baldwin { 12754427ac36SJohn Baldwin struct mbuf *m; 12764427ac36SJohn Baldwin 12774427ac36SJohn Baldwin if (len <= MHLEN) 12784427ac36SJohn Baldwin m = m_gethdr(M_NOWAIT, MT_DATA); 12794427ac36SJohn Baldwin else if (len <= MCLBYTES) 12804427ac36SJohn Baldwin m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR); 12814427ac36SJohn Baldwin else 12824427ac36SJohn Baldwin m = NULL; 12834427ac36SJohn Baldwin if (m == NULL) 12844427ac36SJohn Baldwin return (NULL); 12854427ac36SJohn Baldwin m->m_pkthdr.len = len; 12864427ac36SJohn Baldwin m->m_len = len; 12874427ac36SJohn Baldwin set_mbuf_raw_wr(m, true); 12884427ac36SJohn Baldwin return (m); 12894427ac36SJohn Baldwin } 12904427ac36SJohn Baldwin 1291a9feb2cdSNavdeep Parhar int 12922beaefe8SJohn Baldwin t4_write_page_pods_for_bio(struct adapter *sc, struct toepcb *toep, 12932beaefe8SJohn Baldwin struct ppod_reservation *prsv, struct bio *bp, struct mbufq *wrq) 12942beaefe8SJohn Baldwin { 12952beaefe8SJohn Baldwin struct ulp_mem_io *ulpmc; 12962beaefe8SJohn Baldwin struct ulptx_idata *ulpsc; 12972beaefe8SJohn Baldwin struct pagepod *ppod; 12982beaefe8SJohn Baldwin int i, j, k, n, chunk, len, ddp_pgsz, idx; 12992beaefe8SJohn Baldwin u_int ppod_addr; 13002beaefe8SJohn Baldwin uint32_t cmd; 13012beaefe8SJohn Baldwin struct ppod_region *pr = prsv->prsv_pr; 13022beaefe8SJohn Baldwin vm_paddr_t pa; 13032beaefe8SJohn Baldwin struct mbuf *m; 13042beaefe8SJohn Baldwin 13052beaefe8SJohn Baldwin MPASS(bp->bio_flags & BIO_UNMAPPED); 13062beaefe8SJohn Baldwin 13072beaefe8SJohn Baldwin cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 13082beaefe8SJohn Baldwin if (is_t4(sc)) 13092beaefe8SJohn Baldwin cmd |= htobe32(F_ULP_MEMIO_ORDER); 13102beaefe8SJohn Baldwin else 13112beaefe8SJohn Baldwin cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 13122beaefe8SJohn Baldwin ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 13132beaefe8SJohn Baldwin ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 13142beaefe8SJohn Baldwin for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 13152beaefe8SJohn Baldwin 13162beaefe8SJohn Baldwin /* How many page pods are we writing in this cycle */ 13172beaefe8SJohn Baldwin n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 13182beaefe8SJohn Baldwin MPASS(n > 0); 13192beaefe8SJohn Baldwin chunk = PPOD_SZ(n); 13202beaefe8SJohn Baldwin len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 13212beaefe8SJohn Baldwin 13222beaefe8SJohn Baldwin m = alloc_raw_wr_mbuf(len); 
13232beaefe8SJohn Baldwin if (m == NULL) 13242beaefe8SJohn Baldwin return (ENOMEM); 13252beaefe8SJohn Baldwin 13262beaefe8SJohn Baldwin ulpmc = mtod(m, struct ulp_mem_io *); 13272beaefe8SJohn Baldwin INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); 13282beaefe8SJohn Baldwin ulpmc->cmd = cmd; 13292beaefe8SJohn Baldwin ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 13302beaefe8SJohn Baldwin ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 13312beaefe8SJohn Baldwin ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 13322beaefe8SJohn Baldwin 13332beaefe8SJohn Baldwin ulpsc = (struct ulptx_idata *)(ulpmc + 1); 13342beaefe8SJohn Baldwin ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 13352beaefe8SJohn Baldwin ulpsc->len = htobe32(chunk); 13362beaefe8SJohn Baldwin 13372beaefe8SJohn Baldwin ppod = (struct pagepod *)(ulpsc + 1); 13382beaefe8SJohn Baldwin for (j = 0; j < n; i++, j++, ppod++) { 13392beaefe8SJohn Baldwin ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 13402beaefe8SJohn Baldwin V_PPOD_TID(toep->tid) | 13412beaefe8SJohn Baldwin (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); 13422beaefe8SJohn Baldwin ppod->len_offset = htobe64(V_PPOD_LEN(bp->bio_bcount) | 13432beaefe8SJohn Baldwin V_PPOD_OFST(bp->bio_ma_offset)); 13442beaefe8SJohn Baldwin ppod->rsvd = 0; 13452beaefe8SJohn Baldwin idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE); 13462beaefe8SJohn Baldwin for (k = 0; k < nitems(ppod->addr); k++) { 13472beaefe8SJohn Baldwin if (idx < bp->bio_ma_n) { 13482beaefe8SJohn Baldwin pa = VM_PAGE_TO_PHYS(bp->bio_ma[idx]); 13492beaefe8SJohn Baldwin ppod->addr[k] = htobe64(pa); 13502beaefe8SJohn Baldwin idx += ddp_pgsz / PAGE_SIZE; 13512beaefe8SJohn Baldwin } else 13522beaefe8SJohn Baldwin ppod->addr[k] = 0; 13532beaefe8SJohn Baldwin #if 0 13542beaefe8SJohn Baldwin CTR5(KTR_CXGBE, 13552beaefe8SJohn Baldwin "%s: tid %d ppod[%d]->addr[%d] = %p", 13562beaefe8SJohn Baldwin __func__, toep->tid, i, k, 13572beaefe8SJohn Baldwin be64toh(ppod->addr[k])); 13582beaefe8SJohn Baldwin #endif 13592beaefe8SJohn Baldwin } 13602beaefe8SJohn Baldwin } 13612beaefe8SJohn Baldwin 13622beaefe8SJohn Baldwin mbufq_enqueue(wrq, m); 13632beaefe8SJohn Baldwin } 13642beaefe8SJohn Baldwin 13652beaefe8SJohn Baldwin return (0); 13662beaefe8SJohn Baldwin } 13672beaefe8SJohn Baldwin 13682beaefe8SJohn Baldwin int 13694427ac36SJohn Baldwin t4_write_page_pods_for_buf(struct adapter *sc, struct toepcb *toep, 1370f949967cSJohn Baldwin struct ppod_reservation *prsv, vm_offset_t buf, int buflen, 1371f949967cSJohn Baldwin struct mbufq *wrq) 1372a9feb2cdSNavdeep Parhar { 1373a9feb2cdSNavdeep Parhar struct ulp_mem_io *ulpmc; 1374a9feb2cdSNavdeep Parhar struct ulptx_idata *ulpsc; 1375a9feb2cdSNavdeep Parhar struct pagepod *ppod; 1376a9feb2cdSNavdeep Parhar int i, j, k, n, chunk, len, ddp_pgsz; 1377a9feb2cdSNavdeep Parhar u_int ppod_addr, offset; 1378a9feb2cdSNavdeep Parhar uint32_t cmd; 1379a9feb2cdSNavdeep Parhar struct ppod_region *pr = prsv->prsv_pr; 1380de414339SJohn Baldwin uintptr_t end_pva, pva; 1381de414339SJohn Baldwin vm_paddr_t pa; 13824427ac36SJohn Baldwin struct mbuf *m; 1383a9feb2cdSNavdeep Parhar 1384a9feb2cdSNavdeep Parhar cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 1385a9feb2cdSNavdeep Parhar if (is_t4(sc)) 1386a9feb2cdSNavdeep Parhar cmd |= htobe32(F_ULP_MEMIO_ORDER); 1387a9feb2cdSNavdeep Parhar else 1388a9feb2cdSNavdeep Parhar cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 1389a9feb2cdSNavdeep Parhar ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 1390a9feb2cdSNavdeep Parhar offset = 
buf & PAGE_MASK; 1391a9feb2cdSNavdeep Parhar ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 1392a9feb2cdSNavdeep Parhar pva = trunc_page(buf); 1393a9feb2cdSNavdeep Parhar end_pva = trunc_page(buf + buflen - 1); 1394a9feb2cdSNavdeep Parhar for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 1395a9feb2cdSNavdeep Parhar 1396a9feb2cdSNavdeep Parhar /* How many page pods are we writing in this cycle */ 1397a9feb2cdSNavdeep Parhar n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 1398a9feb2cdSNavdeep Parhar MPASS(n > 0); 1399a9feb2cdSNavdeep Parhar chunk = PPOD_SZ(n); 1400a9feb2cdSNavdeep Parhar len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 1401a9feb2cdSNavdeep Parhar 14024427ac36SJohn Baldwin m = alloc_raw_wr_mbuf(len); 1403f949967cSJohn Baldwin if (m == NULL) 14044427ac36SJohn Baldwin return (ENOMEM); 14054427ac36SJohn Baldwin ulpmc = mtod(m, struct ulp_mem_io *); 1406a9feb2cdSNavdeep Parhar 14074427ac36SJohn Baldwin INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); 1408a9feb2cdSNavdeep Parhar ulpmc->cmd = cmd; 1409a9feb2cdSNavdeep Parhar ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 1410a9feb2cdSNavdeep Parhar ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 1411a9feb2cdSNavdeep Parhar ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 1412a9feb2cdSNavdeep Parhar 1413a9feb2cdSNavdeep Parhar ulpsc = (struct ulptx_idata *)(ulpmc + 1); 1414a9feb2cdSNavdeep Parhar ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 1415a9feb2cdSNavdeep Parhar ulpsc->len = htobe32(chunk); 1416a9feb2cdSNavdeep Parhar 1417a9feb2cdSNavdeep Parhar ppod = (struct pagepod *)(ulpsc + 1); 1418a9feb2cdSNavdeep Parhar for (j = 0; j < n; i++, j++, ppod++) { 1419a9feb2cdSNavdeep Parhar ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 14204427ac36SJohn Baldwin V_PPOD_TID(toep->tid) | 1421a9feb2cdSNavdeep Parhar (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); 1422a9feb2cdSNavdeep Parhar ppod->len_offset = htobe64(V_PPOD_LEN(buflen) | 1423a9feb2cdSNavdeep Parhar V_PPOD_OFST(offset)); 1424a9feb2cdSNavdeep Parhar ppod->rsvd = 0; 1425a9feb2cdSNavdeep Parhar 1426a9feb2cdSNavdeep Parhar for (k = 0; k < nitems(ppod->addr); k++) { 1427a9feb2cdSNavdeep Parhar if (pva > end_pva) 1428a9feb2cdSNavdeep Parhar ppod->addr[k] = 0; 1429a9feb2cdSNavdeep Parhar else { 1430a9feb2cdSNavdeep Parhar pa = pmap_kextract(pva); 1431a9feb2cdSNavdeep Parhar ppod->addr[k] = htobe64(pa); 1432a9feb2cdSNavdeep Parhar pva += ddp_pgsz; 1433a9feb2cdSNavdeep Parhar } 1434a9feb2cdSNavdeep Parhar #if 0 1435a9feb2cdSNavdeep Parhar CTR5(KTR_CXGBE, 1436a9feb2cdSNavdeep Parhar "%s: tid %d ppod[%d]->addr[%d] = %p", 14374427ac36SJohn Baldwin __func__, toep->tid, i, k, 143844e7472dSJohn Baldwin be64toh(ppod->addr[k])); 1439a9feb2cdSNavdeep Parhar #endif 1440a9feb2cdSNavdeep Parhar } 1441a9feb2cdSNavdeep Parhar 1442a9feb2cdSNavdeep Parhar /* 1443a9feb2cdSNavdeep Parhar * Walk back 1 segment so that the first address in the 1444a9feb2cdSNavdeep Parhar * next pod is the same as the last one in the current 1445a9feb2cdSNavdeep Parhar * pod. 
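 * (Each pod's address list deliberately shares its last entry with
 * the first entry of the following pod; t4_write_page_pods_for_sgl()
 * below implements the same overlap explicitly.)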
1446a9feb2cdSNavdeep Parhar */ 1447a9feb2cdSNavdeep Parhar pva -= ddp_pgsz; 1448a9feb2cdSNavdeep Parhar } 1449a9feb2cdSNavdeep Parhar 1450f949967cSJohn Baldwin mbufq_enqueue(wrq, m); 1451a9feb2cdSNavdeep Parhar } 1452a9feb2cdSNavdeep Parhar 1453a9feb2cdSNavdeep Parhar MPASS(pva <= end_pva); 1454a9feb2cdSNavdeep Parhar 1455a9feb2cdSNavdeep Parhar return (0); 1456a9feb2cdSNavdeep Parhar } 1457a9feb2cdSNavdeep Parhar 145846bee804SJohn Baldwin int 145946bee804SJohn Baldwin t4_write_page_pods_for_sgl(struct adapter *sc, struct toepcb *toep, 146046bee804SJohn Baldwin struct ppod_reservation *prsv, struct ctl_sg_entry *sgl, int entries, 1461f949967cSJohn Baldwin int xferlen, struct mbufq *wrq) 146246bee804SJohn Baldwin { 146346bee804SJohn Baldwin struct ulp_mem_io *ulpmc; 146446bee804SJohn Baldwin struct ulptx_idata *ulpsc; 146546bee804SJohn Baldwin struct pagepod *ppod; 146646bee804SJohn Baldwin int i, j, k, n, chunk, len, ddp_pgsz; 146746bee804SJohn Baldwin u_int ppod_addr, offset, sg_offset = 0; 146846bee804SJohn Baldwin uint32_t cmd; 146946bee804SJohn Baldwin struct ppod_region *pr = prsv->prsv_pr; 1470de414339SJohn Baldwin uintptr_t pva; 1471de414339SJohn Baldwin vm_paddr_t pa; 147246bee804SJohn Baldwin struct mbuf *m; 147346bee804SJohn Baldwin 147446bee804SJohn Baldwin MPASS(sgl != NULL); 147546bee804SJohn Baldwin MPASS(entries > 0); 147646bee804SJohn Baldwin cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE)); 147746bee804SJohn Baldwin if (is_t4(sc)) 147846bee804SJohn Baldwin cmd |= htobe32(F_ULP_MEMIO_ORDER); 147946bee804SJohn Baldwin else 148046bee804SJohn Baldwin cmd |= htobe32(F_T5_ULP_MEMIO_IMM); 148146bee804SJohn Baldwin ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)]; 148246bee804SJohn Baldwin offset = (vm_offset_t)sgl->addr & PAGE_MASK; 148346bee804SJohn Baldwin ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask); 14848d2b4b2eSJohn Baldwin pva = trunc_page((vm_offset_t)sgl->addr); 148546bee804SJohn Baldwin for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) { 148646bee804SJohn Baldwin 148746bee804SJohn Baldwin /* How many page pods are we writing in this cycle */ 148846bee804SJohn Baldwin n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS); 148946bee804SJohn Baldwin MPASS(n > 0); 149046bee804SJohn Baldwin chunk = PPOD_SZ(n); 149146bee804SJohn Baldwin len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16); 149246bee804SJohn Baldwin 149346bee804SJohn Baldwin m = alloc_raw_wr_mbuf(len); 1494f949967cSJohn Baldwin if (m == NULL) 149546bee804SJohn Baldwin return (ENOMEM); 149646bee804SJohn Baldwin ulpmc = mtod(m, struct ulp_mem_io *); 149746bee804SJohn Baldwin 149846bee804SJohn Baldwin INIT_ULPTX_WR(ulpmc, len, 0, toep->tid); 149946bee804SJohn Baldwin ulpmc->cmd = cmd; 150046bee804SJohn Baldwin ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32)); 150146bee804SJohn Baldwin ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16)); 150246bee804SJohn Baldwin ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5)); 150346bee804SJohn Baldwin 150446bee804SJohn Baldwin ulpsc = (struct ulptx_idata *)(ulpmc + 1); 150546bee804SJohn Baldwin ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM)); 150646bee804SJohn Baldwin ulpsc->len = htobe32(chunk); 150746bee804SJohn Baldwin 150846bee804SJohn Baldwin ppod = (struct pagepod *)(ulpsc + 1); 150946bee804SJohn Baldwin for (j = 0; j < n; i++, j++, ppod++) { 151046bee804SJohn Baldwin ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID | 151146bee804SJohn Baldwin V_PPOD_TID(toep->tid) | 151246bee804SJohn Baldwin 
(prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ))); 151346bee804SJohn Baldwin ppod->len_offset = htobe64(V_PPOD_LEN(xferlen) | 151446bee804SJohn Baldwin V_PPOD_OFST(offset)); 151546bee804SJohn Baldwin ppod->rsvd = 0; 151646bee804SJohn Baldwin 151746bee804SJohn Baldwin for (k = 0; k < nitems(ppod->addr); k++) { 151846bee804SJohn Baldwin if (entries != 0) { 151946bee804SJohn Baldwin pa = pmap_kextract(pva + sg_offset); 152046bee804SJohn Baldwin ppod->addr[k] = htobe64(pa); 152146bee804SJohn Baldwin } else 152246bee804SJohn Baldwin ppod->addr[k] = 0; 152346bee804SJohn Baldwin 152446bee804SJohn Baldwin #if 0 152546bee804SJohn Baldwin CTR5(KTR_CXGBE, 152646bee804SJohn Baldwin "%s: tid %d ppod[%d]->addr[%d] = %p", 152746bee804SJohn Baldwin __func__, toep->tid, i, k, 152844e7472dSJohn Baldwin be64toh(ppod->addr[k])); 152946bee804SJohn Baldwin #endif 153046bee804SJohn Baldwin 153146bee804SJohn Baldwin /* 153246bee804SJohn Baldwin * If this is the last entry in a pod, 153346bee804SJohn Baldwin * reuse the same entry for first address 153446bee804SJohn Baldwin * in the next pod. 153546bee804SJohn Baldwin */ 153646bee804SJohn Baldwin if (k + 1 == nitems(ppod->addr)) 153746bee804SJohn Baldwin break; 153846bee804SJohn Baldwin 153946bee804SJohn Baldwin /* 154046bee804SJohn Baldwin * Don't move to the next DDP page if the 154146bee804SJohn Baldwin * sgl is already finished. 154246bee804SJohn Baldwin */ 154346bee804SJohn Baldwin if (entries == 0) 154446bee804SJohn Baldwin continue; 154546bee804SJohn Baldwin 154646bee804SJohn Baldwin sg_offset += ddp_pgsz; 154746bee804SJohn Baldwin if (sg_offset == sgl->len) { 154846bee804SJohn Baldwin /* 154946bee804SJohn Baldwin * This sgl entry is done. Go 155046bee804SJohn Baldwin * to the next. 155146bee804SJohn Baldwin */ 155246bee804SJohn Baldwin entries--; 155346bee804SJohn Baldwin sgl++; 155446bee804SJohn Baldwin sg_offset = 0; 155546bee804SJohn Baldwin if (entries != 0) 155646bee804SJohn Baldwin pva = trunc_page( 155746bee804SJohn Baldwin (vm_offset_t)sgl->addr); 155846bee804SJohn Baldwin } 155946bee804SJohn Baldwin } 156046bee804SJohn Baldwin } 156146bee804SJohn Baldwin 1562f949967cSJohn Baldwin mbufq_enqueue(wrq, m); 156346bee804SJohn Baldwin } 156446bee804SJohn Baldwin 156546bee804SJohn Baldwin return (0); 156646bee804SJohn Baldwin } 156746bee804SJohn Baldwin 1568dc964385SJohn Baldwin /* 1569eeacb3b0SMark Johnston * Prepare a pageset for DDP. This sets up page pods. 
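 * Page pods are allocated on first use and written to card memory
 * only once per pageset; both steps are skipped when a pageset is
 * reused.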
1570dc964385SJohn Baldwin */ 1571e682d02eSNavdeep Parhar static int 1572dc964385SJohn Baldwin prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps) 1573e682d02eSNavdeep Parhar { 1574dc964385SJohn Baldwin struct tom_data *td = sc->tom_softc; 1575e682d02eSNavdeep Parhar 1576968267fdSNavdeep Parhar if (ps->prsv.prsv_nppods == 0 && 15772beaefe8SJohn Baldwin t4_alloc_page_pods_for_ps(&td->pr, ps) != 0) { 1578e682d02eSNavdeep Parhar return (0); 1579e682d02eSNavdeep Parhar } 1580dc964385SJohn Baldwin if (!(ps->flags & PS_PPODS_WRITTEN) && 1581968267fdSNavdeep Parhar t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) { 1582dc964385SJohn Baldwin return (0); 1583dc964385SJohn Baldwin } 1584dc964385SJohn Baldwin 1585dc964385SJohn Baldwin return (1); 1586dc964385SJohn Baldwin } 1587e682d02eSNavdeep Parhar 1588968267fdSNavdeep Parhar int 1589968267fdSNavdeep Parhar t4_init_ppod_region(struct ppod_region *pr, struct t4_range *r, u_int psz, 1590968267fdSNavdeep Parhar const char *name) 1591e682d02eSNavdeep Parhar { 1592515b36c5SNavdeep Parhar int i; 1593515b36c5SNavdeep Parhar 1594968267fdSNavdeep Parhar MPASS(pr != NULL); 1595968267fdSNavdeep Parhar MPASS(r->size > 0); 1596515b36c5SNavdeep Parhar 1597968267fdSNavdeep Parhar pr->pr_start = r->start; 1598968267fdSNavdeep Parhar pr->pr_len = r->size; 1599968267fdSNavdeep Parhar pr->pr_page_shift[0] = 12 + G_HPZ0(psz); 1600968267fdSNavdeep Parhar pr->pr_page_shift[1] = 12 + G_HPZ1(psz); 1601968267fdSNavdeep Parhar pr->pr_page_shift[2] = 12 + G_HPZ2(psz); 1602968267fdSNavdeep Parhar pr->pr_page_shift[3] = 12 + G_HPZ3(psz); 1603968267fdSNavdeep Parhar 1604968267fdSNavdeep Parhar /* The SGL -> page pod algorithm requires the sizes to be in order. */ 1605968267fdSNavdeep Parhar for (i = 1; i < nitems(pr->pr_page_shift); i++) { 1606968267fdSNavdeep Parhar if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1]) 1607968267fdSNavdeep Parhar return (ENXIO); 1608515b36c5SNavdeep Parhar } 1609e682d02eSNavdeep Parhar 1610968267fdSNavdeep Parhar pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG); 1611968267fdSNavdeep Parhar pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask; 1612968267fdSNavdeep Parhar if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0) 1613968267fdSNavdeep Parhar return (ENXIO); 1614968267fdSNavdeep Parhar pr->pr_alias_shift = fls(pr->pr_tag_mask); 1615968267fdSNavdeep Parhar pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1); 1616968267fdSNavdeep Parhar 1617968267fdSNavdeep Parhar pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0, 1618968267fdSNavdeep Parhar M_FIRSTFIT | M_NOWAIT); 1619968267fdSNavdeep Parhar if (pr->pr_arena == NULL) 1620968267fdSNavdeep Parhar return (ENOMEM); 1621968267fdSNavdeep Parhar 1622968267fdSNavdeep Parhar return (0); 1623e682d02eSNavdeep Parhar } 1624e682d02eSNavdeep Parhar 1625e682d02eSNavdeep Parhar void 1626968267fdSNavdeep Parhar t4_free_ppod_region(struct ppod_region *pr) 1627e682d02eSNavdeep Parhar { 1628e682d02eSNavdeep Parhar 1629968267fdSNavdeep Parhar MPASS(pr != NULL); 1630968267fdSNavdeep Parhar 1631968267fdSNavdeep Parhar if (pr->pr_arena) 1632968267fdSNavdeep Parhar vmem_destroy(pr->pr_arena); 1633968267fdSNavdeep Parhar bzero(pr, sizeof(*pr)); 1634e682d02eSNavdeep Parhar } 1635e682d02eSNavdeep Parhar 1636e682d02eSNavdeep Parhar static int 1637dc964385SJohn Baldwin pscmp(struct pageset *ps, struct vmspace *vm, vm_offset_t start, int npages, 1638dc964385SJohn Baldwin int pgoff, int len) 1639e682d02eSNavdeep Parhar { 1640e682d02eSNavdeep 
Parhar
164191a65e2fSJohn Baldwin if (ps->start != start || ps->npages != npages ||
164291a65e2fSJohn Baldwin ps->offset != pgoff || ps->len != len)
1643dc964385SJohn Baldwin return (1);
1644dc964385SJohn Baldwin 
1645dc964385SJohn Baldwin return (ps->vm != vm || ps->vm_timestamp != vm->vm_map.timestamp);
1646e682d02eSNavdeep Parhar }
1647e682d02eSNavdeep Parhar 
1648dc964385SJohn Baldwin static int
1649dc964385SJohn Baldwin hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps)
1650688dba74SNavdeep Parhar {
1651dc964385SJohn Baldwin struct vmspace *vm;
1652dc964385SJohn Baldwin vm_map_t map;
1653dc964385SJohn Baldwin vm_offset_t start, end, pgoff;
1654dc964385SJohn Baldwin struct pageset *ps;
1655dc964385SJohn Baldwin int n;
1656688dba74SNavdeep Parhar 
1657dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep);
1658688dba74SNavdeep Parhar 
1659dc964385SJohn Baldwin /*
1660dc964385SJohn Baldwin * The AIO subsystem will cancel and drain all requests before
1661dc964385SJohn Baldwin * permitting a process to exit or exec, so p_vmspace should
1662dc964385SJohn Baldwin * be stable here.
1663dc964385SJohn Baldwin */
1664dc964385SJohn Baldwin vm = job->userproc->p_vmspace;
1665dc964385SJohn Baldwin map = &vm->vm_map;
1666dc964385SJohn Baldwin start = (uintptr_t)job->uaiocb.aio_buf;
1667dc964385SJohn Baldwin pgoff = start & PAGE_MASK;
1668dc964385SJohn Baldwin end = round_page(start + job->uaiocb.aio_nbytes);
1669dc964385SJohn Baldwin start = trunc_page(start);
1670dc964385SJohn Baldwin 
1671dc964385SJohn Baldwin if (end - start > MAX_DDP_BUFFER_SIZE) {
1672dc964385SJohn Baldwin /*
1673dc964385SJohn Baldwin * Truncate the request to a short read.
1674dc964385SJohn Baldwin * Alternatively, we could DDP in chunks to the larger
1675dc964385SJohn Baldwin * buffer, but that would be quite a bit more work.
1676dc964385SJohn Baldwin *
1677dc964385SJohn Baldwin * When truncating, round the request down to avoid
1678dc964385SJohn Baldwin * crossing a cache line on the final transaction.
1679dc964385SJohn Baldwin */
1680dc964385SJohn Baldwin end = rounddown2(start + MAX_DDP_BUFFER_SIZE, CACHE_LINE_SIZE);
1681dc964385SJohn Baldwin #ifdef VERBOSE_TRACES
1682dc964385SJohn Baldwin CTR4(KTR_CXGBE, "%s: tid %d, truncating size from %lu to %lu",
1683dc964385SJohn Baldwin __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes,
1684dc964385SJohn Baldwin (unsigned long)(end - (start + pgoff)));
1685dc964385SJohn Baldwin #endif
1686dc964385SJohn Baldwin job->uaiocb.aio_nbytes = end - (start + pgoff);
1687dc964385SJohn Baldwin end = round_page(end);
1688688dba74SNavdeep Parhar }
1689688dba74SNavdeep Parhar 
1690dc964385SJohn Baldwin n = atop(end - start);
1691688dba74SNavdeep Parhar 
1692dc964385SJohn Baldwin /*
1693dc964385SJohn Baldwin * Try to reuse a cached pageset.
1694dc964385SJohn Baldwin */
1695125d42feSJohn Baldwin TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) {
1696dc964385SJohn Baldwin if (pscmp(ps, vm, start, n, pgoff,
1697dc964385SJohn Baldwin job->uaiocb.aio_nbytes) == 0) {
1698125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
1699125d42feSJohn Baldwin toep->ddp.cached_count--;
1700dc964385SJohn Baldwin *pps = ps;
1701dc964385SJohn Baldwin return (0);
1702dc964385SJohn Baldwin }
1703688dba74SNavdeep Parhar }
1704688dba74SNavdeep Parhar 
1705e682d02eSNavdeep Parhar /*
1706dc964385SJohn Baldwin * If there are too many cached pagesets to create a new one,
1707dc964385SJohn Baldwin * free a pageset before creating a new one.
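 * The pageset at the tail of the cached list is the one released.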
1708e682d02eSNavdeep Parhar */ 1709125d42feSJohn Baldwin KASSERT(toep->ddp.active_count + toep->ddp.cached_count <= 1710125d42feSJohn Baldwin nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__)); 1711125d42feSJohn Baldwin if (toep->ddp.active_count + toep->ddp.cached_count == 1712125d42feSJohn Baldwin nitems(toep->ddp.db)) { 1713125d42feSJohn Baldwin KASSERT(toep->ddp.cached_count > 0, 1714dc964385SJohn Baldwin ("no cached pageset to free")); 1715125d42feSJohn Baldwin ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq); 1716125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link); 1717125d42feSJohn Baldwin toep->ddp.cached_count--; 1718dc964385SJohn Baldwin free_pageset(toep->td, ps); 1719dc964385SJohn Baldwin } 1720dc964385SJohn Baldwin DDP_UNLOCK(toep); 1721e682d02eSNavdeep Parhar 1722dc964385SJohn Baldwin /* Create a new pageset. */ 1723dc964385SJohn Baldwin ps = malloc(sizeof(*ps) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK | 1724dc964385SJohn Baldwin M_ZERO); 1725dc964385SJohn Baldwin ps->pages = (vm_page_t *)(ps + 1); 1726dc964385SJohn Baldwin ps->vm_timestamp = map->timestamp; 1727dc964385SJohn Baldwin ps->npages = vm_fault_quick_hold_pages(map, start, end - start, 1728dc964385SJohn Baldwin VM_PROT_WRITE, ps->pages, n); 1729e682d02eSNavdeep Parhar 1730dc964385SJohn Baldwin DDP_LOCK(toep); 1731dc964385SJohn Baldwin if (ps->npages < 0) { 1732dc964385SJohn Baldwin free(ps, M_CXGBE); 1733dc964385SJohn Baldwin return (EFAULT); 1734e682d02eSNavdeep Parhar } 1735e682d02eSNavdeep Parhar 1736dc964385SJohn Baldwin KASSERT(ps->npages == n, ("hold_aio: page count mismatch: %d vs %d", 1737dc964385SJohn Baldwin ps->npages, n)); 1738dc964385SJohn Baldwin 1739dc964385SJohn Baldwin ps->offset = pgoff; 1740dc964385SJohn Baldwin ps->len = job->uaiocb.aio_nbytes; 1741f7db0c95SMark Johnston refcount_acquire(&vm->vm_refcnt); 1742dc964385SJohn Baldwin ps->vm = vm; 174391a65e2fSJohn Baldwin ps->start = start; 1744dc964385SJohn Baldwin 1745dc964385SJohn Baldwin CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d", 1746dc964385SJohn Baldwin __func__, toep->tid, ps, job, ps->npages); 1747dc964385SJohn Baldwin *pps = ps; 1748e682d02eSNavdeep Parhar return (0); 1749e682d02eSNavdeep Parhar } 1750e682d02eSNavdeep Parhar 1751dc964385SJohn Baldwin static void 1752dc964385SJohn Baldwin ddp_complete_all(struct toepcb *toep, int error) 1753e682d02eSNavdeep Parhar { 1754dc964385SJohn Baldwin struct kaiocb *job; 1755e682d02eSNavdeep Parhar 1756dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 1757125d42feSJohn Baldwin while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) { 1758125d42feSJohn Baldwin job = TAILQ_FIRST(&toep->ddp.aiojobq); 1759125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 1760125d42feSJohn Baldwin toep->ddp.waiting_count--; 1761dc964385SJohn Baldwin if (aio_clear_cancel_function(job)) 1762dc964385SJohn Baldwin ddp_complete_one(job, error); 1763dc964385SJohn Baldwin } 1764dc964385SJohn Baldwin } 1765dc964385SJohn Baldwin 1766dc964385SJohn Baldwin static void 1767dc964385SJohn Baldwin aio_ddp_cancel_one(struct kaiocb *job) 1768dc964385SJohn Baldwin { 1769dc964385SJohn Baldwin long copied; 1770dc964385SJohn Baldwin 1771dc964385SJohn Baldwin /* 1772dc964385SJohn Baldwin * If this job had copied data out of the socket buffer before 1773dc964385SJohn Baldwin * it was cancelled, report it as a short read rather than an 1774dc964385SJohn Baldwin * error. 
1775dc964385SJohn Baldwin */ 1776fe0bdd1dSJohn Baldwin copied = job->aio_received; 1777dc964385SJohn Baldwin if (copied != 0) 1778dc964385SJohn Baldwin aio_complete(job, copied, 0); 1779e682d02eSNavdeep Parhar else 1780dc964385SJohn Baldwin aio_cancel(job); 1781e682d02eSNavdeep Parhar } 1782e682d02eSNavdeep Parhar 1783dc964385SJohn Baldwin /* 1784dc964385SJohn Baldwin * Called when the main loop wants to requeue a job to retry it later. 1785dc964385SJohn Baldwin * Deals with the race of the job being cancelled while it was being 1786dc964385SJohn Baldwin * examined. 1787dc964385SJohn Baldwin */ 1788dc964385SJohn Baldwin static void 1789dc964385SJohn Baldwin aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job) 1790dc964385SJohn Baldwin { 1791dc964385SJohn Baldwin 1792dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 1793125d42feSJohn Baldwin if (!(toep->ddp.flags & DDP_DEAD) && 1794dc964385SJohn Baldwin aio_set_cancel_function(job, t4_aio_cancel_queued)) { 1795125d42feSJohn Baldwin TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list); 1796125d42feSJohn Baldwin toep->ddp.waiting_count++; 1797dc964385SJohn Baldwin } else 1798dc964385SJohn Baldwin aio_ddp_cancel_one(job); 1799e682d02eSNavdeep Parhar } 1800e682d02eSNavdeep Parhar 1801dc964385SJohn Baldwin static void 1802dc964385SJohn Baldwin aio_ddp_requeue(struct toepcb *toep) 1803dc964385SJohn Baldwin { 1804dc964385SJohn Baldwin struct adapter *sc = td_adapter(toep->td); 1805dc964385SJohn Baldwin struct socket *so; 1806dc964385SJohn Baldwin struct sockbuf *sb; 1807dc964385SJohn Baldwin struct inpcb *inp; 1808dc964385SJohn Baldwin struct kaiocb *job; 1809dc964385SJohn Baldwin struct ddp_buffer *db; 1810dc964385SJohn Baldwin size_t copied, offset, resid; 1811dc964385SJohn Baldwin struct pageset *ps; 1812dc964385SJohn Baldwin struct mbuf *m; 1813dc964385SJohn Baldwin uint64_t ddp_flags, ddp_flags_mask; 1814dc964385SJohn Baldwin struct wrqe *wr; 1815dc964385SJohn Baldwin int buf_flag, db_idx, error; 1816dc964385SJohn Baldwin 1817dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 1818dc964385SJohn Baldwin 1819e682d02eSNavdeep Parhar restart: 1820125d42feSJohn Baldwin if (toep->ddp.flags & DDP_DEAD) { 1821125d42feSJohn Baldwin MPASS(toep->ddp.waiting_count == 0); 1822125d42feSJohn Baldwin MPASS(toep->ddp.active_count == 0); 1823dc964385SJohn Baldwin return; 1824e682d02eSNavdeep Parhar } 1825e682d02eSNavdeep Parhar 1826125d42feSJohn Baldwin if (toep->ddp.waiting_count == 0 || 1827125d42feSJohn Baldwin toep->ddp.active_count == nitems(toep->ddp.db)) { 1828dc964385SJohn Baldwin return; 1829dc964385SJohn Baldwin } 1830dc964385SJohn Baldwin 1831125d42feSJohn Baldwin job = TAILQ_FIRST(&toep->ddp.aiojobq); 1832dc964385SJohn Baldwin so = job->fd_file->f_data; 1833dc964385SJohn Baldwin sb = &so->so_rcv; 1834dc964385SJohn Baldwin SOCKBUF_LOCK(sb); 1835dc964385SJohn Baldwin 1836dc964385SJohn Baldwin /* We will never get anything unless we are or were connected. */ 1837dc964385SJohn Baldwin if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) { 1838dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 1839dc964385SJohn Baldwin ddp_complete_all(toep, ENOTCONN); 1840dc964385SJohn Baldwin return; 1841dc964385SJohn Baldwin } 1842dc964385SJohn Baldwin 1843125d42feSJohn Baldwin KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0, 1844dc964385SJohn Baldwin ("%s: pending sockbuf data and DDP is active", __func__)); 1845dc964385SJohn Baldwin 1846e682d02eSNavdeep Parhar /* Abort if socket has reported problems. 
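 * A pending error is acted on only once the receive buffer has
 * drained; until then any buffered data is still copied out to jobs.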
*/ 1847dc964385SJohn Baldwin /* XXX: Wait for any queued DDP's to finish and/or flush them? */ 1848dc964385SJohn Baldwin if (so->so_error && sbavail(sb) == 0) { 1849125d42feSJohn Baldwin toep->ddp.waiting_count--; 1850125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 1851dc964385SJohn Baldwin if (!aio_clear_cancel_function(job)) { 1852dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 1853dc964385SJohn Baldwin goto restart; 1854dc964385SJohn Baldwin } 1855dc964385SJohn Baldwin 1856dc964385SJohn Baldwin /* 1857dc964385SJohn Baldwin * If this job has previously copied some data, report 1858dc964385SJohn Baldwin * a short read and leave the error to be reported by 1859dc964385SJohn Baldwin * a future request. 1860dc964385SJohn Baldwin */ 1861fe0bdd1dSJohn Baldwin copied = job->aio_received; 1862dc964385SJohn Baldwin if (copied != 0) { 1863dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 1864dc964385SJohn Baldwin aio_complete(job, copied, 0); 1865dc964385SJohn Baldwin goto restart; 1866dc964385SJohn Baldwin } 1867e682d02eSNavdeep Parhar error = so->so_error; 1868e682d02eSNavdeep Parhar so->so_error = 0; 1869dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 1870dc964385SJohn Baldwin aio_complete(job, -1, error); 1871dc964385SJohn Baldwin goto restart; 1872e682d02eSNavdeep Parhar } 1873e682d02eSNavdeep Parhar 1874e682d02eSNavdeep Parhar /* 1875dc964385SJohn Baldwin * Door is closed. If there is pending data in the socket buffer, 1876dc964385SJohn Baldwin * deliver it. If there are pending DDP requests, wait for those 1877dc964385SJohn Baldwin * to complete. Once they have completed, return EOF reads. 1878e682d02eSNavdeep Parhar */ 1879dc964385SJohn Baldwin if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) { 1880dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 1881125d42feSJohn Baldwin if (toep->ddp.active_count != 0) 1882dc964385SJohn Baldwin return; 1883dc964385SJohn Baldwin ddp_complete_all(toep, 0); 1884dc964385SJohn Baldwin return; 1885e682d02eSNavdeep Parhar } 1886dc964385SJohn Baldwin 1887dc964385SJohn Baldwin /* 1888dc964385SJohn Baldwin * If DDP is not enabled and there is no pending socket buffer 1889dc964385SJohn Baldwin * data, try to enable DDP. 1890dc964385SJohn Baldwin */ 1891125d42feSJohn Baldwin if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) { 1892dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 1893dc964385SJohn Baldwin 1894dc964385SJohn Baldwin /* 1895dc964385SJohn Baldwin * Wait for the card to ACK that DDP is enabled before 1896dc964385SJohn Baldwin * queueing any buffers. Currently this waits for an 1897dc964385SJohn Baldwin * indicate to arrive. This could use a TCB_SET_FIELD_RPL 1898dc964385SJohn Baldwin * message to know that DDP was enabled instead of waiting 1899dc964385SJohn Baldwin * for the indicate which would avoid copying the indicate 1900dc964385SJohn Baldwin * if no data is pending. 1901dc964385SJohn Baldwin * 1902dc964385SJohn Baldwin * XXX: Might want to limit the indicate size to the size 1903dc964385SJohn Baldwin * of the first queued request. 1904dc964385SJohn Baldwin */ 1905125d42feSJohn Baldwin if ((toep->ddp.flags & DDP_SC_REQ) == 0) 1906dc964385SJohn Baldwin enable_ddp(sc, toep); 1907dc964385SJohn Baldwin return; 1908e682d02eSNavdeep Parhar } 1909dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 1910dc964385SJohn Baldwin 1911dc964385SJohn Baldwin /* 1912dc964385SJohn Baldwin * If another thread is queueing a buffer for DDP, let it 1913dc964385SJohn Baldwin * drain any work and return. 
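 * (toep->ddp.queueing identifies the job currently being prepared;
 * it is cleared once that job's buffer is posted or the job is
 * requeued or completed.)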
1914dc964385SJohn Baldwin */ 1915125d42feSJohn Baldwin if (toep->ddp.queueing != NULL) 1916dc964385SJohn Baldwin return; 1917dc964385SJohn Baldwin 1918dc964385SJohn Baldwin /* Take the next job to prep it for DDP. */ 1919125d42feSJohn Baldwin toep->ddp.waiting_count--; 1920125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 1921dc964385SJohn Baldwin if (!aio_clear_cancel_function(job)) 1922e682d02eSNavdeep Parhar goto restart; 1923125d42feSJohn Baldwin toep->ddp.queueing = job; 1924e682d02eSNavdeep Parhar 1925dc964385SJohn Baldwin /* NB: This drops DDP_LOCK while it holds the backing VM pages. */ 1926dc964385SJohn Baldwin error = hold_aio(toep, job, &ps); 1927dc964385SJohn Baldwin if (error != 0) { 1928dc964385SJohn Baldwin ddp_complete_one(job, error); 1929125d42feSJohn Baldwin toep->ddp.queueing = NULL; 1930e682d02eSNavdeep Parhar goto restart; 1931dc964385SJohn Baldwin } 1932e682d02eSNavdeep Parhar 1933dc964385SJohn Baldwin SOCKBUF_LOCK(sb); 1934dc964385SJohn Baldwin if (so->so_error && sbavail(sb) == 0) { 1935fe0bdd1dSJohn Baldwin copied = job->aio_received; 1936dc964385SJohn Baldwin if (copied != 0) { 1937dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 1938dc964385SJohn Baldwin recycle_pageset(toep, ps); 1939dc964385SJohn Baldwin aio_complete(job, copied, 0); 1940125d42feSJohn Baldwin toep->ddp.queueing = NULL; 1941dc964385SJohn Baldwin goto restart; 1942dc964385SJohn Baldwin } 1943e682d02eSNavdeep Parhar 1944dc964385SJohn Baldwin error = so->so_error; 1945dc964385SJohn Baldwin so->so_error = 0; 1946dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 1947dc964385SJohn Baldwin recycle_pageset(toep, ps); 1948dc964385SJohn Baldwin aio_complete(job, -1, error); 1949125d42feSJohn Baldwin toep->ddp.queueing = NULL; 1950dc964385SJohn Baldwin goto restart; 1951e682d02eSNavdeep Parhar } 1952e682d02eSNavdeep Parhar 1953dc964385SJohn Baldwin if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) { 1954dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 1955dc964385SJohn Baldwin recycle_pageset(toep, ps); 1956125d42feSJohn Baldwin if (toep->ddp.active_count != 0) { 1957dc964385SJohn Baldwin /* 1958dc964385SJohn Baldwin * The door is closed, but there are still pending 1959dc964385SJohn Baldwin * DDP buffers. Requeue. These jobs will all be 1960dc964385SJohn Baldwin * completed once those buffers drain. 1961dc964385SJohn Baldwin */ 1962dc964385SJohn Baldwin aio_ddp_requeue_one(toep, job); 1963125d42feSJohn Baldwin toep->ddp.queueing = NULL; 1964dc964385SJohn Baldwin return; 1965e682d02eSNavdeep Parhar } 1966dc964385SJohn Baldwin ddp_complete_one(job, 0); 1967dc964385SJohn Baldwin ddp_complete_all(toep, 0); 1968125d42feSJohn Baldwin toep->ddp.queueing = NULL; 1969dc964385SJohn Baldwin return; 1970e682d02eSNavdeep Parhar } 1971dc964385SJohn Baldwin 1972dc964385SJohn Baldwin sbcopy: 1973dc964385SJohn Baldwin /* 1974dc964385SJohn Baldwin * If the toep is dead, there shouldn't be any data in the socket 1975dc964385SJohn Baldwin * buffer, so the above case should have handled this. 1976dc964385SJohn Baldwin */ 1977125d42feSJohn Baldwin MPASS(!(toep->ddp.flags & DDP_DEAD)); 1978dc964385SJohn Baldwin 1979dc964385SJohn Baldwin /* 1980dc964385SJohn Baldwin * If there is pending data in the socket buffer (either 1981dc964385SJohn Baldwin * from before the requests were queued or a DDP indicate), 1982dc964385SJohn Baldwin * copy those mbufs out directly. 
1983dc964385SJohn Baldwin */ 1984dc964385SJohn Baldwin copied = 0; 1985fe0bdd1dSJohn Baldwin offset = ps->offset + job->aio_received; 1986fe0bdd1dSJohn Baldwin MPASS(job->aio_received <= job->uaiocb.aio_nbytes); 1987fe0bdd1dSJohn Baldwin resid = job->uaiocb.aio_nbytes - job->aio_received; 1988dc964385SJohn Baldwin m = sb->sb_mb; 1989125d42feSJohn Baldwin KASSERT(m == NULL || toep->ddp.active_count == 0, 1990dc964385SJohn Baldwin ("%s: sockbuf data with active DDP", __func__)); 1991dc964385SJohn Baldwin while (m != NULL && resid > 0) { 1992dc964385SJohn Baldwin struct iovec iov[1]; 1993dc964385SJohn Baldwin struct uio uio; 199439d5cbdcSNavdeep Parhar #ifdef INVARIANTS 1995dc964385SJohn Baldwin int error; 199639d5cbdcSNavdeep Parhar #endif 1997dc964385SJohn Baldwin 1998dc964385SJohn Baldwin iov[0].iov_base = mtod(m, void *); 1999dc964385SJohn Baldwin iov[0].iov_len = m->m_len; 2000dc964385SJohn Baldwin if (iov[0].iov_len > resid) 2001dc964385SJohn Baldwin iov[0].iov_len = resid; 2002dc964385SJohn Baldwin uio.uio_iov = iov; 2003dc964385SJohn Baldwin uio.uio_iovcnt = 1; 2004dc964385SJohn Baldwin uio.uio_offset = 0; 2005dc964385SJohn Baldwin uio.uio_resid = iov[0].iov_len; 2006dc964385SJohn Baldwin uio.uio_segflg = UIO_SYSSPACE; 2007dc964385SJohn Baldwin uio.uio_rw = UIO_WRITE; 200839d5cbdcSNavdeep Parhar #ifdef INVARIANTS 2009dc964385SJohn Baldwin error = uiomove_fromphys(ps->pages, offset + copied, 2010dc964385SJohn Baldwin uio.uio_resid, &uio); 201139d5cbdcSNavdeep Parhar #else 201239d5cbdcSNavdeep Parhar uiomove_fromphys(ps->pages, offset + copied, uio.uio_resid, &uio); 201339d5cbdcSNavdeep Parhar #endif 2014dc964385SJohn Baldwin MPASS(error == 0 && uio.uio_resid == 0); 2015dc964385SJohn Baldwin copied += uio.uio_offset; 2016dc964385SJohn Baldwin resid -= uio.uio_offset; 2017dc964385SJohn Baldwin m = m->m_next; 2018dc964385SJohn Baldwin } 2019dc964385SJohn Baldwin if (copied != 0) { 2020dc964385SJohn Baldwin sbdrop_locked(sb, copied); 2021fe0bdd1dSJohn Baldwin job->aio_received += copied; 2022b1012d80SJohn Baldwin job->msgrcv = 1; 2023fe0bdd1dSJohn Baldwin copied = job->aio_received; 2024dc964385SJohn Baldwin inp = sotoinpcb(so); 2025dc964385SJohn Baldwin if (!INP_TRY_WLOCK(inp)) { 2026dc964385SJohn Baldwin /* 2027dc964385SJohn Baldwin * The reference on the socket file descriptor in 2028dc964385SJohn Baldwin * the AIO job should keep 'sb' and 'inp' stable. 2029dc964385SJohn Baldwin * Our caller has a reference on the 'toep' that 2030dc964385SJohn Baldwin * keeps it stable. 2031dc964385SJohn Baldwin */ 2032dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2033dc964385SJohn Baldwin DDP_UNLOCK(toep); 2034dc964385SJohn Baldwin INP_WLOCK(inp); 2035dc964385SJohn Baldwin DDP_LOCK(toep); 2036dc964385SJohn Baldwin SOCKBUF_LOCK(sb); 2037dc964385SJohn Baldwin 2038dc964385SJohn Baldwin /* 2039dc964385SJohn Baldwin * If the socket has been closed, we should detect 2040dc964385SJohn Baldwin * that and complete this request if needed on 2041dc964385SJohn Baldwin * the next trip around the loop. 2042dc964385SJohn Baldwin */ 2043dc964385SJohn Baldwin } 2044dc964385SJohn Baldwin t4_rcvd_locked(&toep->td->tod, intotcpcb(inp)); 2045dc964385SJohn Baldwin INP_WUNLOCK(inp); 2046125d42feSJohn Baldwin if (resid == 0 || toep->ddp.flags & DDP_DEAD) { 2047dc964385SJohn Baldwin /* 2048dc964385SJohn Baldwin * We filled the entire buffer with socket 2049dc964385SJohn Baldwin * data, DDP is not being used, or the socket 2050dc964385SJohn Baldwin * is being shut down, so complete the 2051dc964385SJohn Baldwin * request. 
2052dc964385SJohn Baldwin */ 2053dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2054dc964385SJohn Baldwin recycle_pageset(toep, ps); 2055dc964385SJohn Baldwin aio_complete(job, copied, 0); 2056125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2057dc964385SJohn Baldwin goto restart; 2058dc964385SJohn Baldwin } 2059dc964385SJohn Baldwin 2060dc964385SJohn Baldwin /* 2061dc964385SJohn Baldwin * If DDP is not enabled, requeue this request and restart. 2062dc964385SJohn Baldwin * This will either enable DDP or wait for more data to 2063dc964385SJohn Baldwin * arrive on the socket buffer. 2064dc964385SJohn Baldwin */ 2065125d42feSJohn Baldwin if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) { 2066dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2067dc964385SJohn Baldwin recycle_pageset(toep, ps); 2068dc964385SJohn Baldwin aio_ddp_requeue_one(toep, job); 2069125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2070dc964385SJohn Baldwin goto restart; 2071dc964385SJohn Baldwin } 2072dc964385SJohn Baldwin 2073dc964385SJohn Baldwin /* 2074dc964385SJohn Baldwin * An indicate might have arrived and been added to 2075dc964385SJohn Baldwin * the socket buffer while it was unlocked after the 2076dc964385SJohn Baldwin * copy to lock the INP. If so, restart the copy. 2077dc964385SJohn Baldwin */ 2078dc964385SJohn Baldwin if (sbavail(sb) != 0) 2079dc964385SJohn Baldwin goto sbcopy; 2080dc964385SJohn Baldwin } 2081dc964385SJohn Baldwin SOCKBUF_UNLOCK(sb); 2082dc964385SJohn Baldwin 2083dc964385SJohn Baldwin if (prep_pageset(sc, toep, ps) == 0) { 2084dc964385SJohn Baldwin recycle_pageset(toep, ps); 2085dc964385SJohn Baldwin aio_ddp_requeue_one(toep, job); 2086125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2087dc964385SJohn Baldwin 2088dc964385SJohn Baldwin /* 2089dc964385SJohn Baldwin * XXX: Need to retry this later. Mostly need a trigger 2090dc964385SJohn Baldwin * when page pods are freed up. 2091dc964385SJohn Baldwin */ 2092dc964385SJohn Baldwin printf("%s: prep_pageset failed\n", __func__); 2093dc964385SJohn Baldwin return; 2094dc964385SJohn Baldwin } 2095dc964385SJohn Baldwin 2096dc964385SJohn Baldwin /* Determine which DDP buffer to use. 
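 * (There are two per-connection buffer slots; whichever slot has no
 * job attached is free.)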
*/ 2097125d42feSJohn Baldwin if (toep->ddp.db[0].job == NULL) { 2098dc964385SJohn Baldwin db_idx = 0; 2099e682d02eSNavdeep Parhar } else { 2100125d42feSJohn Baldwin MPASS(toep->ddp.db[1].job == NULL); 2101dc964385SJohn Baldwin db_idx = 1; 2102e682d02eSNavdeep Parhar } 2103e682d02eSNavdeep Parhar 2104dc964385SJohn Baldwin ddp_flags = 0; 2105dc964385SJohn Baldwin ddp_flags_mask = 0; 2106dc964385SJohn Baldwin if (db_idx == 0) { 2107dc964385SJohn Baldwin ddp_flags |= V_TF_DDP_BUF0_VALID(1); 2108dc964385SJohn Baldwin if (so->so_state & SS_NBIO) 2109dc964385SJohn Baldwin ddp_flags |= V_TF_DDP_BUF0_FLUSH(1); 2110dc964385SJohn Baldwin ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) | 2111dc964385SJohn Baldwin V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) | 2112dc964385SJohn Baldwin V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1); 2113dc964385SJohn Baldwin buf_flag = DDP_BUF0_ACTIVE; 2114dc964385SJohn Baldwin } else { 2115dc964385SJohn Baldwin ddp_flags |= V_TF_DDP_BUF1_VALID(1); 2116dc964385SJohn Baldwin if (so->so_state & SS_NBIO) 2117dc964385SJohn Baldwin ddp_flags |= V_TF_DDP_BUF1_FLUSH(1); 2118dc964385SJohn Baldwin ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) | 2119dc964385SJohn Baldwin V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) | 2120dc964385SJohn Baldwin V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1); 2121dc964385SJohn Baldwin buf_flag = DDP_BUF1_ACTIVE; 2122e682d02eSNavdeep Parhar } 2123125d42feSJohn Baldwin MPASS((toep->ddp.flags & buf_flag) == 0); 2124125d42feSJohn Baldwin if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) { 2125dc964385SJohn Baldwin MPASS(db_idx == 0); 2126125d42feSJohn Baldwin MPASS(toep->ddp.active_id == -1); 2127125d42feSJohn Baldwin MPASS(toep->ddp.active_count == 0); 2128dc964385SJohn Baldwin ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1); 2129e682d02eSNavdeep Parhar } 2130e682d02eSNavdeep Parhar 2131e682d02eSNavdeep Parhar /* 2132dc964385SJohn Baldwin * The TID for this connection should still be valid. If DDP_DEAD 2133dc964385SJohn Baldwin * is set, SBS_CANTRCVMORE should be set, so we shouldn't be 2134dc964385SJohn Baldwin * this far anyway. Even if the socket is closing on the other 2135dc964385SJohn Baldwin * end, the AIO job holds a reference on this end of the socket 2136dc964385SJohn Baldwin * which will keep it open and keep the TCP PCB attached until 2137dc964385SJohn Baldwin * after the job is completed. 2138e682d02eSNavdeep Parhar */ 2139fe0bdd1dSJohn Baldwin wr = mk_update_tcb_for_ddp(sc, toep, db_idx, ps, job->aio_received, 2140fe0bdd1dSJohn Baldwin ddp_flags, ddp_flags_mask); 2141dc964385SJohn Baldwin if (wr == NULL) { 2142dc964385SJohn Baldwin recycle_pageset(toep, ps); 2143dc964385SJohn Baldwin aio_ddp_requeue_one(toep, job); 2144125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2145dc964385SJohn Baldwin 2146dc964385SJohn Baldwin /* 2147dc964385SJohn Baldwin * XXX: Need a way to kick a retry here. 2148dc964385SJohn Baldwin * 2149dc964385SJohn Baldwin * XXX: We know the fixed size needed and could 2150dc964385SJohn Baldwin * preallocate this using a blocking request at the 2151dc964385SJohn Baldwin * start of the task to avoid having to handle this 2152dc964385SJohn Baldwin * edge case. 
2153dc964385SJohn Baldwin */ 2154dc964385SJohn Baldwin printf("%s: mk_update_tcb_for_ddp failed\n", __func__); 2155dc964385SJohn Baldwin return; 2156dc964385SJohn Baldwin } 2157dc964385SJohn Baldwin 2158dc964385SJohn Baldwin if (!aio_set_cancel_function(job, t4_aio_cancel_active)) { 2159dc964385SJohn Baldwin free_wrqe(wr); 2160dc964385SJohn Baldwin recycle_pageset(toep, ps); 2161dc964385SJohn Baldwin aio_ddp_cancel_one(job); 2162125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2163e682d02eSNavdeep Parhar goto restart; 2164e682d02eSNavdeep Parhar } 2165e682d02eSNavdeep Parhar 2166dc964385SJohn Baldwin #ifdef VERBOSE_TRACES 21678674e626SNavdeep Parhar CTR6(KTR_CXGBE, 21688674e626SNavdeep Parhar "%s: tid %u, scheduling %p for DDP[%d] (flags %#lx/%#lx)", __func__, 21698674e626SNavdeep Parhar toep->tid, job, db_idx, ddp_flags, ddp_flags_mask); 2170dc964385SJohn Baldwin #endif 2171dc964385SJohn Baldwin /* Give the chip the go-ahead. */ 2172dc964385SJohn Baldwin t4_wrq_tx(sc, wr); 2173125d42feSJohn Baldwin db = &toep->ddp.db[db_idx]; 2174dc964385SJohn Baldwin db->cancel_pending = 0; 2175dc964385SJohn Baldwin db->job = job; 2176dc964385SJohn Baldwin db->ps = ps; 2177125d42feSJohn Baldwin toep->ddp.queueing = NULL; 2178125d42feSJohn Baldwin toep->ddp.flags |= buf_flag; 2179125d42feSJohn Baldwin toep->ddp.active_count++; 2180125d42feSJohn Baldwin if (toep->ddp.active_count == 1) { 2181125d42feSJohn Baldwin MPASS(toep->ddp.active_id == -1); 2182125d42feSJohn Baldwin toep->ddp.active_id = db_idx; 2183dc964385SJohn Baldwin CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__, 2184125d42feSJohn Baldwin toep->ddp.active_id); 2185dc964385SJohn Baldwin } 2186dc964385SJohn Baldwin goto restart; 2187dc964385SJohn Baldwin } 2188dc964385SJohn Baldwin 2189dc964385SJohn Baldwin void 2190dc964385SJohn Baldwin ddp_queue_toep(struct toepcb *toep) 2191dc964385SJohn Baldwin { 2192dc964385SJohn Baldwin 2193dc964385SJohn Baldwin DDP_ASSERT_LOCKED(toep); 2194125d42feSJohn Baldwin if (toep->ddp.flags & DDP_TASK_ACTIVE) 2195dc964385SJohn Baldwin return; 2196125d42feSJohn Baldwin toep->ddp.flags |= DDP_TASK_ACTIVE; 2197dc964385SJohn Baldwin hold_toepcb(toep); 2198125d42feSJohn Baldwin soaio_enqueue(&toep->ddp.requeue_task); 2199dc964385SJohn Baldwin } 2200dc964385SJohn Baldwin 2201dc964385SJohn Baldwin static void 2202dc964385SJohn Baldwin aio_ddp_requeue_task(void *context, int pending) 2203dc964385SJohn Baldwin { 2204dc964385SJohn Baldwin struct toepcb *toep = context; 2205dc964385SJohn Baldwin 2206dc964385SJohn Baldwin DDP_LOCK(toep); 2207dc964385SJohn Baldwin aio_ddp_requeue(toep); 2208125d42feSJohn Baldwin toep->ddp.flags &= ~DDP_TASK_ACTIVE; 2209dc964385SJohn Baldwin DDP_UNLOCK(toep); 2210dc964385SJohn Baldwin 2211dc964385SJohn Baldwin free_toepcb(toep); 2212dc964385SJohn Baldwin } 2213dc964385SJohn Baldwin 2214dc964385SJohn Baldwin static void 2215dc964385SJohn Baldwin t4_aio_cancel_active(struct kaiocb *job) 2216dc964385SJohn Baldwin { 2217dc964385SJohn Baldwin struct socket *so = job->fd_file->f_data; 2218e1401f75SGleb Smirnoff struct tcpcb *tp = sototcpcb(so); 2219dc964385SJohn Baldwin struct toepcb *toep = tp->t_toe; 2220dc964385SJohn Baldwin struct adapter *sc = td_adapter(toep->td); 2221dc964385SJohn Baldwin uint64_t valid_flag; 2222dc964385SJohn Baldwin int i; 2223dc964385SJohn Baldwin 2224dc964385SJohn Baldwin DDP_LOCK(toep); 2225dc964385SJohn Baldwin if (aio_cancel_cleared(job)) { 2226dc964385SJohn Baldwin DDP_UNLOCK(toep); 2227dc964385SJohn Baldwin aio_ddp_cancel_one(job); 2228dc964385SJohn Baldwin return; 
2229dc964385SJohn Baldwin } 2230dc964385SJohn Baldwin 2231125d42feSJohn Baldwin for (i = 0; i < nitems(toep->ddp.db); i++) { 2232125d42feSJohn Baldwin if (toep->ddp.db[i].job == job) { 2233dc964385SJohn Baldwin /* Should only ever get one cancel request for a job. */ 2234125d42feSJohn Baldwin MPASS(toep->ddp.db[i].cancel_pending == 0); 2235dc964385SJohn Baldwin 2236dc964385SJohn Baldwin /* 2237dc964385SJohn Baldwin * Invalidate this buffer. It will be 2238dc964385SJohn Baldwin * cancelled or partially completed once the 2239dc964385SJohn Baldwin * card ACKs the invalidate. 2240dc964385SJohn Baldwin */ 2241dc964385SJohn Baldwin valid_flag = i == 0 ? V_TF_DDP_BUF0_VALID(1) : 2242dc964385SJohn Baldwin V_TF_DDP_BUF1_VALID(1); 2243edf95febSJohn Baldwin t4_set_tcb_field(sc, toep->ctrlq, toep, 2244671bf2b8SNavdeep Parhar W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1, 2245017902fcSJohn Baldwin CPL_COOKIE_DDP0 + i); 2246125d42feSJohn Baldwin toep->ddp.db[i].cancel_pending = 1; 2247dc964385SJohn Baldwin CTR2(KTR_CXGBE, "%s: request %p marked pending", 2248dc964385SJohn Baldwin __func__, job); 2249dc964385SJohn Baldwin break; 2250dc964385SJohn Baldwin } 2251dc964385SJohn Baldwin } 2252dc964385SJohn Baldwin DDP_UNLOCK(toep); 2253dc964385SJohn Baldwin } 2254dc964385SJohn Baldwin 2255dc964385SJohn Baldwin static void 2256dc964385SJohn Baldwin t4_aio_cancel_queued(struct kaiocb *job) 2257dc964385SJohn Baldwin { 2258dc964385SJohn Baldwin struct socket *so = job->fd_file->f_data; 2259e1401f75SGleb Smirnoff struct tcpcb *tp = sototcpcb(so); 2260dc964385SJohn Baldwin struct toepcb *toep = tp->t_toe; 2261dc964385SJohn Baldwin 2262dc964385SJohn Baldwin DDP_LOCK(toep); 2263dc964385SJohn Baldwin if (!aio_cancel_cleared(job)) { 2264125d42feSJohn Baldwin TAILQ_REMOVE(&toep->ddp.aiojobq, job, list); 2265125d42feSJohn Baldwin toep->ddp.waiting_count--; 2266125d42feSJohn Baldwin if (toep->ddp.waiting_count == 0) 2267dc964385SJohn Baldwin ddp_queue_toep(toep); 2268dc964385SJohn Baldwin } 2269dc964385SJohn Baldwin CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job); 2270dc964385SJohn Baldwin DDP_UNLOCK(toep); 2271dc964385SJohn Baldwin 2272dc964385SJohn Baldwin aio_ddp_cancel_one(job); 2273dc964385SJohn Baldwin } 2274dc964385SJohn Baldwin 2275dc964385SJohn Baldwin int 2276dc964385SJohn Baldwin t4_aio_queue_ddp(struct socket *so, struct kaiocb *job) 2277dc964385SJohn Baldwin { 2278*a5a965d7SJohn Baldwin struct inpcb *inp = sotoinpcb(so); 2279*a5a965d7SJohn Baldwin struct tcpcb *tp = intotcpcb(inp); 2280dc964385SJohn Baldwin struct toepcb *toep = tp->t_toe; 2281dc964385SJohn Baldwin 2282dc964385SJohn Baldwin 2283dc964385SJohn Baldwin /* Ignore writes. */ 2284dc964385SJohn Baldwin if (job->uaiocb.aio_lio_opcode != LIO_READ) 2285dc964385SJohn Baldwin return (EOPNOTSUPP); 2286dc964385SJohn Baldwin 2287*a5a965d7SJohn Baldwin INP_WLOCK(inp); 2288*a5a965d7SJohn Baldwin if (__predict_false(ulp_mode(toep) == ULP_MODE_NONE)) { 2289*a5a965d7SJohn Baldwin if (!set_ddp_ulp_mode(toep)) { 2290*a5a965d7SJohn Baldwin INP_WUNLOCK(inp); 2291*a5a965d7SJohn Baldwin return (EOPNOTSUPP); 2292*a5a965d7SJohn Baldwin } 2293*a5a965d7SJohn Baldwin } 2294*a5a965d7SJohn Baldwin INP_WUNLOCK(inp); 2295*a5a965d7SJohn Baldwin 2296dc964385SJohn Baldwin DDP_LOCK(toep); 2297dc964385SJohn Baldwin 2298dc964385SJohn Baldwin /* 2299dc964385SJohn Baldwin * XXX: Think about possibly returning errors for ENOTCONN, 2300dc964385SJohn Baldwin * etc. Perhaps the caller would only queue the request 2301dc964385SJohn Baldwin * if it failed with EOPNOTSUPP? 
2302dc964385SJohn Baldwin */ 2303dc964385SJohn Baldwin 2304dc964385SJohn Baldwin #ifdef VERBOSE_TRACES 23058674e626SNavdeep Parhar CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid); 2306dc964385SJohn Baldwin #endif 2307dc964385SJohn Baldwin if (!aio_set_cancel_function(job, t4_aio_cancel_queued)) 2308dc964385SJohn Baldwin panic("new job was cancelled"); 2309125d42feSJohn Baldwin TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list); 2310125d42feSJohn Baldwin toep->ddp.waiting_count++; 2311125d42feSJohn Baldwin toep->ddp.flags |= DDP_OK; 2312dc964385SJohn Baldwin 2313dc964385SJohn Baldwin /* 2314dc964385SJohn Baldwin * Try to handle this request synchronously. If this has 2315dc964385SJohn Baldwin * to block because the task is running, it will just bail 2316dc964385SJohn Baldwin * and let the task handle it instead. 2317dc964385SJohn Baldwin */ 2318dc964385SJohn Baldwin aio_ddp_requeue(toep); 2319dc964385SJohn Baldwin DDP_UNLOCK(toep); 2320dc964385SJohn Baldwin return (0); 2321dc964385SJohn Baldwin } 2322dc964385SJohn Baldwin 23239689995dSJohn Baldwin void 2324dc964385SJohn Baldwin t4_ddp_mod_load(void) 2325dc964385SJohn Baldwin { 2326dc964385SJohn Baldwin 23274535e804SNavdeep Parhar t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl, 23284535e804SNavdeep Parhar CPL_COOKIE_DDP0); 23294535e804SNavdeep Parhar t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl, 23304535e804SNavdeep Parhar CPL_COOKIE_DDP1); 2331671bf2b8SNavdeep Parhar t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp); 2332671bf2b8SNavdeep Parhar t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete); 2333dc964385SJohn Baldwin TAILQ_INIT(&ddp_orphan_pagesets); 2334dc964385SJohn Baldwin mtx_init(&ddp_orphan_pagesets_lock, "ddp orphans", NULL, MTX_DEF); 2335dc964385SJohn Baldwin TASK_INIT(&ddp_orphan_task, 0, ddp_free_orphan_pagesets, NULL); 2336dc964385SJohn Baldwin } 2337dc964385SJohn Baldwin 2338dc964385SJohn Baldwin void 2339dc964385SJohn Baldwin t4_ddp_mod_unload(void) 2340dc964385SJohn Baldwin { 2341dc964385SJohn Baldwin 2342dc964385SJohn Baldwin taskqueue_drain(taskqueue_thread, &ddp_orphan_task); 2343dc964385SJohn Baldwin MPASS(TAILQ_EMPTY(&ddp_orphan_pagesets)); 2344dc964385SJohn Baldwin mtx_destroy(&ddp_orphan_pagesets_lock); 2345d6ddb084SNavdeep Parhar t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP0); 2346d6ddb084SNavdeep Parhar t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP1); 2347671bf2b8SNavdeep Parhar t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL); 2348671bf2b8SNavdeep Parhar t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL); 2349dc964385SJohn Baldwin } 2350e682d02eSNavdeep Parhar #endif 2351