/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/aio.h>
#include <sys/file.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/tcp_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/toecore.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * received by the AIO job so far.
 */
#define aio_received backend3

static void aio_ddp_requeue_task(void *context, int pending);
static void ddp_complete_all(struct toepcb *toep, int error);
static void t4_aio_cancel_active(struct kaiocb *job);
static void t4_aio_cancel_queued(struct kaiocb *job);

static TAILQ_HEAD(, pageset) ddp_orphan_pagesets;
static struct mtx ddp_orphan_pagesets_lock;
static struct task ddp_orphan_task;

#define MAX_DDP_BUFFER_SIZE	(M_TCB_RX_DDP_BUF0_LEN)

/*
 * A page set holds information about a buffer used for DDP.  The page
 * set holds resources such as the VM pages backing the buffer (either
 * held or wired) and the page pods associated with the buffer.
 * Recently used page sets are cached to allow for efficient reuse of
 * buffers (avoiding the need to re-fault in pages, hold them, etc.).
 * Note that cached page sets keep the backing pages wired.  The
 * number of wired pages is capped by only allowing for two wired
 * pagesets per connection.  This is not a perfect cap, but is a
 * trade-off for performance.
 *
 * If an application ping-pongs two buffers for a connection via
 * aio_read(2) then those buffers should remain wired and expensive VM
 * fault lookups should be avoided after each buffer has been used
 * once.  If an application uses more than two buffers then this will
 * fall back to doing expensive VM fault lookups for each operation.
 */
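
/*
 * Illustrative userland sketch (not part of this file) of the access
 * pattern the cache above is tuned for: an application ping-ponging two
 * fixed buffers with aio_read(2), e.g.
 *
 *	struct aiocb cb[2];
 *	char buf[2][65536];
 *	for (int i = 0;; i ^= 1) {
 *		memset(&cb[i], 0, sizeof(cb[i]));
 *		cb[i].aio_fildes = sock;
 *		cb[i].aio_buf = buf[i];
 *		cb[i].aio_nbytes = sizeof(buf[i]);
 *		aio_read(&cb[i]);
 *		... wait for and consume the other buffer ...
 *	}
 *
 * Each buffer maps to one cached pageset, so after the first use of each
 * buffer no further page faults or page pod writes should be needed.
 */
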
static void
free_pageset(struct tom_data *td, struct pageset *ps)
{
	vm_page_t p;
	int i;

	if (ps->prsv.prsv_nppods > 0)
		t4_free_page_pods(&ps->prsv);

	for (i = 0; i < ps->npages; i++) {
		p = ps->pages[i];
		vm_page_lock(p);
		vm_page_unwire(p, PQ_INACTIVE);
		vm_page_unlock(p);
	}
	mtx_lock(&ddp_orphan_pagesets_lock);
	TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link);
	taskqueue_enqueue(taskqueue_thread, &ddp_orphan_task);
	mtx_unlock(&ddp_orphan_pagesets_lock);
}

static void
ddp_free_orphan_pagesets(void *context, int pending)
{
	struct pageset *ps;

	mtx_lock(&ddp_orphan_pagesets_lock);
	while (!TAILQ_EMPTY(&ddp_orphan_pagesets)) {
		ps = TAILQ_FIRST(&ddp_orphan_pagesets);
		TAILQ_REMOVE(&ddp_orphan_pagesets, ps, link);
		mtx_unlock(&ddp_orphan_pagesets_lock);
		if (ps->vm)
			vmspace_free(ps->vm);
		free(ps, M_CXGBE);
		mtx_lock(&ddp_orphan_pagesets_lock);
	}
	mtx_unlock(&ddp_orphan_pagesets_lock);
}

static void
recycle_pageset(struct toepcb *toep, struct pageset *ps)
{

	DDP_ASSERT_LOCKED(toep);
	if (!(toep->ddp.flags & DDP_DEAD)) {
		KASSERT(toep->ddp.cached_count + toep->ddp.active_count <
		    nitems(toep->ddp.db), ("too many wired pagesets"));
		TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link);
		toep->ddp.cached_count++;
	} else
		free_pageset(toep->td, ps);
}

static void
ddp_complete_one(struct kaiocb *job, int error)
{
	long copied;

	/*
	 * If this job had copied data out of the socket buffer before
	 * it was cancelled, report it as a short read rather than an
	 * error.
	 */
	copied = job->aio_received;
	if (copied != 0 || error == 0)
		aio_complete(job, copied, 0);
	else
		aio_complete(job, -1, error);
}
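
/*
 * For example (illustrative numbers): a job that asked for 4096 bytes
 * and had already received 1024 of them when it was cancelled is
 * completed as aio_complete(job, 1024, 0), so aio_return(2) reports a
 * 1024-byte short read and no error is surfaced for this request.
 */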

static void
free_ddp_buffer(struct tom_data *td, struct ddp_buffer *db)
{

	if (db->job) {
		/*
		 * XXX: If we are un-offloading the socket then we
		 * should requeue these on the socket somehow.  If we
		 * got a FIN from the remote end, then this completes
		 * any remaining requests with an EOF read.
		 */
		if (!aio_clear_cancel_function(db->job))
			ddp_complete_one(db->job, 0);
	}

	if (db->ps)
		free_pageset(td, db->ps);
}

void
ddp_init_toep(struct toepcb *toep)
{

	TAILQ_INIT(&toep->ddp.aiojobq);
	TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task, toep);
	toep->ddp.flags = DDP_OK;
	toep->ddp.active_id = -1;
	mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF);
}

void
ddp_uninit_toep(struct toepcb *toep)
{

	mtx_destroy(&toep->ddp.lock);
}

void
release_ddp_resources(struct toepcb *toep)
{
	struct pageset *ps;
	int i;

	DDP_LOCK(toep);
	toep->ddp.flags |= DDP_DEAD;
	for (i = 0; i < nitems(toep->ddp.db); i++) {
		free_ddp_buffer(toep->td, &toep->ddp.db[i]);
	}
	while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) {
		TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
		free_pageset(toep->td, ps);
	}
	ddp_complete_all(toep, 0);
	DDP_UNLOCK(toep);
}

#ifdef INVARIANTS
void
ddp_assert_empty(struct toepcb *toep)
{
	int i;

	MPASS(!(toep->ddp.flags & DDP_TASK_ACTIVE));
	for (i = 0; i < nitems(toep->ddp.db); i++) {
		MPASS(toep->ddp.db[i].job == NULL);
		MPASS(toep->ddp.db[i].ps == NULL);
	}
	MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets));
	MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq));
}
#endif

static void
complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db,
    unsigned int db_idx)
{
	unsigned int db_flag;

	toep->ddp.active_count--;
	if (toep->ddp.active_id == db_idx) {
		if (toep->ddp.active_count == 0) {
			KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL,
			    ("%s: active_count mismatch", __func__));
			toep->ddp.active_id = -1;
		} else
			toep->ddp.active_id ^= 1;
#ifdef VERBOSE_TRACES
		CTR3(KTR_CXGBE, "%s: tid %u, ddp_active_id = %d", __func__,
		    toep->tid, toep->ddp.active_id);
#endif
	} else {
		KASSERT(toep->ddp.active_count != 0 &&
		    toep->ddp.active_id != -1,
		    ("%s: active count mismatch", __func__));
	}

	db->cancel_pending = 0;
	db->job = NULL;
	recycle_pageset(toep, db->ps);
	db->ps = NULL;

	db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
	KASSERT(toep->ddp.flags & db_flag,
	    ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x",
	    __func__, toep, toep->ddp.flags));
	toep->ddp.flags &= ~db_flag;
}
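
/*
 * Example of the bookkeeping above (illustrative): with both hardware
 * buffers posted, active_id names the buffer the chip fills first.  If
 * buffer 0 is active and completes while buffer 1 still has a job,
 * active_id flips to 1 (active_id ^= 1) and active_count drops to 1.
 * When that last buffer completes, active_count reaches 0 and active_id
 * is reset to -1 until a new buffer is scheduled.
 */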

/* XXX: handle_ddp_data code duplication */
void
insert_ddp_data(struct toepcb *toep, uint32_t n)
{
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct ddp_buffer *db;
	struct kaiocb *job;
	size_t placed;
	long copied;
	unsigned int db_flag, db_idx;

	INP_WLOCK_ASSERT(inp);
	DDP_ASSERT_LOCKED(toep);

	tp->rcv_nxt += n;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
	tp->rcv_wnd -= n;
#endif
	CTR2(KTR_CXGBE, "%s: placed %u bytes before falling out of DDP",
	    __func__, n);
	while (toep->ddp.active_count > 0) {
		MPASS(toep->ddp.active_id != -1);
		db_idx = toep->ddp.active_id;
		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
		MPASS((toep->ddp.flags & db_flag) != 0);
		db = &toep->ddp.db[db_idx];
		job = db->job;
		copied = job->aio_received;
		placed = n;
		if (placed > job->uaiocb.aio_nbytes - copied)
			placed = job->uaiocb.aio_nbytes - copied;
		if (placed > 0)
			job->msgrcv = 1;
		if (!aio_clear_cancel_function(job)) {
			/*
			 * Update the copied length for when
			 * t4_aio_cancel_active() completes this
			 * request.
			 */
			job->aio_received += placed;
		} else if (copied + placed != 0) {
			CTR4(KTR_CXGBE,
			    "%s: completing %p (copied %ld, placed %lu)",
			    __func__, job, copied, placed);
			/* XXX: This always completes if there is some data. */
			aio_complete(job, copied + placed, 0);
		} else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
			TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
			toep->ddp.waiting_count++;
		} else
			aio_cancel(job);
		n -= placed;
		complete_ddp_buffer(toep, db, db_idx);
	}

	MPASS(n == 0);
}
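
/*
 * Worked example for insert_ddp_data() (illustrative numbers): suppose
 * DDP falls out with n = 3000 bytes already placed and both buffers
 * posted.  If the active buffer's job asked for 2048 bytes and had
 * received none, it is credited with 2048 bytes and completed, leaving
 * n = 952; the other buffer's job is then credited with the remaining
 * 952 bytes and, since some data was placed, is also completed (see the
 * XXX note above).  The loop ends only once every placed byte has been
 * attributed to a job, hence the MPASS(n == 0).
 */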

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

/* RX_DATA_ACK sent as a ULP command looks like this */
#define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core))

static inline void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
    uint64_t word, uint64_t mask, uint64_t val)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

static inline void *
mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
{
	struct ulptx_idata *ulpsc;
	struct cpl_rx_data_ack_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_rx_data_ack_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid));
	req->credit_dack = htobe32(F_RX_MODULATE_RX);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__RX_DATA_ACK_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

static struct wrqe *
mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
    struct pageset *ps, int offset, uint64_t ddp_flags,
    uint64_t ddp_flags_mask)
{
	struct wrqe *wr;
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int len;

	KASSERT(db_idx == 0 || db_idx == 1,
	    ("%s: bad DDP buffer index %d", __func__, db_idx));

	/*
	 * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
	 * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
	 *
	 * The work request header is 16B and always ends at a 16B boundary.
	 * The ULPTX master commands that follow must all end at 16B boundaries
	 * too so we round up the size to 16.
	 */
	len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__RX_DATA_ACK_ULP, 16);

	wr = alloc_wrqe(len, toep->ctrlq);
	if (wr == NULL)
		return (NULL);
	wrh = wrtod(wr);
	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);

	/* Write the buffer's tag */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
	    W_TCB_RX_DDP_BUF0_TAG + db_idx,
	    V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
	    V_TCB_RX_DDP_BUF0_TAG(ps->prsv.prsv_tag));

	/* Update the current offset in the DDP buffer and its total length */
	if (db_idx == 0)
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF0_OFFSET,
		    V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
		    V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
		    V_TCB_RX_DDP_BUF0_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF0_LEN(ps->len));
	else
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF1_OFFSET,
		    V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
		    V_TCB_RX_DDP_BUF1_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)ps->len << 32));

	/* Update DDP flags */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS,
	    ddp_flags_mask, ddp_flags);

	/* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
	ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);

	return (wr);
}

static int
handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
{
	uint32_t report = be32toh(ddp_report);
	unsigned int db_idx;
	struct inpcb *inp = toep->inp;
	struct ddp_buffer *db;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct kaiocb *job;
	long copied;

	db_idx = report & F_DDP_BUF_IDX ? 1 : 0;

	if (__predict_false(!(report & F_DDP_INV)))
		CXGBE_UNIMPLEMENTED("DDP buffer still valid");

	INP_WLOCK(inp);
	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	DDP_LOCK(toep);

	KASSERT(toep->ddp.active_id == db_idx,
	    ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx,
	    toep->ddp.active_id, toep->tid));
	db = &toep->ddp.db[db_idx];
	job = db->job;

	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
		/*
		 * This can happen due to an administrative tcpdrop(8).
		 * Just fail the request with ECONNRESET.
		 */
		CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
		    __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
		if (aio_clear_cancel_function(job))
			ddp_complete_one(job, ECONNRESET);
		goto completed;
	}

	tp = intotcpcb(inp);

	/*
	 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
	 * sequence number of the next byte to receive.  The length of
	 * the data received for this message must be computed by
	 * comparing the new and old values of rcv_nxt.
	 *
	 * For RX_DATA_DDP, len might be non-zero, but it is only the
	 * length of the most recent DMA.  It does not include the
	 * total length of the data received since the previous update
	 * for this DDP buffer.  rcv_nxt is the sequence number of the
	 * first received byte from the most recent DMA.
	 */
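	/*
	 * Worked example (illustrative sequence numbers): if tp->rcv_nxt is
	 * 1000 and an RX_DATA_DDP arrives with rcv_nxt 1400 and len 100,
	 * then 400 bytes were placed by earlier DMAs since the last update
	 * and 100 by this one, so the computation below credits 500 bytes
	 * to the job and advances tp->rcv_nxt to 1500.  For RX_DDP_COMPLETE
	 * (len 0) the same expression reduces to the plain difference of
	 * the two rcv_nxt values.
	 */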
	len += be32toh(rcv_nxt) - tp->rcv_nxt;
	tp->rcv_nxt += len;
	tp->t_rcvtime = ticks;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
#endif
#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: tid %u, DDP[%d] placed %d bytes (%#x)", __func__,
	    toep->tid, db_idx, len, report);
#endif

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(sb);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		struct adapter *sc = td_adapter(toep->td);
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
	}
	SOCKBUF_UNLOCK(sb);
	CURVNET_RESTORE();
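
	/*
	 * Autosize example (illustrative values): with a 64KB sb_hiwat, an
	 * empty socket buffer, and a 60KB placement, 60KB exceeds 7/8 of
	 * the available space, so the high-water mark is grown by
	 * tt.autorcvbuf_inc (capped at tcp_autorcvbuf_max) to keep the
	 * advertised window from throttling the sender.
	 */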

	job->msgrcv = 1;
	if (db->cancel_pending) {
		/*
		 * Update the job's length but defer completion to the
		 * TCB_RPL callback.
		 */
		job->aio_received += len;
		goto out;
	} else if (!aio_clear_cancel_function(job)) {
		/*
		 * Update the copied length for when
		 * t4_aio_cancel_active() completes this request.
		 */
		job->aio_received += len;
	} else {
		copied = job->aio_received;
#ifdef VERBOSE_TRACES
		CTR5(KTR_CXGBE,
		    "%s: tid %u, completing %p (copied %ld, placed %d)",
		    __func__, toep->tid, job, copied, len);
#endif
		aio_complete(job, copied + len, 0);
		t4_rcvd(&toep->td->tod, tp);
	}

completed:
	complete_ddp_buffer(toep, db, db_idx);
	if (toep->ddp.waiting_count > 0)
		ddp_queue_toep(toep);
out:
	DDP_UNLOCK(toep);
	INP_WUNLOCK(inp);

	return (0);
}

void
handle_ddp_indicate(struct toepcb *toep)
{

	DDP_ASSERT_LOCKED(toep);
	MPASS(toep->ddp.active_count == 0);
	MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0);
	if (toep->ddp.waiting_count == 0) {
		/*
		 * The pending requests that triggered the request for an
		 * indicate were cancelled.  Those cancels should have
		 * already disabled DDP.  Just ignore this as the data is
		 * going into the socket buffer anyway.
		 */
		return;
	}
	CTR3(KTR_CXGBE, "%s: tid %d indicated (%d waiting)", __func__,
	    toep->tid, toep->ddp.waiting_count);
	ddp_queue_toep(toep);
}

enum {
	DDP_BUF0_INVALIDATED = 0x2,
	DDP_BUF1_INVALIDATED
};

CTASSERT(DDP_BUF0_INVALIDATED == CPL_COOKIE_DDP0);

static int
do_ddp_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	unsigned int db_idx;
	struct toepcb *toep;
	struct inpcb *inp;
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;

	if (cpl->status != CPL_ERR_NONE)
		panic("XXX: tcp_rpl failed: %d", cpl->status);

	toep = lookup_tid(sc, tid);
	inp = toep->inp;
	switch (cpl->cookie) {
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(DDP_BUF0_INVALIDATED):
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(DDP_BUF1_INVALIDATED):
		/*
		 * XXX: This duplicates a lot of code with handle_ddp_data().
		 */
		db_idx = G_COOKIE(cpl->cookie) - DDP_BUF0_INVALIDATED;
		MPASS(db_idx < nitems(toep->ddp.db));
		INP_WLOCK(inp);
		DDP_LOCK(toep);
		db = &toep->ddp.db[db_idx];

		/*
		 * handle_ddp_data() should leave the job around until
		 * this callback runs once a cancel is pending.
		 */
		MPASS(db != NULL);
		MPASS(db->job != NULL);
		MPASS(db->cancel_pending);

		/*
		 * XXX: It's not clear what happens if there is data
		 * placed when the buffer is invalidated.  I suspect we
		 * need to read the TCB to see how much data was placed.
		 *
		 * For now this just pretends like nothing was placed.
		 *
		 * XXX: Note that if we did check the PCB we would need to
		 * also take care of updating the tp, etc.
		 */
		job = db->job;
		copied = job->aio_received;
		if (copied == 0) {
			CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
			aio_cancel(job);
		} else {
			CTR3(KTR_CXGBE, "%s: completing %p (copied %ld)",
			    __func__, job, copied);
			aio_complete(job, copied, 0);
			t4_rcvd(&toep->td->tod, intotcpcb(inp));
		}

		complete_ddp_buffer(toep, db, db_idx);
		if (toep->ddp.waiting_count > 0)
			ddp_queue_toep(toep);
		DDP_UNLOCK(toep);
		INP_WUNLOCK(inp);
		break;
	default:
		panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
		    G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
	}

	return (0);
}

void
handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt)
{
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;
	unsigned int db_flag, db_idx;
	int len, placed;

	INP_WLOCK_ASSERT(toep->inp);
	DDP_ASSERT_LOCKED(toep);

	len = be32toh(rcv_nxt) - tp->rcv_nxt;
	tp->rcv_nxt += len;

	while (toep->ddp.active_count > 0) {
		MPASS(toep->ddp.active_id != -1);
		db_idx = toep->ddp.active_id;
		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
		MPASS((toep->ddp.flags & db_flag) != 0);
		db = &toep->ddp.db[db_idx];
		job = db->job;
		copied = job->aio_received;
		placed = len;
		if (placed > job->uaiocb.aio_nbytes - copied)
			placed = job->uaiocb.aio_nbytes - copied;
		if (placed > 0)
			job->msgrcv = 1;
		if (!aio_clear_cancel_function(job)) {
			/*
			 * Update the copied length for when
			 * t4_aio_cancel_active() completes this
			 * request.
			 */
			job->aio_received += placed;
		} else {
			CTR4(KTR_CXGBE, "%s: tid %d completed buf %d len %d",
			    __func__, toep->tid, db_idx, placed);
			aio_complete(job, copied + placed, 0);
		}
		len -= placed;
		complete_ddp_buffer(toep, db, db_idx);
	}

	MPASS(len == 0);
	ddp_complete_all(toep, 0);
}

#define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\
	F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\
	F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\
	F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR)

extern cpl_handler_t t4_cpl_handler[];

static int
do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	uint32_t vld;
	struct toepcb *toep = lookup_tid(sc, tid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	vld = be32toh(cpl->ddpvld);
	if (__predict_false(vld & DDP_ERR)) {
		panic("%s: DDP error 0x%x (tid %d, toep %p)",
		    __func__, vld, tid, toep);
	}

	if (toep->ulp_mode == ULP_MODE_ISCSI) {
		t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
		return (0);
	}

	handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len));

	return (0);
}

static int
do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0);

	return (0);
}

static void
enable_ddp(struct adapter *sc, struct toepcb *toep)
{

	KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
	    ("%s: toep %p has bad ddp_flags 0x%x",
	    __func__, toep, toep->ddp.flags));

	CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
	    __func__, toep->tid, time_uptime);

	DDP_ASSERT_LOCKED(toep);
	toep->ddp.flags |= DDP_SC_REQ;
	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_RX_DDP_FLAGS,
	    V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1),
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1), 0, 0);
	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
	    V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0);
}

static int
calculate_hcf(int n1, int n2)
{
	int a, b, t;

	if (n1 <= n2) {
		a = n1;
		b = n2;
	} else {
		a = n2;
		b = n1;
	}

	while (a != 0) {
		t = a;
		a = b % a;
		b = t;
	}

	return (b);
}

static inline int
pages_to_nppods(int npages, int ddp_page_shift)
{

	MPASS(ddp_page_shift >= PAGE_SHIFT);

	return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES));
}

static int
alloc_page_pods(struct ppod_region *pr, u_int nppods, u_int pgsz_idx,
    struct ppod_reservation *prsv)
{
	vmem_addr_t addr;	/* relative to start of region */

	if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT,
	    &addr) != 0)
		return (ENOMEM);

	CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d",
	    __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask,
	    nppods, 1 << pr->pr_page_shift[pgsz_idx]);

	/*
	 * The hardware tagmask includes an extra invalid bit but the arena was
	 * seeded with valid values only.  An allocation out of this arena will
	 * fit inside the tagmask but won't have the invalid bit set.
	 */
	MPASS((addr & pr->pr_tag_mask) == addr);
	MPASS((addr & pr->pr_invalid_bit) == 0);

	prsv->prsv_pr = pr;
	prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr;
	prsv->prsv_nppods = nppods;

	return (0);
}

int
t4_alloc_page_pods_for_ps(struct ppod_region *pr, struct pageset *ps)
{
	int i, hcf, seglen, idx, nppods;
	struct ppod_reservation *prsv = &ps->prsv;

	KASSERT(prsv->prsv_nppods == 0,
	    ("%s: page pods already allocated", __func__));

	/*
	 * The DDP page size is unrelated to the VM page size.  We combine
	 * contiguous physical pages into larger segments to get the best DDP
	 * page size possible.  This is the largest of the four sizes in
	 * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes
	 * in the page list.
	 */
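	/*
	 * Worked example (illustrative, assuming 4KB VM pages and DDP page
	 * sizes of 4KB/16KB/64KB/1MB): a page list that folds into
	 * physically contiguous segments of 64KB, 24KB and 32KB has an HCF
	 * of 8KB, so the largest supported DDP page size that divides it is
	 * 4KB (idx 0).  If the segments were 64KB, 128KB and 192KB the HCF
	 * would be 64KB and the 64KB DDP page size would be selected,
	 * needing far fewer page pod entries.
	 */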
	hcf = 0;
	for (i = 0; i < ps->npages; i++) {
		seglen = PAGE_SIZE;
		while (i < ps->npages - 1 &&
		    ps->pages[i]->phys_addr + PAGE_SIZE ==
		    ps->pages[i + 1]->phys_addr) {
			seglen += PAGE_SIZE;
			i++;
		}

		hcf = calculate_hcf(hcf, seglen);
		if (hcf < (1 << pr->pr_page_shift[1])) {
			idx = 0;
			goto have_pgsz;	/* give up, short circuit */
		}
	}

#define PR_PAGE_MASK(x)	((1 << pr->pr_page_shift[(x)]) - 1)
	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
		if ((hcf & PR_PAGE_MASK(idx)) == 0)
			break;
	}
#undef PR_PAGE_MASK

have_pgsz:
	MPASS(idx <= M_PPOD_PGSZ);

	nppods = pages_to_nppods(ps->npages, pr->pr_page_shift[idx]);
	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
		return (0);
	MPASS(prsv->prsv_nppods > 0);

	return (1);
}

int
t4_alloc_page_pods_for_buf(struct ppod_region *pr, vm_offset_t buf, int len,
    struct ppod_reservation *prsv)
{
	int hcf, seglen, idx, npages, nppods;
	uintptr_t start_pva, end_pva, pva, p1;

	MPASS(buf > 0);
	MPASS(len > 0);

	/*
	 * The DDP page size is unrelated to the VM page size.  We combine
	 * contiguous physical pages into larger segments to get the best DDP
	 * page size possible.  This is the largest of the four sizes in
	 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes
	 * in the page list.
	 */
	hcf = 0;
	start_pva = trunc_page(buf);
	end_pva = trunc_page(buf + len - 1);
	pva = start_pva;
	while (pva <= end_pva) {
		seglen = PAGE_SIZE;
		p1 = pmap_kextract(pva);
		pva += PAGE_SIZE;
		while (pva <= end_pva && p1 + seglen == pmap_kextract(pva)) {
			seglen += PAGE_SIZE;
			pva += PAGE_SIZE;
		}

		hcf = calculate_hcf(hcf, seglen);
		if (hcf < (1 << pr->pr_page_shift[1])) {
			idx = 0;
			goto have_pgsz;	/* give up, short circuit */
		}
	}

#define PR_PAGE_MASK(x)	((1 << pr->pr_page_shift[(x)]) - 1)
	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
		if ((hcf & PR_PAGE_MASK(idx)) == 0)
			break;
	}
#undef PR_PAGE_MASK

have_pgsz:
	MPASS(idx <= M_PPOD_PGSZ);

	npages = 1;
	npages += (end_pva - start_pva) >> pr->pr_page_shift[idx];
	nppods = howmany(npages, PPOD_PAGES);
	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
		return (ENOMEM);
	MPASS(prsv->prsv_nppods > 0);

	return (0);
}

void
t4_free_page_pods(struct ppod_reservation *prsv)
{
	struct ppod_region *pr = prsv->prsv_pr;
	vmem_addr_t addr;

	MPASS(prsv != NULL);
	MPASS(prsv->prsv_nppods != 0);

	addr = prsv->prsv_tag & pr->pr_tag_mask;
	MPASS((addr & pr->pr_invalid_bit) == 0);

	CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__,
	    pr->pr_arena, addr, prsv->prsv_nppods);

	vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods));
	prsv->prsv_nppods = 0;
}

#define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE)
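
/*
 * Illustrative arithmetic (assuming the usual 64-byte page pod, i.e.
 * PPOD_SIZE == 64): NUM_ULP_TX_SC_IMM_PPODS is 4, so a reservation of 10
 * page pods is written with three ULP_TX_MEM_WRITE work requests carrying
 * 4, 4 and 2 pods of immediate data respectively, ppod_addr advancing by
 * the chunk size after each request.
 */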

int
t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid,
    struct pageset *ps)
{
	struct wrqe *wr;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	struct pagepod *ppod;
	int i, j, k, n, chunk, len, ddp_pgsz, idx;
	u_int ppod_addr;
	uint32_t cmd;
	struct ppod_reservation *prsv = &ps->prsv;
	struct ppod_region *pr = prsv->prsv_pr;

	KASSERT(!(ps->flags & PS_PPODS_WRITTEN),
	    ("%s: page pods already written", __func__));
	MPASS(prsv->prsv_nppods > 0);

	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= htobe32(F_ULP_MEMIO_ORDER);
	else
		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {

		/* How many page pods are we writing in this cycle */
		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
		chunk = PPOD_SZ(n);
		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

		wr = alloc_wrqe(len, wrq);
		if (wr == NULL)
			return (ENOMEM);	/* ok to just bail out */
		ulpmc = wrtod(wr);

		INIT_ULPTX_WR(ulpmc, len, 0, 0);
		ulpmc->cmd = cmd;
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = htobe32(chunk);

		ppod = (struct pagepod *)(ulpsc + 1);
		for (j = 0; j < n; i++, j++, ppod++) {
			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
			    V_PPOD_TID(tid) | prsv->prsv_tag);
			ppod->len_offset = htobe64(V_PPOD_LEN(ps->len) |
			    V_PPOD_OFST(ps->offset));
			ppod->rsvd = 0;
			idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
			for (k = 0; k < nitems(ppod->addr); k++) {
				if (idx < ps->npages) {
					ppod->addr[k] =
					    htobe64(ps->pages[idx]->phys_addr);
					idx += ddp_pgsz / PAGE_SIZE;
				} else
					ppod->addr[k] = 0;
#if 0
				CTR5(KTR_CXGBE,
				    "%s: tid %d ppod[%d]->addr[%d] = %p",
				    __func__, toep->tid, i, k,
				    htobe64(ppod->addr[k]));
#endif
			}

		}

		t4_wrq_tx(sc, wr);
	}
	ps->flags |= PS_PPODS_WRITTEN;

	return (0);
}

int
t4_write_page_pods_for_buf(struct adapter *sc, struct sge_wrq *wrq, int tid,
    struct ppod_reservation *prsv, vm_offset_t buf, int buflen)
{
	struct wrqe *wr;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	struct pagepod *ppod;
	int i, j, k, n, chunk, len, ddp_pgsz;
	u_int ppod_addr, offset;
	uint32_t cmd;
	struct ppod_region *pr = prsv->prsv_pr;
	uintptr_t end_pva, pva, pa;

	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= htobe32(F_ULP_MEMIO_ORDER);
	else
		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
	offset = buf & PAGE_MASK;
	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
	pva = trunc_page(buf);
	end_pva = trunc_page(buf + buflen - 1);
	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {

		/* How many page pods are we writing in this cycle */
		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
		MPASS(n > 0);
		chunk = PPOD_SZ(n);
		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

		wr = alloc_wrqe(len, wrq);
		if (wr == NULL)
			return (ENOMEM);	/* ok to just bail out */
		ulpmc = wrtod(wr);

		INIT_ULPTX_WR(ulpmc, len, 0, 0);
		ulpmc->cmd = cmd;
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = htobe32(chunk);

		ppod = (struct pagepod *)(ulpsc + 1);
		for (j = 0; j < n; i++, j++, ppod++) {
			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
			    V_PPOD_TID(tid) |
			    (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
			ppod->len_offset = htobe64(V_PPOD_LEN(buflen) |
			    V_PPOD_OFST(offset));
			ppod->rsvd = 0;

			for (k = 0; k < nitems(ppod->addr); k++) {
				if (pva > end_pva)
					ppod->addr[k] = 0;
				else {
					pa = pmap_kextract(pva);
					ppod->addr[k] = htobe64(pa);
					pva += ddp_pgsz;
				}
#if 0
				CTR5(KTR_CXGBE,
				    "%s: tid %d ppod[%d]->addr[%d] = %p",
				    __func__, tid, i, k,
				    htobe64(ppod->addr[k]));
#endif
			}

			/*
			 * Walk back 1 segment so that the first address in the
			 * next pod is the same as the last one in the current
			 * pod.
			 */
			pva -= ddp_pgsz;
		}

		t4_wrq_tx(sc, wr);
	}

	MPASS(pva <= end_pva);

	return (0);
}

/*
 * Prepare a pageset for DDP.  This sets up page pods.
 */
static int
prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps)
{
	struct tom_data *td = sc->tom_softc;

	if (ps->prsv.prsv_nppods == 0 &&
	    !t4_alloc_page_pods_for_ps(&td->pr, ps)) {
		return (0);
	}
	if (!(ps->flags & PS_PPODS_WRITTEN) &&
	    t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) {
		return (0);
	}

	return (1);
}

int
t4_init_ppod_region(struct ppod_region *pr, struct t4_range *r, u_int psz,
    const char *name)
{
	int i;

	MPASS(pr != NULL);
	MPASS(r->size > 0);

	pr->pr_start = r->start;
	pr->pr_len = r->size;
	pr->pr_page_shift[0] = 12 + G_HPZ0(psz);
	pr->pr_page_shift[1] = 12 + G_HPZ1(psz);
	pr->pr_page_shift[2] = 12 + G_HPZ2(psz);
	pr->pr_page_shift[3] = 12 + G_HPZ3(psz);

	/* The SGL -> page pod algorithm requires the sizes to be in order. */
	for (i = 1; i < nitems(pr->pr_page_shift); i++) {
		if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1])
			return (ENXIO);
	}

	pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG);
	pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask;
	if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0)
		return (ENXIO);
	pr->pr_alias_shift = fls(pr->pr_tag_mask);
	pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1);

	pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0,
	    M_FIRSTFIT | M_NOWAIT);
	if (pr->pr_arena == NULL)
		return (ENOMEM);

	return (0);
}

void
t4_free_ppod_region(struct ppod_region *pr)
{

	MPASS(pr != NULL);

	if (pr->pr_arena)
		vmem_destroy(pr->pr_arena);
	bzero(pr, sizeof(*pr));
}

static int
pscmp(struct pageset *ps, struct vmspace *vm, vm_offset_t start, int npages,
    int pgoff, int len)
{

	if (ps->start != start || ps->npages != npages ||
	    ps->offset != pgoff || ps->len != len)
		return (1);

	return (ps->vm != vm || ps->vm_timestamp != vm->vm_map.timestamp);
}

static int
hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t start, end, pgoff;
	struct pageset *ps;
	int n;

	DDP_ASSERT_LOCKED(toep);

	/*
	 * The AIO subsystem will cancel and drain all requests before
	 * permitting a process to exit or exec, so p_vmspace should
	 * be stable here.
	 */
	vm = job->userproc->p_vmspace;
	map = &vm->vm_map;
	start = (uintptr_t)job->uaiocb.aio_buf;
	pgoff = start & PAGE_MASK;
	end = round_page(start + job->uaiocb.aio_nbytes);
	start = trunc_page(start);

	if (end - start > MAX_DDP_BUFFER_SIZE) {
		/*
		 * Truncate the request to a short read.
		 * Alternatively, we could DDP in chunks to the larger
		 * buffer, but that would be quite a bit more work.
		 *
		 * When truncating, round the request down to avoid
		 * crossing a cache line on the final transaction.
		 */
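		/*
		 * For example (illustrative, assuming a 64-byte cache line):
		 * if start + MAX_DDP_BUFFER_SIZE lands at 0x123456, the end
		 * is rounded down to 0x123440 and aio_nbytes is reduced to
		 * end - (start + pgoff), yielding a short read that stops on
		 * a cache-line boundary.
		 */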
		end = rounddown2(start + MAX_DDP_BUFFER_SIZE, CACHE_LINE_SIZE);
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE, "%s: tid %d, truncating size from %lu to %lu",
		    __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes,
		    (unsigned long)(end - (start + pgoff)));
#endif
		job->uaiocb.aio_nbytes = end - (start + pgoff);
		end = round_page(end);
	}

	n = atop(end - start);

	/*
	 * Try to reuse a cached pageset.
	 */
	TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) {
		if (pscmp(ps, vm, start, n, pgoff,
		    job->uaiocb.aio_nbytes) == 0) {
			TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
			toep->ddp.cached_count--;
			*pps = ps;
			return (0);
		}
	}

	/*
	 * If there are too many cached pagesets to create a new one,
	 * free a pageset before creating a new one.
	 */
	KASSERT(toep->ddp.active_count + toep->ddp.cached_count <=
	    nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__));
	if (toep->ddp.active_count + toep->ddp.cached_count ==
	    nitems(toep->ddp.db)) {
		KASSERT(toep->ddp.cached_count > 0,
		    ("no cached pageset to free"));
		ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq);
		TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
		toep->ddp.cached_count--;
		free_pageset(toep->td, ps);
	}
	DDP_UNLOCK(toep);

	/* Create a new pageset. */
	ps = malloc(sizeof(*ps) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
	    M_ZERO);
	ps->pages = (vm_page_t *)(ps + 1);
	ps->vm_timestamp = map->timestamp;
	ps->npages = vm_fault_quick_hold_pages(map, start, end - start,
	    VM_PROT_WRITE, ps->pages, n);

	DDP_LOCK(toep);
	if (ps->npages < 0) {
		free(ps, M_CXGBE);
		return (EFAULT);
	}

	KASSERT(ps->npages == n, ("hold_aio: page count mismatch: %d vs %d",
	    ps->npages, n));

	ps->offset = pgoff;
	ps->len = job->uaiocb.aio_nbytes;
	atomic_add_int(&vm->vm_refcnt, 1);
	ps->vm = vm;
	ps->start = start;

	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
	    __func__, toep->tid, ps, job, ps->npages);
	*pps = ps;
	return (0);
}

static void
ddp_complete_all(struct toepcb *toep, int error)
{
	struct kaiocb *job;

	DDP_ASSERT_LOCKED(toep);
	while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) {
		job = TAILQ_FIRST(&toep->ddp.aiojobq);
		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
		toep->ddp.waiting_count--;
		if (aio_clear_cancel_function(job))
			ddp_complete_one(job, error);
	}
}

static void
aio_ddp_cancel_one(struct kaiocb *job)
{
	long copied;

	/*
	 * If this job had copied data out of the socket buffer before
	 * it was cancelled, report it as a short read rather than an
	 * error.
	 */
	copied = job->aio_received;
	if (copied != 0)
		aio_complete(job, copied, 0);
	else
		aio_cancel(job);
}

/*
 * Called when the main loop wants to requeue a job to retry it later.
 * Deals with the race of the job being cancelled while it was being
 * examined.
 */
static void
aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job)
{

	DDP_ASSERT_LOCKED(toep);
	if (!(toep->ddp.flags & DDP_DEAD) &&
	    aio_set_cancel_function(job, t4_aio_cancel_queued)) {
		TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
		toep->ddp.waiting_count++;
	} else
		aio_ddp_cancel_one(job);
}

static void
aio_ddp_requeue(struct toepcb *toep)
{
	struct adapter *sc = td_adapter(toep->td);
	struct socket *so;
	struct sockbuf *sb;
	struct inpcb *inp;
	struct kaiocb *job;
	struct ddp_buffer *db;
	size_t copied, offset, resid;
	struct pageset *ps;
	struct mbuf *m;
	uint64_t ddp_flags, ddp_flags_mask;
	struct wrqe *wr;
	int buf_flag, db_idx, error;

	DDP_ASSERT_LOCKED(toep);

restart:
	if (toep->ddp.flags & DDP_DEAD) {
		MPASS(toep->ddp.waiting_count == 0);
		MPASS(toep->ddp.active_count == 0);
		return;
	}

	if (toep->ddp.waiting_count == 0 ||
	    toep->ddp.active_count == nitems(toep->ddp.db)) {
		return;
	}

	job = TAILQ_FIRST(&toep->ddp.aiojobq);
	so = job->fd_file->f_data;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	/* We will never get anything unless we are or were connected. */
	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
		SOCKBUF_UNLOCK(sb);
		ddp_complete_all(toep, ENOTCONN);
		return;
	}

	KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0,
	    ("%s: pending sockbuf data and DDP is active", __func__));

	/* Abort if socket has reported problems. */
	/* XXX: Wait for any queued DDP's to finish and/or flush them? */
	if (so->so_error && sbavail(sb) == 0) {
		toep->ddp.waiting_count--;
		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
		if (!aio_clear_cancel_function(job)) {
			SOCKBUF_UNLOCK(sb);
			goto restart;
		}

		/*
		 * If this job has previously copied some data, report
		 * a short read and leave the error to be reported by
		 * a future request.
		 */
		copied = job->aio_received;
		if (copied != 0) {
			SOCKBUF_UNLOCK(sb);
			aio_complete(job, copied, 0);
			goto restart;
		}
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		aio_complete(job, -1, error);
		goto restart;
	}

	/*
	 * Door is closed.  If there is pending data in the socket buffer,
	 * deliver it.  If there are pending DDP requests, wait for those
	 * to complete.  Once they have completed, return EOF reads.
	 */
	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
		SOCKBUF_UNLOCK(sb);
		if (toep->ddp.active_count != 0)
			return;
		ddp_complete_all(toep, 0);
		return;
	}

	/*
	 * If DDP is not enabled and there is no pending socket buffer
	 * data, try to enable DDP.
	 */
	if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) {
		SOCKBUF_UNLOCK(sb);

		/*
		 * Wait for the card to ACK that DDP is enabled before
		 * queueing any buffers.  Currently this waits for an
		 * indicate to arrive.  This could use a TCB_SET_FIELD_RPL
		 * message to know that DDP was enabled instead of waiting
		 * for the indicate which would avoid copying the indicate
		 * if no data is pending.
		 *
		 * XXX: Might want to limit the indicate size to the size
		 * of the first queued request.
		 */
		if ((toep->ddp.flags & DDP_SC_REQ) == 0)
			enable_ddp(sc, toep);
		return;
	}
	SOCKBUF_UNLOCK(sb);

	/*
	 * If another thread is queueing a buffer for DDP, let it
	 * drain any work and return.
	 */
	if (toep->ddp.queueing != NULL)
		return;

	/* Take the next job to prep it for DDP. */
	toep->ddp.waiting_count--;
	TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
	if (!aio_clear_cancel_function(job))
		goto restart;
	toep->ddp.queueing = job;

	/* NB: This drops DDP_LOCK while it holds the backing VM pages. */
	error = hold_aio(toep, job, &ps);
	if (error != 0) {
		ddp_complete_one(job, error);
		toep->ddp.queueing = NULL;
		goto restart;
	}

	SOCKBUF_LOCK(sb);
	if (so->so_error && sbavail(sb) == 0) {
		copied = job->aio_received;
		if (copied != 0) {
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_complete(job, copied, 0);
			toep->ddp.queueing = NULL;
			goto restart;
		}

		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		recycle_pageset(toep, ps);
		aio_complete(job, -1, error);
		toep->ddp.queueing = NULL;
		goto restart;
	}

	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
		SOCKBUF_UNLOCK(sb);
		recycle_pageset(toep, ps);
		if (toep->ddp.active_count != 0) {
			/*
			 * The door is closed, but there are still pending
			 * DDP buffers.  Requeue.  These jobs will all be
			 * completed once those buffers drain.
			 */
			aio_ddp_requeue_one(toep, job);
			toep->ddp.queueing = NULL;
			return;
		}
		ddp_complete_one(job, 0);
		ddp_complete_all(toep, 0);
		toep->ddp.queueing = NULL;
		return;
	}

sbcopy:
	/*
	 * If the toep is dead, there shouldn't be any data in the socket
	 * buffer, so the above case should have handled this.
	 */
	MPASS(!(toep->ddp.flags & DDP_DEAD));

	/*
	 * If there is pending data in the socket buffer (either
	 * from before the requests were queued or a DDP indicate),
	 * copy those mbufs out directly.
	 */
	copied = 0;
	offset = ps->offset + job->aio_received;
	MPASS(job->aio_received <= job->uaiocb.aio_nbytes);
	resid = job->uaiocb.aio_nbytes - job->aio_received;
	m = sb->sb_mb;
	KASSERT(m == NULL || toep->ddp.active_count == 0,
	    ("%s: sockbuf data with active DDP", __func__));
	while (m != NULL && resid > 0) {
		struct iovec iov[1];
		struct uio uio;
		int error;

		iov[0].iov_base = mtod(m, void *);
		iov[0].iov_len = m->m_len;
		if (iov[0].iov_len > resid)
			iov[0].iov_len = resid;
		uio.uio_iov = iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = 0;
		uio.uio_resid = iov[0].iov_len;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_WRITE;
		error = uiomove_fromphys(ps->pages, offset + copied,
		    uio.uio_resid, &uio);
		MPASS(error == 0 && uio.uio_resid == 0);
		copied += uio.uio_offset;
		resid -= uio.uio_offset;
		m = m->m_next;
	}
	if (copied != 0) {
		sbdrop_locked(sb, copied);
		job->aio_received += copied;
		job->msgrcv = 1;
		copied = job->aio_received;
		inp = sotoinpcb(so);
		if (!INP_TRY_WLOCK(inp)) {
			/*
			 * The reference on the socket file descriptor in
			 * the AIO job should keep 'sb' and 'inp' stable.
			 * Our caller has a reference on the 'toep' that
			 * keeps it stable.
			 */
			SOCKBUF_UNLOCK(sb);
			DDP_UNLOCK(toep);
			INP_WLOCK(inp);
			DDP_LOCK(toep);
			SOCKBUF_LOCK(sb);

			/*
			 * If the socket has been closed, we should detect
			 * that and complete this request if needed on
			 * the next trip around the loop.
			 */
		}
		t4_rcvd_locked(&toep->td->tod, intotcpcb(inp));
		INP_WUNLOCK(inp);
		if (resid == 0 || toep->ddp.flags & DDP_DEAD) {
			/*
			 * We filled the entire buffer with socket
			 * data, DDP is not being used, or the socket
			 * is being shut down, so complete the
			 * request.
			 */
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_complete(job, copied, 0);
			toep->ddp.queueing = NULL;
			goto restart;
		}

		/*
		 * If DDP is not enabled, requeue this request and restart.
		 * This will either enable DDP or wait for more data to
		 * arrive on the socket buffer.
		 */
		if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) {
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_ddp_requeue_one(toep, job);
			toep->ddp.queueing = NULL;
			goto restart;
		}

		/*
		 * An indicate might have arrived and been added to
		 * the socket buffer while it was unlocked after the
		 * copy to lock the INP.  If so, restart the copy.
		 */
		if (sbavail(sb) != 0)
			goto sbcopy;
	}
	SOCKBUF_UNLOCK(sb);

	if (prep_pageset(sc, toep, ps) == 0) {
		recycle_pageset(toep, ps);
		aio_ddp_requeue_one(toep, job);
		toep->ddp.queueing = NULL;

		/*
		 * XXX: Need to retry this later.  Mostly need a trigger
		 * when page pods are freed up.
		 */
		printf("%s: prep_pageset failed\n", __func__);
		return;
	}

	/* Determine which DDP buffer to use. */
	if (toep->ddp.db[0].job == NULL) {
		db_idx = 0;
	} else {
		MPASS(toep->ddp.db[1].job == NULL);
		db_idx = 1;
	}

	ddp_flags = 0;
	ddp_flags_mask = 0;
	if (db_idx == 0) {
		ddp_flags |= V_TF_DDP_BUF0_VALID(1);
		if (so->so_state & SS_NBIO)
			ddp_flags |= V_TF_DDP_BUF0_FLUSH(1);
		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) |
		    V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1);
		buf_flag = DDP_BUF0_ACTIVE;
	} else {
		ddp_flags |= V_TF_DDP_BUF1_VALID(1);
		if (so->so_state & SS_NBIO)
			ddp_flags |= V_TF_DDP_BUF1_FLUSH(1);
		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) |
		    V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) |
		    V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1);
		buf_flag = DDP_BUF1_ACTIVE;
	}
	MPASS((toep->ddp.flags & buf_flag) == 0);
	if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) {
		MPASS(db_idx == 0);
		MPASS(toep->ddp.active_id == -1);
		MPASS(toep->ddp.active_count == 0);
		ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1);
	}

	/*
	 * The TID for this connection should still be valid.  If DDP_DEAD
	 * is set, SBS_CANTRCVMORE should be set, so we shouldn't be
	 * this far anyway.  Even if the socket is closing on the other
	 * end, the AIO job holds a reference on this end of the socket
	 * which will keep it open and keep the TCP PCB attached until
	 * after the job is completed.
	 */
	wr = mk_update_tcb_for_ddp(sc, toep, db_idx, ps, job->aio_received,
	    ddp_flags, ddp_flags_mask);
	if (wr == NULL) {
		recycle_pageset(toep, ps);
		aio_ddp_requeue_one(toep, job);
		toep->ddp.queueing = NULL;

		/*
		 * XXX: Need a way to kick a retry here.
		 *
		 * XXX: We know the fixed size needed and could
		 * preallocate this using a blocking request at the
		 * start of the task to avoid having to handle this
		 * edge case.
		 */
		printf("%s: mk_update_tcb_for_ddp failed\n", __func__);
		return;
	}

	if (!aio_set_cancel_function(job, t4_aio_cancel_active)) {
		free_wrqe(wr);
		recycle_pageset(toep, ps);
		aio_ddp_cancel_one(job);
		toep->ddp.queueing = NULL;
		goto restart;
	}

#ifdef VERBOSE_TRACES
	CTR6(KTR_CXGBE,
	    "%s: tid %u, scheduling %p for DDP[%d] (flags %#lx/%#lx)", __func__,
	    toep->tid, job, db_idx, ddp_flags, ddp_flags_mask);
#endif
	/* Give the chip the go-ahead. */
	t4_wrq_tx(sc, wr);
	db = &toep->ddp.db[db_idx];
	db->cancel_pending = 0;
	db->job = job;
	db->ps = ps;
	toep->ddp.queueing = NULL;
	toep->ddp.flags |= buf_flag;
	toep->ddp.active_count++;
	if (toep->ddp.active_count == 1) {
		MPASS(toep->ddp.active_id == -1);
		toep->ddp.active_id = db_idx;
		CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
		    toep->ddp.active_id);
	}
	goto restart;
}

void
ddp_queue_toep(struct toepcb *toep)
{

	DDP_ASSERT_LOCKED(toep);
	if (toep->ddp.flags & DDP_TASK_ACTIVE)
		return;
	toep->ddp.flags |= DDP_TASK_ACTIVE;
	hold_toepcb(toep);
	soaio_enqueue(&toep->ddp.requeue_task);
}

static void
aio_ddp_requeue_task(void *context, int pending)
{
	struct toepcb *toep = context;

	DDP_LOCK(toep);
	aio_ddp_requeue(toep);
	toep->ddp.flags &= ~DDP_TASK_ACTIVE;
	DDP_UNLOCK(toep);

	free_toepcb(toep);
}

static void
t4_aio_cancel_active(struct kaiocb *job)
{
	struct socket *so = job->fd_file->f_data;
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);
	uint64_t valid_flag;
	int i;

	DDP_LOCK(toep);
	if (aio_cancel_cleared(job)) {
		DDP_UNLOCK(toep);
		aio_ddp_cancel_one(job);
		return;
	}

	for (i = 0; i < nitems(toep->ddp.db); i++) {
		if (toep->ddp.db[i].job == job) {
			/* Should only ever get one cancel request for a job. */
			MPASS(toep->ddp.db[i].cancel_pending == 0);

			/*
			 * Invalidate this buffer.  It will be
			 * cancelled or partially completed once the
			 * card ACKs the invalidate.
			 */
			valid_flag = i == 0 ? V_TF_DDP_BUF0_VALID(1) :
			    V_TF_DDP_BUF1_VALID(1);
			t4_set_tcb_field(sc, toep->ctrlq, toep,
			    W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1,
			    i + DDP_BUF0_INVALIDATED);
			toep->ddp.db[i].cancel_pending = 1;
			CTR2(KTR_CXGBE, "%s: request %p marked pending",
			    __func__, job);
			break;
		}
	}
	DDP_UNLOCK(toep);
}
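
/*
 * The two cancel callbacks differ in how far the job has progressed:
 * t4_aio_cancel_active (above) is used once a buffer has been handed to
 * the hardware, so it only clears the buffer's valid flag in the TCB and
 * leaves the final cancel or short-read completion to do_ddp_tcb_rpl()
 * when the card ACKs the invalidate.  t4_aio_cancel_queued (below) is
 * used while the job is still on the software queue, so it can be removed
 * and completed immediately.
 */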

static void
t4_aio_cancel_queued(struct kaiocb *job)
{
	struct socket *so = job->fd_file->f_data;
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;

	DDP_LOCK(toep);
	if (!aio_cancel_cleared(job)) {
		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
		toep->ddp.waiting_count--;
		if (toep->ddp.waiting_count == 0)
			ddp_queue_toep(toep);
	}
	CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job);
	DDP_UNLOCK(toep);

	aio_ddp_cancel_one(job);
}

int
t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;

	/* Ignore writes. */
	if (job->uaiocb.aio_lio_opcode != LIO_READ)
		return (EOPNOTSUPP);

	DDP_LOCK(toep);

	/*
	 * XXX: Think about possibly returning errors for ENOTCONN,
	 * etc.  Perhaps the caller would only queue the request
	 * if it failed with EOPNOTSUPP?
	 */

#ifdef VERBOSE_TRACES
	CTR3(KTR_CXGBE, "%s: queueing %p for tid %u", __func__, job, toep->tid);
#endif
	if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list);
	toep->ddp.waiting_count++;
	toep->ddp.flags |= DDP_OK;

	/*
	 * Try to handle this request synchronously.  If this has
	 * to block because the task is running, it will just bail
	 * and let the task handle it instead.
	 */
	aio_ddp_requeue(toep);
	DDP_UNLOCK(toep);
	return (0);
}

void
t4_ddp_mod_load(void)
{

	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
	    CPL_COOKIE_DDP0);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
	    CPL_COOKIE_DDP1);
	t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp);
	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
	TAILQ_INIT(&ddp_orphan_pagesets);
	mtx_init(&ddp_orphan_pagesets_lock, "ddp orphans", NULL, MTX_DEF);
	TASK_INIT(&ddp_orphan_task, 0, ddp_free_orphan_pagesets, NULL);
}

void
t4_ddp_mod_unload(void)
{

	taskqueue_drain(taskqueue_thread, &ddp_orphan_task);
	MPASS(TAILQ_EMPTY(&ddp_orphan_pagesets));
	mtx_destroy(&ddp_orphan_pagesets_lock);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP0);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP1);
	t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL);
	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL);
}
#endif