/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/aio.h>
#include <sys/file.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/proc.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/taskqueue.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/tcp_var.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/toecore.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>

#ifdef TCP_OFFLOAD
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"

/*
 * Use the 'backend3' field in AIO jobs to store the amount of data
 * received by the AIO job so far.
 */
#define aio_received backend3

static void aio_ddp_requeue_task(void *context, int pending);
static void ddp_complete_all(struct toepcb *toep, int error);
static void t4_aio_cancel_active(struct kaiocb *job);
static void t4_aio_cancel_queued(struct kaiocb *job);

static TAILQ_HEAD(, pageset) ddp_orphan_pagesets;
static struct mtx ddp_orphan_pagesets_lock;
static struct task ddp_orphan_task;

#define MAX_DDP_BUFFER_SIZE	(M_TCB_RX_DDP_BUF0_LEN)

/*
 * A page set holds information about a buffer used for DDP.  The page
 * set holds resources such as the VM pages backing the buffer (either
 * held or wired) and the page pods associated with the buffer.
 * Recently used page sets are cached to allow for efficient reuse of
 * buffers (avoiding the need to re-fault in pages, hold them, etc.).
 * Note that cached page sets keep the backing pages wired.
 * The number of wired pages is capped by only allowing for two wired
 * pagesets per connection.  This is not a perfect cap, but is a
 * trade-off for performance.
 *
 * If an application ping-pongs two buffers for a connection via
 * aio_read(2) then those buffers should remain wired and expensive VM
 * fault lookups should be avoided after each buffer has been used
 * once.  If an application uses more than two buffers then this will
 * fall back to doing expensive VM fault lookups for each operation.
 */
static void
free_pageset(struct tom_data *td, struct pageset *ps)
{
	vm_page_t p;
	int i;

	if (ps->prsv.prsv_nppods > 0)
		t4_free_page_pods(&ps->prsv);

	if (ps->flags & PS_WIRED) {
		for (i = 0; i < ps->npages; i++) {
			p = ps->pages[i];
			vm_page_lock(p);
			vm_page_unwire(p, PQ_INACTIVE);
			vm_page_unlock(p);
		}
	} else
		vm_page_unhold_pages(ps->pages, ps->npages);
	mtx_lock(&ddp_orphan_pagesets_lock);
	TAILQ_INSERT_TAIL(&ddp_orphan_pagesets, ps, link);
	taskqueue_enqueue(taskqueue_thread, &ddp_orphan_task);
	mtx_unlock(&ddp_orphan_pagesets_lock);
}

static void
ddp_free_orphan_pagesets(void *context, int pending)
{
	struct pageset *ps;

	mtx_lock(&ddp_orphan_pagesets_lock);
	while (!TAILQ_EMPTY(&ddp_orphan_pagesets)) {
		ps = TAILQ_FIRST(&ddp_orphan_pagesets);
		TAILQ_REMOVE(&ddp_orphan_pagesets, ps, link);
		mtx_unlock(&ddp_orphan_pagesets_lock);
		if (ps->vm)
			vmspace_free(ps->vm);
		free(ps, M_CXGBE);
		mtx_lock(&ddp_orphan_pagesets_lock);
	}
	mtx_unlock(&ddp_orphan_pagesets_lock);
}

static void
recycle_pageset(struct toepcb *toep, struct pageset *ps)
{

	DDP_ASSERT_LOCKED(toep);
	if (!(toep->ddp.flags & DDP_DEAD) && ps->flags & PS_WIRED) {
		KASSERT(toep->ddp.cached_count + toep->ddp.active_count <
		    nitems(toep->ddp.db), ("too many wired pagesets"));
		TAILQ_INSERT_HEAD(&toep->ddp.cached_pagesets, ps, link);
		toep->ddp.cached_count++;
	} else
		free_pageset(toep->td, ps);
}

static void
ddp_complete_one(struct kaiocb *job, int error)
{
	long copied;

	/*
	 * If this job had copied data out of the socket buffer before
	 * it was cancelled, report it as a short read rather than an
	 * error.
	 */
	copied = job->aio_received;
	if (copied != 0 || error == 0)
		aio_complete(job, copied, 0);
	else
		aio_complete(job, -1, error);
}

static void
free_ddp_buffer(struct tom_data *td, struct ddp_buffer *db)
{

	if (db->job) {
		/*
		 * XXX: If we are un-offloading the socket then we
		 * should requeue these on the socket somehow.  If we
		 * got a FIN from the remote end, then this completes
		 * any remaining requests with an EOF read.
		 */
		if (!aio_clear_cancel_function(db->job))
			ddp_complete_one(db->job, 0);
	}

	if (db->ps)
		free_pageset(td, db->ps);
}

void
ddp_init_toep(struct toepcb *toep)
{

	TAILQ_INIT(&toep->ddp.aiojobq);
	TASK_INIT(&toep->ddp.requeue_task, 0, aio_ddp_requeue_task, toep);
	toep->ddp.flags = DDP_OK;
	toep->ddp.active_id = -1;
	mtx_init(&toep->ddp.lock, "t4 ddp", NULL, MTX_DEF);
}

void
ddp_uninit_toep(struct toepcb *toep)
{

	mtx_destroy(&toep->ddp.lock);
}

void
release_ddp_resources(struct toepcb *toep)
{
	struct pageset *ps;
	int i;

	DDP_LOCK(toep);
	toep->ddp.flags |= DDP_DEAD;
	for (i = 0; i < nitems(toep->ddp.db); i++) {
		free_ddp_buffer(toep->td, &toep->ddp.db[i]);
	}
	while ((ps = TAILQ_FIRST(&toep->ddp.cached_pagesets)) != NULL) {
		TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
		free_pageset(toep->td, ps);
	}
	ddp_complete_all(toep, 0);
	DDP_UNLOCK(toep);
}

#ifdef INVARIANTS
void
ddp_assert_empty(struct toepcb *toep)
{
	int i;

	MPASS(!(toep->ddp.flags & DDP_TASK_ACTIVE));
	for (i = 0; i < nitems(toep->ddp.db); i++) {
		MPASS(toep->ddp.db[i].job == NULL);
		MPASS(toep->ddp.db[i].ps == NULL);
	}
	MPASS(TAILQ_EMPTY(&toep->ddp.cached_pagesets));
	MPASS(TAILQ_EMPTY(&toep->ddp.aiojobq));
}
#endif

static void
complete_ddp_buffer(struct toepcb *toep, struct ddp_buffer *db,
    unsigned int db_idx)
{
	unsigned int db_flag;

	toep->ddp.active_count--;
	if (toep->ddp.active_id == db_idx) {
		if (toep->ddp.active_count == 0) {
			KASSERT(toep->ddp.db[db_idx ^ 1].job == NULL,
			    ("%s: active_count mismatch", __func__));
			toep->ddp.active_id = -1;
		} else
			toep->ddp.active_id ^= 1;
#ifdef VERBOSE_TRACES
		CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
		    toep->ddp.active_id);
#endif
	} else {
		KASSERT(toep->ddp.active_count != 0 &&
		    toep->ddp.active_id != -1,
		    ("%s: active count mismatch", __func__));
	}

	db->cancel_pending = 0;
	db->job = NULL;
	recycle_pageset(toep, db->ps);
	db->ps = NULL;

	db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
	KASSERT(toep->ddp.flags & db_flag,
	    ("%s: DDP buffer not active. toep %p, ddp_flags 0x%x",
	    __func__, toep, toep->ddp.flags));
	toep->ddp.flags &= ~db_flag;
}

/* XXX: handle_ddp_data code duplication */
void
insert_ddp_data(struct toepcb *toep, uint32_t n)
{
	struct inpcb *inp = toep->inp;
	struct tcpcb *tp = intotcpcb(inp);
	struct ddp_buffer *db;
	struct kaiocb *job;
	size_t placed;
	long copied;
	unsigned int db_flag, db_idx;

	INP_WLOCK_ASSERT(inp);
	DDP_ASSERT_LOCKED(toep);

	tp->rcv_nxt += n;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= n, ("%s: negative window size", __func__));
	tp->rcv_wnd -= n;
#endif
	CTR2(KTR_CXGBE, "%s: placed %u bytes before falling out of DDP",
	    __func__, n);
	while (toep->ddp.active_count > 0) {
		MPASS(toep->ddp.active_id != -1);
		db_idx = toep->ddp.active_id;
		db_flag = db_idx == 1 ?
		    DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
		MPASS((toep->ddp.flags & db_flag) != 0);
		db = &toep->ddp.db[db_idx];
		job = db->job;
		copied = job->aio_received;
		placed = n;
		if (placed > job->uaiocb.aio_nbytes - copied)
			placed = job->uaiocb.aio_nbytes - copied;
		if (placed > 0)
			job->msgrcv = 1;
		if (!aio_clear_cancel_function(job)) {
			/*
			 * Update the copied length for when
			 * t4_aio_cancel_active() completes this
			 * request.
			 */
			job->aio_received += placed;
		} else if (copied + placed != 0) {
			CTR4(KTR_CXGBE,
			    "%s: completing %p (copied %ld, placed %lu)",
			    __func__, job, copied, placed);
			/* XXX: This always completes if there is some data. */
			aio_complete(job, copied + placed, 0);
		} else if (aio_set_cancel_function(job, t4_aio_cancel_queued)) {
			TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
			toep->ddp.waiting_count++;
		} else
			aio_cancel(job);
		n -= placed;
		complete_ddp_buffer(toep, db, db_idx);
	}

	MPASS(n == 0);
}

/* SET_TCB_FIELD sent as a ULP command looks like this */
#define LEN__SET_TCB_FIELD_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_set_tcb_field_core))

/* RX_DATA_ACK sent as a ULP command looks like this */
#define LEN__RX_DATA_ACK_ULP (sizeof(struct ulp_txpkt) + \
    sizeof(struct ulptx_idata) + sizeof(struct cpl_rx_data_ack_core))

static inline void *
mk_set_tcb_field_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep,
    uint64_t word, uint64_t mask, uint64_t val)
{
	struct ulptx_idata *ulpsc;
	struct cpl_set_tcb_field_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__SET_TCB_FIELD_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_set_tcb_field_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_SET_TCB_FIELD, toep->tid));
	req->reply_ctrl = htobe16(V_NO_REPLY(1) |
	    V_QUEUENO(toep->ofld_rxq->iq.abs_id));
	req->word_cookie = htobe16(V_WORD(word) | V_COOKIE(0));
	req->mask = htobe64(mask);
	req->val = htobe64(val);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__SET_TCB_FIELD_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

static inline void *
mk_rx_data_ack_ulp(struct ulp_txpkt *ulpmc, struct toepcb *toep)
{
	struct ulptx_idata *ulpsc;
	struct cpl_rx_data_ack_core *req;

	ulpmc->cmd_dest = htonl(V_ULPTX_CMD(ULP_TX_PKT) | V_ULP_TXPKT_DEST(0));
	ulpmc->len = htobe32(howmany(LEN__RX_DATA_ACK_ULP, 16));

	ulpsc = (struct ulptx_idata *)(ulpmc + 1);
	ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
	ulpsc->len = htobe32(sizeof(*req));

	req = (struct cpl_rx_data_ack_core *)(ulpsc + 1);
	OPCODE_TID(req) = htobe32(MK_OPCODE_TID(CPL_RX_DATA_ACK, toep->tid));
	req->credit_dack = htobe32(F_RX_MODULATE_RX);

	ulpsc = (struct ulptx_idata *)(req + 1);
	if (LEN__RX_DATA_ACK_ULP % 16) {
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_NOOP));
		ulpsc->len = htobe32(0);
		return (ulpsc + 1);
	}
	return (ulpsc);
}

static struct wrqe *
mk_update_tcb_for_ddp(struct adapter *sc, struct toepcb *toep, int db_idx,
    struct pageset *ps, int offset,
    uint64_t ddp_flags, uint64_t ddp_flags_mask)
{
	struct wrqe *wr;
	struct work_request_hdr *wrh;
	struct ulp_txpkt *ulpmc;
	int len;

	KASSERT(db_idx == 0 || db_idx == 1,
	    ("%s: bad DDP buffer index %d", __func__, db_idx));

	/*
	 * We'll send a compound work request that has 3 SET_TCB_FIELDs and an
	 * RX_DATA_ACK (with RX_MODULATE to speed up delivery).
	 *
	 * The work request header is 16B and always ends at a 16B boundary.
	 * The ULPTX master commands that follow must all end at 16B boundaries
	 * too so we round up the size to 16.
	 */
	len = sizeof(*wrh) + 3 * roundup2(LEN__SET_TCB_FIELD_ULP, 16) +
	    roundup2(LEN__RX_DATA_ACK_ULP, 16);

	wr = alloc_wrqe(len, toep->ctrlq);
	if (wr == NULL)
		return (NULL);
	wrh = wrtod(wr);
	INIT_ULPTX_WRH(wrh, len, 1, 0);	/* atomic */
	ulpmc = (struct ulp_txpkt *)(wrh + 1);

	/* Write the buffer's tag */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
	    W_TCB_RX_DDP_BUF0_TAG + db_idx,
	    V_TCB_RX_DDP_BUF0_TAG(M_TCB_RX_DDP_BUF0_TAG),
	    V_TCB_RX_DDP_BUF0_TAG(ps->prsv.prsv_tag));

	/* Update the current offset in the DDP buffer and its total length */
	if (db_idx == 0)
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF0_OFFSET,
		    V_TCB_RX_DDP_BUF0_OFFSET(M_TCB_RX_DDP_BUF0_OFFSET) |
		    V_TCB_RX_DDP_BUF0_LEN(M_TCB_RX_DDP_BUF0_LEN),
		    V_TCB_RX_DDP_BUF0_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF0_LEN(ps->len));
	else
		ulpmc = mk_set_tcb_field_ulp(ulpmc, toep,
		    W_TCB_RX_DDP_BUF1_OFFSET,
		    V_TCB_RX_DDP_BUF1_OFFSET(M_TCB_RX_DDP_BUF1_OFFSET) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)M_TCB_RX_DDP_BUF1_LEN << 32),
		    V_TCB_RX_DDP_BUF1_OFFSET(offset) |
		    V_TCB_RX_DDP_BUF1_LEN((u64)ps->len << 32));

	/* Update DDP flags */
	ulpmc = mk_set_tcb_field_ulp(ulpmc, toep, W_TCB_RX_DDP_FLAGS,
	    ddp_flags_mask, ddp_flags);

	/* Gratuitous RX_DATA_ACK with RX_MODULATE set to speed up delivery. */
	ulpmc = mk_rx_data_ack_ulp(ulpmc, toep);

	return (wr);
}

static int
handle_ddp_data(struct toepcb *toep, __be32 ddp_report, __be32 rcv_nxt, int len)
{
	uint32_t report = be32toh(ddp_report);
	unsigned int db_idx;
	struct inpcb *inp = toep->inp;
	struct ddp_buffer *db;
	struct tcpcb *tp;
	struct socket *so;
	struct sockbuf *sb;
	struct kaiocb *job;
	long copied;

	db_idx = report & F_DDP_BUF_IDX ? 1 : 0;

	if (__predict_false(!(report & F_DDP_INV)))
		CXGBE_UNIMPLEMENTED("DDP buffer still valid");

	INP_WLOCK(inp);
	so = inp_inpcbtosocket(inp);
	sb = &so->so_rcv;
	DDP_LOCK(toep);

	KASSERT(toep->ddp.active_id == db_idx,
	    ("completed DDP buffer (%d) != active_id (%d) for tid %d", db_idx,
	    toep->ddp.active_id, toep->tid));
	db = &toep->ddp.db[db_idx];
	job = db->job;

	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
		/*
		 * This can happen due to an administrative tcpdrop(8).
		 * Just fail the request with ECONNRESET.
		 */
		CTR5(KTR_CXGBE, "%s: tid %u, seq 0x%x, len %d, inp_flags 0x%x",
		    __func__, toep->tid, be32toh(rcv_nxt), len, inp->inp_flags);
		if (aio_clear_cancel_function(job))
			ddp_complete_one(job, ECONNRESET);
		goto completed;
	}

	tp = intotcpcb(inp);

	/*
	 * For RX_DDP_COMPLETE, len will be zero and rcv_nxt is the
	 * sequence number of the next byte to receive.
	 * The length of the data received for this message must be
	 * computed by comparing the new and old values of rcv_nxt.
	 *
	 * For RX_DATA_DDP, len might be non-zero, but it is only the
	 * length of the most recent DMA.  It does not include the
	 * total length of the data received since the previous update
	 * for this DDP buffer.  rcv_nxt is the sequence number of the
	 * first received byte from the most recent DMA.
	 */
	len += be32toh(rcv_nxt) - tp->rcv_nxt;
	tp->rcv_nxt += len;
	tp->t_rcvtime = ticks;
#ifndef USE_DDP_RX_FLOW_CONTROL
	KASSERT(tp->rcv_wnd >= len, ("%s: negative window size", __func__));
	tp->rcv_wnd -= len;
#endif
#ifdef VERBOSE_TRACES
	CTR4(KTR_CXGBE, "%s: DDP[%d] placed %d bytes (%#x)", __func__, db_idx,
	    len, report);
#endif

	/* receive buffer autosize */
	MPASS(toep->vnet == so->so_vnet);
	CURVNET_SET(toep->vnet);
	SOCKBUF_LOCK(sb);
	if (sb->sb_flags & SB_AUTOSIZE &&
	    V_tcp_do_autorcvbuf &&
	    sb->sb_hiwat < V_tcp_autorcvbuf_max &&
	    len > (sbspace(sb) / 8 * 7)) {
		struct adapter *sc = td_adapter(toep->td);
		unsigned int hiwat = sb->sb_hiwat;
		unsigned int newsize = min(hiwat + sc->tt.autorcvbuf_inc,
		    V_tcp_autorcvbuf_max);

		if (!sbreserve_locked(sb, newsize, so, NULL))
			sb->sb_flags &= ~SB_AUTOSIZE;
	}
	SOCKBUF_UNLOCK(sb);
	CURVNET_RESTORE();

	job->msgrcv = 1;
	if (db->cancel_pending) {
		/*
		 * Update the job's length but defer completion to the
		 * TCB_RPL callback.
		 */
		job->aio_received += len;
		goto out;
	} else if (!aio_clear_cancel_function(job)) {
		/*
		 * Update the copied length for when
		 * t4_aio_cancel_active() completes this request.
		 */
		job->aio_received += len;
	} else {
		copied = job->aio_received;
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE, "%s: completing %p (copied %ld, placed %d)",
		    __func__, job, copied, len);
#endif
		aio_complete(job, copied + len, 0);
		t4_rcvd(&toep->td->tod, tp);
	}

completed:
	complete_ddp_buffer(toep, db, db_idx);
	if (toep->ddp.waiting_count > 0)
		ddp_queue_toep(toep);
out:
	DDP_UNLOCK(toep);
	INP_WUNLOCK(inp);

	return (0);
}

void
handle_ddp_indicate(struct toepcb *toep)
{

	DDP_ASSERT_LOCKED(toep);
	MPASS(toep->ddp.active_count == 0);
	MPASS((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0);
	if (toep->ddp.waiting_count == 0) {
		/*
		 * The pending requests that triggered the request for an
		 * indicate were cancelled.  Those cancels should have
		 * already disabled DDP.  Just ignore this as the data is
		 * going into the socket buffer anyway.
		 */
		return;
	}
	CTR3(KTR_CXGBE, "%s: tid %d indicated (%d waiting)", __func__,
	    toep->tid, toep->ddp.waiting_count);
	ddp_queue_toep(toep);
}

enum {
	DDP_BUF0_INVALIDATED = 0x2,
	DDP_BUF1_INVALIDATED
};

CTASSERT(DDP_BUF0_INVALIDATED == CPL_COOKIE_DDP0);

static int
do_ddp_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_set_tcb_rpl *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	unsigned int db_idx;
	struct toepcb *toep;
	struct inpcb *inp;
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;

	if (cpl->status != CPL_ERR_NONE)
		panic("XXX: tcp_rpl failed: %d", cpl->status);

	toep = lookup_tid(sc, tid);
	inp = toep->inp;
	switch (cpl->cookie) {
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(DDP_BUF0_INVALIDATED):
	case V_WORD(W_TCB_RX_DDP_FLAGS) | V_COOKIE(DDP_BUF1_INVALIDATED):
		/*
		 * XXX: This duplicates a lot of code with handle_ddp_data().
		 */
		db_idx = G_COOKIE(cpl->cookie) - DDP_BUF0_INVALIDATED;
		MPASS(db_idx < nitems(toep->ddp.db));
		INP_WLOCK(inp);
		DDP_LOCK(toep);
		db = &toep->ddp.db[db_idx];

		/*
		 * handle_ddp_data() should leave the job around until
		 * this callback runs once a cancel is pending.
		 */
		MPASS(db != NULL);
		MPASS(db->job != NULL);
		MPASS(db->cancel_pending);

		/*
		 * XXX: It's not clear what happens if there is data
		 * placed when the buffer is invalidated.  I suspect we
		 * need to read the TCB to see how much data was placed.
		 *
		 * For now this just pretends like nothing was placed.
		 *
		 * XXX: Note that if we did check the PCB we would need to
		 * also take care of updating the tp, etc.
		 */
		job = db->job;
		copied = job->aio_received;
		if (copied == 0) {
			CTR2(KTR_CXGBE, "%s: cancelling %p", __func__, job);
			aio_cancel(job);
		} else {
			CTR3(KTR_CXGBE, "%s: completing %p (copied %ld)",
			    __func__, job, copied);
			aio_complete(job, copied, 0);
			t4_rcvd(&toep->td->tod, intotcpcb(inp));
		}

		complete_ddp_buffer(toep, db, db_idx);
		if (toep->ddp.waiting_count > 0)
			ddp_queue_toep(toep);
		DDP_UNLOCK(toep);
		INP_WUNLOCK(inp);
		break;
	default:
		panic("XXX: unknown tcb_rpl offset %#x, cookie %#x",
		    G_WORD(cpl->cookie), G_COOKIE(cpl->cookie));
	}

	return (0);
}

void
handle_ddp_close(struct toepcb *toep, struct tcpcb *tp, __be32 rcv_nxt)
{
	struct ddp_buffer *db;
	struct kaiocb *job;
	long copied;
	unsigned int db_flag, db_idx;
	int len, placed;

	INP_WLOCK_ASSERT(toep->inp);
	DDP_ASSERT_LOCKED(toep);

	len = be32toh(rcv_nxt) - tp->rcv_nxt;
	tp->rcv_nxt += len;

	while (toep->ddp.active_count > 0) {
		MPASS(toep->ddp.active_id != -1);
		db_idx = toep->ddp.active_id;
		db_flag = db_idx == 1 ? DDP_BUF1_ACTIVE : DDP_BUF0_ACTIVE;
		MPASS((toep->ddp.flags & db_flag) != 0);
		db = &toep->ddp.db[db_idx];
		job = db->job;
		copied = job->aio_received;
		placed = len;
		if (placed > job->uaiocb.aio_nbytes - copied)
			placed = job->uaiocb.aio_nbytes - copied;
		if (placed > 0)
			job->msgrcv = 1;
		if (!aio_clear_cancel_function(job)) {
			/*
			 * Update the copied length for when
			 * t4_aio_cancel_active() completes this
			 * request.
			 */
			job->aio_received += placed;
		} else {
			CTR4(KTR_CXGBE, "%s: tid %d completed buf %d len %d",
			    __func__, toep->tid, db_idx, placed);
			aio_complete(job, copied + placed, 0);
		}
		len -= placed;
		complete_ddp_buffer(toep, db, db_idx);
	}

	MPASS(len == 0);
	ddp_complete_all(toep, 0);
}

#define DDP_ERR (F_DDP_PPOD_MISMATCH | F_DDP_LLIMIT_ERR | F_DDP_ULIMIT_ERR |\
	F_DDP_PPOD_PARITY_ERR | F_DDP_PADDING_ERR | F_DDP_OFFSET_ERR |\
	F_DDP_INVALID_TAG | F_DDP_COLOR_ERR | F_DDP_TID_MISMATCH |\
	F_DDP_INVALID_PPOD | F_DDP_HDRCRC_ERR | F_DDP_DATACRC_ERR)

extern cpl_handler_t t4_cpl_handler[];

static int
do_rx_data_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	uint32_t vld;
	struct toepcb *toep = lookup_tid(sc, tid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	vld = be32toh(cpl->ddpvld);
	if (__predict_false(vld & DDP_ERR)) {
		panic("%s: DDP error 0x%x (tid %d, toep %p)",
		    __func__, vld, tid, toep);
	}

	if (toep->ulp_mode == ULP_MODE_ISCSI) {
		t4_cpl_handler[CPL_RX_ISCSI_DDP](iq, rss, m);
		return (0);
	}

	handle_ddp_data(toep, cpl->u.ddp_report, cpl->seq, be16toh(cpl->len));

	return (0);
}

static int
do_rx_ddp_complete(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rx_ddp_complete *cpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == tid, ("%s: toep tid/atid mismatch", __func__));
	KASSERT(!(toep->flags & TPF_SYNQE),
	    ("%s: toep %p claims to be a synq entry", __func__, toep));

	handle_ddp_data(toep, cpl->ddp_report, cpl->rcv_nxt, 0);

	return (0);
}

static void
enable_ddp(struct adapter *sc, struct toepcb *toep)
{

	KASSERT((toep->ddp.flags & (DDP_ON | DDP_OK | DDP_SC_REQ)) == DDP_OK,
	    ("%s: toep %p has bad ddp_flags 0x%x",
	    __func__, toep, toep->ddp.flags));

	CTR3(KTR_CXGBE, "%s: tid %u (time %u)",
	    __func__, toep->tid, time_uptime);

	DDP_ASSERT_LOCKED(toep);
	toep->ddp.flags |= DDP_SC_REQ;
	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_RX_DDP_FLAGS,
	    V_TF_DDP_OFF(1) | V_TF_DDP_INDICATE_OUT(1) |
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1) |
	    V_TF_DDP_BUF0_VALID(1) | V_TF_DDP_BUF1_VALID(1),
	    V_TF_DDP_BUF0_INDICATE(1) | V_TF_DDP_BUF1_INDICATE(1), 0, 0);
	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS,
	    V_TF_RCV_COALESCE_ENABLE(1), 0, 0, 0);
}

static int
calculate_hcf(int n1, int n2)
{
	int a, b, t;

	if (n1 <= n2) {
		a = n1;
		b = n2;
	} else {
		a = n2;
		b = n1;
	}

	while (a != 0) {
		t = a;
		a = b % a;
		b = t;
	}

	return (b);
}

static inline int
pages_to_nppods(int npages, int ddp_page_shift)
{

	MPASS(ddp_page_shift >= PAGE_SHIFT);

	return (howmany(npages >> (ddp_page_shift - PAGE_SHIFT), PPOD_PAGES));
}

static int
alloc_page_pods(struct ppod_region *pr, u_int nppods, u_int pgsz_idx,
    struct ppod_reservation *prsv)
{
	vmem_addr_t addr;	/* relative to start of region */

	if (vmem_alloc(pr->pr_arena, PPOD_SZ(nppods), M_NOWAIT | M_FIRSTFIT,
	    &addr) != 0)
		return (ENOMEM);

	CTR5(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d, pgsz %d",
	    __func__, pr->pr_arena, (uint32_t)addr & pr->pr_tag_mask,
	    nppods, 1 << pr->pr_page_shift[pgsz_idx]);

	/*
	 * The hardware tagmask includes an extra invalid bit but the arena was
	 * seeded with valid values only.  An allocation out of this arena will
	 * fit inside the tagmask but won't have the invalid bit set.
	 */
	MPASS((addr & pr->pr_tag_mask) == addr);
	MPASS((addr & pr->pr_invalid_bit) == 0);

	prsv->prsv_pr = pr;
	prsv->prsv_tag = V_PPOD_PGSZ(pgsz_idx) | addr;
	prsv->prsv_nppods = nppods;

	return (0);
}

int
t4_alloc_page_pods_for_ps(struct ppod_region *pr, struct pageset *ps)
{
	int i, hcf, seglen, idx, nppods;
	struct ppod_reservation *prsv = &ps->prsv;

	KASSERT(prsv->prsv_nppods == 0,
	    ("%s: page pods already allocated", __func__));

	/*
	 * The DDP page size is unrelated to the VM page size.  We combine
	 * contiguous physical pages into larger segments to get the best DDP
	 * page size possible.  This is the largest of the four sizes in
	 * A_ULP_RX_TDDP_PSZ that evenly divides the HCF of the segment sizes in
	 * the page list.
	 */
	hcf = 0;
	for (i = 0; i < ps->npages; i++) {
		seglen = PAGE_SIZE;
		while (i < ps->npages - 1 &&
		    ps->pages[i]->phys_addr + PAGE_SIZE ==
		    ps->pages[i + 1]->phys_addr) {
			seglen += PAGE_SIZE;
			i++;
		}

		hcf = calculate_hcf(hcf, seglen);
		if (hcf < (1 << pr->pr_page_shift[1])) {
			idx = 0;
			goto have_pgsz;	/* give up, short circuit */
		}
	}

#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
		if ((hcf & PR_PAGE_MASK(idx)) == 0)
			break;
	}
#undef PR_PAGE_MASK

have_pgsz:
	MPASS(idx <= M_PPOD_PGSZ);

	nppods = pages_to_nppods(ps->npages, pr->pr_page_shift[idx]);
	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
		return (0);
	MPASS(prsv->prsv_nppods > 0);

	return (1);
}

int
t4_alloc_page_pods_for_buf(struct ppod_region *pr, vm_offset_t buf, int len,
    struct ppod_reservation *prsv)
{
	int hcf, seglen, idx, npages, nppods;
	uintptr_t start_pva, end_pva, pva, p1;

	MPASS(buf > 0);
	MPASS(len > 0);

	/*
	 * The DDP page size is unrelated to the VM page size.  We combine
	 * contiguous physical pages into larger segments to get the best DDP
	 * page size possible.  This is the largest of the four sizes in
	 * A_ULP_RX_ISCSI_PSZ that evenly divides the HCF of the segment sizes
	 * in the page list.
	 */
	hcf = 0;
	start_pva = trunc_page(buf);
	end_pva = trunc_page(buf + len - 1);
	pva = start_pva;
	while (pva <= end_pva) {
		seglen = PAGE_SIZE;
		p1 = pmap_kextract(pva);
		pva += PAGE_SIZE;
		while (pva <= end_pva && p1 + seglen == pmap_kextract(pva)) {
			seglen += PAGE_SIZE;
			pva += PAGE_SIZE;
		}

		hcf = calculate_hcf(hcf, seglen);
		if (hcf < (1 << pr->pr_page_shift[1])) {
			idx = 0;
			goto have_pgsz;	/* give up, short circuit */
		}
	}

#define PR_PAGE_MASK(x) ((1 << pr->pr_page_shift[(x)]) - 1)
	MPASS((hcf & PR_PAGE_MASK(0)) == 0); /* PAGE_SIZE is >= 4K everywhere */
	for (idx = nitems(pr->pr_page_shift) - 1; idx > 0; idx--) {
		if ((hcf & PR_PAGE_MASK(idx)) == 0)
			break;
	}
#undef PR_PAGE_MASK

have_pgsz:
	MPASS(idx <= M_PPOD_PGSZ);

	npages = 1;
	npages += (end_pva - start_pva) >> pr->pr_page_shift[idx];
	nppods = howmany(npages, PPOD_PAGES);
	if (alloc_page_pods(pr, nppods, idx, prsv) != 0)
		return (ENOMEM);
	MPASS(prsv->prsv_nppods > 0);

	return (0);
}

void
t4_free_page_pods(struct ppod_reservation *prsv)
{
	struct ppod_region *pr = prsv->prsv_pr;
	vmem_addr_t addr;

	MPASS(prsv != NULL);
	MPASS(prsv->prsv_nppods != 0);

	addr = prsv->prsv_tag & pr->pr_tag_mask;
	MPASS((addr & pr->pr_invalid_bit) == 0);

	CTR4(KTR_CXGBE, "%-17s arena %p, addr 0x%08x, nppods %d", __func__,
	    pr->pr_arena, addr, prsv->prsv_nppods);

	vmem_free(pr->pr_arena, addr, PPOD_SZ(prsv->prsv_nppods));
	prsv->prsv_nppods = 0;
}

#define NUM_ULP_TX_SC_IMM_PPODS (256 / PPOD_SIZE)

int
t4_write_page_pods_for_ps(struct adapter *sc, struct sge_wrq *wrq, int tid,
    struct pageset *ps)
{
	struct wrqe *wr;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	struct pagepod *ppod;
	int i, j, k, n, chunk, len, ddp_pgsz, idx;
	u_int ppod_addr;
	uint32_t cmd;
	struct ppod_reservation *prsv = &ps->prsv;
	struct ppod_region *pr = prsv->prsv_pr;

	KASSERT(!(ps->flags & PS_PPODS_WRITTEN),
	    ("%s: page pods already written", __func__));
	MPASS(prsv->prsv_nppods > 0);

	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= htobe32(F_ULP_MEMIO_ORDER);
	else
		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {

		/* How many page pods are we writing in this cycle */
		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
		chunk = PPOD_SZ(n);
		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

		wr = alloc_wrqe(len, wrq);
		if (wr == NULL)
			return (ENOMEM);	/* ok to just bail out */
		ulpmc = wrtod(wr);

		INIT_ULPTX_WR(ulpmc, len, 0, 0);
		ulpmc->cmd = cmd;
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = htobe32(chunk);

		ppod = (struct pagepod *)(ulpsc + 1);
		for (j = 0; j < n; i++, j++, ppod++) {
			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
			    V_PPOD_TID(tid) |
			    prsv->prsv_tag);
			ppod->len_offset = htobe64(V_PPOD_LEN(ps->len) |
			    V_PPOD_OFST(ps->offset));
			ppod->rsvd = 0;
			idx = i * PPOD_PAGES * (ddp_pgsz / PAGE_SIZE);
			for (k = 0; k < nitems(ppod->addr); k++) {
				if (idx < ps->npages) {
					ppod->addr[k] =
					    htobe64(ps->pages[idx]->phys_addr);
					idx += ddp_pgsz / PAGE_SIZE;
				} else
					ppod->addr[k] = 0;
#if 0
				CTR5(KTR_CXGBE,
				    "%s: tid %d ppod[%d]->addr[%d] = %p",
				    __func__, toep->tid, i, k,
				    htobe64(ppod->addr[k]));
#endif
			}

		}

		t4_wrq_tx(sc, wr);
	}
	ps->flags |= PS_PPODS_WRITTEN;

	return (0);
}

int
t4_write_page_pods_for_buf(struct adapter *sc, struct sge_wrq *wrq, int tid,
    struct ppod_reservation *prsv, vm_offset_t buf, int buflen)
{
	struct wrqe *wr;
	struct ulp_mem_io *ulpmc;
	struct ulptx_idata *ulpsc;
	struct pagepod *ppod;
	int i, j, k, n, chunk, len, ddp_pgsz;
	u_int ppod_addr, offset;
	uint32_t cmd;
	struct ppod_region *pr = prsv->prsv_pr;
	uintptr_t end_pva, pva, pa;

	cmd = htobe32(V_ULPTX_CMD(ULP_TX_MEM_WRITE));
	if (is_t4(sc))
		cmd |= htobe32(F_ULP_MEMIO_ORDER);
	else
		cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
	ddp_pgsz = 1 << pr->pr_page_shift[G_PPOD_PGSZ(prsv->prsv_tag)];
	offset = buf & PAGE_MASK;
	ppod_addr = pr->pr_start + (prsv->prsv_tag & pr->pr_tag_mask);
	pva = trunc_page(buf);
	end_pva = trunc_page(buf + buflen - 1);
	for (i = 0; i < prsv->prsv_nppods; ppod_addr += chunk) {

		/* How many page pods are we writing in this cycle */
		n = min(prsv->prsv_nppods - i, NUM_ULP_TX_SC_IMM_PPODS);
		MPASS(n > 0);
		chunk = PPOD_SZ(n);
		len = roundup2(sizeof(*ulpmc) + sizeof(*ulpsc) + chunk, 16);

		wr = alloc_wrqe(len, wrq);
		if (wr == NULL)
			return (ENOMEM);	/* ok to just bail out */
		ulpmc = wrtod(wr);

		INIT_ULPTX_WR(ulpmc, len, 0, 0);
		ulpmc->cmd = cmd;
		ulpmc->dlen = htobe32(V_ULP_MEMIO_DATA_LEN(chunk / 32));
		ulpmc->len16 = htobe32(howmany(len - sizeof(ulpmc->wr), 16));
		ulpmc->lock_addr = htobe32(V_ULP_MEMIO_ADDR(ppod_addr >> 5));

		ulpsc = (struct ulptx_idata *)(ulpmc + 1);
		ulpsc->cmd_more = htobe32(V_ULPTX_CMD(ULP_TX_SC_IMM));
		ulpsc->len = htobe32(chunk);

		ppod = (struct pagepod *)(ulpsc + 1);
		for (j = 0; j < n; i++, j++, ppod++) {
			ppod->vld_tid_pgsz_tag_color = htobe64(F_PPOD_VALID |
			    V_PPOD_TID(tid) |
			    (prsv->prsv_tag & ~V_PPOD_PGSZ(M_PPOD_PGSZ)));
			ppod->len_offset = htobe64(V_PPOD_LEN(buflen) |
			    V_PPOD_OFST(offset));
			ppod->rsvd = 0;

			for (k = 0; k < nitems(ppod->addr); k++) {
				if (pva > end_pva)
					ppod->addr[k] = 0;
				else {
					pa = pmap_kextract(pva);
					ppod->addr[k] = htobe64(pa);
					pva += ddp_pgsz;
				}
#if 0
				CTR5(KTR_CXGBE,
				    "%s: tid %d ppod[%d]->addr[%d] = %p",
				    __func__, tid, i, k,
				    htobe64(ppod->addr[k]));
#endif
			}

			/*
			 * Walk back 1 segment so that the first address in the
			 * next pod is the same as the last one in the current
			 * pod.
			 */
			pva -= ddp_pgsz;
		}

		t4_wrq_tx(sc, wr);
	}

	MPASS(pva <= end_pva);

	return (0);
}

static void
wire_pageset(struct pageset *ps)
{
	vm_page_t p;
	int i;

	KASSERT(!(ps->flags & PS_WIRED), ("pageset already wired"));

	for (i = 0; i < ps->npages; i++) {
		p = ps->pages[i];
		vm_page_lock(p);
		vm_page_wire(p);
		vm_page_unhold(p);
		vm_page_unlock(p);
	}
	ps->flags |= PS_WIRED;
}

/*
 * Prepare a pageset for DDP.  This wires the pageset and sets up page
 * pods.
 */
static int
prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps)
{
	struct tom_data *td = sc->tom_softc;

	if (!(ps->flags & PS_WIRED))
		wire_pageset(ps);
	if (ps->prsv.prsv_nppods == 0 &&
	    !t4_alloc_page_pods_for_ps(&td->pr, ps)) {
		return (0);
	}
	if (!(ps->flags & PS_PPODS_WRITTEN) &&
	    t4_write_page_pods_for_ps(sc, toep->ctrlq, toep->tid, ps) != 0) {
		return (0);
	}

	return (1);
}

int
t4_init_ppod_region(struct ppod_region *pr, struct t4_range *r, u_int psz,
    const char *name)
{
	int i;

	MPASS(pr != NULL);
	MPASS(r->size > 0);

	pr->pr_start = r->start;
	pr->pr_len = r->size;
	pr->pr_page_shift[0] = 12 + G_HPZ0(psz);
	pr->pr_page_shift[1] = 12 + G_HPZ1(psz);
	pr->pr_page_shift[2] = 12 + G_HPZ2(psz);
	pr->pr_page_shift[3] = 12 + G_HPZ3(psz);

	/* The SGL -> page pod algorithm requires the sizes to be in order. */
	for (i = 1; i < nitems(pr->pr_page_shift); i++) {
		if (pr->pr_page_shift[i] <= pr->pr_page_shift[i - 1])
			return (ENXIO);
	}

	pr->pr_tag_mask = ((1 << fls(r->size)) - 1) & V_PPOD_TAG(M_PPOD_TAG);
	pr->pr_alias_mask = V_PPOD_TAG(M_PPOD_TAG) & ~pr->pr_tag_mask;
	if (pr->pr_tag_mask == 0 || pr->pr_alias_mask == 0)
		return (ENXIO);
	pr->pr_alias_shift = fls(pr->pr_tag_mask);
	pr->pr_invalid_bit = 1 << (pr->pr_alias_shift - 1);

	pr->pr_arena = vmem_create(name, 0, pr->pr_len, PPOD_SIZE, 0,
	    M_FIRSTFIT | M_NOWAIT);
	if (pr->pr_arena == NULL)
		return (ENOMEM);

	return (0);
}

void
t4_free_ppod_region(struct ppod_region *pr)
{

	MPASS(pr != NULL);

	if (pr->pr_arena)
		vmem_destroy(pr->pr_arena);
	bzero(pr, sizeof(*pr));
}

static int
pscmp(struct pageset *ps, struct vmspace *vm, vm_offset_t start, int npages,
    int pgoff, int len)
{

	if (ps->start != start || ps->npages != npages ||
	    ps->offset != pgoff || ps->len != len)
		return (1);

	return (ps->vm != vm || ps->vm_timestamp != vm->vm_map.timestamp);
}

static int
hold_aio(struct toepcb *toep, struct kaiocb *job, struct pageset **pps)
{
	struct vmspace *vm;
	vm_map_t map;
	vm_offset_t start, end, pgoff;
	struct pageset *ps;
	int n;

	DDP_ASSERT_LOCKED(toep);

	/*
	 * The AIO subsystem will cancel and drain all requests before
	 * permitting a process to exit or exec, so p_vmspace should
	 * be stable here.
	 */
	vm = job->userproc->p_vmspace;
	map = &vm->vm_map;
	start = (uintptr_t)job->uaiocb.aio_buf;
	pgoff = start & PAGE_MASK;
	end = round_page(start + job->uaiocb.aio_nbytes);
	start = trunc_page(start);

	if (end - start > MAX_DDP_BUFFER_SIZE) {
		/*
		 * Truncate the request to a short read.
		 * Alternatively, we could DDP in chunks to the larger
		 * buffer, but that would be quite a bit more work.
		 *
		 * When truncating, round the request down to avoid
		 * crossing a cache line on the final transaction.
		 */
		end = rounddown2(start + MAX_DDP_BUFFER_SIZE, CACHE_LINE_SIZE);
#ifdef VERBOSE_TRACES
		CTR4(KTR_CXGBE, "%s: tid %d, truncating size from %lu to %lu",
		    __func__, toep->tid, (unsigned long)job->uaiocb.aio_nbytes,
		    (unsigned long)(end - (start + pgoff)));
#endif
		job->uaiocb.aio_nbytes = end - (start + pgoff);
		end = round_page(end);
	}

	n = atop(end - start);

	/*
	 * Try to reuse a cached pageset.
	 */
	TAILQ_FOREACH(ps, &toep->ddp.cached_pagesets, link) {
		if (pscmp(ps, vm, start, n, pgoff,
		    job->uaiocb.aio_nbytes) == 0) {
			TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
			toep->ddp.cached_count--;
			*pps = ps;
			return (0);
		}
	}

	/*
	 * If there are too many cached pagesets to create a new one,
	 * free a pageset before creating a new one.
	 */
	KASSERT(toep->ddp.active_count + toep->ddp.cached_count <=
	    nitems(toep->ddp.db), ("%s: too many wired pagesets", __func__));
	if (toep->ddp.active_count + toep->ddp.cached_count ==
	    nitems(toep->ddp.db)) {
		KASSERT(toep->ddp.cached_count > 0,
		    ("no cached pageset to free"));
		ps = TAILQ_LAST(&toep->ddp.cached_pagesets, pagesetq);
		TAILQ_REMOVE(&toep->ddp.cached_pagesets, ps, link);
		toep->ddp.cached_count--;
		free_pageset(toep->td, ps);
	}
	DDP_UNLOCK(toep);

	/* Create a new pageset. */
	ps = malloc(sizeof(*ps) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
	    M_ZERO);
	ps->pages = (vm_page_t *)(ps + 1);
	ps->vm_timestamp = map->timestamp;
	ps->npages = vm_fault_quick_hold_pages(map, start, end - start,
	    VM_PROT_WRITE, ps->pages, n);

	DDP_LOCK(toep);
	if (ps->npages < 0) {
		free(ps, M_CXGBE);
		return (EFAULT);
	}

	KASSERT(ps->npages == n, ("hold_aio: page count mismatch: %d vs %d",
	    ps->npages, n));

	ps->offset = pgoff;
	ps->len = job->uaiocb.aio_nbytes;
	atomic_add_int(&vm->vm_refcnt, 1);
	ps->vm = vm;
	ps->start = start;

	CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
	    __func__, toep->tid, ps, job, ps->npages);
	*pps = ps;
	return (0);
}

static void
ddp_complete_all(struct toepcb *toep, int error)
{
	struct kaiocb *job;

	DDP_ASSERT_LOCKED(toep);
	while (!TAILQ_EMPTY(&toep->ddp.aiojobq)) {
		job = TAILQ_FIRST(&toep->ddp.aiojobq);
		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
		toep->ddp.waiting_count--;
		if (aio_clear_cancel_function(job))
			ddp_complete_one(job, error);
	}
}

static void
aio_ddp_cancel_one(struct kaiocb *job)
{
	long copied;

	/*
	 * If this job had copied data out of the socket buffer before
	 * it was cancelled, report it as a short read rather than an
	 * error.
	 */
	copied = job->aio_received;
	if (copied != 0)
		aio_complete(job, copied, 0);
	else
		aio_cancel(job);
}

/*
 * Called when the main loop wants to requeue a job to retry it later.
 * Deals with the race of the job being cancelled while it was being
 * examined.
 */
static void
aio_ddp_requeue_one(struct toepcb *toep, struct kaiocb *job)
{

	DDP_ASSERT_LOCKED(toep);
	if (!(toep->ddp.flags & DDP_DEAD) &&
	    aio_set_cancel_function(job, t4_aio_cancel_queued)) {
		TAILQ_INSERT_HEAD(&toep->ddp.aiojobq, job, list);
		toep->ddp.waiting_count++;
	} else
		aio_ddp_cancel_one(job);
}

static void
aio_ddp_requeue(struct toepcb *toep)
{
	struct adapter *sc = td_adapter(toep->td);
	struct socket *so;
	struct sockbuf *sb;
	struct inpcb *inp;
	struct kaiocb *job;
	struct ddp_buffer *db;
	size_t copied, offset, resid;
	struct pageset *ps;
	struct mbuf *m;
	uint64_t ddp_flags, ddp_flags_mask;
	struct wrqe *wr;
	int buf_flag, db_idx, error;

	DDP_ASSERT_LOCKED(toep);

restart:
	if (toep->ddp.flags & DDP_DEAD) {
		MPASS(toep->ddp.waiting_count == 0);
		MPASS(toep->ddp.active_count == 0);
		return;
	}

	if (toep->ddp.waiting_count == 0 ||
	    toep->ddp.active_count == nitems(toep->ddp.db)) {
		return;
	}

	job = TAILQ_FIRST(&toep->ddp.aiojobq);
	so = job->fd_file->f_data;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	/* We will never get anything unless we are or were connected. */
	if (!(so->so_state & (SS_ISCONNECTED|SS_ISDISCONNECTED))) {
		SOCKBUF_UNLOCK(sb);
		ddp_complete_all(toep, ENOTCONN);
		return;
	}

	KASSERT(toep->ddp.active_count == 0 || sbavail(sb) == 0,
	    ("%s: pending sockbuf data and DDP is active", __func__));

	/* Abort if socket has reported problems. */
	/* XXX: Wait for any queued DDP's to finish and/or flush them? */
	if (so->so_error && sbavail(sb) == 0) {
		toep->ddp.waiting_count--;
		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
		if (!aio_clear_cancel_function(job)) {
			SOCKBUF_UNLOCK(sb);
			goto restart;
		}

		/*
		 * If this job has previously copied some data, report
		 * a short read and leave the error to be reported by
		 * a future request.
		 */
		copied = job->aio_received;
		if (copied != 0) {
			SOCKBUF_UNLOCK(sb);
			aio_complete(job, copied, 0);
			goto restart;
		}
		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		aio_complete(job, -1, error);
		goto restart;
	}

	/*
	 * Door is closed.  If there is pending data in the socket buffer,
	 * deliver it.  If there are pending DDP requests, wait for those
	 * to complete.  Once they have completed, return EOF reads.
	 */
	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
		SOCKBUF_UNLOCK(sb);
		if (toep->ddp.active_count != 0)
			return;
		ddp_complete_all(toep, 0);
		return;
	}

	/*
	 * If DDP is not enabled and there is no pending socket buffer
	 * data, try to enable DDP.
	 */
	if (sbavail(sb) == 0 && (toep->ddp.flags & DDP_ON) == 0) {
		SOCKBUF_UNLOCK(sb);

		/*
		 * Wait for the card to ACK that DDP is enabled before
		 * queueing any buffers.  Currently this waits for an
		 * indicate to arrive.
		 * This could use a TCB_SET_FIELD_RPL message to know that
		 * DDP was enabled instead of waiting for the indicate which
		 * would avoid copying the indicate if no data is pending.
		 *
		 * XXX: Might want to limit the indicate size to the size
		 * of the first queued request.
		 */
		if ((toep->ddp.flags & DDP_SC_REQ) == 0)
			enable_ddp(sc, toep);
		return;
	}
	SOCKBUF_UNLOCK(sb);

	/*
	 * If another thread is queueing a buffer for DDP, let it
	 * drain any work and return.
	 */
	if (toep->ddp.queueing != NULL)
		return;

	/* Take the next job to prep it for DDP. */
	toep->ddp.waiting_count--;
	TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
	if (!aio_clear_cancel_function(job))
		goto restart;
	toep->ddp.queueing = job;

	/* NB: This drops DDP_LOCK while it holds the backing VM pages. */
	error = hold_aio(toep, job, &ps);
	if (error != 0) {
		ddp_complete_one(job, error);
		toep->ddp.queueing = NULL;
		goto restart;
	}

	SOCKBUF_LOCK(sb);
	if (so->so_error && sbavail(sb) == 0) {
		copied = job->aio_received;
		if (copied != 0) {
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_complete(job, copied, 0);
			toep->ddp.queueing = NULL;
			goto restart;
		}

		error = so->so_error;
		so->so_error = 0;
		SOCKBUF_UNLOCK(sb);
		recycle_pageset(toep, ps);
		aio_complete(job, -1, error);
		toep->ddp.queueing = NULL;
		goto restart;
	}

	if (sb->sb_state & SBS_CANTRCVMORE && sbavail(sb) == 0) {
		SOCKBUF_UNLOCK(sb);
		recycle_pageset(toep, ps);
		if (toep->ddp.active_count != 0) {
			/*
			 * The door is closed, but there are still pending
			 * DDP buffers.  Requeue.  These jobs will all be
			 * completed once those buffers drain.
			 */
			aio_ddp_requeue_one(toep, job);
			toep->ddp.queueing = NULL;
			return;
		}
		ddp_complete_one(job, 0);
		ddp_complete_all(toep, 0);
		toep->ddp.queueing = NULL;
		return;
	}

sbcopy:
	/*
	 * If the toep is dead, there shouldn't be any data in the socket
	 * buffer, so the above case should have handled this.
	 */
	MPASS(!(toep->ddp.flags & DDP_DEAD));

	/*
	 * If there is pending data in the socket buffer (either
	 * from before the requests were queued or a DDP indicate),
	 * copy those mbufs out directly.
	 */
	copied = 0;
	offset = ps->offset + job->aio_received;
	MPASS(job->aio_received <= job->uaiocb.aio_nbytes);
	resid = job->uaiocb.aio_nbytes - job->aio_received;
	m = sb->sb_mb;
	KASSERT(m == NULL || toep->ddp.active_count == 0,
	    ("%s: sockbuf data with active DDP", __func__));
	while (m != NULL && resid > 0) {
		struct iovec iov[1];
		struct uio uio;
		int error;

		iov[0].iov_base = mtod(m, void *);
		iov[0].iov_len = m->m_len;
		if (iov[0].iov_len > resid)
			iov[0].iov_len = resid;
		uio.uio_iov = iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = 0;
		uio.uio_resid = iov[0].iov_len;
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_WRITE;
		error = uiomove_fromphys(ps->pages, offset + copied,
		    uio.uio_resid, &uio);
		MPASS(error == 0 && uio.uio_resid == 0);
		copied += uio.uio_offset;
		resid -= uio.uio_offset;
		m = m->m_next;
	}
	if (copied != 0) {
		sbdrop_locked(sb, copied);
		job->aio_received += copied;
		job->msgrcv = 1;
		copied = job->aio_received;
		inp = sotoinpcb(so);
		if (!INP_TRY_WLOCK(inp)) {
			/*
			 * The reference on the socket file descriptor in
			 * the AIO job should keep 'sb' and 'inp' stable.
			 * Our caller has a reference on the 'toep' that
			 * keeps it stable.
			 */
			SOCKBUF_UNLOCK(sb);
			DDP_UNLOCK(toep);
			INP_WLOCK(inp);
			DDP_LOCK(toep);
			SOCKBUF_LOCK(sb);

			/*
			 * If the socket has been closed, we should detect
			 * that and complete this request if needed on
			 * the next trip around the loop.
			 */
		}
		t4_rcvd_locked(&toep->td->tod, intotcpcb(inp));
		INP_WUNLOCK(inp);
		if (resid == 0 || toep->ddp.flags & DDP_DEAD) {
			/*
			 * We filled the entire buffer with socket
			 * data, DDP is not being used, or the socket
			 * is being shut down, so complete the
			 * request.
			 */
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_complete(job, copied, 0);
			toep->ddp.queueing = NULL;
			goto restart;
		}

		/*
		 * If DDP is not enabled, requeue this request and restart.
		 * This will either enable DDP or wait for more data to
		 * arrive on the socket buffer.
		 */
		if ((toep->ddp.flags & (DDP_ON | DDP_SC_REQ)) != DDP_ON) {
			SOCKBUF_UNLOCK(sb);
			recycle_pageset(toep, ps);
			aio_ddp_requeue_one(toep, job);
			toep->ddp.queueing = NULL;
			goto restart;
		}

		/*
		 * An indicate might have arrived and been added to
		 * the socket buffer while it was unlocked after the
		 * copy to lock the INP.  If so, restart the copy.
		 */
		if (sbavail(sb) != 0)
			goto sbcopy;
	}
	SOCKBUF_UNLOCK(sb);

	if (prep_pageset(sc, toep, ps) == 0) {
		recycle_pageset(toep, ps);
		aio_ddp_requeue_one(toep, job);
		toep->ddp.queueing = NULL;

		/*
		 * XXX: Need to retry this later.  Mostly need a trigger
		 * when page pods are freed up.
		 */
		printf("%s: prep_pageset failed\n", __func__);
		return;
	}

	/* Determine which DDP buffer to use. */
	if (toep->ddp.db[0].job == NULL) {
		db_idx = 0;
	} else {
		MPASS(toep->ddp.db[1].job == NULL);
		db_idx = 1;
	}

	ddp_flags = 0;
	ddp_flags_mask = 0;
	if (db_idx == 0) {
		ddp_flags |= V_TF_DDP_BUF0_VALID(1);
		if (so->so_state & SS_NBIO)
			ddp_flags |= V_TF_DDP_BUF0_FLUSH(1);
		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE0(1) |
		    V_TF_DDP_PUSH_DISABLE_0(1) | V_TF_DDP_PSHF_ENABLE_0(1) |
		    V_TF_DDP_BUF0_FLUSH(1) | V_TF_DDP_BUF0_VALID(1);
		buf_flag = DDP_BUF0_ACTIVE;
	} else {
		ddp_flags |= V_TF_DDP_BUF1_VALID(1);
		if (so->so_state & SS_NBIO)
			ddp_flags |= V_TF_DDP_BUF1_FLUSH(1);
		ddp_flags_mask |= V_TF_DDP_PSH_NO_INVALIDATE1(1) |
		    V_TF_DDP_PUSH_DISABLE_1(1) | V_TF_DDP_PSHF_ENABLE_1(1) |
		    V_TF_DDP_BUF1_FLUSH(1) | V_TF_DDP_BUF1_VALID(1);
		buf_flag = DDP_BUF1_ACTIVE;
	}
	MPASS((toep->ddp.flags & buf_flag) == 0);
	if ((toep->ddp.flags & (DDP_BUF0_ACTIVE | DDP_BUF1_ACTIVE)) == 0) {
		MPASS(db_idx == 0);
		MPASS(toep->ddp.active_id == -1);
		MPASS(toep->ddp.active_count == 0);
		ddp_flags_mask |= V_TF_DDP_ACTIVE_BUF(1);
	}

	/*
	 * The TID for this connection should still be valid.  If DDP_DEAD
	 * is set, SBS_CANTRCVMORE should be set, so we shouldn't be
	 * this far anyway.  Even if the socket is closing on the other
	 * end, the AIO job holds a reference on this end of the socket
	 * which will keep it open and keep the TCP PCB attached until
	 * after the job is completed.
	 */
	wr = mk_update_tcb_for_ddp(sc, toep, db_idx, ps, job->aio_received,
	    ddp_flags, ddp_flags_mask);
	if (wr == NULL) {
		recycle_pageset(toep, ps);
		aio_ddp_requeue_one(toep, job);
		toep->ddp.queueing = NULL;

		/*
		 * XXX: Need a way to kick a retry here.
		 *
		 * XXX: We know the fixed size needed and could
		 * preallocate this using a blocking request at the
		 * start of the task to avoid having to handle this
		 * edge case.
		 */
		printf("%s: mk_update_tcb_for_ddp failed\n", __func__);
		return;
	}

	if (!aio_set_cancel_function(job, t4_aio_cancel_active)) {
		free_wrqe(wr);
		recycle_pageset(toep, ps);
		aio_ddp_cancel_one(job);
		toep->ddp.queueing = NULL;
		goto restart;
	}

#ifdef VERBOSE_TRACES
	CTR5(KTR_CXGBE, "%s: scheduling %p for DDP[%d] (flags %#lx/%#lx)",
	    __func__, job, db_idx, ddp_flags, ddp_flags_mask);
#endif
	/* Give the chip the go-ahead. */
	t4_wrq_tx(sc, wr);
	db = &toep->ddp.db[db_idx];
	db->cancel_pending = 0;
	db->job = job;
	db->ps = ps;
	toep->ddp.queueing = NULL;
	toep->ddp.flags |= buf_flag;
	toep->ddp.active_count++;
	if (toep->ddp.active_count == 1) {
		MPASS(toep->ddp.active_id == -1);
		toep->ddp.active_id = db_idx;
		CTR2(KTR_CXGBE, "%s: ddp_active_id = %d", __func__,
		    toep->ddp.active_id);
	}
	goto restart;
}

void
ddp_queue_toep(struct toepcb *toep)
{

	DDP_ASSERT_LOCKED(toep);
	if (toep->ddp.flags & DDP_TASK_ACTIVE)
		return;
	toep->ddp.flags |= DDP_TASK_ACTIVE;
	hold_toepcb(toep);
	soaio_enqueue(&toep->ddp.requeue_task);
}

static void
aio_ddp_requeue_task(void *context, int pending)
{
	struct toepcb *toep = context;

	DDP_LOCK(toep);
	aio_ddp_requeue(toep);
	toep->ddp.flags &= ~DDP_TASK_ACTIVE;
	DDP_UNLOCK(toep);

	free_toepcb(toep);
}

static void
t4_aio_cancel_active(struct kaiocb *job)
{
	struct socket *so = job->fd_file->f_data;
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;
	struct adapter *sc = td_adapter(toep->td);
	uint64_t valid_flag;
	int i;

	DDP_LOCK(toep);
	if (aio_cancel_cleared(job)) {
		DDP_UNLOCK(toep);
		aio_ddp_cancel_one(job);
		return;
	}

	for (i = 0; i < nitems(toep->ddp.db); i++) {
		if (toep->ddp.db[i].job == job) {
			/* Should only ever get one cancel request for a job. */
			MPASS(toep->ddp.db[i].cancel_pending == 0);

			/*
			 * Invalidate this buffer.  It will be
			 * cancelled or partially completed once the
			 * card ACKs the invalidate.
			 */
			valid_flag = i == 0 ? V_TF_DDP_BUF0_VALID(1) :
			    V_TF_DDP_BUF1_VALID(1);
			t4_set_tcb_field(sc, toep->ctrlq, toep,
			    W_TCB_RX_DDP_FLAGS, valid_flag, 0, 1,
			    i + DDP_BUF0_INVALIDATED);
			toep->ddp.db[i].cancel_pending = 1;
			CTR2(KTR_CXGBE, "%s: request %p marked pending",
			    __func__, job);
			break;
		}
	}
	DDP_UNLOCK(toep);
}

static void
t4_aio_cancel_queued(struct kaiocb *job)
{
	struct socket *so = job->fd_file->f_data;
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;

	DDP_LOCK(toep);
	if (!aio_cancel_cleared(job)) {
		TAILQ_REMOVE(&toep->ddp.aiojobq, job, list);
		toep->ddp.waiting_count--;
		if (toep->ddp.waiting_count == 0)
			ddp_queue_toep(toep);
	}
	CTR2(KTR_CXGBE, "%s: request %p cancelled", __func__, job);
	DDP_UNLOCK(toep);

	aio_ddp_cancel_one(job);
}

int
t4_aio_queue_ddp(struct socket *so, struct kaiocb *job)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct toepcb *toep = tp->t_toe;

	/* Ignore writes. */
	if (job->uaiocb.aio_lio_opcode != LIO_READ)
		return (EOPNOTSUPP);

	DDP_LOCK(toep);

	/*
	 * XXX: Think about possibly returning errors for ENOTCONN,
	 * etc.  Perhaps the caller would only queue the request
	 * if it failed with EOPNOTSUPP?
	 */

#ifdef VERBOSE_TRACES
	CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job);
#endif
	if (!aio_set_cancel_function(job, t4_aio_cancel_queued))
		panic("new job was cancelled");
	TAILQ_INSERT_TAIL(&toep->ddp.aiojobq, job, list);
	toep->ddp.waiting_count++;
	toep->ddp.flags |= DDP_OK;

	/*
	 * Try to handle this request synchronously.  If this has
	 * to block because the task is running, it will just bail
	 * and let the task handle it instead.
	 */
	aio_ddp_requeue(toep);
	DDP_UNLOCK(toep);
	return (0);
}

void
t4_ddp_mod_load(void)
{

	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
	    CPL_COOKIE_DDP0);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, do_ddp_tcb_rpl,
	    CPL_COOKIE_DDP1);
	t4_register_cpl_handler(CPL_RX_DATA_DDP, do_rx_data_ddp);
	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_rx_ddp_complete);
	TAILQ_INIT(&ddp_orphan_pagesets);
	mtx_init(&ddp_orphan_pagesets_lock, "ddp orphans", NULL, MTX_DEF);
	TASK_INIT(&ddp_orphan_task, 0, ddp_free_orphan_pagesets, NULL);
}

void
t4_ddp_mod_unload(void)
{

	taskqueue_drain(taskqueue_thread, &ddp_orphan_task);
	MPASS(TAILQ_EMPTY(&ddp_orphan_pagesets));
	mtx_destroy(&ddp_orphan_pagesets_lock);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP0);
	t4_register_shared_cpl_handler(CPL_SET_TCB_RPL, NULL, CPL_COOKIE_DDP1);
	t4_register_cpl_handler(CPL_RX_DATA_DDP, NULL);
	t4_register_cpl_handler(CPL_RX_DDP_COMPLETE, NULL);
}
#endif