/*-
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * Chelsio T5xx iSCSI driver
 *
 * Written by: Sreenivasa Honnur <shonnur@chelsio.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/systm.h>

#ifdef TCP_OFFLOAD
#include <sys/errno.h>
#include <sys/kthread.h>
#include <sys/smp.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/mbuf.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/toecore.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp_fsm.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_error.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_ioctl.h>

#include <dev/iscsi/icl.h>
#include <dev/iscsi/iscsi_proto.h>
#include <dev/iscsi/iscsi_ioctl.h>
#include <dev/iscsi/iscsi.h>
#include <cam/ctl/ctl_frontend_iscsi.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_compat.h>
#include <cam/scsi/scsi_message.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"	/* for PCIE_MEM_ACCESS */
#include "tom/t4_tom.h"
#include "cxgbei.h"

static int worker_thread_count;
static struct cxgbei_worker_thread_softc *cwt_softc;
static struct proc *cxgbei_proc;

static void
free_ci_counters(struct cxgbei_data *ci)
{

#define FREE_CI_COUNTER(x) do { \
	if (ci->x != NULL) { \
		counter_u64_free(ci->x); \
		ci->x = NULL; \
	} \
} while (0)

	FREE_CI_COUNTER(ddp_setup_ok);
	FREE_CI_COUNTER(ddp_setup_error);
	FREE_CI_COUNTER(ddp_bytes);
	FREE_CI_COUNTER(ddp_pdus);
	FREE_CI_COUNTER(fl_bytes);
	FREE_CI_COUNTER(fl_pdus);
#undef FREE_CI_COUNTER
}

static int
alloc_ci_counters(struct cxgbei_data *ci)
{

#define ALLOC_CI_COUNTER(x) do { \
	ci->x = counter_u64_alloc(M_WAITOK); \
	if (ci->x == NULL) \
		goto fail; \
} while (0)

	ALLOC_CI_COUNTER(ddp_setup_ok);
	ALLOC_CI_COUNTER(ddp_setup_error);
	ALLOC_CI_COUNTER(ddp_bytes);
	ALLOC_CI_COUNTER(ddp_pdus);
	ALLOC_CI_COUNTER(fl_bytes);
	ALLOC_CI_COUNTER(fl_pdus);
#undef ALLOC_CI_COUNTER

	return (0);
fail:
	free_ci_counters(ci);
	return (ENOMEM);
}

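/*
 * Determine the maximum tx/rx PDU lengths the hardware can handle: the
 * smallest of the payload memory page size, TP's maximum rx data size,
 * and TP's maximum transfer length, rounded down to a multiple of 512.
 */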
static void
read_pdu_limits(struct adapter *sc, uint32_t *max_tx_pdu_len,
    uint32_t *max_rx_pdu_len)
{
	uint32_t tx_len, rx_len, r, v;

	rx_len = t4_read_reg(sc, A_TP_PMM_RX_PAGE_SIZE);
	tx_len = t4_read_reg(sc, A_TP_PMM_TX_PAGE_SIZE);

	r = t4_read_reg(sc, A_TP_PARA_REG2);
	rx_len = min(rx_len, G_MAXRXDATA(r));
	tx_len = min(tx_len, G_MAXRXDATA(r));

	r = t4_read_reg(sc, A_TP_PARA_REG7);
	v = min(G_PMMAXXFERLEN0(r), G_PMMAXXFERLEN1(r));
	rx_len = min(rx_len, v);
	tx_len = min(tx_len, v);

	/* Remove after FW_FLOWC_MNEM_TXDATAPLEN_MAX fix in firmware. */
	tx_len = min(tx_len, 3 * 4096);

	*max_tx_pdu_len = rounddown2(tx_len, 512);
	*max_rx_pdu_len = rounddown2(rx_len, 512);
}

/*
 * Initialize the software state of the iSCSI ULP driver.
 *
 * ENXIO means firmware didn't set up something that it was supposed to.
 */
static int
cxgbei_init(struct adapter *sc, struct cxgbei_data *ci)
{
	struct sysctl_oid *oid;
	struct sysctl_oid_list *children;
	struct ppod_region *pr;
	uint32_t r;
	int rc;

	MPASS(sc->vres.iscsi.size > 0);
	MPASS(ci != NULL);

	rc = alloc_ci_counters(ci);
	if (rc != 0)
		return (rc);

	read_pdu_limits(sc, &ci->max_tx_pdu_len, &ci->max_rx_pdu_len);

	pr = &ci->pr;
	r = t4_read_reg(sc, A_ULP_RX_ISCSI_PSZ);
	rc = t4_init_ppod_region(pr, &sc->vres.iscsi, r, "iSCSI page pods");
	if (rc != 0) {
		device_printf(sc->dev,
		    "%s: failed to initialize the iSCSI page pod region: %u.\n",
		    __func__, rc);
		free_ci_counters(ci);
		return (rc);
	}

	r = t4_read_reg(sc, A_ULP_RX_ISCSI_TAGMASK);
	r &= V_ISCSITAGMASK(M_ISCSITAGMASK);
	if (r != pr->pr_tag_mask) {
		/*
		 * Recent firmwares are supposed to set up the iSCSI tagmask
		 * but we'll do it ourselves if the computed value doesn't
		 * match what's in the register.
		 */
		device_printf(sc->dev,
		    "tagmask 0x%08x does not match computed mask 0x%08x.\n", r,
		    pr->pr_tag_mask);
		t4_set_reg_field(sc, A_ULP_RX_ISCSI_TAGMASK,
		    V_ISCSITAGMASK(M_ISCSITAGMASK), pr->pr_tag_mask);
	}

	sysctl_ctx_init(&ci->ctx);
	oid = device_get_sysctl_tree(sc->dev);	/* dev.t5nex.X */
	children = SYSCTL_CHILDREN(oid);

	oid = SYSCTL_ADD_NODE(&ci->ctx, children, OID_AUTO, "iscsi",
	    CTLFLAG_RD | CTLFLAG_MPSAFE, NULL, "iSCSI ULP statistics");
	children = SYSCTL_CHILDREN(oid);

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_setup_ok",
	    CTLFLAG_RD, &ci->ddp_setup_ok,
	    "# of times DDP buffer was setup successfully.");

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_setup_error",
	    CTLFLAG_RD, &ci->ddp_setup_error,
	    "# of times DDP buffer setup failed.");

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_bytes",
	    CTLFLAG_RD, &ci->ddp_bytes, "# of bytes placed directly");

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "ddp_pdus",
	    CTLFLAG_RD, &ci->ddp_pdus, "# of PDUs with data placed directly.");

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "fl_bytes",
	    CTLFLAG_RD, &ci->fl_bytes, "# of data bytes delivered in freelist");

	SYSCTL_ADD_COUNTER_U64(&ci->ctx, children, OID_AUTO, "fl_pdus",
	    CTLFLAG_RD, &ci->fl_pdus,
	    "# of PDUs with data delivered in freelist");

	ci->ddp_threshold = 2048;
	SYSCTL_ADD_UINT(&ci->ctx, children, OID_AUTO, "ddp_threshold",
	    CTLFLAG_RW, &ci->ddp_threshold, 0, "Rx zero copy threshold");

	return (0);
}

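/*
 * The hardware delivers each received iSCSI PDU as a sequence of CPLs:
 * CPL_ISCSI_HDR carries the BHS, an optional CPL_ISCSI_DATA carries any
 * payload that was not placed directly, and CPL_RX_ISCSI_DDP reports the
 * final DDP/CRC status.  The PDU being assembled is tracked in
 * toep->ulpcb2 in between.
 */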
static int
do_rx_iscsi_hdr(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct cpl_iscsi_hdr *cpl = mtod(m, struct cpl_iscsi_hdr *);
	u_int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct icl_pdu *ip;
	struct icl_cxgbei_pdu *icp;
	uint16_t len_ddp = be16toh(cpl->pdu_len_ddp);
	uint16_t len = be16toh(cpl->len);

	M_ASSERTPKTHDR(m);
	MPASS(m->m_pkthdr.len == len + sizeof(*cpl));

	ip = icl_cxgbei_new_pdu(M_NOWAIT);
	if (ip == NULL)
		CXGBE_UNIMPLEMENTED("PDU allocation failure");
	m_copydata(m, sizeof(*cpl), ISCSI_BHS_SIZE, (caddr_t)ip->ip_bhs);
	ip->ip_data_len = G_ISCSI_PDU_LEN(len_ddp) - len;
	icp = ip_to_icp(ip);
	icp->icp_seq = ntohl(cpl->seq);
	icp->icp_flags = ICPF_RX_HDR;

	/* This is the start of a new PDU.  There should be no old state. */
	MPASS(toep->ulpcb2 == NULL);
	toep->ulpcb2 = icp;

#if 0
	CTR5(KTR_CXGBE, "%s: tid %u, cpl->len %u, pdu_len_ddp 0x%04x, icp %p",
	    __func__, tid, len, len_ddp, icp);
#endif

	m_freem(m);
	return (0);
}

static int
do_rx_iscsi_data(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct cpl_iscsi_data *cpl = mtod(m, struct cpl_iscsi_data *);
	u_int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct icl_cxgbei_pdu *icp = toep->ulpcb2;

	M_ASSERTPKTHDR(m);
	MPASS(m->m_pkthdr.len == be16toh(cpl->len) + sizeof(*cpl));

	/* Must already have received the header (but not the data). */
	MPASS(icp != NULL);
	MPASS(icp->icp_flags == ICPF_RX_HDR);
	MPASS(icp->ip.ip_data_mbuf == NULL);

	m_adj(m, sizeof(*cpl));
	MPASS(icp->ip.ip_data_len == m->m_pkthdr.len);

	icp->icp_flags |= ICPF_RX_FLBUF;
	icp->ip.ip_data_mbuf = m;
	counter_u64_add(ci->fl_pdus, 1);
	counter_u64_add(ci->fl_bytes, m->m_pkthdr.len);

#if 0
	CTR3(KTR_CXGBE, "%s: tid %u, cpl->len %u", __func__, tid,
	    be16toh(cpl->len));
#endif

	return (0);
}

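/*
 * Handler for CPL_RX_ISCSI_DDP, the last CPL for a PDU.  It records the
 * DDP/CRC status, advances the TCP sequence space and returns rx credits,
 * and queues the completed PDU to the connection's worker thread.
 */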
static int
do_rx_iscsi_ddp(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	const struct cpl_rx_data_ddp *cpl = (const void *)(rss + 1);
	u_int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct inpcb *inp = toep->inp;
	struct socket *so;
	struct sockbuf *sb;
	struct tcpcb *tp;
	struct icl_cxgbei_conn *icc;
	struct icl_conn *ic;
	struct icl_cxgbei_pdu *icp = toep->ulpcb2;
	struct icl_pdu *ip;
	u_int pdu_len, val;
	struct epoch_tracker et;

	MPASS(m == NULL);

	/* Must already be assembling a PDU. */
	MPASS(icp != NULL);
	MPASS(icp->icp_flags & ICPF_RX_HDR);	/* Data is optional. */
	MPASS((icp->icp_flags & ICPF_RX_STATUS) == 0);

	pdu_len = be16toh(cpl->len);	/* includes everything. */
	val = be32toh(cpl->ddpvld);

#if 0
	CTR5(KTR_CXGBE,
	    "%s: tid %u, cpl->len %u, ddpvld 0x%08x, icp_flags 0x%08x",
	    __func__, tid, pdu_len, val, icp->icp_flags);
#endif

	icp->icp_flags |= ICPF_RX_STATUS;
	ip = &icp->ip;
	if (val & F_DDP_PADDING_ERR)
		icp->icp_flags |= ICPF_PAD_ERR;
	if (val & F_DDP_HDRCRC_ERR)
		icp->icp_flags |= ICPF_HCRC_ERR;
	if (val & F_DDP_DATACRC_ERR)
		icp->icp_flags |= ICPF_DCRC_ERR;
	if (val & F_DDP_PDU && ip->ip_data_mbuf == NULL) {
		MPASS((icp->icp_flags & ICPF_RX_FLBUF) == 0);
		MPASS(ip->ip_data_len > 0);
		icp->icp_flags |= ICPF_RX_DDP;
		counter_u64_add(ci->ddp_pdus, 1);
		counter_u64_add(ci->ddp_bytes, ip->ip_data_len);
	}

	INP_WLOCK(inp);
	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))) {
		CTR4(KTR_CXGBE, "%s: tid %u, rx (%d bytes), inp_flags 0x%x",
		    __func__, tid, pdu_len, inp->inp_flags);
		INP_WUNLOCK(inp);
		icl_cxgbei_conn_pdu_free(NULL, ip);
#ifdef INVARIANTS
		toep->ulpcb2 = NULL;
#endif
		return (0);
	}

	tp = intotcpcb(inp);
	MPASS(icp->icp_seq == tp->rcv_nxt);
	MPASS(tp->rcv_wnd >= pdu_len);
	tp->rcv_nxt += pdu_len;
	tp->rcv_wnd -= pdu_len;
	tp->t_rcvtime = ticks;

	/* update rx credits */
	t4_rcvd(&toep->td->tod, tp);	/* XXX: sc->tom_softc.tod */

	so = inp->inp_socket;
	sb = &so->so_rcv;
	SOCKBUF_LOCK(sb);

	icc = toep->ulpcb;
	if (__predict_false(icc == NULL || sb->sb_state & SBS_CANTRCVMORE)) {
		CTR5(KTR_CXGBE,
		    "%s: tid %u, excess rx (%d bytes), icc %p, sb_state 0x%x",
		    __func__, tid, pdu_len, icc, sb->sb_state);
		SOCKBUF_UNLOCK(sb);
		INP_WUNLOCK(inp);

		CURVNET_SET(so->so_vnet);
		NET_EPOCH_ENTER(et);
		INP_WLOCK(inp);
		tp = tcp_drop(tp, ECONNRESET);
		if (tp)
			INP_WUNLOCK(inp);
		NET_EPOCH_EXIT(et);
		CURVNET_RESTORE();

		icl_cxgbei_conn_pdu_free(NULL, ip);
#ifdef INVARIANTS
		toep->ulpcb2 = NULL;
#endif
		return (0);
	}
	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ic = &icc->ic;
	icl_cxgbei_new_pdu_set_conn(ip, ic);

	MPASS(m == NULL); /* was unused, we'll use it now. */
	m = sbcut_locked(sb, sbused(sb)); /* XXXNP: toep->sb_cc accounting? */
	if (__predict_false(m != NULL)) {
		int len = m_length(m, NULL);

		/*
		 * PDUs were received before the tid transitioned to ULP mode.
		 * Convert them to icl_cxgbei_pdus and send them to ICL before
		 * the PDU in icp/ip.
		 */
		CTR3(KTR_CXGBE, "%s: tid %u, %u bytes in so_rcv", __func__, tid,
		    len);

		/* XXXNP: needs to be rewritten. */
		if (len == sizeof(struct iscsi_bhs) ||
		    len == 4 + sizeof(struct iscsi_bhs)) {
			struct icl_cxgbei_pdu *icp0;
			struct icl_pdu *ip0;

			ip0 = icl_cxgbei_new_pdu(M_NOWAIT);
			if (ip0 == NULL)
				CXGBE_UNIMPLEMENTED("PDU allocation failure");
			icl_cxgbei_new_pdu_set_conn(ip0, ic);
			icp0 = ip_to_icp(ip0);
			icp0->icp_seq = 0; /* XXX */
			icp0->icp_flags = ICPF_RX_HDR | ICPF_RX_STATUS;
			m_copydata(m, 0, sizeof(struct iscsi_bhs),
			    (void *)ip0->ip_bhs);
			STAILQ_INSERT_TAIL(&icc->rcvd_pdus, ip0, ip_next);
		}
		m_freem(m);
	}

	STAILQ_INSERT_TAIL(&icc->rcvd_pdus, ip, ip_next);
	if ((icc->rx_flags & RXF_ACTIVE) == 0) {
		struct cxgbei_worker_thread_softc *cwt = &cwt_softc[icc->cwt];

		mtx_lock(&cwt->cwt_lock);
		icc->rx_flags |= RXF_ACTIVE;
		TAILQ_INSERT_TAIL(&cwt->rx_head, icc, rx_link);
		if (cwt->cwt_state == CWT_SLEEPING) {
			cwt->cwt_state = CWT_RUNNING;
			cv_signal(&cwt->cwt_cv);
		}
		mtx_unlock(&cwt->cwt_lock);
	}
	SOCKBUF_UNLOCK(sb);
	INP_WUNLOCK(inp);

#ifdef INVARIANTS
	toep->ulpcb2 = NULL;
#endif

	return (0);
}

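/*
 * ULD activate/deactivate callbacks, registered with the base driver via
 * cxgbei_uld_info below.  Activation allocates and initializes the
 * per-adapter iSCSI softc; deactivation tears it down.
 */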
static int
cxgbei_activate(struct adapter *sc)
{
	struct cxgbei_data *ci;
	int rc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (uld_active(sc, ULD_ISCSI)) {
		KASSERT(0, ("%s: iSCSI offload already enabled on adapter %p",
		    __func__, sc));
		return (0);
	}

	if (sc->iscsicaps == 0 || sc->vres.iscsi.size == 0) {
		device_printf(sc->dev,
		    "not iSCSI offload capable, or capability disabled.\n");
		return (ENOSYS);
	}

	/* per-adapter softc for iSCSI; M_WAITOK allocations do not fail. */
	ci = malloc(sizeof(*ci), M_CXGBE, M_ZERO | M_WAITOK);

	rc = cxgbei_init(sc, ci);
	if (rc != 0) {
		free(ci, M_CXGBE);
		return (rc);
	}

	sc->iscsi_ulp_softc = ci;

	return (0);
}

static int
cxgbei_deactivate(struct adapter *sc)
{
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;

	ASSERT_SYNCHRONIZED_OP(sc);

	if (ci != NULL) {
		sysctl_ctx_free(&ci->ctx);
		t4_free_ppod_region(&ci->pr);
		free_ci_counters(ci);
		free(ci, M_CXGBE);
		sc->iscsi_ulp_softc = NULL;
	}

	return (0);
}

static void
cxgbei_activate_all(struct adapter *sc, void *arg __unused)
{

	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4isact") != 0)
		return;

	/* Activate iSCSI if any port on this adapter has IFCAP_TOE enabled. */
	if (sc->offload_map && !uld_active(sc, ULD_ISCSI))
		(void) t4_activate_uld(sc, ULD_ISCSI);

	end_synchronized_op(sc, 0);
}

static void
cxgbei_deactivate_all(struct adapter *sc, void *arg __unused)
{

	if (begin_synchronized_op(sc, NULL, SLEEP_OK | INTR_OK, "t4isdea") != 0)
		return;

	if (uld_active(sc, ULD_ISCSI))
		(void) t4_deactivate_uld(sc, ULD_ISCSI);

	end_synchronized_op(sc, 0);
}

static struct uld_info cxgbei_uld_info = {
	.uld_id = ULD_ISCSI,
	.activate = cxgbei_activate,
	.deactivate = cxgbei_deactivate,
};

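/*
 * Main loop of a worker thread.  Pulls connections off its rx queue and
 * hands their received PDUs to ICL, sleeping when the queue is empty and
 * exiting when told to stop.
 */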
static void
cwt_main(void *arg)
{
	struct cxgbei_worker_thread_softc *cwt = arg;
	struct icl_cxgbei_conn *icc = NULL;
	struct icl_conn *ic;
	struct icl_pdu *ip;
	struct sockbuf *sb;
	STAILQ_HEAD(, icl_pdu) rx_pdus = STAILQ_HEAD_INITIALIZER(rx_pdus);

	MPASS(cwt != NULL);

	mtx_lock(&cwt->cwt_lock);
	MPASS(cwt->cwt_state == 0);
	cwt->cwt_state = CWT_RUNNING;
	cv_signal(&cwt->cwt_cv);

	while (__predict_true(cwt->cwt_state != CWT_STOP)) {
		cwt->cwt_state = CWT_RUNNING;
		while ((icc = TAILQ_FIRST(&cwt->rx_head)) != NULL) {
			TAILQ_REMOVE(&cwt->rx_head, icc, rx_link);
			mtx_unlock(&cwt->cwt_lock);

			ic = &icc->ic;
			sb = &ic->ic_socket->so_rcv;

			SOCKBUF_LOCK(sb);
			MPASS(icc->rx_flags & RXF_ACTIVE);
			if (__predict_true(!(sb->sb_state & SBS_CANTRCVMORE))) {
				MPASS(STAILQ_EMPTY(&rx_pdus));
				STAILQ_SWAP(&icc->rcvd_pdus, &rx_pdus, icl_pdu);
				SOCKBUF_UNLOCK(sb);

				/* Hand over PDUs to ICL. */
				while ((ip = STAILQ_FIRST(&rx_pdus)) != NULL) {
					STAILQ_REMOVE_HEAD(&rx_pdus, ip_next);
					ic->ic_receive(ip);
				}

				SOCKBUF_LOCK(sb);
				MPASS(STAILQ_EMPTY(&rx_pdus));
			}
			MPASS(icc->rx_flags & RXF_ACTIVE);
			if (STAILQ_EMPTY(&icc->rcvd_pdus) ||
			    __predict_false(sb->sb_state & SBS_CANTRCVMORE)) {
				icc->rx_flags &= ~RXF_ACTIVE;
			} else {
				/*
				 * More PDUs were received while we were busy
				 * handing over the previous batch to ICL.
				 * Re-add this connection to the end of the
				 * queue.
				 */
				mtx_lock(&cwt->cwt_lock);
				TAILQ_INSERT_TAIL(&cwt->rx_head, icc,
				    rx_link);
				mtx_unlock(&cwt->cwt_lock);
			}
			SOCKBUF_UNLOCK(sb);

			mtx_lock(&cwt->cwt_lock);
		}

		/* Inner loop doesn't check for CWT_STOP, do that first. */
		if (__predict_false(cwt->cwt_state == CWT_STOP))
			break;
		cwt->cwt_state = CWT_SLEEPING;
		cv_wait(&cwt->cwt_cv, &cwt->cwt_lock);
	}

	MPASS(TAILQ_FIRST(&cwt->rx_head) == NULL);
	mtx_assert(&cwt->cwt_lock, MA_OWNED);
	cwt->cwt_state = CWT_STOPPED;
	cv_signal(&cwt->cwt_cv);
	mtx_unlock(&cwt->cwt_lock);
	kthread_exit();
}

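/*
 * Start one worker thread per CPU, up to 32.  A failure to start a thread
 * is fatal only if no thread has been started yet; otherwise the driver
 * carries on with however many threads did start.
 */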
static int
start_worker_threads(void)
{
	int i, rc;
	struct cxgbei_worker_thread_softc *cwt;

	worker_thread_count = min(mp_ncpus, 32);
	cwt_softc = malloc(worker_thread_count * sizeof(*cwt), M_CXGBE,
	    M_WAITOK | M_ZERO);

	MPASS(cxgbei_proc == NULL);
	for (i = 0, cwt = &cwt_softc[0]; i < worker_thread_count; i++, cwt++) {
		mtx_init(&cwt->cwt_lock, "cwt lock", NULL, MTX_DEF);
		cv_init(&cwt->cwt_cv, "cwt cv");
		TAILQ_INIT(&cwt->rx_head);
		rc = kproc_kthread_add(cwt_main, cwt, &cxgbei_proc, NULL, 0, 0,
		    "cxgbei", "%d", i);
		if (rc != 0) {
			printf("cxgbei: failed to start thread #%d/%d (%d)\n",
			    i + 1, worker_thread_count, rc);
			mtx_destroy(&cwt->cwt_lock);
			cv_destroy(&cwt->cwt_cv);
			bzero(cwt, sizeof(*cwt));
			if (i == 0) {
				free(cwt_softc, M_CXGBE);
				worker_thread_count = 0;

				return (rc);
			}

			/* Not fatal, carry on with fewer threads. */
			worker_thread_count = i;
			rc = 0;
			break;
		}

		/* Wait for thread to start before moving on to the next one. */
		mtx_lock(&cwt->cwt_lock);
		while (cwt->cwt_state == 0)
			cv_wait(&cwt->cwt_cv, &cwt->cwt_lock);
		mtx_unlock(&cwt->cwt_lock);
	}

	MPASS(cwt_softc != NULL);
	MPASS(worker_thread_count > 0);
	return (0);
}

static void
stop_worker_threads(void)
{
	int i;
	struct cxgbei_worker_thread_softc *cwt;

	MPASS(worker_thread_count >= 0);

	for (i = 0, cwt = &cwt_softc[0]; i < worker_thread_count; i++, cwt++) {
		mtx_lock(&cwt->cwt_lock);
		MPASS(cwt->cwt_state == CWT_RUNNING ||
		    cwt->cwt_state == CWT_SLEEPING);
		cwt->cwt_state = CWT_STOP;
		cv_signal(&cwt->cwt_cv);
		do {
			cv_wait(&cwt->cwt_cv, &cwt->cwt_lock);
		} while (cwt->cwt_state != CWT_STOPPED);
		mtx_unlock(&cwt->cwt_lock);
		mtx_destroy(&cwt->cwt_lock);
		cv_destroy(&cwt->cwt_cv);
	}
	free(cwt_softc, M_CXGBE);
}

/* Select a worker thread for a connection. */
u_int
cxgbei_select_worker_thread(struct icl_cxgbei_conn *icc)
{
	struct adapter *sc = icc->sc;
	struct toepcb *toep = icc->toep;
	u_int i, n;

	n = worker_thread_count / sc->sge.nofldrxq;
	if (n > 0)
		i = toep->vi->pi->port_id * n + arc4random() % n;
	else
		i = arc4random() % worker_thread_count;

	CTR3(KTR_CXGBE, "%s: tid %u, cwt %u", __func__, toep->tid, i);

	return (i);
}

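/*
 * Module load installs the rx CPL handlers, starts the worker threads, and
 * registers the ULD with the base driver before activating iSCSI on all
 * adapters that already have offload enabled; unload undoes all of that in
 * reverse.
 */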
static int
cxgbei_mod_load(void)
{
	int rc;

	t4_register_cpl_handler(CPL_ISCSI_HDR, do_rx_iscsi_hdr);
	t4_register_cpl_handler(CPL_ISCSI_DATA, do_rx_iscsi_data);
	t4_register_cpl_handler(CPL_RX_ISCSI_DDP, do_rx_iscsi_ddp);

	rc = start_worker_threads();
	if (rc != 0)
		return (rc);

	rc = t4_register_uld(&cxgbei_uld_info);
	if (rc != 0) {
		stop_worker_threads();
		return (rc);
	}

	t4_iterate(cxgbei_activate_all, NULL);

	return (rc);
}

static int
cxgbei_mod_unload(void)
{

	t4_iterate(cxgbei_deactivate_all, NULL);

	if (t4_unregister_uld(&cxgbei_uld_info) == EBUSY)
		return (EBUSY);

	stop_worker_threads();

	t4_register_cpl_handler(CPL_ISCSI_HDR, NULL);
	t4_register_cpl_handler(CPL_ISCSI_DATA, NULL);
	t4_register_cpl_handler(CPL_RX_ISCSI_DDP, NULL);

	return (0);
}
#endif

static int
cxgbei_modevent(module_t mod, int cmd, void *arg)
{
	int rc = 0;

#ifdef TCP_OFFLOAD
	switch (cmd) {
	case MOD_LOAD:
		rc = cxgbei_mod_load();
		if (rc == 0)
			rc = icl_cxgbei_mod_load();
		break;

	case MOD_UNLOAD:
		rc = icl_cxgbei_mod_unload();
		if (rc == 0)
			rc = cxgbei_mod_unload();
		break;

	default:
		rc = EINVAL;
	}
#else
	printf("cxgbei: compiled without TCP_OFFLOAD support.\n");
	rc = EOPNOTSUPP;
#endif

	return (rc);
}

static moduledata_t cxgbei_mod = {
	"cxgbei",
	cxgbei_modevent,
	NULL,
};

MODULE_VERSION(cxgbei, 1);
DECLARE_MODULE(cxgbei, cxgbei_mod, SI_SUB_EXEC, SI_ORDER_ANY);
MODULE_DEPEND(cxgbei, t4_tom, 1, 1, 1);
MODULE_DEPEND(cxgbei, cxgbe, 1, 1, 1);
MODULE_DEPEND(cxgbei, icl, 1, 1, 1);