/*-
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * cxgbei implementation of iSCSI Common Layer kobj(9) interface.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#include <dev/iscsi/icl.h>
#include <dev/iscsi/iscsi_proto.h>
#include <icl_conn_if.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_error.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_ioctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_compat.h>
#include <cam/scsi/scsi_message.h>

#include "common/common.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"
#include "cxgbei.h"

SYSCTL_NODE(_kern_icl, OID_AUTO, cxgbei, CTLFLAG_RD, 0, "Chelsio iSCSI offload");
static int coalesce = 1;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, coalesce, CTLFLAG_RWTUN,
    &coalesce, 0, "Try to coalesce PDUs before sending");
static int partial_receive_len = 128 * 1024;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, partial_receive_len, CTLFLAG_RWTUN,
    &partial_receive_len, 0, "Minimum read size for partially received "
    "data segment");
static int sendspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, sendspace, CTLFLAG_RWTUN,
    &sendspace, 0, "Default send socket buffer size");
static int recvspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, recvspace, CTLFLAG_RWTUN,
    &recvspace, 0, "Default receive socket buffer size");
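
/*
 * All four knobs above are CTLFLAG_RWTUN, i.e. they are run-time sysctls
 * as well as loader tunables.  Illustrative usage (shell, not part of this
 * file):
 *
 *	# sysctl kern.icl.cxgbei.coalesce=0
 *	# echo kern.icl.cxgbei.sendspace=2097152 >> /boot/loader.conf
 */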

static uma_zone_t prsv_zone;
static volatile u_int icl_cxgbei_ncons;

#define ICL_CONN_LOCK(X)		mtx_lock(X->ic_lock)
#define ICL_CONN_UNLOCK(X)		mtx_unlock(X->ic_lock)
#define ICL_CONN_LOCK_ASSERT(X)		mtx_assert(X->ic_lock, MA_OWNED)
#define ICL_CONN_LOCK_ASSERT_NOT(X)	mtx_assert(X->ic_lock, MA_NOTOWNED)

struct icl_pdu *icl_cxgbei_new_pdu(int);
void icl_cxgbei_new_pdu_set_conn(struct icl_pdu *, struct icl_conn *);

static icl_conn_new_pdu_t	icl_cxgbei_conn_new_pdu;
icl_conn_pdu_free_t	icl_cxgbei_conn_pdu_free;
static icl_conn_pdu_data_segment_length_t
				icl_cxgbei_conn_pdu_data_segment_length;
static icl_conn_pdu_append_data_t	icl_cxgbei_conn_pdu_append_data;
static icl_conn_pdu_get_data_t	icl_cxgbei_conn_pdu_get_data;
static icl_conn_pdu_queue_t	icl_cxgbei_conn_pdu_queue;
static icl_conn_handoff_t	icl_cxgbei_conn_handoff;
static icl_conn_free_t		icl_cxgbei_conn_free;
static icl_conn_close_t		icl_cxgbei_conn_close;
static icl_conn_task_setup_t	icl_cxgbei_conn_task_setup;
static icl_conn_task_done_t	icl_cxgbei_conn_task_done;
static icl_conn_transfer_setup_t	icl_cxgbei_conn_transfer_setup;
static icl_conn_transfer_done_t	icl_cxgbei_conn_transfer_done;

static kobj_method_t icl_cxgbei_methods[] = {
	KOBJMETHOD(icl_conn_new_pdu, icl_cxgbei_conn_new_pdu),
	KOBJMETHOD(icl_conn_pdu_free, icl_cxgbei_conn_pdu_free),
	KOBJMETHOD(icl_conn_pdu_data_segment_length,
	    icl_cxgbei_conn_pdu_data_segment_length),
	KOBJMETHOD(icl_conn_pdu_append_data, icl_cxgbei_conn_pdu_append_data),
	KOBJMETHOD(icl_conn_pdu_get_data, icl_cxgbei_conn_pdu_get_data),
	KOBJMETHOD(icl_conn_pdu_queue, icl_cxgbei_conn_pdu_queue),
	KOBJMETHOD(icl_conn_handoff, icl_cxgbei_conn_handoff),
	KOBJMETHOD(icl_conn_free, icl_cxgbei_conn_free),
	KOBJMETHOD(icl_conn_close, icl_cxgbei_conn_close),
	KOBJMETHOD(icl_conn_task_setup, icl_cxgbei_conn_task_setup),
	KOBJMETHOD(icl_conn_task_done, icl_cxgbei_conn_task_done),
	KOBJMETHOD(icl_conn_transfer_setup, icl_cxgbei_conn_transfer_setup),
	KOBJMETHOD(icl_conn_transfer_done, icl_cxgbei_conn_transfer_done),
	{ 0, 0 }
};

DEFINE_CLASS(icl_cxgbei, icl_cxgbei_methods, sizeof(struct icl_cxgbei_conn));
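
/*
 * kobj(9) glue: the ICL core does not call the methods above directly.  It
 * creates an instance of this class and dispatches through the wrappers
 * generated from icl_conn_if.m.  A minimal sketch of the caller's side
 * (illustrative and not compiled; "example" and "example_lock" are
 * placeholders):
 */
#if 0
	struct icl_conn *ic = icl_cxgbei_new_conn("example", &example_lock);
	struct icl_pdu *ip = ICL_CONN_NEW_PDU(ic, M_WAITOK);

	ICL_CONN_PDU_FREE(ic, ip);	/* -> icl_cxgbei_conn_pdu_free() */
#endif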

void
icl_cxgbei_conn_pdu_free(struct icl_conn *ic, struct icl_pdu *ip)
{
#ifdef INVARIANTS
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
#endif

	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
	MPASS(ic == ip->ip_conn);
	MPASS(ip->ip_bhs_mbuf != NULL);

	m_freem(ip->ip_ahs_mbuf);
	m_freem(ip->ip_data_mbuf);
	m_freem(ip->ip_bhs_mbuf);	/* storage for icl_cxgbei_pdu itself */

#ifdef DIAGNOSTIC
	if (__predict_true(ic != NULL))
		refcount_release(&ic->ic_outstanding_pdus);
#endif
}

struct icl_pdu *
icl_cxgbei_new_pdu(int flags)
{
	struct icl_cxgbei_pdu *icp;
	struct icl_pdu *ip;
	struct mbuf *m;
	uintptr_t a;

	m = m_gethdr(flags, MT_DATA);
	if (__predict_false(m == NULL))
		return (NULL);

	a = roundup2(mtod(m, uintptr_t), _Alignof(struct icl_cxgbei_pdu));
	icp = (struct icl_cxgbei_pdu *)a;
	bzero(icp, sizeof(*icp));

	icp->icp_signature = CXGBEI_PDU_SIGNATURE;
	ip = &icp->ip;
	ip->ip_bhs_mbuf = m;

	a = roundup2((uintptr_t)(icp + 1), _Alignof(struct iscsi_bhs *));
	ip->ip_bhs = (struct iscsi_bhs *)a;
#ifdef INVARIANTS
	/* Everything must fit entirely in the mbuf. */
	a = (uintptr_t)(ip->ip_bhs + 1);
	MPASS(a <= (uintptr_t)m + MSIZE);
#endif
	bzero(ip->ip_bhs, sizeof(*ip->ip_bhs));

	m->m_data = (void *)ip->ip_bhs;
	m->m_len = sizeof(struct iscsi_bhs);
	m->m_pkthdr.len = m->m_len;

	return (ip);
}
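
/*
 * Rough layout of the single mbuf allocated by icl_cxgbei_new_pdu(), not
 * to scale:
 *
 *	|<---------------------------- MSIZE ---------------------------->|
 *	| mbuf header | pad | struct icl_cxgbei_pdu | pad | iscsi_bhs | .. |
 *	                ^ roundup2 to _Alignof(icp)         ^ m_data
 *
 * The PDU descriptor lives inside its own BHS mbuf, which is why freeing
 * ip_bhs_mbuf in icl_cxgbei_conn_pdu_free() releases the descriptor too,
 * and why the INVARIANTS block above checks that everything fits in MSIZE.
 */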

void
icl_cxgbei_new_pdu_set_conn(struct icl_pdu *ip, struct icl_conn *ic)
{

	ip->ip_conn = ic;
#ifdef DIAGNOSTIC
	refcount_acquire(&ic->ic_outstanding_pdus);
#endif
}

/*
 * Allocate an icl_pdu with an empty BHS to be filled up by the caller.
 */
static struct icl_pdu *
icl_cxgbei_conn_new_pdu(struct icl_conn *ic, int flags)
{
	struct icl_pdu *ip;

	ip = icl_cxgbei_new_pdu(flags);
	if (__predict_false(ip == NULL))
		return (NULL);
	icl_cxgbei_new_pdu_set_conn(ip, ic);

	return (ip);
}

static size_t
icl_pdu_data_segment_length(const struct icl_pdu *request)
{
	uint32_t len = 0;

	len += request->ip_bhs->bhs_data_segment_len[0];
	len <<= 8;
	len += request->ip_bhs->bhs_data_segment_len[1];
	len <<= 8;
	len += request->ip_bhs->bhs_data_segment_len[2];

	return (len);
}

size_t
icl_cxgbei_conn_pdu_data_segment_length(struct icl_conn *ic,
    const struct icl_pdu *request)
{

	return (icl_pdu_data_segment_length(request));
}
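
/*
 * DataSegmentLength in the BHS is a 24-bit big-endian field.  For example,
 * a 0x012345-byte segment is stored as bhs_data_segment_len[] =
 * { 0x01, 0x23, 0x45 } and decoded above as ((0x01 << 8 | 0x23) << 8) |
 * 0x45 = 0x012345.  finalize_pdu() below performs the matching encode.
 */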

static struct mbuf *
finalize_pdu(struct icl_cxgbei_conn *icc, struct icl_cxgbei_pdu *icp)
{
	struct icl_pdu *ip = &icp->ip;
	uint8_t ulp_submode, padding;
	struct mbuf *m, *last;
	struct iscsi_bhs *bhs;

	/*
	 * Fix up the data segment mbuf first.
	 */
	m = ip->ip_data_mbuf;
	ulp_submode = icc->ulp_submode;
	if (m) {
		last = m_last(m);

		/*
		 * Round up the data segment to a 4B boundary.  Pad with 0 if
		 * necessary.  There will definitely be room in the mbuf.
		 */
		padding = roundup2(ip->ip_data_len, 4) - ip->ip_data_len;
		if (padding) {
			bzero(mtod(last, uint8_t *) + last->m_len, padding);
			last->m_len += padding;
		}
	} else {
		MPASS(ip->ip_data_len == 0);
		ulp_submode &= ~ULP_CRC_DATA;
		padding = 0;
	}

	/*
	 * Now the header mbuf that has the BHS.
	 */
	m = ip->ip_bhs_mbuf;
	MPASS(m->m_pkthdr.len == sizeof(struct iscsi_bhs));
	MPASS(m->m_len == sizeof(struct iscsi_bhs));

	bhs = ip->ip_bhs;
	bhs->bhs_data_segment_len[2] = ip->ip_data_len;
	bhs->bhs_data_segment_len[1] = ip->ip_data_len >> 8;
	bhs->bhs_data_segment_len[0] = ip->ip_data_len >> 16;

	/* "Convert" PDU to mbuf chain.  Do not use icp/ip after this. */
	m->m_pkthdr.len = sizeof(struct iscsi_bhs) + ip->ip_data_len + padding;
	m->m_next = ip->ip_data_mbuf;
	set_mbuf_ulp_submode(m, ulp_submode);
#ifdef INVARIANTS
	bzero(icp, sizeof(*icp));
#endif
#ifdef DIAGNOSTIC
	refcount_release(&icc->ic.ic_outstanding_pdus);
#endif

	return (m);
}
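
/*
 * Worked example for the padding logic above: with ip_data_len = 10,
 * roundup2(10, 4) = 12, so 2 zero bytes are appended to the last data mbuf
 * and m_pkthdr.len covers BHS + 10 + 2 bytes, while the BHS continues to
 * advertise a 10-byte data segment; the iSCSI spec requires data segments
 * to be padded to a 4-byte boundary and the receiver discards the pad.
 */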

int
icl_cxgbei_conn_pdu_append_data(struct icl_conn *ic, struct icl_pdu *ip,
    const void *addr, size_t len, int flags)
{
	struct mbuf *m;
#ifdef INVARIANTS
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
#endif

	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
	MPASS(ic == ip->ip_conn);
	KASSERT(len > 0, ("%s: len is %jd", __func__, (intmax_t)len));

	m = ip->ip_data_mbuf;
	if (m == NULL) {
		m = m_getjcl(M_NOWAIT, MT_DATA, 0, MJUM16BYTES);
		if (__predict_false(m == NULL))
			return (ENOMEM);

		ip->ip_data_mbuf = m;
	}

	if (__predict_true(m_append(m, len, addr) != 0)) {
		ip->ip_data_len += len;
		MPASS(ip->ip_data_len <= ic->ic_max_data_segment_length);
		return (0);
	} else {
		if (flags & M_WAITOK) {
			CXGBE_UNIMPLEMENTED("fail safe append");
		}
		ip->ip_data_len = m_length(m, NULL);
		return (1);
	}
}

void
icl_cxgbei_conn_pdu_get_data(struct icl_conn *ic, struct icl_pdu *ip,
    size_t off, void *addr, size_t len)
{
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

	if (icp->icp_flags & ICPF_RX_DDP)
		return;		/* data is DDP'ed, no need to copy */
	m_copydata(ip->ip_data_mbuf, off, len, addr);
}

void
icl_cxgbei_conn_pdu_queue(struct icl_conn *ic, struct icl_pdu *ip)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
	struct socket *so = ic->ic_socket;
	struct toepcb *toep = icc->toep;
	struct inpcb *inp;
	struct mbuf *m;

	MPASS(ic == ip->ip_conn);
	MPASS(ip->ip_bhs_mbuf != NULL);
	/* The kernel doesn't generate PDUs with AHS. */
	MPASS(ip->ip_ahs_mbuf == NULL && ip->ip_ahs_len == 0);

	ICL_CONN_LOCK_ASSERT(ic);
	/* NOTE: sowriteable without so_snd lock is a mostly harmless race. */
	if (ic->ic_disconnecting || so == NULL || !sowriteable(so)) {
		icl_cxgbei_conn_pdu_free(ic, ip);
		return;
	}

	m = finalize_pdu(icc, icp);
	M_ASSERTPKTHDR(m);
	MPASS((m->m_pkthdr.len & 3) == 0);

	/*
	 * Do not get inp from toep->inp as the toepcb might have detached
	 * already.
	 */
	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) ||
	    __predict_false((toep->flags & TPF_ATTACHED) == 0))
		m_freem(m);
	else {
		mbufq_enqueue(&toep->ulp_pduq, m);
		t4_push_pdus(icc->sc, toep, 0);
	}
	INP_WUNLOCK(inp);
}

static struct icl_conn *
icl_cxgbei_new_conn(const char *name, struct mtx *lock)
{
	struct icl_cxgbei_conn *icc;
	struct icl_conn *ic;

	refcount_acquire(&icl_cxgbei_ncons);

	icc = (struct icl_cxgbei_conn *)kobj_create(&icl_cxgbei_class, M_CXGBE,
	    M_WAITOK | M_ZERO);
	icc->icc_signature = CXGBEI_CONN_SIGNATURE;
	STAILQ_INIT(&icc->rcvd_pdus);

	ic = &icc->ic;
	ic->ic_lock = lock;

	/* XXXNP: review.  Most of these icl_conn fields aren't really used */
	STAILQ_INIT(&ic->ic_to_send);
	cv_init(&ic->ic_send_cv, "icl_cxgbei_tx");
	cv_init(&ic->ic_receive_cv, "icl_cxgbei_rx");
#ifdef DIAGNOSTIC
	refcount_init(&ic->ic_outstanding_pdus, 0);
#endif
	/* This is a stop-gap value that will be corrected during handoff. */
	ic->ic_max_data_segment_length = 16384;
	ic->ic_name = name;
	ic->ic_offload = "cxgbei";
	ic->ic_unmapped = false;

	CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

	return (ic);
}

void
icl_cxgbei_conn_free(struct icl_conn *ic)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);

	CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

	cv_destroy(&ic->ic_send_cv);
	cv_destroy(&ic->ic_receive_cv);

	kobj_delete((struct kobj *)icc, M_CXGBE);
	refcount_release(&icl_cxgbei_ncons);
}

static int
icl_cxgbei_setsockopt(struct icl_conn *ic, struct socket *so, int sspace,
    int rspace)
{
	struct sockopt opt;
	int error, one = 1, ss, rs;

	ss = max(sendspace, sspace);
	rs = max(recvspace, rspace);

	error = soreserve(so, ss, rs);
	if (error != 0) {
		icl_cxgbei_conn_close(ic);
		return (error);
	}
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_AUTOSIZE;
	SOCKBUF_UNLOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_flags |= SB_AUTOSIZE;
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Disable Nagle.
	 */
	bzero(&opt, sizeof(opt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = IPPROTO_TCP;
	opt.sopt_name = TCP_NODELAY;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	error = sosetopt(so, &opt);
	if (error != 0) {
		icl_cxgbei_conn_close(ic);
		return (error);
	}

	return (0);
}
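
/*
 * The sosetopt() above is the in-kernel counterpart of the setsockopt(2)
 * an iSCSI daemon would issue from userland.  A minimal userland sketch
 * (illustrative, not part of this file):
 */
#if 0
	int one = 1;

	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
#endif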

/*
 * Request/response structure used to find out the adapter offloading a socket.
 */
struct find_ofld_adapter_rr {
	struct socket *so;
	struct adapter *sc;	/* result */
};

static void
find_offload_adapter(struct adapter *sc, void *arg)
{
	struct find_ofld_adapter_rr *fa = arg;
	struct socket *so = fa->so;
	struct tom_data *td = sc->tom_softc;
	struct tcpcb *tp;
	struct inpcb *inp;

	/* Non-TCP sockets were filtered out earlier. */
	MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);

	if (fa->sc != NULL)
		return;		/* Found already. */

	if (td == NULL)
		return;		/* TOE not enabled on this adapter. */

	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		tp = intotcpcb(inp);
		if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
			fa->sc = sc;	/* Found. */
	}
	INP_WUNLOCK(inp);
}

/* XXXNP: move this to t4_tom. */
static void
send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	const u_int nparams = 1;
	u_int flowclen;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[0].val = htobe32(maxlen);

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	t4_wrq_tx(sc, wr);
}

static void
set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, int hcrc, int dcrc)
{
	uint64_t val = ULP_MODE_ISCSI;

	if (hcrc)
		val |= ULP_CRC_HEADER << 4;
	if (dcrc)
		val |= ULP_CRC_DATA << 4;

	CTR4(KTR_CXGBE, "%s: tid %u, ULP_MODE_ISCSI, CRC hdr=%d data=%d",
	    __func__, toep->tid, hcrc, dcrc);

	t4_set_tcb_field(sc, toep->ctrlq, toep->tid, W_TCB_ULP_TYPE,
	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE) | V_TCB_ULP_RAW(M_TCB_ULP_RAW), val,
	    0, 0, toep->ofld_rxq->iq.abs_id);
}
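
/*
 * By example: with both digests enabled, the raw TCB value written above
 * is ULP_MODE_ISCSI | ((ULP_CRC_HEADER | ULP_CRC_DATA) << 4), i.e. the ULP
 * type in the low nibble and the CRC submode in the next one.  This must
 * agree with icc->ulp_submode, which icl_cxgbei_conn_handoff() below
 * derives from the same two ic_*_crc32c flags.
 */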

/*
 * XXXNP: Who is responsible for cleaning up the socket if this returns with an
 * error?  Review all error paths.
 *
 * XXXNP: What happens to the socket's fd reference if the operation is
 * successful, and how does that affect the socket's life cycle?
 */
int
icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct cxgbei_data *ci;
	struct find_ofld_adapter_rr fa;
	struct file *fp;
	struct socket *so;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct toepcb *toep;
	cap_rights_t rights;
	int error;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ICL_CONN_LOCK_ASSERT_NOT(ic);

	/*
	 * Steal the socket from userland.
	 */
	error = fget(curthread, fd,
	    cap_rights_init(&rights, CAP_SOCK_CLIENT), &fp);
	if (error != 0)
		return (error);
	if (fp->f_type != DTYPE_SOCKET) {
		fdrop(fp, curthread);
		return (EINVAL);
	}
	so = fp->f_data;
	if (so->so_type != SOCK_STREAM ||
	    so->so_proto->pr_protocol != IPPROTO_TCP) {
		fdrop(fp, curthread);
		return (EINVAL);
	}

	ICL_CONN_LOCK(ic);
	if (ic->ic_socket != NULL) {
		ICL_CONN_UNLOCK(ic);
		fdrop(fp, curthread);
		return (EBUSY);
	}
	ic->ic_disconnecting = false;
	ic->ic_socket = so;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	fdrop(fp, curthread);
	ICL_CONN_UNLOCK(ic);

	/* Find the adapter offloading this socket. */
	fa.sc = NULL;
	fa.so = so;
	t4_iterate(find_offload_adapter, &fa);
	if (fa.sc == NULL)
		return (EINVAL);
	icc->sc = fa.sc;
	ci = icc->sc->iscsi_ulp_softc;

	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))
		error = EBUSY;
	else {
		/*
		 * The socket cannot have been "unoffloaded" if we got here.
		 */
		MPASS(tp->t_flags & TF_TOE);
		MPASS(tp->tod != NULL);
		MPASS(tp->t_toe != NULL);
		toep = tp->t_toe;
		MPASS(toep->vi->pi->adapter == icc->sc);
		icc->toep = toep;
		icc->cwt = cxgbei_select_worker_thread(icc);

		/*
		 * We maintain the _send_ DSL in this field just to have a
		 * convenient way to assert that the kernel never sends
		 * oversized PDUs.  This field is otherwise unused in the
		 * driver or the kernel.
		 */
		ic->ic_max_data_segment_length = ci->max_tx_pdu_len -
		    ISCSI_BHS_SIZE;

		icc->ulp_submode = 0;
		if (ic->ic_header_crc32c) {
			icc->ulp_submode |= ULP_CRC_HEADER;
			ic->ic_max_data_segment_length -=
			    ISCSI_HEADER_DIGEST_SIZE;
		}
		if (ic->ic_data_crc32c) {
			icc->ulp_submode |= ULP_CRC_DATA;
			ic->ic_max_data_segment_length -=
			    ISCSI_DATA_DIGEST_SIZE;
		}
		so->so_options |= SO_NO_DDP;
		toep->ulp_mode = ULP_MODE_ISCSI;
		toep->ulpcb = icc;

		send_iscsi_flowc_wr(icc->sc, toep, ci->max_tx_pdu_len);
		set_ulp_mode_iscsi(icc->sc, toep, ic->ic_header_crc32c,
		    ic->ic_data_crc32c);
		error = 0;
	}
	INP_WUNLOCK(inp);

	if (error == 0) {
		error = icl_cxgbei_setsockopt(ic, so, ci->max_tx_pdu_len,
		    ci->max_rx_pdu_len);
	}

	return (error);
}
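
/*
 * Caller's view of the handoff, as a sketch (illustrative; it assumes the
 * fd was passed down from iscsid/ctld via an ioctl).  Note that, per the
 * XXXNP comments above, socket ownership on the error paths is murky:
 * early failures leave the socket with the fd, later ones have already
 * stolen it.
 */
#if 0
	error = ICL_CONN_HANDOFF(ic, fd);	/* -> icl_cxgbei_conn_handoff() */
#endif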

void
icl_cxgbei_conn_close(struct icl_conn *ic)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct icl_pdu *ip;
	struct socket *so;
	struct sockbuf *sb;
	struct inpcb *inp;
	struct toepcb *toep = icc->toep;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ICL_CONN_LOCK_ASSERT_NOT(ic);

	ICL_CONN_LOCK(ic);
	so = ic->ic_socket;
	if (ic->ic_disconnecting || so == NULL) {
		CTR4(KTR_CXGBE, "%s: icc %p (disconnecting = %d), so %p",
		    __func__, icc, ic->ic_disconnecting, so);
		ICL_CONN_UNLOCK(ic);
		return;
	}
	ic->ic_disconnecting = true;

	/* These are unused in this driver right now. */
	MPASS(STAILQ_EMPTY(&ic->ic_to_send));
	MPASS(ic->ic_receive_pdu == NULL);

#ifdef DIAGNOSTIC
	KASSERT(ic->ic_outstanding_pdus == 0,
	    ("destroying session with %d outstanding PDUs",
	    ic->ic_outstanding_pdus));
#endif
	ICL_CONN_UNLOCK(ic);

	CTR3(KTR_CXGBE, "%s: tid %d, icc %p", __func__, toep ? toep->tid : -1,
	    icc);
	inp = sotoinpcb(so);
	sb = &so->so_rcv;
	INP_WLOCK(inp);
	if (toep != NULL) {	/* NULL if connection was never offloaded. */
		toep->ulpcb = NULL;
		mbufq_drain(&toep->ulp_pduq);
		SOCKBUF_LOCK(sb);
		if (icc->rx_flags & RXF_ACTIVE) {
			volatile u_int *p = &icc->rx_flags;

			SOCKBUF_UNLOCK(sb);
			INP_WUNLOCK(inp);

			while (*p & RXF_ACTIVE)
				pause("conclo", 1);

			INP_WLOCK(inp);
			SOCKBUF_LOCK(sb);
		}

		while (!STAILQ_EMPTY(&icc->rcvd_pdus)) {
			ip = STAILQ_FIRST(&icc->rcvd_pdus);
			STAILQ_REMOVE_HEAD(&icc->rcvd_pdus, ip_next);
			icl_cxgbei_conn_pdu_free(ic, ip);
		}
		SOCKBUF_UNLOCK(sb);
	}
	INP_WUNLOCK(inp);

	ICL_CONN_LOCK(ic);
	ic->ic_socket = NULL;
	ICL_CONN_UNLOCK(ic);

	/*
	 * XXXNP: we should send RST instead of FIN when PDUs held in various
	 * queues were purged instead of delivered reliably but soabort isn't
	 * really general purpose and wouldn't do the right thing here.
	 */
	soclose(so);
}

int
icl_cxgbei_conn_task_setup(struct icl_conn *ic, struct icl_pdu *ip,
    struct ccb_scsiio *csio, uint32_t *ittp, void **arg)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct toepcb *toep = icc->toep;
	struct adapter *sc = icc->sc;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct ppod_region *pr = &ci->pr;
	struct ppod_reservation *prsv;
	uint32_t itt;
	int rc = 0;

	/* This is for the offload driver's state.  Must not be set already. */
	MPASS(arg != NULL);
	MPASS(*arg == NULL);

	if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN ||
	    csio->dxfer_len < ci->ddp_threshold) {
no_ddp:
		/*
		 * No DDP for this I/O.  Allocate an ITT (based on the one
		 * passed in) that cannot be a valid hardware DDP tag in the
		 * iSCSI region.
		 */
		itt = *ittp & M_PPOD_TAG;
		itt = V_PPOD_TAG(itt) | pr->pr_invalid_bit;
		*ittp = htobe32(itt);
		MPASS(*arg == NULL);	/* State is maintained for DDP only. */
		if (rc != 0)
			counter_u64_add(ci->ddp_setup_error, 1);
		return (0);
	}

	/*
	 * Reserve resources for DDP, update the itt that should be used in the
	 * PDU, and save DDP specific state for this I/O in *arg.
	 */

	prsv = uma_zalloc(prsv_zone, M_NOWAIT);
	if (prsv == NULL) {
		rc = ENOMEM;
		goto no_ddp;
	}

	/* XXX add support for all CAM_DATA_ types */
	MPASS((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR);
	rc = t4_alloc_page_pods_for_buf(pr, (vm_offset_t)csio->data_ptr,
	    csio->dxfer_len, prsv);
	if (rc != 0) {
		uma_zfree(prsv_zone, prsv);
		goto no_ddp;
	}

	rc = t4_write_page_pods_for_buf(sc, toep->ofld_txq, toep->tid, prsv,
	    (vm_offset_t)csio->data_ptr, csio->dxfer_len);
	if (rc != 0) {
		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
		goto no_ddp;
	}

	*ittp = htobe32(prsv->prsv_tag);
	*arg = prsv;
	counter_u64_add(ci->ddp_setup_ok, 1);
	return (0);
}
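
/*
 * Tag construction by example: on the no_ddp path with *ittp = 0x1234, the
 * ITT handed back is htobe32(V_PPOD_TAG(0x1234 & M_PPOD_TAG) |
 * pr->pr_invalid_bit); the invalid bit guarantees that the hardware never
 * treats it as a DDP tag.  On the success path the ITT is the
 * reservation's prsv_tag, a valid tag within the iSCSI page pod region.
 */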

void
icl_cxgbei_conn_task_done(struct icl_conn *ic, void *arg)
{

	if (arg != NULL) {
		struct ppod_reservation *prsv = arg;

		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
	}
}

/* XXXNP: PDU should be passed in as a parameter, like on the initiator. */
#define io_to_request_pdu(io) ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr)
#define io_to_ppod_reservation(io) \
	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr)

int
icl_cxgbei_conn_transfer_setup(struct icl_conn *ic, union ctl_io *io,
    uint32_t *tttp, void **arg)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct toepcb *toep = icc->toep;
	struct ctl_scsiio *ctsio = &io->scsiio;
	struct adapter *sc = icc->sc;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct ppod_region *pr = &ci->pr;
	struct ppod_reservation *prsv;
	uint32_t ttt;
	int xferlen, rc = 0, alias;

	/* This is for the offload driver's state.  Must not be set already. */
	MPASS(arg != NULL);
	MPASS(*arg == NULL);

	if (ctsio->ext_data_filled == 0) {
		int first_burst;
		struct icl_pdu *ip = io_to_request_pdu(io);
		vm_offset_t buf;
#ifdef INVARIANTS
		struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

		MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
		MPASS(ic == ip->ip_conn);
		MPASS(ip->ip_bhs_mbuf != NULL);
#endif
		first_burst = icl_pdu_data_segment_length(ip);

		/*
		 * Note that ICL calls conn_transfer_setup even if the first
		 * burst had everything and there's nothing left to transfer.
		 */
		MPASS(ctsio->kern_data_len >= first_burst);
		xferlen = ctsio->kern_data_len;
		if (xferlen - first_burst < ci->ddp_threshold) {
no_ddp:
			/*
			 * No DDP for this transfer.  Allocate a TTT (based on
			 * the one passed in) that cannot be a valid hardware
			 * DDP tag in the iSCSI region.
			 */
			ttt = *tttp & M_PPOD_TAG;
			ttt = V_PPOD_TAG(ttt) | pr->pr_invalid_bit;
			*tttp = htobe32(ttt);
			MPASS(io_to_ppod_reservation(io) == NULL);
			if (rc != 0)
				counter_u64_add(ci->ddp_setup_error, 1);
			return (0);
		}

		if (ctsio->kern_sg_entries == 0)
			buf = (vm_offset_t)ctsio->kern_data_ptr;
		else if (ctsio->kern_sg_entries == 1) {
			struct ctl_sg_entry *sgl =
			    (void *)ctsio->kern_data_ptr;

			MPASS(sgl->len == xferlen);
			buf = (vm_offset_t)sgl->addr;
		} else {
			rc = EAGAIN;	/* XXX implement */
			goto no_ddp;
		}

		/*
		 * Reserve resources for DDP, update the ttt that should be
		 * used in the PDU, and save DDP specific state for this I/O.
		 */

		MPASS(io_to_ppod_reservation(io) == NULL);
		prsv = uma_zalloc(prsv_zone, M_NOWAIT);
		if (prsv == NULL) {
			rc = ENOMEM;
			goto no_ddp;
		}

		rc = t4_alloc_page_pods_for_buf(pr, buf, xferlen, prsv);
		if (rc != 0) {
			uma_zfree(prsv_zone, prsv);
			goto no_ddp;
		}

		rc = t4_write_page_pods_for_buf(sc, toep->ofld_txq, toep->tid,
		    prsv, buf, xferlen);
		if (rc != 0) {
			t4_free_page_pods(prsv);
			uma_zfree(prsv_zone, prsv);
			goto no_ddp;
		}

		*tttp = htobe32(prsv->prsv_tag);
		io_to_ppod_reservation(io) = prsv;
		*arg = ctsio;
		counter_u64_add(ci->ddp_setup_ok, 1);
		return (0);
	}

	/*
	 * In the middle of an I/O.  A non-NULL page pod reservation indicates
	 * that a DDP buffer is being used for the I/O.
	 */

	prsv = io_to_ppod_reservation(ctsio);
	if (prsv == NULL)
		goto no_ddp;

	alias = (prsv->prsv_tag & pr->pr_alias_mask) >> pr->pr_alias_shift;
	alias++;
	prsv->prsv_tag &= ~pr->pr_alias_mask;
	prsv->prsv_tag |= (alias << pr->pr_alias_shift) & pr->pr_alias_mask;

	*tttp = htobe32(prsv->prsv_tag);
	*arg = ctsio;

	return (0);
}

void
icl_cxgbei_conn_transfer_done(struct icl_conn *ic, void *arg)
{
	struct ctl_scsiio *ctsio = arg;

	if (ctsio != NULL && ctsio->kern_data_len == ctsio->ext_data_filled) {
		struct ppod_reservation *prsv;

		prsv = io_to_ppod_reservation(ctsio);
		MPASS(prsv != NULL);

		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
	}
}

static void
cxgbei_limits(struct adapter *sc, void *arg)
{
	struct icl_drv_limits *idl = arg;
	struct cxgbei_data *ci;
	int max_dsl;

	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4lims") != 0)
		return;

	if (uld_active(sc, ULD_ISCSI)) {
		ci = sc->iscsi_ulp_softc;
		MPASS(ci != NULL);

		/*
		 * AHS is not supported by the kernel so we'll not account for
		 * it either in our PDU len -> data segment len conversions.
		 */

		max_dsl = ci->max_rx_pdu_len - ISCSI_BHS_SIZE -
		    ISCSI_HEADER_DIGEST_SIZE - ISCSI_DATA_DIGEST_SIZE;
		if (idl->idl_max_recv_data_segment_length > max_dsl)
			idl->idl_max_recv_data_segment_length = max_dsl;

		max_dsl = ci->max_tx_pdu_len - ISCSI_BHS_SIZE -
		    ISCSI_HEADER_DIGEST_SIZE - ISCSI_DATA_DIGEST_SIZE;
		if (idl->idl_max_send_data_segment_length > max_dsl)
			idl->idl_max_send_data_segment_length = max_dsl;
	}

	end_synchronized_op(sc, LOCK_HELD);
}

static int
icl_cxgbei_limits(struct icl_drv_limits *idl)
{

	/* Maximum allowed by the RFC.  cxgbei_limits will clip them. */
	idl->idl_max_recv_data_segment_length = (1 << 24) - 1;
	idl->idl_max_send_data_segment_length = (1 << 24) - 1;

	/* These are somewhat arbitrary. */
	idl->idl_max_burst_length = 2 * 1024 * 1024;
	idl->idl_first_burst_length = 8192;

	t4_iterate(cxgbei_limits, idl);

	return (0);
}

int
icl_cxgbei_mod_load(void)
{
	int rc;

	/*
	 * Space to track pagepod reservations.
	 */
	prsv_zone = uma_zcreate("Pagepod reservations",
	    sizeof(struct ppod_reservation), NULL, NULL, NULL, NULL,
	    CACHE_LINE_SIZE, 0);

	refcount_init(&icl_cxgbei_ncons, 0);

	rc = icl_register("cxgbei", false, -100, icl_cxgbei_limits,
	    icl_cxgbei_new_conn);

	return (rc);
}

int
icl_cxgbei_mod_unload(void)
{

	if (icl_cxgbei_ncons != 0)
		return (EBUSY);

	icl_unregister("cxgbei", false);

	uma_zdestroy(prsv_zone);

	return (0);
}
#endif