/*-
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * cxgbei implementation of iSCSI Common Layer kobj(9) interface.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <vm/uma.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#include <dev/iscsi/icl.h>
#include <dev/iscsi/iscsi_proto.h>
#include <icl_conn_if.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_error.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_ioctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_compat.h>
#include <cam/scsi/scsi_message.h>

#include "common/common.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"
#include "cxgbei.h"

SYSCTL_NODE(_kern_icl, OID_AUTO, cxgbei, CTLFLAG_RD, 0,
    "Chelsio iSCSI offload");
static int coalesce = 1;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, coalesce, CTLFLAG_RWTUN,
    &coalesce, 0, "Try to coalesce PDUs before sending");
static int partial_receive_len = 128 * 1024;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, partial_receive_len, CTLFLAG_RWTUN,
    &partial_receive_len, 0, "Minimum read size for partially received "
    "data segment");
static int sendspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, sendspace, CTLFLAG_RWTUN,
    &sendspace, 0, "Default send socket buffer size");
static int recvspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, recvspace, CTLFLAG_RWTUN,
    &recvspace, 0, "Default receive socket buffer size");

static uma_zone_t prsv_zone;
static volatile u_int icl_cxgbei_ncons;

#define	ICL_CONN_LOCK(X)		mtx_lock(X->ic_lock)
#define	ICL_CONN_UNLOCK(X)		mtx_unlock(X->ic_lock)
#define	ICL_CONN_LOCK_ASSERT(X)		mtx_assert(X->ic_lock, MA_OWNED)
#define	ICL_CONN_LOCK_ASSERT_NOT(X)	mtx_assert(X->ic_lock, MA_NOTOWNED)

struct icl_pdu *icl_cxgbei_new_pdu(int);
void icl_cxgbei_new_pdu_set_conn(struct icl_pdu *, struct icl_conn *);

static icl_conn_new_pdu_t	icl_cxgbei_conn_new_pdu;
icl_conn_pdu_free_t	icl_cxgbei_conn_pdu_free;
static icl_conn_pdu_data_segment_length_t
    icl_cxgbei_conn_pdu_data_segment_length;
static icl_conn_pdu_append_data_t	icl_cxgbei_conn_pdu_append_data;
static icl_conn_pdu_get_data_t	icl_cxgbei_conn_pdu_get_data;
static icl_conn_pdu_queue_t	icl_cxgbei_conn_pdu_queue;
static icl_conn_handoff_t	icl_cxgbei_conn_handoff;
static icl_conn_free_t	icl_cxgbei_conn_free;
static icl_conn_close_t	icl_cxgbei_conn_close;
static icl_conn_task_setup_t	icl_cxgbei_conn_task_setup;
static icl_conn_task_done_t	icl_cxgbei_conn_task_done;
static icl_conn_transfer_setup_t	icl_cxgbei_conn_transfer_setup;
static icl_conn_transfer_done_t	icl_cxgbei_conn_transfer_done;

static kobj_method_t icl_cxgbei_methods[] = {
	KOBJMETHOD(icl_conn_new_pdu, icl_cxgbei_conn_new_pdu),
	KOBJMETHOD(icl_conn_pdu_free, icl_cxgbei_conn_pdu_free),
	KOBJMETHOD(icl_conn_pdu_data_segment_length,
	    icl_cxgbei_conn_pdu_data_segment_length),
	KOBJMETHOD(icl_conn_pdu_append_data, icl_cxgbei_conn_pdu_append_data),
	KOBJMETHOD(icl_conn_pdu_get_data, icl_cxgbei_conn_pdu_get_data),
	KOBJMETHOD(icl_conn_pdu_queue, icl_cxgbei_conn_pdu_queue),
	KOBJMETHOD(icl_conn_handoff, icl_cxgbei_conn_handoff),
	KOBJMETHOD(icl_conn_free, icl_cxgbei_conn_free),
	KOBJMETHOD(icl_conn_close, icl_cxgbei_conn_close),
	KOBJMETHOD(icl_conn_task_setup, icl_cxgbei_conn_task_setup),
	KOBJMETHOD(icl_conn_task_done, icl_cxgbei_conn_task_done),
	KOBJMETHOD(icl_conn_transfer_setup, icl_cxgbei_conn_transfer_setup),
	KOBJMETHOD(icl_conn_transfer_done, icl_cxgbei_conn_transfer_done),
	{ 0, 0 }
};

DEFINE_CLASS(icl_cxgbei, icl_cxgbei_methods, sizeof(struct icl_cxgbei_conn));

void
icl_cxgbei_conn_pdu_free(struct icl_conn *ic, struct icl_pdu *ip)
{
#ifdef INVARIANTS
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
#endif

	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
	MPASS(ic == ip->ip_conn);
	MPASS(ip->ip_bhs_mbuf != NULL);

	m_freem(ip->ip_ahs_mbuf);
	m_freem(ip->ip_data_mbuf);
	m_freem(ip->ip_bhs_mbuf);	/* storage for icl_cxgbei_pdu itself */

#ifdef DIAGNOSTIC
	if (__predict_true(ic != NULL))
		refcount_release(&ic->ic_outstanding_pdus);
#endif
}
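
/*
 * A note on PDU allocation: a single mbuf provides storage for both the
 * driver's icl_cxgbei_pdu bookkeeping structure and the iSCSI BHS.  Both
 * are carved out of the mbuf's data area at suitably aligned offsets (the
 * INVARIANTS check below verifies that everything fits), so freeing
 * ip_bhs_mbuf releases the PDU structure as well -- which is why
 * icl_cxgbei_conn_pdu_free() above frees it last.
 */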

struct icl_pdu *
icl_cxgbei_new_pdu(int flags)
{
	struct icl_cxgbei_pdu *icp;
	struct icl_pdu *ip;
	struct mbuf *m;
	uintptr_t a;

	m = m_gethdr(flags, MT_DATA);
	if (__predict_false(m == NULL))
		return (NULL);

	a = roundup2(mtod(m, uintptr_t), _Alignof(struct icl_cxgbei_pdu));
	icp = (struct icl_cxgbei_pdu *)a;
	bzero(icp, sizeof(*icp));

	icp->icp_signature = CXGBEI_PDU_SIGNATURE;
	ip = &icp->ip;
	ip->ip_bhs_mbuf = m;

	a = roundup2((uintptr_t)(icp + 1), _Alignof(struct iscsi_bhs *));
	ip->ip_bhs = (struct iscsi_bhs *)a;
#ifdef INVARIANTS
	/* Everything must fit entirely in the mbuf. */
	a = (uintptr_t)(ip->ip_bhs + 1);
	MPASS(a <= (uintptr_t)m + MSIZE);
#endif
	bzero(ip->ip_bhs, sizeof(*ip->ip_bhs));

	m->m_data = (void *)ip->ip_bhs;
	m->m_len = sizeof(struct iscsi_bhs);
	m->m_pkthdr.len = m->m_len;

	return (ip);
}

void
icl_cxgbei_new_pdu_set_conn(struct icl_pdu *ip, struct icl_conn *ic)
{

	ip->ip_conn = ic;
#ifdef DIAGNOSTIC
	refcount_acquire(&ic->ic_outstanding_pdus);
#endif
}

/*
 * Allocate an icl_pdu with an empty BHS for the caller to fill in.
 */
static struct icl_pdu *
icl_cxgbei_conn_new_pdu(struct icl_conn *ic, int flags)
{
	struct icl_pdu *ip;

	ip = icl_cxgbei_new_pdu(flags);
	if (__predict_false(ip == NULL))
		return (NULL);
	icl_cxgbei_new_pdu_set_conn(ip, ic);

	return (ip);
}

static size_t
icl_pdu_data_segment_length(const struct icl_pdu *request)
{
	uint32_t len = 0;

	len += request->ip_bhs->bhs_data_segment_len[0];
	len <<= 8;
	len += request->ip_bhs->bhs_data_segment_len[1];
	len <<= 8;
	len += request->ip_bhs->bhs_data_segment_len[2];

	return (len);
}

size_t
icl_cxgbei_conn_pdu_data_segment_length(struct icl_conn *ic,
    const struct icl_pdu *request)
{

	return (icl_pdu_data_segment_length(request));
}
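
/*
 * finalize_pdu() converts a PDU under construction into a single mbuf
 * chain ready for the offload queue: the data segment is zero-padded to a
 * 4B boundary, the 24-bit data segment length is written into the BHS in
 * big-endian byte order (the inverse of icl_pdu_data_segment_length()
 * above), the header and data mbufs are linked together, and the ULP
 * submode -- the header/data digest flags -- is recorded in the packet
 * header.  The digests themselves are generated by the adapter.
 */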

static struct mbuf *
finalize_pdu(struct icl_cxgbei_conn *icc, struct icl_cxgbei_pdu *icp)
{
	struct icl_pdu *ip = &icp->ip;
	uint8_t ulp_submode, padding;
	struct mbuf *m, *last;
	struct iscsi_bhs *bhs;

	/*
	 * Fix up the data segment mbuf first.
	 */
	m = ip->ip_data_mbuf;
	ulp_submode = icc->ulp_submode;
	if (m) {
		last = m_last(m);

		/*
		 * Round up the data segment to a 4B boundary.  Pad with 0 if
		 * necessary.  There will definitely be room in the mbuf.
		 */
		padding = roundup2(ip->ip_data_len, 4) - ip->ip_data_len;
		if (padding) {
			bzero(mtod(last, uint8_t *) + last->m_len, padding);
			last->m_len += padding;
		}
	} else {
		MPASS(ip->ip_data_len == 0);
		ulp_submode &= ~ULP_CRC_DATA;
		padding = 0;
	}

	/*
	 * Now the header mbuf that has the BHS.
	 */
	m = ip->ip_bhs_mbuf;
	MPASS(m->m_pkthdr.len == sizeof(struct iscsi_bhs));
	MPASS(m->m_len == sizeof(struct iscsi_bhs));

	bhs = ip->ip_bhs;
	bhs->bhs_data_segment_len[2] = ip->ip_data_len;
	bhs->bhs_data_segment_len[1] = ip->ip_data_len >> 8;
	bhs->bhs_data_segment_len[0] = ip->ip_data_len >> 16;

	/* "Convert" PDU to mbuf chain.  Do not use icp/ip after this. */
	m->m_pkthdr.len = sizeof(struct iscsi_bhs) + ip->ip_data_len + padding;
	m->m_next = ip->ip_data_mbuf;
	set_mbuf_ulp_submode(m, ulp_submode);
#ifdef INVARIANTS
	bzero(icp, sizeof(*icp));
#endif
#ifdef DIAGNOSTIC
	refcount_release(&icc->ic.ic_outstanding_pdus);
#endif

	return (m);
}

int
icl_cxgbei_conn_pdu_append_data(struct icl_conn *ic, struct icl_pdu *ip,
    const void *addr, size_t len, int flags)
{
	struct mbuf *m;
#ifdef INVARIANTS
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
#endif

	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
	MPASS(ic == ip->ip_conn);
	KASSERT(len > 0, ("%s: len is %jd", __func__, (intmax_t)len));

	m = ip->ip_data_mbuf;
	if (m == NULL) {
		m = m_getjcl(M_NOWAIT, MT_DATA, 0, MJUM16BYTES);
		if (__predict_false(m == NULL))
			return (ENOMEM);

		ip->ip_data_mbuf = m;
	}

	if (__predict_true(m_append(m, len, addr) != 0)) {
		ip->ip_data_len += len;
		MPASS(ip->ip_data_len <= ic->ic_max_data_segment_length);
		return (0);
	} else {
		if (flags & M_WAITOK) {
			CXGBE_UNIMPLEMENTED("fail safe append");
		}
		ip->ip_data_len = m_length(m, NULL);
		return (1);
	}
}

void
icl_cxgbei_conn_pdu_get_data(struct icl_conn *ic, struct icl_pdu *ip,
    size_t off, void *addr, size_t len)
{
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

	if (icp->icp_flags & ICPF_RX_DDP)
		return;		/* data is DDP'ed, no need to copy */
	m_copydata(ip->ip_data_mbuf, off, len, addr);
}

void
icl_cxgbei_conn_pdu_queue(struct icl_conn *ic, struct icl_pdu *ip)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
	struct socket *so = ic->ic_socket;
	struct toepcb *toep = icc->toep;
	struct inpcb *inp;
	struct mbuf *m;

	MPASS(ic == ip->ip_conn);
	MPASS(ip->ip_bhs_mbuf != NULL);
	/* The kernel doesn't generate PDUs with AHS. */
	MPASS(ip->ip_ahs_mbuf == NULL && ip->ip_ahs_len == 0);

	ICL_CONN_LOCK_ASSERT(ic);
	/* NOTE: sowriteable without so_snd lock is a mostly harmless race. */
	if (ic->ic_disconnecting || so == NULL || !sowriteable(so)) {
		icl_cxgbei_conn_pdu_free(ic, ip);
		return;
	}

	m = finalize_pdu(icc, icp);
	M_ASSERTPKTHDR(m);
	MPASS((m->m_pkthdr.len & 3) == 0);

	/*
	 * Do not get inp from toep->inp as the toepcb might have detached
	 * already.
	 */
	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) ||
	    __predict_false((toep->flags & TPF_ATTACHED) == 0))
		m_freem(m);
	else {
		mbufq_enqueue(&toep->ulp_pduq, m);
		t4_push_pdus(icc->sc, toep, 0);
	}
	INP_WUNLOCK(inp);
}
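
/*
 * Note on the transmit path: finalized PDUs never touch the socket
 * buffer.  icl_cxgbei_conn_pdu_queue() above places them directly on the
 * toepcb's ulp_pduq and t4_push_pdus() hands them to the offload transmit
 * queue, bypassing the regular TCP output path entirely.
 */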

static struct icl_conn *
icl_cxgbei_new_conn(const char *name, struct mtx *lock)
{
	struct icl_cxgbei_conn *icc;
	struct icl_conn *ic;

	refcount_acquire(&icl_cxgbei_ncons);

	icc = (struct icl_cxgbei_conn *)kobj_create(&icl_cxgbei_class, M_CXGBE,
	    M_WAITOK | M_ZERO);
	icc->icc_signature = CXGBEI_CONN_SIGNATURE;
	STAILQ_INIT(&icc->rcvd_pdus);

	ic = &icc->ic;
	ic->ic_lock = lock;

	/* XXXNP: review.  Most of these icl_conn fields aren't really used. */
	STAILQ_INIT(&ic->ic_to_send);
	cv_init(&ic->ic_send_cv, "icl_cxgbei_tx");
	cv_init(&ic->ic_receive_cv, "icl_cxgbei_rx");
#ifdef DIAGNOSTIC
	refcount_init(&ic->ic_outstanding_pdus, 0);
#endif
	/* This is a stop-gap value that will be corrected during handoff. */
	ic->ic_max_data_segment_length = 16384;
	ic->ic_name = name;
	ic->ic_offload = "cxgbei";
	ic->ic_unmapped = false;

	CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

	return (ic);
}

void
icl_cxgbei_conn_free(struct icl_conn *ic)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);

	CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

	cv_destroy(&ic->ic_send_cv);
	cv_destroy(&ic->ic_receive_cv);

	kobj_delete((struct kobj *)icc, M_CXGBE);
	refcount_release(&icl_cxgbei_ncons);
}

static int
icl_cxgbei_setsockopt(struct icl_conn *ic, struct socket *so, int sspace,
    int rspace)
{
	struct sockopt opt;
	int error, one = 1, ss, rs;

	ss = max(sendspace, sspace);
	rs = max(recvspace, rspace);

	error = soreserve(so, ss, rs);
	if (error != 0) {
		icl_cxgbei_conn_close(ic);
		return (error);
	}
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_AUTOSIZE;
	SOCKBUF_UNLOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_flags |= SB_AUTOSIZE;
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Disable Nagle.
	 */
	bzero(&opt, sizeof(opt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = IPPROTO_TCP;
	opt.sopt_name = TCP_NODELAY;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	error = sosetopt(so, &opt);
	if (error != 0) {
		icl_cxgbei_conn_close(ic);
		return (error);
	}

	return (0);
}

/*
 * Request/response structure used to find the adapter offloading a socket.
 */
struct find_ofld_adapter_rr {
	struct socket *so;
	struct adapter *sc;	/* result */
};

static void
find_offload_adapter(struct adapter *sc, void *arg)
{
	struct find_ofld_adapter_rr *fa = arg;
	struct socket *so = fa->so;
	struct tom_data *td = sc->tom_softc;
	struct tcpcb *tp;
	struct inpcb *inp;

	/* Non-TCP sockets were filtered out earlier. */
	MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);

	if (fa->sc != NULL)
		return;	/* Found already. */

	if (td == NULL)
		return;	/* TOE not enabled on this adapter. */

	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		tp = intotcpcb(inp);
		if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
			fa->sc = sc;	/* Found. */
	}
	INP_WUNLOCK(inp);
}
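
/*
 * The FLOWC work request below advertises the maximum iSCSI PDU length
 * (FW_FLOWC_MNEM_TXDATAPLEN_MAX) to the firmware for this connection.
 * Like any other work request on the offload tx queue it consumes tx
 * credits and a send descriptor, so the accounting here mirrors the
 * flowc handling in t4_tom.
 */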

/* XXXNP: move this to t4_tom. */
static void
send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	const u_int nparams = 1;
	u_int flowclen;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), toep->ofld_txq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[0].val = htobe32(maxlen);

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	t4_wrq_tx(sc, wr);
}

static void
set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, int hcrc, int dcrc)
{
	uint64_t val = ULP_MODE_ISCSI;

	if (hcrc)
		val |= ULP_CRC_HEADER << 4;
	if (dcrc)
		val |= ULP_CRC_DATA << 4;

	CTR4(KTR_CXGBE, "%s: tid %u, ULP_MODE_ISCSI, CRC hdr=%d data=%d",
	    __func__, toep->tid, hcrc, dcrc);

	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_ULP_TYPE,
	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE) | V_TCB_ULP_RAW(M_TCB_ULP_RAW), val,
	    0, 0);
}
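
/*
 * Layout note for set_ulp_mode_iscsi() above: within the TCB word being
 * written, the ULP type occupies the low bits and the raw ULP submode
 * (the CRC enable flags) sits 4 bits higher, hence the << 4 shifts when
 * val is constructed.
 */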

/*
 * XXXNP: Who is responsible for cleaning up the socket if this returns with an
 * error?  Review all error paths.
 *
 * XXXNP: What happens to the socket's fd reference if the operation is
 * successful, and how does that affect the socket's life cycle?
 */
int
icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct cxgbei_data *ci;
	struct find_ofld_adapter_rr fa;
	struct file *fp;
	struct socket *so;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct toepcb *toep;
	cap_rights_t rights;
	int error;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ICL_CONN_LOCK_ASSERT_NOT(ic);

	/*
	 * Steal the socket from userland.
	 */
	error = fget(curthread, fd,
	    cap_rights_init(&rights, CAP_SOCK_CLIENT), &fp);
	if (error != 0)
		return (error);
	if (fp->f_type != DTYPE_SOCKET) {
		fdrop(fp, curthread);
		return (EINVAL);
	}
	so = fp->f_data;
	if (so->so_type != SOCK_STREAM ||
	    so->so_proto->pr_protocol != IPPROTO_TCP) {
		fdrop(fp, curthread);
		return (EINVAL);
	}

	ICL_CONN_LOCK(ic);
	if (ic->ic_socket != NULL) {
		ICL_CONN_UNLOCK(ic);
		fdrop(fp, curthread);
		return (EBUSY);
	}
	ic->ic_disconnecting = false;
	ic->ic_socket = so;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	fdrop(fp, curthread);
	ICL_CONN_UNLOCK(ic);

	/* Find the adapter offloading this socket. */
	fa.sc = NULL;
	fa.so = so;
	t4_iterate(find_offload_adapter, &fa);
	if (fa.sc == NULL)
		return (EINVAL);
	icc->sc = fa.sc;
	ci = icc->sc->iscsi_ulp_softc;

	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT))
		error = EBUSY;
	else {
		/*
		 * The socket cannot have been "unoffloaded" if we got here.
		 */
		MPASS(tp->t_flags & TF_TOE);
		MPASS(tp->tod != NULL);
		MPASS(tp->t_toe != NULL);
		toep = tp->t_toe;
		MPASS(toep->vi->pi->adapter == icc->sc);
		icc->toep = toep;
		icc->cwt = cxgbei_select_worker_thread(icc);

		/*
		 * We maintain the _send_ DSL in this field just to have a
		 * convenient way to assert that the kernel never sends
		 * oversized PDUs.  This field is otherwise unused in the
		 * driver or the kernel.
		 */
		ic->ic_max_data_segment_length = ci->max_tx_pdu_len -
		    ISCSI_BHS_SIZE;

		icc->ulp_submode = 0;
		if (ic->ic_header_crc32c) {
			icc->ulp_submode |= ULP_CRC_HEADER;
			ic->ic_max_data_segment_length -=
			    ISCSI_HEADER_DIGEST_SIZE;
		}
		if (ic->ic_data_crc32c) {
			icc->ulp_submode |= ULP_CRC_DATA;
			ic->ic_max_data_segment_length -=
			    ISCSI_DATA_DIGEST_SIZE;
		}
		so->so_options |= SO_NO_DDP;
		toep->ulp_mode = ULP_MODE_ISCSI;
		toep->ulpcb = icc;

		send_iscsi_flowc_wr(icc->sc, toep, ci->max_tx_pdu_len);
		set_ulp_mode_iscsi(icc->sc, toep, ic->ic_header_crc32c,
		    ic->ic_data_crc32c);
		error = 0;
	}
	INP_WUNLOCK(inp);

	if (error == 0) {
		error = icl_cxgbei_setsockopt(ic, so, ci->max_tx_pdu_len,
		    ci->max_rx_pdu_len);
	}

	return (error);
}
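
/*
 * Note that the send limit established during handoff starts from the
 * adapter's maximum tx PDU length and shrinks by the size of each digest
 * that is enabled, since the digests the hardware appends count against
 * the same PDU length budget.
 */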

void
icl_cxgbei_conn_close(struct icl_conn *ic)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct icl_pdu *ip;
	struct socket *so;
	struct sockbuf *sb;
	struct inpcb *inp;
	struct toepcb *toep = icc->toep;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ICL_CONN_LOCK_ASSERT_NOT(ic);

	ICL_CONN_LOCK(ic);
	so = ic->ic_socket;
	if (ic->ic_disconnecting || so == NULL) {
		CTR4(KTR_CXGBE, "%s: icc %p (disconnecting = %d), so %p",
		    __func__, icc, ic->ic_disconnecting, so);
		ICL_CONN_UNLOCK(ic);
		return;
	}
	ic->ic_disconnecting = true;

	/* These are unused in this driver right now. */
	MPASS(STAILQ_EMPTY(&ic->ic_to_send));
	MPASS(ic->ic_receive_pdu == NULL);

#ifdef DIAGNOSTIC
	KASSERT(ic->ic_outstanding_pdus == 0,
	    ("destroying session with %d outstanding PDUs",
	    ic->ic_outstanding_pdus));
#endif
	ICL_CONN_UNLOCK(ic);

	CTR3(KTR_CXGBE, "%s: tid %d, icc %p", __func__, toep ? toep->tid : -1,
	    icc);
	inp = sotoinpcb(so);
	sb = &so->so_rcv;
	INP_WLOCK(inp);
	if (toep != NULL) {	/* NULL if connection was never offloaded. */
		toep->ulpcb = NULL;
		mbufq_drain(&toep->ulp_pduq);
		SOCKBUF_LOCK(sb);
		if (icc->rx_flags & RXF_ACTIVE) {
			volatile u_int *p = &icc->rx_flags;

			SOCKBUF_UNLOCK(sb);
			INP_WUNLOCK(inp);

			while (*p & RXF_ACTIVE)
				pause("conclo", 1);

			INP_WLOCK(inp);
			SOCKBUF_LOCK(sb);
		}

		while (!STAILQ_EMPTY(&icc->rcvd_pdus)) {
			ip = STAILQ_FIRST(&icc->rcvd_pdus);
			STAILQ_REMOVE_HEAD(&icc->rcvd_pdus, ip_next);
			icl_cxgbei_conn_pdu_free(ic, ip);
		}
		SOCKBUF_UNLOCK(sb);
	}
	INP_WUNLOCK(inp);

	ICL_CONN_LOCK(ic);
	ic->ic_socket = NULL;
	ICL_CONN_UNLOCK(ic);

	/*
	 * XXXNP: we should send RST instead of FIN when PDUs held in various
	 * queues were purged instead of delivered reliably, but soabort isn't
	 * really general purpose and wouldn't do the right thing here.
	 */
	soclose(so);
}

int
icl_cxgbei_conn_task_setup(struct icl_conn *ic, struct icl_pdu *ip,
    struct ccb_scsiio *csio, uint32_t *ittp, void **arg)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct toepcb *toep = icc->toep;
	struct adapter *sc = icc->sc;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct ppod_region *pr = &ci->pr;
	struct ppod_reservation *prsv;
	uint32_t itt;
	int rc = 0;

	/* This is for the offload driver's state.  Must not be set already. */
	MPASS(arg != NULL);
	MPASS(*arg == NULL);

	if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN ||
	    csio->dxfer_len < ci->ddp_threshold) {
no_ddp:
		/*
		 * No DDP for this I/O.  Allocate an ITT (based on the one
		 * passed in) that cannot be a valid hardware DDP tag in the
		 * iSCSI region.
		 */
		itt = *ittp & M_PPOD_TAG;
		itt = V_PPOD_TAG(itt) | pr->pr_invalid_bit;
		*ittp = htobe32(itt);
		MPASS(*arg == NULL);	/* State is maintained for DDP only. */
		if (rc != 0)
			counter_u64_add(ci->ddp_setup_error, 1);
		return (0);
	}

	/*
	 * Reserve resources for DDP, update the itt that should be used in
	 * the PDU, and save DDP specific state for this I/O in *arg.
	 */
	prsv = uma_zalloc(prsv_zone, M_NOWAIT);
	if (prsv == NULL) {
		rc = ENOMEM;
		goto no_ddp;
	}

	/* XXX add support for all CAM_DATA_ types */
	MPASS((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR);
	rc = t4_alloc_page_pods_for_buf(pr, (vm_offset_t)csio->data_ptr,
	    csio->dxfer_len, prsv);
	if (rc != 0) {
		uma_zfree(prsv_zone, prsv);
		goto no_ddp;
	}

	rc = t4_write_page_pods_for_buf(sc, toep->ofld_txq, toep->tid, prsv,
	    (vm_offset_t)csio->data_ptr, csio->dxfer_len);
	if (rc != 0) {
		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
		goto no_ddp;
	}

	*ittp = htobe32(prsv->prsv_tag);
	*arg = prsv;
	counter_u64_add(ci->ddp_setup_ok, 1);
	return (0);
}

void
icl_cxgbei_conn_task_done(struct icl_conn *ic, void *arg)
{

	if (arg != NULL) {
		struct ppod_reservation *prsv = arg;

		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
	}
}
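
/*
 * DDP tag scheme: a tag handed to the peer (an ITT on the initiator side
 * above, a TTT on the target side below) either encodes a page pod
 * reservation, in which case the adapter places the incoming data
 * directly into the I/O buffer, or has the region's invalid bit set, in
 * which case the hardware ignores it and the data takes the normal
 * receive path.
 */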

/* XXXNP: PDU should be passed in as a parameter, like on the initiator. */
#define	io_to_request_pdu(io)		((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr)
#define	io_to_ppod_reservation(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr)
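
/*
 * When a transfer continues past the first burst, the page pod
 * reservation set up earlier is recycled for the next R2T: only the
 * alias bits of the tag are bumped (see the middle-of-I/O case below),
 * so the TTT on the wire changes while the underlying DDP resources
 * stay in place.
 */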

int
icl_cxgbei_conn_transfer_setup(struct icl_conn *ic, union ctl_io *io,
    uint32_t *tttp, void **arg)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct toepcb *toep = icc->toep;
	struct ctl_scsiio *ctsio = &io->scsiio;
	struct adapter *sc = icc->sc;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct ppod_region *pr = &ci->pr;
	struct ppod_reservation *prsv;
	uint32_t ttt;
	int xferlen, rc = 0, alias;

	/* This is for the offload driver's state.  Must not be set already. */
	MPASS(arg != NULL);
	MPASS(*arg == NULL);

	if (ctsio->ext_data_filled == 0) {
		int first_burst;
		struct icl_pdu *ip = io_to_request_pdu(io);
		vm_offset_t buf;
#ifdef INVARIANTS
		struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

		MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
		MPASS(ic == ip->ip_conn);
		MPASS(ip->ip_bhs_mbuf != NULL);
#endif
		first_burst = icl_pdu_data_segment_length(ip);

		/*
		 * Note that ICL calls conn_transfer_setup even if the first
		 * burst had everything and there's nothing left to transfer.
		 */
		MPASS(ctsio->kern_data_len >= first_burst);
		xferlen = ctsio->kern_data_len;
		if (xferlen - first_burst < ci->ddp_threshold) {
no_ddp:
			/*
			 * No DDP for this transfer.  Allocate a TTT (based on
			 * the one passed in) that cannot be a valid hardware
			 * DDP tag in the iSCSI region.
			 */
			ttt = *tttp & M_PPOD_TAG;
			ttt = V_PPOD_TAG(ttt) | pr->pr_invalid_bit;
			*tttp = htobe32(ttt);
			MPASS(io_to_ppod_reservation(io) == NULL);
			if (rc != 0)
				counter_u64_add(ci->ddp_setup_error, 1);
			return (0);
		}

		if (ctsio->kern_sg_entries == 0)
			buf = (vm_offset_t)ctsio->kern_data_ptr;
		else if (ctsio->kern_sg_entries == 1) {
			struct ctl_sg_entry *sgl =
			    (void *)ctsio->kern_data_ptr;

			MPASS(sgl->len == xferlen);
			buf = (vm_offset_t)sgl->addr;
		} else {
			rc = EAGAIN;	/* XXX implement */
			goto no_ddp;
		}

		/*
		 * Reserve resources for DDP, update the ttt that should be
		 * used in the PDU, and save DDP specific state for this I/O.
		 */
		MPASS(io_to_ppod_reservation(io) == NULL);
		prsv = uma_zalloc(prsv_zone, M_NOWAIT);
		if (prsv == NULL) {
			rc = ENOMEM;
			goto no_ddp;
		}

		rc = t4_alloc_page_pods_for_buf(pr, buf, xferlen, prsv);
		if (rc != 0) {
			uma_zfree(prsv_zone, prsv);
			goto no_ddp;
		}

		rc = t4_write_page_pods_for_buf(sc, toep->ofld_txq, toep->tid,
		    prsv, buf, xferlen);
		if (rc != 0) {
			t4_free_page_pods(prsv);
			uma_zfree(prsv_zone, prsv);
			goto no_ddp;
		}

		*tttp = htobe32(prsv->prsv_tag);
		io_to_ppod_reservation(io) = prsv;
		*arg = ctsio;
		counter_u64_add(ci->ddp_setup_ok, 1);
		return (0);
	}

	/*
	 * In the middle of an I/O.  A non-NULL page pod reservation indicates
	 * that a DDP buffer is being used for the I/O.
	 */
	prsv = io_to_ppod_reservation(ctsio);
	if (prsv == NULL)
		goto no_ddp;

	alias = (prsv->prsv_tag & pr->pr_alias_mask) >> pr->pr_alias_shift;
	alias++;
	prsv->prsv_tag &= ~pr->pr_alias_mask;
	prsv->prsv_tag |= alias << pr->pr_alias_shift & pr->pr_alias_mask;

	*tttp = htobe32(prsv->prsv_tag);
	*arg = ctsio;

	return (0);
}

void
icl_cxgbei_conn_transfer_done(struct icl_conn *ic, void *arg)
{
	struct ctl_scsiio *ctsio = arg;

	if (ctsio != NULL && ctsio->kern_data_len == ctsio->ext_data_filled) {
		struct ppod_reservation *prsv;

		prsv = io_to_ppod_reservation(ctsio);
		MPASS(prsv != NULL);

		t4_free_page_pods(prsv);
		uma_zfree(prsv_zone, prsv);
	}
}

static void
cxgbei_limits(struct adapter *sc, void *arg)
{
	struct icl_drv_limits *idl = arg;
	struct cxgbei_data *ci;
	int max_dsl;

	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4lims") != 0)
		return;

	if (uld_active(sc, ULD_ISCSI)) {
		ci = sc->iscsi_ulp_softc;
		MPASS(ci != NULL);

		/*
		 * AHS is not supported by the kernel so we'll not account for
		 * it either in our PDU len -> data segment len conversions.
		 */
		max_dsl = ci->max_rx_pdu_len - ISCSI_BHS_SIZE -
		    ISCSI_HEADER_DIGEST_SIZE - ISCSI_DATA_DIGEST_SIZE;
		if (idl->idl_max_recv_data_segment_length > max_dsl)
			idl->idl_max_recv_data_segment_length = max_dsl;

		max_dsl = ci->max_tx_pdu_len - ISCSI_BHS_SIZE -
		    ISCSI_HEADER_DIGEST_SIZE - ISCSI_DATA_DIGEST_SIZE;
		if (idl->idl_max_send_data_segment_length > max_dsl)
			idl->idl_max_send_data_segment_length = max_dsl;
	}

	end_synchronized_op(sc, LOCK_HELD);
}

static int
icl_cxgbei_limits(struct icl_drv_limits *idl)
{

	/* Maximum allowed by the RFC.  cxgbei_limits will clip them. */
	idl->idl_max_recv_data_segment_length = (1 << 24) - 1;
	idl->idl_max_send_data_segment_length = (1 << 24) - 1;

	/* These are somewhat arbitrary. */
	idl->idl_max_burst_length = 2 * 1024 * 1024;
	idl->idl_first_burst_length = 8192;

	t4_iterate(cxgbei_limits, idl);

	return (0);
}

int
icl_cxgbei_mod_load(void)
{
	int rc;

	/*
	 * Space to track pagepod reservations.
	 */
	prsv_zone = uma_zcreate("Pagepod reservations",
	    sizeof(struct ppod_reservation), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);

	refcount_init(&icl_cxgbei_ncons, 0);

	rc = icl_register("cxgbei", false, -100, icl_cxgbei_limits,
	    icl_cxgbei_new_conn);

	return (rc);
}

int
icl_cxgbei_mod_unload(void)
{

	if (icl_cxgbei_ncons != 0)
		return (EBUSY);

	icl_unregister("cxgbei", false);

	uma_zdestroy(prsv_zone);

	return (0);
}
#endif