/*-
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * cxgbei implementation of iSCSI Common Layer kobj(9) interface.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#include <dev/iscsi/icl.h>
#include <dev/iscsi/iscsi_proto.h>
#include <icl_conn_if.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_error.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_ioctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_compat.h>
#include <cam/scsi/scsi_message.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"
#include "cxgbei.h"

/*
 * Use the page pod tag for the TT hash.
 */
#define	TT_HASH(icc, tt)	(G_PPOD_TAG(tt) & (icc)->cmp_hash_mask)
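/*
 * cmp_hash_mask comes from hashinit(9), so it is the table size minus
 * one and the macro reduces to a power-of-two modulus.  E.g. with the
 * 64-bucket table allocated in icl_cxgbei_new_conn() below, TT_HASH()
 * is effectively "G_PPOD_TAG(tt) % 64".
 */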
struct cxgbei_ddp_state {
	struct ppod_reservation prsv;
	struct cxgbei_cmp cmp;
};

static MALLOC_DEFINE(M_CXGBEI, "cxgbei", "cxgbei(4)");

SYSCTL_NODE(_kern_icl, OID_AUTO, cxgbei, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Chelsio iSCSI offload");
static int first_burst_length = 8192;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, first_burst_length, CTLFLAG_RWTUN,
    &first_burst_length, 0, "First burst length");
static int max_burst_length = 2 * 1024 * 1024;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, max_burst_length, CTLFLAG_RWTUN,
    &max_burst_length, 0, "Maximum burst length");
static int sendspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, sendspace, CTLFLAG_RWTUN,
    &sendspace, 0, "Default send socket buffer size");
static int recvspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, recvspace, CTLFLAG_RWTUN,
    &recvspace, 0, "Default receive socket buffer size");

static volatile u_int icl_cxgbei_ncons;

static icl_conn_new_pdu_t	icl_cxgbei_conn_new_pdu;
static icl_conn_pdu_data_segment_length_t
    icl_cxgbei_conn_pdu_data_segment_length;
static icl_conn_pdu_append_data_t	icl_cxgbei_conn_pdu_append_data;
static icl_conn_pdu_get_data_t	icl_cxgbei_conn_pdu_get_data;
static icl_conn_pdu_queue_t	icl_cxgbei_conn_pdu_queue;
static icl_conn_pdu_queue_cb_t	icl_cxgbei_conn_pdu_queue_cb;
static icl_conn_handoff_t	icl_cxgbei_conn_handoff;
static icl_conn_free_t		icl_cxgbei_conn_free;
static icl_conn_close_t		icl_cxgbei_conn_close;
static icl_conn_task_setup_t	icl_cxgbei_conn_task_setup;
static icl_conn_task_done_t	icl_cxgbei_conn_task_done;
static icl_conn_transfer_setup_t	icl_cxgbei_conn_transfer_setup;
static icl_conn_transfer_done_t	icl_cxgbei_conn_transfer_done;

static kobj_method_t icl_cxgbei_methods[] = {
	KOBJMETHOD(icl_conn_new_pdu, icl_cxgbei_conn_new_pdu),
	KOBJMETHOD(icl_conn_pdu_free, icl_cxgbei_conn_pdu_free),
	KOBJMETHOD(icl_conn_pdu_data_segment_length,
	    icl_cxgbei_conn_pdu_data_segment_length),
	KOBJMETHOD(icl_conn_pdu_append_data, icl_cxgbei_conn_pdu_append_data),
	KOBJMETHOD(icl_conn_pdu_get_data, icl_cxgbei_conn_pdu_get_data),
	KOBJMETHOD(icl_conn_pdu_queue, icl_cxgbei_conn_pdu_queue),
	KOBJMETHOD(icl_conn_pdu_queue_cb, icl_cxgbei_conn_pdu_queue_cb),
	KOBJMETHOD(icl_conn_handoff, icl_cxgbei_conn_handoff),
	KOBJMETHOD(icl_conn_free, icl_cxgbei_conn_free),
	KOBJMETHOD(icl_conn_close, icl_cxgbei_conn_close),
	KOBJMETHOD(icl_conn_task_setup, icl_cxgbei_conn_task_setup),
	KOBJMETHOD(icl_conn_task_done, icl_cxgbei_conn_task_done),
	KOBJMETHOD(icl_conn_transfer_setup, icl_cxgbei_conn_transfer_setup),
	KOBJMETHOD(icl_conn_transfer_done, icl_cxgbei_conn_transfer_done),
	{ 0, 0 }
};

DEFINE_CLASS(icl_cxgbei, icl_cxgbei_methods, sizeof(struct icl_cxgbei_conn));
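/*
 * icl(4) selects this backend by the "cxgbei" offload name registered in
 * icl_cxgbei_mod_load() below, creates connections through
 * icl_cxgbei_new_conn(), and then dispatches every ICL entry point on the
 * connection through the kobj method table above.
 */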
void
icl_cxgbei_conn_pdu_free(struct icl_conn *ic, struct icl_pdu *ip)
{
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

	KASSERT(icp->ref_cnt != 0, ("freeing deleted PDU"));
	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
	MPASS(ic == ip->ip_conn);

	m_freem(ip->ip_ahs_mbuf);
	m_freem(ip->ip_data_mbuf);
	m_freem(ip->ip_bhs_mbuf);

	KASSERT(ic != NULL || icp->ref_cnt == 1,
	    ("orphaned PDU has outstanding references"));

	if (atomic_fetchadd_int(&icp->ref_cnt, -1) != 1)
		return;

	free(icp, M_CXGBEI);
#ifdef DIAGNOSTIC
	if (__predict_true(ic != NULL))
		refcount_release(&ic->ic_outstanding_pdus);
#endif
}

static void
icl_cxgbei_pdu_call_cb(struct icl_pdu *ip)
{
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);

	if (icp->cb != NULL)
		icp->cb(ip, icp->error);
#ifdef DIAGNOSTIC
	if (__predict_true(ip->ip_conn != NULL))
		refcount_release(&ip->ip_conn->ic_outstanding_pdus);
#endif
	free(icp, M_CXGBEI);
}

static void
icl_cxgbei_pdu_done(struct icl_pdu *ip, int error)
{
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

	if (error != 0)
		icp->error = error;

	m_freem(ip->ip_ahs_mbuf);
	ip->ip_ahs_mbuf = NULL;
	m_freem(ip->ip_data_mbuf);
	ip->ip_data_mbuf = NULL;
	m_freem(ip->ip_bhs_mbuf);
	ip->ip_bhs_mbuf = NULL;

	/*
	 * All other references to this PDU should have been dropped
	 * by the m_freem() of ip_data_mbuf.
	 */
	if (atomic_fetchadd_int(&icp->ref_cnt, -1) == 1)
		icl_cxgbei_pdu_call_cb(ip);
	else
		__assert_unreachable();
}

static void
icl_cxgbei_mbuf_done(struct mbuf *mb)
{

	struct icl_cxgbei_pdu *icp = (struct icl_cxgbei_pdu *)mb->m_ext.ext_arg1;

	/*
	 * NB: mb_free_mext() might leave ref_cnt as 1 without
	 * decrementing it if it hits the fast path in the ref_cnt
	 * check.
	 */
	icl_cxgbei_pdu_call_cb(&icp->ip);
}
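/*
 * PDU reference counting in this file, in brief: icl_cxgbei_new_pdu()
 * starts ref_cnt at 1.  Each ICL_NOCOPY buffer appended by
 * icl_cxgbei_conn_pdu_append_data() shares that same counter via
 * m_extaddref(), so freeing the mbuf chain drops those references and
 * icl_cxgbei_mbuf_done() runs when the last external buffer goes away.
 * Whoever drops the count to zero (pdu_free, pdu_done, finalize_pdu, or
 * the mbuf path above) is responsible for freeing the descriptor and, on
 * the callback paths, running the completion callback if one was set.
 */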
struct icl_pdu *
icl_cxgbei_new_pdu(int flags)
{
	struct icl_cxgbei_pdu *icp;
	struct icl_pdu *ip;
	struct mbuf *m;

	icp = malloc(sizeof(*icp), M_CXGBEI, flags | M_ZERO);
	if (__predict_false(icp == NULL))
		return (NULL);

	icp->icp_signature = CXGBEI_PDU_SIGNATURE;
	icp->ref_cnt = 1;
	ip = &icp->ip;

	m = m_gethdr(flags, MT_DATA);
	if (__predict_false(m == NULL)) {
		free(icp, M_CXGBEI);
		return (NULL);
	}

	ip->ip_bhs_mbuf = m;
	ip->ip_bhs = mtod(m, struct iscsi_bhs *);
	memset(ip->ip_bhs, 0, sizeof(*ip->ip_bhs));
	m->m_len = sizeof(struct iscsi_bhs);
	m->m_pkthdr.len = m->m_len;

	return (ip);
}

void
icl_cxgbei_new_pdu_set_conn(struct icl_pdu *ip, struct icl_conn *ic)
{

	ip->ip_conn = ic;
#ifdef DIAGNOSTIC
	refcount_acquire(&ic->ic_outstanding_pdus);
#endif
}

/*
 * Allocate an icl_pdu with an empty BHS for the caller to fill in.
 */
static struct icl_pdu *
icl_cxgbei_conn_new_pdu(struct icl_conn *ic, int flags)
{
	struct icl_pdu *ip;

	ip = icl_cxgbei_new_pdu(flags);
	if (__predict_false(ip == NULL))
		return (NULL);
	icl_cxgbei_new_pdu_set_conn(ip, ic);

	return (ip);
}

static size_t
icl_pdu_data_segment_length(const struct icl_pdu *request)
{
	uint32_t len = 0;

	len += request->ip_bhs->bhs_data_segment_len[0];
	len <<= 8;
	len += request->ip_bhs->bhs_data_segment_len[1];
	len <<= 8;
	len += request->ip_bhs->bhs_data_segment_len[2];

	return (len);
}

size_t
icl_cxgbei_conn_pdu_data_segment_length(struct icl_conn *ic,
    const struct icl_pdu *request)
{

	return (icl_pdu_data_segment_length(request));
}
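/*
 * The BHS stores DataSegmentLength as a 24-bit big-endian integer.  The
 * shifts above reassemble it byte by byte; e.g. a segment of 8192 bytes
 * is encoded as { 0x00, 0x20, 0x00 } and decodes as
 * ((0x00 << 8 | 0x20) << 8) | 0x00 == 0x002000.  finalize_pdu() below
 * performs the inverse when it writes data_len back into the header.
 */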
static struct mbuf *
finalize_pdu(struct icl_cxgbei_conn *icc, struct icl_cxgbei_pdu *icp)
{
	struct icl_pdu *ip = &icp->ip;
	uint8_t ulp_submode, padding;
	struct mbuf *m, *last;
	struct iscsi_bhs *bhs;
	int data_len;

	/*
	 * Fix up the data segment mbuf first.
	 */
	m = ip->ip_data_mbuf;
	ulp_submode = icc->ulp_submode;
	if (m != NULL) {
		last = m_last(m);

		/*
		 * Round up the data segment to a 4B boundary.  Pad with 0 if
		 * necessary.  There will definitely be room in the mbuf.
		 */
		padding = roundup2(ip->ip_data_len, 4) - ip->ip_data_len;
		if (padding != 0) {
			MPASS(padding <= M_TRAILINGSPACE(last));
			bzero(mtod(last, uint8_t *) + last->m_len, padding);
			last->m_len += padding;
		}
	} else {
		MPASS(ip->ip_data_len == 0);
		ulp_submode &= ~ULP_CRC_DATA;
		padding = 0;
	}

	/*
	 * Now the header mbuf that has the BHS.
	 */
	m = ip->ip_bhs_mbuf;
	MPASS(m->m_pkthdr.len == sizeof(struct iscsi_bhs));
	MPASS(m->m_len == sizeof(struct iscsi_bhs));

	bhs = ip->ip_bhs;
	data_len = ip->ip_data_len;
	if (data_len > icc->ic.ic_max_send_data_segment_length) {
		struct iscsi_bhs_data_in *bhsdi;
		int flags;

		KASSERT(padding == 0, ("%s: ISO with padding %d for icp %p",
		    __func__, padding, icp));
		switch (bhs->bhs_opcode) {
		case ISCSI_BHS_OPCODE_SCSI_DATA_OUT:
			flags = 1;
			break;
		case ISCSI_BHS_OPCODE_SCSI_DATA_IN:
			flags = 2;
			break;
		default:
			panic("invalid opcode %#x for ISO", bhs->bhs_opcode);
		}
		data_len = icc->ic.ic_max_send_data_segment_length;
		bhsdi = (struct iscsi_bhs_data_in *)bhs;
		if (bhsdi->bhsdi_flags & BHSDI_FLAGS_F) {
			/*
			 * Firmware will set F on the final PDU in the
			 * burst.
			 */
			flags |= CXGBE_ISO_F;
			bhsdi->bhsdi_flags &= ~BHSDI_FLAGS_F;
		}
		set_mbuf_iscsi_iso(m, true);
		set_mbuf_iscsi_iso_flags(m, flags);
		set_mbuf_iscsi_iso_mss(m, data_len);
	}

	bhs->bhs_data_segment_len[2] = data_len;
	bhs->bhs_data_segment_len[1] = data_len >> 8;
	bhs->bhs_data_segment_len[0] = data_len >> 16;

	/*
	 * Extract mbuf chain from PDU.
	 */
	m->m_pkthdr.len += ip->ip_data_len + padding;
	m->m_next = ip->ip_data_mbuf;
	set_mbuf_ulp_submode(m, ulp_submode);
	ip->ip_bhs_mbuf = NULL;
	ip->ip_data_mbuf = NULL;
	ip->ip_bhs = NULL;

	/*
	 * Drop PDU reference on icp.  Additional references might
	 * still be held by zero-copy PDU buffers (ICL_NOCOPY).
	 */
	if (atomic_fetchadd_int(&icp->ref_cnt, -1) == 1)
		icl_cxgbei_pdu_call_cb(ip);

	return (m);
}
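/*
 * A note on the ISO path above: when ip_data_len exceeds what the peer
 * will accept in one PDU, the chain is sent as a single "ISO" work
 * request and the hardware slices it into a burst of PDUs, each carrying
 * at most ic_max_send_data_segment_length bytes (the MSS programmed via
 * set_mbuf_iscsi_iso_mss()).  The BHS built here serves as a template
 * for every PDU in the burst, which is why the F bit is stripped and
 * left for the firmware to set on the final PDU; the per-PDU fields
 * (DataSN, buffer offset) are presumably filled in by the firmware as
 * well.
 */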
int
icl_cxgbei_conn_pdu_append_data(struct icl_conn *ic, struct icl_pdu *ip,
    const void *addr, size_t len, int flags)
{
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
	struct mbuf *m, *m_tail;
	const char *src;

	MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
	MPASS(ic == ip->ip_conn);
	KASSERT(len > 0, ("%s: len is %jd", __func__, (intmax_t)len));

	m_tail = ip->ip_data_mbuf;
	if (m_tail != NULL)
		for (; m_tail->m_next != NULL; m_tail = m_tail->m_next)
			;

	if (flags & ICL_NOCOPY) {
		m = m_get(flags & ~ICL_NOCOPY, MT_DATA);
		if (m == NULL) {
			ICL_WARN("failed to allocate mbuf");
			return (ENOMEM);
		}

		m->m_flags |= M_RDONLY;
		m_extaddref(m, __DECONST(char *, addr), len, &icp->ref_cnt,
		    icl_cxgbei_mbuf_done, icp, NULL);
		m->m_len = len;
		if (ip->ip_data_mbuf == NULL) {
			ip->ip_data_mbuf = m;
			ip->ip_data_len = len;
		} else {
			m_tail->m_next = m;
			m_tail = m_tail->m_next;
			ip->ip_data_len += len;
		}

		return (0);
	}

	src = (const char *)addr;

	/* Allocate as jumbo mbufs of size MJUM16BYTES. */
	while (len >= MJUM16BYTES) {
		m = m_getjcl(M_NOWAIT, MT_DATA, 0, MJUM16BYTES);
		if (__predict_false(m == NULL)) {
			if ((flags & M_WAITOK) != 0) {
				/* Fall back to non-jumbo mbufs. */
				break;
			}
			return (ENOMEM);
		}
		memcpy(mtod(m, void *), src, MJUM16BYTES);
		m->m_len = MJUM16BYTES;
		if (ip->ip_data_mbuf == NULL) {
			ip->ip_data_mbuf = m_tail = m;
			ip->ip_data_len = MJUM16BYTES;
		} else {
			m_tail->m_next = m;
			m_tail = m_tail->m_next;
			ip->ip_data_len += MJUM16BYTES;
		}
		src += MJUM16BYTES;
		len -= MJUM16BYTES;
	}

	/* Allocate mbuf chain for the remaining data. */
	if (len != 0) {
		m = m_getm2(NULL, len, flags, MT_DATA, 0);
		if (__predict_false(m == NULL))
			return (ENOMEM);
		if (ip->ip_data_mbuf == NULL) {
			ip->ip_data_mbuf = m;
			ip->ip_data_len = len;
		} else {
			m_tail->m_next = m;
			ip->ip_data_len += len;
		}
		for (; m != NULL; m = m->m_next) {
			m->m_len = min(len, M_SIZE(m));
			memcpy(mtod(m, void *), src, m->m_len);
			src += m->m_len;
			len -= m->m_len;
		}
		MPASS(len == 0);
	}
	MPASS(ip->ip_data_len <= max(ic->ic_max_send_data_segment_length,
	    ic->ic_hw_isomax));

	return (0);
}
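/*
 * The copy path above prefers 16KB jumbo clusters and falls back to
 * m_getm2() for the tail, or for everything if a jumbo allocation fails
 * on an M_WAITOK request.  E.g. appending 70KB (71680 bytes) takes four
 * MJUM16BYTES clusters for the first 65536 bytes and a short m_getm2()
 * chain for the remaining 6144.
 */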
void
icl_cxgbei_conn_pdu_get_data(struct icl_conn *ic, struct icl_pdu *ip,
    size_t off, void *addr, size_t len)
{
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

	if (icp->icp_flags & ICPF_RX_DDP)
		return;		/* data is DDP'ed, no need to copy */
	m_copydata(ip->ip_data_mbuf, off, len, addr);
}

void
icl_cxgbei_conn_pdu_queue(struct icl_conn *ic, struct icl_pdu *ip)
{
	icl_cxgbei_conn_pdu_queue_cb(ic, ip, NULL);
}

void
icl_cxgbei_conn_pdu_queue_cb(struct icl_conn *ic, struct icl_pdu *ip,
    icl_pdu_cb cb)
{
	struct epoch_tracker et;
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
	struct socket *so = ic->ic_socket;
	struct toepcb *toep = icc->toep;
	struct inpcb *inp;
	struct mbuf *m;

	MPASS(ic == ip->ip_conn);
	MPASS(ip->ip_bhs_mbuf != NULL);
	/* The kernel doesn't generate PDUs with AHS. */
	MPASS(ip->ip_ahs_mbuf == NULL && ip->ip_ahs_len == 0);

	ICL_CONN_LOCK_ASSERT(ic);

	icp->cb = cb;

	/* NOTE: sowriteable without so_snd lock is a mostly harmless race. */
	if (ic->ic_disconnecting || so == NULL || !sowriteable(so)) {
		icl_cxgbei_pdu_done(ip, ENOTCONN);
		return;
	}

	m = finalize_pdu(icc, icp);
	M_ASSERTPKTHDR(m);
	MPASS((m->m_pkthdr.len & 3) == 0);

	/*
	 * Do not get inp from toep->inp as the toepcb might have detached
	 * already.
	 */
	inp = sotoinpcb(so);
	CURVNET_SET(toep->vnet);
	NET_EPOCH_ENTER(et);
	INP_WLOCK(inp);
	if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) ||
	    __predict_false((toep->flags & TPF_ATTACHED) == 0))
		m_freem(m);
	else {
		mbufq_enqueue(&toep->ulp_pduq, m);
		t4_push_pdus(icc->sc, toep, 0);
	}
	INP_WUNLOCK(inp);
	NET_EPOCH_EXIT(et);
	CURVNET_RESTORE();
}
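/*
 * Transmit happens in the context of the caller, with the ICL connection
 * lock held: finalize_pdu() turns the PDU into a plain mbuf chain, and
 * the chain is handed to t4_push_pdus() under the inpcb lock inside a
 * network epoch section.  The INP_DROPPED/INP_TIMEWAIT and TPF_ATTACHED
 * checks are what make this safe against a connection torn down after
 * the sowriteable() test above.
 */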
static struct icl_conn *
icl_cxgbei_new_conn(const char *name, struct mtx *lock)
{
	struct icl_cxgbei_conn *icc;
	struct icl_conn *ic;

	refcount_acquire(&icl_cxgbei_ncons);

	icc = (struct icl_cxgbei_conn *)kobj_create(&icl_cxgbei_class, M_CXGBE,
	    M_WAITOK | M_ZERO);
	icc->icc_signature = CXGBEI_CONN_SIGNATURE;
	STAILQ_INIT(&icc->rcvd_pdus);

	icc->cmp_table = hashinit(64, M_CXGBEI, &icc->cmp_hash_mask);
	mtx_init(&icc->cmp_lock, "cxgbei_cmp", NULL, MTX_DEF);

	ic = &icc->ic;
	ic->ic_lock = lock;

#ifdef DIAGNOSTIC
	refcount_init(&ic->ic_outstanding_pdus, 0);
#endif
	ic->ic_name = name;
	ic->ic_offload = "cxgbei";
	ic->ic_unmapped = false;

	CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

	return (ic);
}

void
icl_cxgbei_conn_free(struct icl_conn *ic)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);

	CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

	mtx_destroy(&icc->cmp_lock);
	hashdestroy(icc->cmp_table, M_CXGBEI, icc->cmp_hash_mask);
	kobj_delete((struct kobj *)icc, M_CXGBE);
	refcount_release(&icl_cxgbei_ncons);
}

static int
icl_cxgbei_setsockopt(struct icl_conn *ic, struct socket *so, int sspace,
    int rspace)
{
	struct sockopt opt;
	int error, one = 1, ss, rs;

	ss = max(sendspace, sspace);
	rs = max(recvspace, rspace);

	error = soreserve(so, ss, rs);
	if (error != 0) {
		icl_cxgbei_conn_close(ic);
		return (error);
	}
	SOCKBUF_LOCK(&so->so_snd);
	so->so_snd.sb_flags |= SB_AUTOSIZE;
	SOCKBUF_UNLOCK(&so->so_snd);
	SOCKBUF_LOCK(&so->so_rcv);
	so->so_rcv.sb_flags |= SB_AUTOSIZE;
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * Disable Nagle.
	 */
	bzero(&opt, sizeof(opt));
	opt.sopt_dir = SOPT_SET;
	opt.sopt_level = IPPROTO_TCP;
	opt.sopt_name = TCP_NODELAY;
	opt.sopt_val = &one;
	opt.sopt_valsize = sizeof(one);
	error = sosetopt(so, &opt);
	if (error != 0) {
		icl_cxgbei_conn_close(ic);
		return (error);
	}

	return (0);
}
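/*
 * The socket buffer sizes requested above are the larger of the
 * kern.icl.cxgbei.{send,recv}space tunables (1MB by default) and the
 * maximum PDU sizes computed by icl_cxgbei_conn_handoff(); SB_AUTOSIZE
 * then lets the buffers grow further if the connection needs it.  Nagle
 * is turned off so that a small PDU, e.g. a lone R2T or SCSI response,
 * is not held back waiting for more data.
 */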
/*
 * Request/response structure used to find out the adapter offloading a socket.
 */
struct find_ofld_adapter_rr {
	struct socket *so;
	struct adapter *sc;	/* result */
};

static void
find_offload_adapter(struct adapter *sc, void *arg)
{
	struct find_ofld_adapter_rr *fa = arg;
	struct socket *so = fa->so;
	struct tom_data *td = sc->tom_softc;
	struct tcpcb *tp;
	struct inpcb *inp;

	/* Non-TCP were filtered out earlier. */
	MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);

	if (fa->sc != NULL)
		return;		/* Found already. */

	if (td == NULL)
		return;		/* TOE not enabled on this adapter. */

	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
		tp = intotcpcb(inp);
		if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
			fa->sc = sc;	/* Found. */
	}
	INP_WUNLOCK(inp);
}

static bool
is_memfree(struct adapter *sc)
{
	uint32_t em;

	em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
	if ((em & F_EXT_MEM_ENABLE) != 0)
		return (false);
	if (is_t5(sc) && (em & F_EXT_MEM1_ENABLE) != 0)
		return (false);
	return (true);
}

/* XXXNP: move this to t4_tom. */
static void
send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
{
	struct wrqe *wr;
	struct fw_flowc_wr *flowc;
	const u_int nparams = 1;
	u_int flowclen;
	struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

	flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

	wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
	if (wr == NULL) {
		/* XXX */
		panic("%s: allocation failure.", __func__);
	}
	flowc = wrtod(wr);
	memset(flowc, 0, wr->wr_len);

	flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
	    V_FW_FLOWC_WR_NPARAMS(nparams));
	flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
	    V_FW_WR_FLOWID(toep->tid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
	flowc->mnemval[0].val = htobe32(maxlen);

	txsd->tx_credits = howmany(flowclen, 16);
	txsd->plen = 0;
	KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
	    ("%s: not enough credits (%d)", __func__, toep->tx_credits));
	toep->tx_credits -= txsd->tx_credits;
	if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
		toep->txsd_pidx = 0;
	toep->txsd_avail--;

	t4_wrq_tx(sc, wr);
}

static void
set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, u_int ulp_submode)
{
	uint64_t val;

	CTR3(KTR_CXGBE, "%s: tid %u, ULP_MODE_ISCSI, submode=%#x",
	    __func__, toep->tid, ulp_submode);

	val = V_TCB_ULP_TYPE(ULP_MODE_ISCSI) | V_TCB_ULP_RAW(ulp_submode);
	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_ULP_TYPE,
	    V_TCB_ULP_TYPE(M_TCB_ULP_TYPE) | V_TCB_ULP_RAW(M_TCB_ULP_RAW), val,
	    0, 0);

	val = V_TF_RX_FLOW_CONTROL_DISABLE(1ULL);
	t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS, val, val, 0, 0);
}
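/*
 * set_ulp_mode_iscsi() flips the connection's TCB to ULP_MODE_ISCSI via
 * a pair of SET_TCB_FIELD requests: the first sets the ULP type and the
 * raw submode bits (a combination of ULP_CRC_HEADER and ULP_CRC_DATA,
 * matching the digests negotiated at login), the second disables RX
 * flow control, presumably so the TOE keeps delivering payload while
 * PDUs are consumed out of band by the iSCSI engine.
 */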
/*
 * XXXNP: Who is responsible for cleaning up the socket if this returns with an
 * error?  Review all error paths.
 *
 * XXXNP: What happens to the socket's fd reference if the operation is
 * successful, and how does that affect the socket's life cycle?
 */
int
icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct find_ofld_adapter_rr fa;
	struct file *fp;
	struct socket *so;
	struct inpcb *inp;
	struct tcpcb *tp;
	struct toepcb *toep;
	cap_rights_t rights;
	u_int max_rx_pdu_len, max_tx_pdu_len;
	int error, max_iso_pdus;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ICL_CONN_LOCK_ASSERT_NOT(ic);

	/*
	 * Steal the socket from userland.
	 */
	error = fget(curthread, fd,
	    cap_rights_init_one(&rights, CAP_SOCK_CLIENT), &fp);
	if (error != 0)
		return (error);
	if (fp->f_type != DTYPE_SOCKET) {
		fdrop(fp, curthread);
		return (EINVAL);
	}
	so = fp->f_data;
	if (so->so_type != SOCK_STREAM ||
	    so->so_proto->pr_protocol != IPPROTO_TCP) {
		fdrop(fp, curthread);
		return (EINVAL);
	}

	ICL_CONN_LOCK(ic);
	if (ic->ic_socket != NULL) {
		ICL_CONN_UNLOCK(ic);
		fdrop(fp, curthread);
		return (EBUSY);
	}
	ic->ic_disconnecting = false;
	ic->ic_socket = so;
	fp->f_ops = &badfileops;
	fp->f_data = NULL;
	fdrop(fp, curthread);
	ICL_CONN_UNLOCK(ic);

	/* Find the adapter offloading this socket. */
	fa.sc = NULL;
	fa.so = so;
	t4_iterate(find_offload_adapter, &fa);
	if (fa.sc == NULL)
		return (EINVAL);
	icc->sc = fa.sc;

	max_rx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_recv_data_segment_length;
	max_tx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_send_data_segment_length;
	if (ic->ic_header_crc32c) {
		max_rx_pdu_len += ISCSI_HEADER_DIGEST_SIZE;
		max_tx_pdu_len += ISCSI_HEADER_DIGEST_SIZE;
	}
	if (ic->ic_data_crc32c) {
		max_rx_pdu_len += ISCSI_DATA_DIGEST_SIZE;
		max_tx_pdu_len += ISCSI_DATA_DIGEST_SIZE;
	}

	inp = sotoinpcb(so);
	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
		INP_WUNLOCK(inp);
		return (EBUSY);
	}

	/*
	 * socket could not have been "unoffloaded" if here.
	 */
	MPASS(tp->t_flags & TF_TOE);
	MPASS(tp->tod != NULL);
	MPASS(tp->t_toe != NULL);
	toep = tp->t_toe;
	MPASS(toep->vi->adapter == icc->sc);

	if (ulp_mode(toep) != ULP_MODE_NONE) {
		INP_WUNLOCK(inp);
		return (EINVAL);
	}

	icc->toep = toep;
	icc->cwt = cxgbei_select_worker_thread(icc);

	icc->ulp_submode = 0;
	if (ic->ic_header_crc32c)
		icc->ulp_submode |= ULP_CRC_HEADER;
	if (ic->ic_data_crc32c)
		icc->ulp_submode |= ULP_CRC_DATA;

	if (icc->sc->tt.iso && chip_id(icc->sc) >= CHELSIO_T5 &&
	    !is_memfree(icc->sc)) {
		max_iso_pdus = CXGBEI_MAX_ISO_PAYLOAD / max_tx_pdu_len;
		ic->ic_hw_isomax = max_iso_pdus *
		    ic->ic_max_send_data_segment_length;
	} else
		max_iso_pdus = 1;

	toep->params.ulp_mode = ULP_MODE_ISCSI;
	toep->ulpcb = icc;

	send_iscsi_flowc_wr(icc->sc, toep,
	    roundup(max_iso_pdus * max_tx_pdu_len, tp->t_maxseg));
	set_ulp_mode_iscsi(icc->sc, toep, icc->ulp_submode);
	INP_WUNLOCK(inp);

	return (icl_cxgbei_setsockopt(ic, so, max_tx_pdu_len, max_rx_pdu_len));
}
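/*
 * Worked example for the ISO sizing above, assuming a peer
 * MaxRecvDataSegmentLength of 8192 with both digests enabled:
 * max_tx_pdu_len = 48 + 4 + 4 + 8192 = 8248.  With a hypothetical
 * CXGBEI_MAX_ISO_PAYLOAD of 256KB that gives max_iso_pdus =
 * 262144 / 8248 = 31, so ic_hw_isomax = 31 * 8192 = 253952 bytes, and
 * the FW_FLOWC_MNEM_TXDATAPLEN_MAX sent to the firmware is
 * 31 * 8248 rounded up to a multiple of the MSS.
 */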
void
icl_cxgbei_conn_close(struct icl_conn *ic)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct icl_pdu *ip;
	struct socket *so;
	struct sockbuf *sb;
	struct inpcb *inp;
	struct toepcb *toep = icc->toep;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
	ICL_CONN_LOCK_ASSERT_NOT(ic);

	ICL_CONN_LOCK(ic);
	so = ic->ic_socket;
	if (ic->ic_disconnecting || so == NULL) {
		CTR4(KTR_CXGBE, "%s: icc %p (disconnecting = %d), so %p",
		    __func__, icc, ic->ic_disconnecting, so);
		ICL_CONN_UNLOCK(ic);
		return;
	}
	ic->ic_disconnecting = true;

#ifdef DIAGNOSTIC
	KASSERT(ic->ic_outstanding_pdus == 0,
	    ("destroying session with %d outstanding PDUs",
	     ic->ic_outstanding_pdus));
#endif
	ICL_CONN_UNLOCK(ic);

	CTR3(KTR_CXGBE, "%s: tid %d, icc %p", __func__, toep ? toep->tid : -1,
	    icc);
	inp = sotoinpcb(so);
	sb = &so->so_rcv;
	INP_WLOCK(inp);
	if (toep != NULL) {	/* NULL if connection was never offloaded. */
		toep->ulpcb = NULL;

		/* Discard PDUs queued for TX. */
		mbufq_drain(&toep->ulp_pduq);

		/*
		 * Wait for the cwt threads to stop processing this
		 * connection.
		 */
		SOCKBUF_LOCK(sb);
		if (icc->rx_flags & RXF_ACTIVE) {
			volatile u_int *p = &icc->rx_flags;

			SOCKBUF_UNLOCK(sb);
			INP_WUNLOCK(inp);

			while (*p & RXF_ACTIVE)
				pause("conclo", 1);

			INP_WLOCK(inp);
			SOCKBUF_LOCK(sb);
		}

		/*
		 * Discard received PDUs not passed to the iSCSI
		 * layer.
		 */
		while (!STAILQ_EMPTY(&icc->rcvd_pdus)) {
			ip = STAILQ_FIRST(&icc->rcvd_pdus);
			STAILQ_REMOVE_HEAD(&icc->rcvd_pdus, ip_next);
			icl_cxgbei_pdu_done(ip, ENOTCONN);
		}
		SOCKBUF_UNLOCK(sb);

		/*
		 * Grab a reference to use when waiting for the final
		 * CPL to be received.  If toep->inp is NULL, then
		 * final_cpl_received() has already been called (e.g.
		 * due to the peer sending a RST).
		 */
		if (toep->inp != NULL) {
			toep = hold_toepcb(toep);
			toep->flags |= TPF_WAITING_FOR_FINAL;
		} else
			toep = NULL;
	}
	INP_WUNLOCK(inp);

	ICL_CONN_LOCK(ic);
	ic->ic_socket = NULL;
	ICL_CONN_UNLOCK(ic);

	/*
	 * XXXNP: we should send RST instead of FIN when PDUs held in various
	 * queues were purged instead of delivered reliably but soabort isn't
	 * really general purpose and wouldn't do the right thing here.
	 */
	soclose(so);

	/*
	 * Wait for the socket to fully close.  This ensures any
	 * pending received data has been received (and in particular,
	 * any data that would be received by DDP has been handled).
	 * Callers assume that it is safe to free buffers for tasks
	 * and transfers after this function returns.
	 */
	if (toep != NULL) {
		struct mtx *lock = mtx_pool_find(mtxpool_sleep, toep);

		mtx_lock(lock);
		while ((toep->flags & TPF_WAITING_FOR_FINAL) != 0)
			mtx_sleep(toep, lock, PSOCK, "conclo2", 0);
		mtx_unlock(lock);
		free_toepcb(toep);
	}
}
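/*
 * Teardown above is ordered carefully: TX PDUs are drained and the cwt
 * rx threads are waited out before the queue of undelivered rcvd_pdus is
 * freed, a toepcb reference is taken while the inpcb is still around,
 * and only then is the socket closed.  The final TPF_WAITING_FOR_FINAL
 * sleep ensures the last CPL from the card has arrived, so callers may
 * free task and transfer buffers after this returns without the DDP
 * engine touching them again.
 */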
static void
cxgbei_insert_cmp(struct icl_cxgbei_conn *icc, struct cxgbei_cmp *cmp,
    uint32_t tt)
{
#ifdef INVARIANTS
	struct cxgbei_cmp *cmp2;
#endif

	cmp->tt = tt;

	mtx_lock(&icc->cmp_lock);
#ifdef INVARIANTS
	LIST_FOREACH(cmp2, &icc->cmp_table[TT_HASH(icc, tt)], link) {
		KASSERT(cmp2->tt != tt, ("%s: duplicate cmp", __func__));
	}
#endif
	LIST_INSERT_HEAD(&icc->cmp_table[TT_HASH(icc, tt)], cmp, link);
	mtx_unlock(&icc->cmp_lock);
}

struct cxgbei_cmp *
cxgbei_find_cmp(struct icl_cxgbei_conn *icc, uint32_t tt)
{
	struct cxgbei_cmp *cmp;

	mtx_lock(&icc->cmp_lock);
	LIST_FOREACH(cmp, &icc->cmp_table[TT_HASH(icc, tt)], link) {
		if (cmp->tt == tt)
			break;
	}
	mtx_unlock(&icc->cmp_lock);
	return (cmp);
}

static void
cxgbei_rm_cmp(struct icl_cxgbei_conn *icc, struct cxgbei_cmp *cmp)
{
#ifdef INVARIANTS
	struct cxgbei_cmp *cmp2;
#endif

	mtx_lock(&icc->cmp_lock);

#ifdef INVARIANTS
	LIST_FOREACH(cmp2, &icc->cmp_table[TT_HASH(icc, cmp->tt)], link) {
		if (cmp2 == cmp)
			goto found;
	}
	panic("%s: could not find cmp", __func__);
found:
#endif
	LIST_REMOVE(cmp, link);
	mtx_unlock(&icc->cmp_lock);
}

int
icl_cxgbei_conn_task_setup(struct icl_conn *ic, struct icl_pdu *ip,
    struct ccb_scsiio *csio, uint32_t *ittp, void **arg)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct toepcb *toep = icc->toep;
	struct adapter *sc = icc->sc;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct ppod_region *pr = &ci->pr;
	struct cxgbei_ddp_state *ddp;
	struct ppod_reservation *prsv;
	struct inpcb *inp;
	struct mbufq mq;
	uint32_t itt;
	int rc = 0;

	ICL_CONN_LOCK_ASSERT(ic);

	/* This is for the offload driver's state.  Must not be set already. */
	MPASS(arg != NULL);
	MPASS(*arg == NULL);

	if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN ||
	    csio->dxfer_len < ci->ddp_threshold || ic->ic_disconnecting ||
	    ic->ic_socket == NULL) {
no_ddp:
		/*
		 * No DDP for this I/O.  Allocate an ITT (based on the one
		 * passed in) that cannot be a valid hardware DDP tag in the
		 * iSCSI region.
		 */
		itt = *ittp & M_PPOD_TAG;
		itt = V_PPOD_TAG(itt) | pr->pr_invalid_bit;
		*ittp = htobe32(itt);
		MPASS(*arg == NULL);	/* State is maintained for DDP only. */
		if (rc != 0)
			counter_u64_add(
			    toep->ofld_rxq->rx_iscsi_ddp_setup_error, 1);
		return (0);
	}

	/*
	 * Reserve resources for DDP, update the itt that should be used in the
	 * PDU, and save DDP specific state for this I/O in *arg.
	 */
	ddp = malloc(sizeof(*ddp), M_CXGBEI, M_NOWAIT | M_ZERO);
	if (ddp == NULL) {
		rc = ENOMEM;
		goto no_ddp;
	}
	prsv = &ddp->prsv;

	/* XXX add support for all CAM_DATA_ types */
	MPASS((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR);
	rc = t4_alloc_page_pods_for_buf(pr, (vm_offset_t)csio->data_ptr,
	    csio->dxfer_len, prsv);
	if (rc != 0) {
		free(ddp, M_CXGBEI);
		goto no_ddp;
	}

	mbufq_init(&mq, INT_MAX);
	rc = t4_write_page_pods_for_buf(sc, toep, prsv,
	    (vm_offset_t)csio->data_ptr, csio->dxfer_len, &mq);
	if (__predict_false(rc != 0)) {
		mbufq_drain(&mq);
		t4_free_page_pods(prsv);
		free(ddp, M_CXGBEI);
		goto no_ddp;
	}

	/*
	 * Do not get inp from toep->inp as the toepcb might have
	 * detached already.
	 */
	inp = sotoinpcb(ic->ic_socket);
	INP_WLOCK(inp);
	if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) != 0) {
		INP_WUNLOCK(inp);
		mbufq_drain(&mq);
		t4_free_page_pods(prsv);
		free(ddp, M_CXGBEI);
		goto no_ddp;
	}
	mbufq_concat(&toep->ulp_pduq, &mq);
	INP_WUNLOCK(inp);

	ddp->cmp.last_datasn = -1;
	cxgbei_insert_cmp(icc, &ddp->cmp, prsv->prsv_tag);
	*ittp = htobe32(prsv->prsv_tag);
	*arg = prsv;
	counter_u64_add(toep->ofld_rxq->rx_iscsi_ddp_setup_ok, 1);
	return (0);
}

void
icl_cxgbei_conn_task_done(struct icl_conn *ic, void *arg)
{

	if (arg != NULL) {
		struct cxgbei_ddp_state *ddp = arg;

		cxgbei_rm_cmp(ic_to_icc(ic), &ddp->cmp);
		t4_free_page_pods(&ddp->prsv);
		free(ddp, M_CXGBEI);
	}
}
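/*
 * Two details of the ITT handling above are easy to miss.  First, tags
 * produced down the no_ddp path have pr_invalid_bit set, so they can
 * never collide with a real hardware DDP tag and cxgbei_find_cmp() will
 * simply not find them.  Second, task_setup stores &ddp->prsv in *arg
 * while task_done frees it as a struct cxgbei_ddp_state *; this works
 * because prsv is the first member of that structure, so the two
 * pointers are identical.
 */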
static inline bool
ddp_sgl_check(struct ctl_sg_entry *sg, int entries, int xferlen)
{
#ifdef INVARIANTS
	int total_len = 0;
#endif

	MPASS(entries > 0);
	if (((vm_offset_t)sg[--entries].addr & 3U) != 0)
		return (false);

#ifdef INVARIANTS
	total_len += sg[entries].len;
#endif

	while (--entries >= 0) {
		if (((vm_offset_t)sg[entries].addr & PAGE_MASK) != 0 ||
		    (sg[entries].len % PAGE_SIZE) != 0)
			return (false);
#ifdef INVARIANTS
		total_len += sg[entries].len;
#endif
	}

	MPASS(total_len == xferlen);
	return (true);
}

#define	io_to_ddp_state(io)	((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr)

int
icl_cxgbei_conn_transfer_setup(struct icl_conn *ic, struct icl_pdu *ip,
    union ctl_io *io, uint32_t *tttp, void **arg)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct toepcb *toep = icc->toep;
	struct ctl_scsiio *ctsio = &io->scsiio;
	struct adapter *sc = icc->sc;
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct ppod_region *pr = &ci->pr;
	struct cxgbei_ddp_state *ddp;
	struct ppod_reservation *prsv;
	struct ctl_sg_entry *sgl, sg_entry;
	struct inpcb *inp;
	struct mbufq mq;
	int sg_entries = ctsio->kern_sg_entries;
	uint32_t ttt;
	int xferlen, rc = 0, alias;

	/* This is for the offload driver's state.  Must not be set already. */
	MPASS(arg != NULL);
	MPASS(*arg == NULL);

	if (ctsio->ext_data_filled == 0) {
		int first_burst;
#ifdef INVARIANTS
		struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

		MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
		MPASS(ic == ip->ip_conn);
		MPASS(ip->ip_bhs_mbuf != NULL);
#endif
		first_burst = icl_pdu_data_segment_length(ip);

		/*
		 * Note that ICL calls conn_transfer_setup even if the first
		 * burst had everything and there's nothing left to transfer.
		 *
		 * NB: The CTL frontend might have provided a buffer
		 * whose length (kern_data_len) is smaller than the
		 * FirstBurstLength of unsolicited data.  Treat those
		 * as an empty transfer.
		 */
		xferlen = ctsio->kern_data_len;
		if (xferlen < first_burst ||
		    xferlen - first_burst < ci->ddp_threshold) {
no_ddp:
			/*
			 * No DDP for this transfer.  Allocate a TTT (based on
			 * the one passed in) that cannot be a valid hardware
			 * DDP tag in the iSCSI region.
			 */
			ttt = *tttp & M_PPOD_TAG;
			ttt = V_PPOD_TAG(ttt) | pr->pr_invalid_bit;
			*tttp = htobe32(ttt);
			MPASS(io_to_ddp_state(io) == NULL);
			if (rc != 0)
				counter_u64_add(
				    toep->ofld_rxq->rx_iscsi_ddp_setup_error, 1);
			return (0);
		}

		if (sg_entries == 0) {
			sgl = &sg_entry;
			sgl->len = xferlen;
			sgl->addr = (void *)ctsio->kern_data_ptr;
			sg_entries = 1;
		} else
			sgl = (void *)ctsio->kern_data_ptr;

		if (!ddp_sgl_check(sgl, sg_entries, xferlen))
			goto no_ddp;

		/*
		 * Reserve resources for DDP, update the ttt that should be used
		 * in the PDU, and save DDP specific state for this I/O.
		 */
		MPASS(io_to_ddp_state(io) == NULL);
		ddp = malloc(sizeof(*ddp), M_CXGBEI, M_NOWAIT | M_ZERO);
		if (ddp == NULL) {
			rc = ENOMEM;
			goto no_ddp;
		}
		prsv = &ddp->prsv;

		rc = t4_alloc_page_pods_for_sgl(pr, sgl, sg_entries, prsv);
		if (rc != 0) {
			free(ddp, M_CXGBEI);
			goto no_ddp;
		}

		mbufq_init(&mq, INT_MAX);
		rc = t4_write_page_pods_for_sgl(sc, toep, prsv, sgl, sg_entries,
		    xferlen, &mq);
		if (__predict_false(rc != 0)) {
			mbufq_drain(&mq);
			t4_free_page_pods(prsv);
			free(ddp, M_CXGBEI);
			goto no_ddp;
		}

		/*
		 * Do not get inp from toep->inp as the toepcb might
		 * have detached already.
		 */
		ICL_CONN_LOCK(ic);
		if (ic->ic_disconnecting || ic->ic_socket == NULL) {
			ICL_CONN_UNLOCK(ic);
			mbufq_drain(&mq);
			t4_free_page_pods(prsv);
			free(ddp, M_CXGBEI);
			return (ECONNRESET);
		}
		inp = sotoinpcb(ic->ic_socket);
		INP_WLOCK(inp);
		ICL_CONN_UNLOCK(ic);
		if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) != 0) {
			INP_WUNLOCK(inp);
			mbufq_drain(&mq);
			t4_free_page_pods(prsv);
			free(ddp, M_CXGBEI);
			return (ECONNRESET);
		}
		mbufq_concat(&toep->ulp_pduq, &mq);
		INP_WUNLOCK(inp);

		ddp->cmp.next_buffer_offset = ctsio->kern_rel_offset +
		    first_burst;
		ddp->cmp.last_datasn = -1;
		cxgbei_insert_cmp(icc, &ddp->cmp, prsv->prsv_tag);
		*tttp = htobe32(prsv->prsv_tag);
		io_to_ddp_state(io) = ddp;
		*arg = ctsio;
		counter_u64_add(toep->ofld_rxq->rx_iscsi_ddp_setup_ok, 1);
		return (0);
	}

	/*
	 * In the middle of an I/O.  A non-NULL page pod reservation indicates
	 * that a DDP buffer is being used for the I/O.
	 */
	ddp = io_to_ddp_state(ctsio);
	if (ddp == NULL)
		goto no_ddp;
	prsv = &ddp->prsv;

	alias = (prsv->prsv_tag & pr->pr_alias_mask) >> pr->pr_alias_shift;
	alias++;
	prsv->prsv_tag &= ~pr->pr_alias_mask;
	prsv->prsv_tag |= alias << pr->pr_alias_shift & pr->pr_alias_mask;

	ddp->cmp.last_datasn = -1;
	cxgbei_insert_cmp(icc, &ddp->cmp, prsv->prsv_tag);
	*tttp = htobe32(prsv->prsv_tag);
	*arg = ctsio;

	return (0);
}

void
icl_cxgbei_conn_transfer_done(struct icl_conn *ic, void *arg)
{
	struct ctl_scsiio *ctsio = arg;

	if (ctsio != NULL) {
		struct cxgbei_ddp_state *ddp;

		ddp = io_to_ddp_state(ctsio);
		MPASS(ddp != NULL);

		cxgbei_rm_cmp(ic_to_icc(ic), &ddp->cmp);
		if (ctsio->kern_data_len == ctsio->ext_data_filled ||
		    ic->ic_disconnecting) {
			t4_free_page_pods(&ddp->prsv);
			free(ddp, M_CXGBEI);
			io_to_ddp_state(ctsio) = NULL;
		}
	}
}

static void
cxgbei_limits(struct adapter *sc, void *arg)
{
	struct icl_drv_limits *idl = arg;
	struct cxgbei_data *ci;
	int max_dsl;

	if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4lims") != 0)
		return;

	if (uld_active(sc, ULD_ISCSI)) {
		ci = sc->iscsi_ulp_softc;
		MPASS(ci != NULL);

		max_dsl = ci->max_rx_data_len;
		if (idl->idl_max_recv_data_segment_length > max_dsl)
			idl->idl_max_recv_data_segment_length = max_dsl;

		max_dsl = ci->max_tx_data_len;
		if (idl->idl_max_send_data_segment_length > max_dsl)
			idl->idl_max_send_data_segment_length = max_dsl;
	}

	end_synchronized_op(sc, LOCK_HELD);
}

static int
icl_cxgbei_limits(struct icl_drv_limits *idl)
{

	/* Maximum allowed by the RFC.  cxgbei_limits will clip them. */
	idl->idl_max_recv_data_segment_length = (1 << 24) - 1;
	idl->idl_max_send_data_segment_length = (1 << 24) - 1;

	/* These are somewhat arbitrary. */
	idl->idl_max_burst_length = max_burst_length;
	idl->idl_first_burst_length = first_burst_length;

	t4_iterate(cxgbei_limits, idl);

	return (0);
}

int
icl_cxgbei_mod_load(void)
{
	int rc;

	refcount_init(&icl_cxgbei_ncons, 0);

	rc = icl_register("cxgbei", false, -100, icl_cxgbei_limits,
	    icl_cxgbei_new_conn);

	return (rc);
}

int
icl_cxgbei_mod_unload(void)
{

	if (icl_cxgbei_ncons != 0)
		return (EBUSY);

	icl_unregister("cxgbei", false);

	return (0);
}
#endif