/*-
 * Copyright (c) 2012 The FreeBSD Foundation
 * Copyright (c) 2015 Chelsio Communications, Inc.
 * All rights reserved.
 *
 * This software was developed by Edward Tomasz Napierala under sponsorship
 * from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * cxgbei implementation of iSCSI Common Layer kobj(9) interface.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/condvar.h>
#include <sys/conf.h>
#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/uio.h>
#include <machine/bus.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>

#include <dev/iscsi/icl.h>
#include <dev/iscsi/iscsi_proto.h>
#include <icl_conn_if.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_da.h>
#include <cam/ctl/ctl_io.h>
#include <cam/ctl/ctl.h>
#include <cam/ctl/ctl_backend.h>
#include <cam/ctl/ctl_error.h>
#include <cam/ctl/ctl_frontend.h>
#include <cam/ctl/ctl_debug.h>
#include <cam/ctl/ctl_ha.h>
#include <cam/ctl/ctl_ioctl.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_xpt.h>
#include <cam/cam_debug.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/cam_compat.h>
#include <cam/scsi/scsi_message.h>

#include "common/common.h"
#include "common/t4_regs.h"
#include "common/t4_tcb.h"
#include "tom/t4_tom.h"
#include "cxgbei.h"

/*
 * Use the page pod tag for the TT hash.
 */
#define TT_HASH(icc, tt)	(G_PPOD_TAG(tt) & (icc)->cmp_hash_mask)
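
/*
 * Per-transfer DDP state: a page pod reservation plus the cmp entry
 * used to match incoming data PDUs to the transfer.  prsv must remain
 * the first member: icl_cxgbei_conn_task_setup() passes out &ddp->prsv
 * through *arg, and icl_cxgbei_conn_task_done() casts that pointer
 * back to a struct cxgbei_ddp_state.
 */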
struct cxgbei_ddp_state {
        struct ppod_reservation prsv;
        struct cxgbei_cmp cmp;
};

static MALLOC_DEFINE(M_CXGBEI, "cxgbei", "cxgbei(4)");

SYSCTL_NODE(_kern_icl, OID_AUTO, cxgbei, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Chelsio iSCSI offload");
static int first_burst_length = 8192;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, first_burst_length, CTLFLAG_RWTUN,
    &first_burst_length, 0, "First burst length");
static int max_burst_length = 2 * 1024 * 1024;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, max_burst_length, CTLFLAG_RWTUN,
    &max_burst_length, 0, "Maximum burst length");
static int sendspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, sendspace, CTLFLAG_RWTUN,
    &sendspace, 0, "Default send socket buffer size");
static int recvspace = 1048576;
SYSCTL_INT(_kern_icl_cxgbei, OID_AUTO, recvspace, CTLFLAG_RWTUN,
    &recvspace, 0, "Default receive socket buffer size");

static volatile u_int icl_cxgbei_ncons;

static icl_conn_new_pdu_t icl_cxgbei_conn_new_pdu;
static icl_conn_pdu_data_segment_length_t
    icl_cxgbei_conn_pdu_data_segment_length;
static icl_conn_pdu_append_data_t icl_cxgbei_conn_pdu_append_data;
static icl_conn_pdu_get_data_t icl_cxgbei_conn_pdu_get_data;
static icl_conn_pdu_queue_t icl_cxgbei_conn_pdu_queue;
static icl_conn_pdu_queue_cb_t icl_cxgbei_conn_pdu_queue_cb;
static icl_conn_handoff_t icl_cxgbei_conn_handoff;
static icl_conn_free_t icl_cxgbei_conn_free;
static icl_conn_close_t icl_cxgbei_conn_close;
static icl_conn_task_setup_t icl_cxgbei_conn_task_setup;
static icl_conn_task_done_t icl_cxgbei_conn_task_done;
static icl_conn_transfer_setup_t icl_cxgbei_conn_transfer_setup;
static icl_conn_transfer_done_t icl_cxgbei_conn_transfer_done;

static kobj_method_t icl_cxgbei_methods[] = {
        KOBJMETHOD(icl_conn_new_pdu, icl_cxgbei_conn_new_pdu),
        KOBJMETHOD(icl_conn_pdu_free, icl_cxgbei_conn_pdu_free),
        KOBJMETHOD(icl_conn_pdu_data_segment_length,
            icl_cxgbei_conn_pdu_data_segment_length),
        KOBJMETHOD(icl_conn_pdu_append_data, icl_cxgbei_conn_pdu_append_data),
        KOBJMETHOD(icl_conn_pdu_get_data, icl_cxgbei_conn_pdu_get_data),
        KOBJMETHOD(icl_conn_pdu_queue, icl_cxgbei_conn_pdu_queue),
        KOBJMETHOD(icl_conn_pdu_queue_cb, icl_cxgbei_conn_pdu_queue_cb),
        KOBJMETHOD(icl_conn_handoff, icl_cxgbei_conn_handoff),
        KOBJMETHOD(icl_conn_free, icl_cxgbei_conn_free),
        KOBJMETHOD(icl_conn_close, icl_cxgbei_conn_close),
        KOBJMETHOD(icl_conn_task_setup, icl_cxgbei_conn_task_setup),
        KOBJMETHOD(icl_conn_task_done, icl_cxgbei_conn_task_done),
        KOBJMETHOD(icl_conn_transfer_setup, icl_cxgbei_conn_transfer_setup),
        KOBJMETHOD(icl_conn_transfer_done, icl_cxgbei_conn_transfer_done),
        { 0, 0 }
};

DEFINE_CLASS(icl_cxgbei, icl_cxgbei_methods, sizeof(struct icl_cxgbei_conn));
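
/*
 * PDU reference counting: a PDU starts with ref_cnt 1 and each
 * zero-copy (ICL_NOCOPY) buffer attached by
 * icl_cxgbei_conn_pdu_append_data() adds a reference through
 * m_extaddref().  The icp is freed, and any completion callback fired,
 * only when the last reference goes away.
 */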
void
icl_cxgbei_conn_pdu_free(struct icl_conn *ic, struct icl_pdu *ip)
{
        struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

        KASSERT(icp->ref_cnt != 0, ("freeing deleted PDU"));
        MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
        MPASS(ic == ip->ip_conn);

        m_freem(ip->ip_ahs_mbuf);
        m_freem(ip->ip_data_mbuf);
        m_freem(ip->ip_bhs_mbuf);

        KASSERT(ic != NULL || icp->ref_cnt == 1,
            ("orphaned PDU has outstanding references"));

        if (atomic_fetchadd_int(&icp->ref_cnt, -1) != 1)
                return;

        free(icp, M_CXGBEI);
#ifdef DIAGNOSTIC
        if (__predict_true(ic != NULL))
                refcount_release(&ic->ic_outstanding_pdus);
#endif
}

static void
icl_cxgbei_pdu_call_cb(struct icl_pdu *ip)
{
        struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

        MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);

        if (icp->cb != NULL)
                icp->cb(ip, icp->error);
#ifdef DIAGNOSTIC
        if (__predict_true(ip->ip_conn != NULL))
                refcount_release(&ip->ip_conn->ic_outstanding_pdus);
#endif
        free(icp, M_CXGBEI);
}

static void
icl_cxgbei_pdu_done(struct icl_pdu *ip, int error)
{
        struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

        if (error != 0)
                icp->error = error;

        m_freem(ip->ip_ahs_mbuf);
        ip->ip_ahs_mbuf = NULL;
        m_freem(ip->ip_data_mbuf);
        ip->ip_data_mbuf = NULL;
        m_freem(ip->ip_bhs_mbuf);
        ip->ip_bhs_mbuf = NULL;

        /*
         * All other references to this PDU should have been dropped
         * by the m_freem() of ip_data_mbuf.
         */
        if (atomic_fetchadd_int(&icp->ref_cnt, -1) == 1)
                icl_cxgbei_pdu_call_cb(ip);
        else
                __assert_unreachable();
}

static void
icl_cxgbei_mbuf_done(struct mbuf *mb)
{
        struct icl_cxgbei_pdu *icp = (struct icl_cxgbei_pdu *)mb->m_ext.ext_arg1;

        /*
         * NB: mb_free_mext() might leave ref_cnt as 1 without
         * decrementing it if it hits the fast path in the ref_cnt
         * check.
         */
        icl_cxgbei_pdu_call_cb(&icp->ip);
}

struct icl_pdu *
icl_cxgbei_new_pdu(int flags)
{
        struct icl_cxgbei_pdu *icp;
        struct icl_pdu *ip;
        struct mbuf *m;

        icp = malloc(sizeof(*icp), M_CXGBEI, flags | M_ZERO);
        if (__predict_false(icp == NULL))
                return (NULL);

        icp->icp_signature = CXGBEI_PDU_SIGNATURE;
        icp->ref_cnt = 1;
        ip = &icp->ip;

        m = m_gethdr(flags, MT_DATA);
        if (__predict_false(m == NULL)) {
                free(icp, M_CXGBEI);
                return (NULL);
        }

        ip->ip_bhs_mbuf = m;
        ip->ip_bhs = mtod(m, struct iscsi_bhs *);
        memset(ip->ip_bhs, 0, sizeof(*ip->ip_bhs));
        m->m_len = sizeof(struct iscsi_bhs);
        m->m_pkthdr.len = m->m_len;

        return (ip);
}

void
icl_cxgbei_new_pdu_set_conn(struct icl_pdu *ip, struct icl_conn *ic)
{

        ip->ip_conn = ic;
#ifdef DIAGNOSTIC
        refcount_acquire(&ic->ic_outstanding_pdus);
#endif
}

/*
 * Allocate an icl_pdu with an empty BHS to be filled in by the caller.
 */
static struct icl_pdu *
icl_cxgbei_conn_new_pdu(struct icl_conn *ic, int flags)
{
        struct icl_pdu *ip;

        ip = icl_cxgbei_new_pdu(flags);
        if (__predict_false(ip == NULL))
                return (NULL);
        icl_cxgbei_new_pdu_set_conn(ip, ic);

        return (ip);
}
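
/*
 * The BHS stores DataSegmentLength as a 24-bit big-endian value;
 * e.g. a length of 0x012345 is carried as the bytes 0x01, 0x23, 0x45.
 */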
static size_t
icl_pdu_data_segment_length(const struct icl_pdu *request)
{
        uint32_t len = 0;

        len += request->ip_bhs->bhs_data_segment_len[0];
        len <<= 8;
        len += request->ip_bhs->bhs_data_segment_len[1];
        len <<= 8;
        len += request->ip_bhs->bhs_data_segment_len[2];

        return (len);
}

size_t
icl_cxgbei_conn_pdu_data_segment_length(struct icl_conn *ic,
    const struct icl_pdu *request)
{

        return (icl_pdu_data_segment_length(request));
}

static struct mbuf *
finalize_pdu(struct icl_cxgbei_conn *icc, struct icl_cxgbei_pdu *icp)
{
        struct icl_pdu *ip = &icp->ip;
        uint8_t ulp_submode, padding;
        struct mbuf *m, *last;
        struct iscsi_bhs *bhs;
        int data_len;

        /*
         * Fix up the data segment mbuf first.
         */
        m = ip->ip_data_mbuf;
        ulp_submode = icc->ulp_submode;
        if (m != NULL) {
                last = m_last(m);

                /*
                 * Round up the data segment to a 4B boundary.  Pad with 0 if
                 * necessary.  There will definitely be room in the mbuf.
                 */
                padding = roundup2(ip->ip_data_len, 4) - ip->ip_data_len;
                if (padding != 0) {
                        MPASS(padding <= M_TRAILINGSPACE(last));
                        bzero(mtod(last, uint8_t *) + last->m_len, padding);
                        last->m_len += padding;
                }
        } else {
                MPASS(ip->ip_data_len == 0);
                ulp_submode &= ~ULP_CRC_DATA;
                padding = 0;
        }

        /*
         * Now the header mbuf that has the BHS.
         */
        m = ip->ip_bhs_mbuf;
        MPASS(m->m_pkthdr.len == sizeof(struct iscsi_bhs));
        MPASS(m->m_len == sizeof(struct iscsi_bhs));

        bhs = ip->ip_bhs;
        data_len = ip->ip_data_len;
        if (data_len > icc->ic.ic_max_send_data_segment_length) {
                struct iscsi_bhs_data_in *bhsdi;
                int flags;

                KASSERT(padding == 0, ("%s: ISO with padding %d for icp %p",
                    __func__, padding, icp));
                switch (bhs->bhs_opcode) {
                case ISCSI_BHS_OPCODE_SCSI_DATA_OUT:
                        flags = 1;
                        break;
                case ISCSI_BHS_OPCODE_SCSI_DATA_IN:
                        flags = 2;
                        break;
                default:
                        panic("invalid opcode %#x for ISO", bhs->bhs_opcode);
                }
                data_len = icc->ic.ic_max_send_data_segment_length;
                bhsdi = (struct iscsi_bhs_data_in *)bhs;
                if (bhsdi->bhsdi_flags & BHSDI_FLAGS_F) {
                        /*
                         * Firmware will set F on the final PDU in the
                         * burst.
                         */
                        flags |= CXGBE_ISO_F;
                        bhsdi->bhsdi_flags &= ~BHSDI_FLAGS_F;
                }
                set_mbuf_iscsi_iso(m, true);
                set_mbuf_iscsi_iso_flags(m, flags);
                set_mbuf_iscsi_iso_mss(m, data_len);
        }

        bhs->bhs_data_segment_len[2] = data_len;
        bhs->bhs_data_segment_len[1] = data_len >> 8;
        bhs->bhs_data_segment_len[0] = data_len >> 16;

        /*
         * Extract mbuf chain from PDU.
         */
        m->m_pkthdr.len += ip->ip_data_len + padding;
        m->m_next = ip->ip_data_mbuf;
        set_mbuf_ulp_submode(m, ulp_submode);
        ip->ip_bhs_mbuf = NULL;
        ip->ip_data_mbuf = NULL;
        ip->ip_bhs = NULL;

        /*
         * Drop PDU reference on icp.  Additional references might
         * still be held by zero-copy PDU buffers (ICL_NOCOPY).
         */
        if (atomic_fetchadd_int(&icp->ref_cnt, -1) == 1)
                icl_cxgbei_pdu_call_cb(ip);

        return (m);
}
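
/*
 * Append data to a PDU's data segment.  With ICL_NOCOPY the caller's
 * buffer is attached as read-only external mbuf storage and is owned
 * by the PDU until its last reference is dropped; otherwise the data
 * is copied, using 16KB jumbo clusters for large requests.
 */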
int
icl_cxgbei_conn_pdu_append_data(struct icl_conn *ic, struct icl_pdu *ip,
    const void *addr, size_t len, int flags)
{
        struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
        struct mbuf *m, *m_tail;
        const char *src;

        MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
        MPASS(ic == ip->ip_conn);
        KASSERT(len > 0, ("%s: len is %jd", __func__, (intmax_t)len));

        m_tail = ip->ip_data_mbuf;
        if (m_tail != NULL)
                for (; m_tail->m_next != NULL; m_tail = m_tail->m_next)
                        ;

        if (flags & ICL_NOCOPY) {
                m = m_get(flags & ~ICL_NOCOPY, MT_DATA);
                if (m == NULL) {
                        ICL_WARN("failed to allocate mbuf");
                        return (ENOMEM);
                }

                m->m_flags |= M_RDONLY;
                m_extaddref(m, __DECONST(char *, addr), len, &icp->ref_cnt,
                    icl_cxgbei_mbuf_done, icp, NULL);
                m->m_len = len;
                if (ip->ip_data_mbuf == NULL) {
                        ip->ip_data_mbuf = m;
                        ip->ip_data_len = len;
                } else {
                        m_tail->m_next = m;
                        m_tail = m_tail->m_next;
                        ip->ip_data_len += len;
                }

                return (0);
        }

        src = (const char *)addr;

        /* Allocate as jumbo mbufs of size MJUM16BYTES. */
        while (len >= MJUM16BYTES) {
                m = m_getjcl(M_NOWAIT, MT_DATA, 0, MJUM16BYTES);
                if (__predict_false(m == NULL)) {
                        if ((flags & M_WAITOK) != 0) {
                                /* Fall back to non-jumbo mbufs. */
                                break;
                        }
                        return (ENOMEM);
                }
                memcpy(mtod(m, void *), src, MJUM16BYTES);
                m->m_len = MJUM16BYTES;
                if (ip->ip_data_mbuf == NULL) {
                        ip->ip_data_mbuf = m_tail = m;
                        ip->ip_data_len = MJUM16BYTES;
                } else {
                        m_tail->m_next = m;
                        m_tail = m_tail->m_next;
                        ip->ip_data_len += MJUM16BYTES;
                }
                src += MJUM16BYTES;
                len -= MJUM16BYTES;
        }

        /* Allocate mbuf chain for the remaining data. */
        if (len != 0) {
                m = m_getm2(NULL, len, flags, MT_DATA, 0);
                if (__predict_false(m == NULL))
                        return (ENOMEM);
                if (ip->ip_data_mbuf == NULL) {
                        ip->ip_data_mbuf = m;
                        ip->ip_data_len = len;
                } else {
                        m_tail->m_next = m;
                        ip->ip_data_len += len;
                }
                for (; m != NULL; m = m->m_next) {
                        m->m_len = min(len, M_SIZE(m));
                        memcpy(mtod(m, void *), src, m->m_len);
                        src += m->m_len;
                        len -= m->m_len;
                }
                MPASS(len == 0);
        }
        MPASS(ip->ip_data_len <= max(ic->ic_max_send_data_segment_length,
            ic->ic_hw_isomax));

        return (0);
}

void
icl_cxgbei_conn_pdu_get_data(struct icl_conn *ic, struct icl_pdu *ip,
    size_t off, void *addr, size_t len)
{
        struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

        if (icp->icp_flags & ICPF_RX_DDP)
                return; /* data is DDP'ed, no need to copy */
        m_copydata(ip->ip_data_mbuf, off, len, addr);
}

void
icl_cxgbei_conn_pdu_queue(struct icl_conn *ic, struct icl_pdu *ip)
{
        icl_cxgbei_conn_pdu_queue_cb(ic, ip, NULL);
}
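
/*
 * Transmit path: finalize_pdu() turns the PDU into a single mbuf chain
 * (header + data + padding, with ISO and ULP submode metadata), which
 * is then queued on the toepcb's ULP PDU queue and pushed to the chip
 * via t4_push_pdus().
 */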
void
icl_cxgbei_conn_pdu_queue_cb(struct icl_conn *ic, struct icl_pdu *ip,
    icl_pdu_cb cb)
{
        struct epoch_tracker et;
        struct icl_cxgbei_conn *icc = ic_to_icc(ic);
        struct icl_cxgbei_pdu *icp = ip_to_icp(ip);
        struct socket *so = ic->ic_socket;
        struct toepcb *toep = icc->toep;
        struct inpcb *inp;
        struct mbuf *m;

        MPASS(ic == ip->ip_conn);
        MPASS(ip->ip_bhs_mbuf != NULL);
        /* The kernel doesn't generate PDUs with AHS. */
        MPASS(ip->ip_ahs_mbuf == NULL && ip->ip_ahs_len == 0);

        ICL_CONN_LOCK_ASSERT(ic);

        icp->cb = cb;

        /* NOTE: sowriteable without so_snd lock is a mostly harmless race. */
        if (ic->ic_disconnecting || so == NULL || !sowriteable(so)) {
                icl_cxgbei_pdu_done(ip, ENOTCONN);
                return;
        }

        m = finalize_pdu(icc, icp);
        M_ASSERTPKTHDR(m);
        MPASS((m->m_pkthdr.len & 3) == 0);

        /*
         * Do not get inp from toep->inp as the toepcb might have detached
         * already.
         */
        inp = sotoinpcb(so);
        CURVNET_SET(toep->vnet);
        NET_EPOCH_ENTER(et);
        INP_WLOCK(inp);
        if (__predict_false(inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) ||
            __predict_false((toep->flags & TPF_ATTACHED) == 0))
                m_freem(m);
        else {
                mbufq_enqueue(&toep->ulp_pduq, m);
                t4_push_pdus(icc->sc, toep, 0);
        }
        INP_WUNLOCK(inp);
        NET_EPOCH_EXIT(et);
        CURVNET_RESTORE();
}

static struct icl_conn *
icl_cxgbei_new_conn(const char *name, struct mtx *lock)
{
        struct icl_cxgbei_conn *icc;
        struct icl_conn *ic;

        refcount_acquire(&icl_cxgbei_ncons);

        icc = (struct icl_cxgbei_conn *)kobj_create(&icl_cxgbei_class, M_CXGBE,
            M_WAITOK | M_ZERO);
        icc->icc_signature = CXGBEI_CONN_SIGNATURE;
        STAILQ_INIT(&icc->rcvd_pdus);

        icc->cmp_table = hashinit(64, M_CXGBEI, &icc->cmp_hash_mask);
        mtx_init(&icc->cmp_lock, "cxgbei_cmp", NULL, MTX_DEF);

        ic = &icc->ic;
        ic->ic_lock = lock;

#ifdef DIAGNOSTIC
        refcount_init(&ic->ic_outstanding_pdus, 0);
#endif
        ic->ic_name = name;
        ic->ic_offload = "cxgbei";
        ic->ic_unmapped = false;

        CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

        return (ic);
}

void
icl_cxgbei_conn_free(struct icl_conn *ic)
{
        struct icl_cxgbei_conn *icc = ic_to_icc(ic);

        MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);

        CTR2(KTR_CXGBE, "%s: icc %p", __func__, icc);

        mtx_destroy(&icc->cmp_lock);
        hashdestroy(icc->cmp_table, M_CXGBEI, icc->cmp_hash_mask);
        kobj_delete((struct kobj *)icc, M_CXGBE);
        refcount_release(&icl_cxgbei_ncons);
}

static int
icl_cxgbei_setsockopt(struct icl_conn *ic, struct socket *so, int sspace,
    int rspace)
{
        struct sockopt opt;
        int error, one = 1, ss, rs;

        ss = max(sendspace, sspace);
        rs = max(recvspace, rspace);

        error = soreserve(so, ss, rs);
        if (error != 0) {
                icl_cxgbei_conn_close(ic);
                return (error);
        }
        SOCKBUF_LOCK(&so->so_snd);
        so->so_snd.sb_flags |= SB_AUTOSIZE;
        SOCKBUF_UNLOCK(&so->so_snd);
        SOCKBUF_LOCK(&so->so_rcv);
        so->so_rcv.sb_flags |= SB_AUTOSIZE;
        SOCKBUF_UNLOCK(&so->so_rcv);

        /*
         * Disable Nagle.
         */
        bzero(&opt, sizeof(opt));
        opt.sopt_dir = SOPT_SET;
        opt.sopt_level = IPPROTO_TCP;
        opt.sopt_name = TCP_NODELAY;
        opt.sopt_val = &one;
        opt.sopt_valsize = sizeof(one);
        error = sosetopt(so, &opt);
        if (error != 0) {
                icl_cxgbei_conn_close(ic);
                return (error);
        }

        return (0);
}
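
/*
 * icl_cxgbei_conn_handoff() below uses t4_iterate() with this helper
 * to discover which adapter, if any, is offloading a given socket; the
 * match is on the socket's TOE device pointer.
 */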
/*
 * Request/response structure used to find out the adapter offloading a socket.
 */
struct find_ofld_adapter_rr {
        struct socket *so;
        struct adapter *sc;     /* result */
};

static void
find_offload_adapter(struct adapter *sc, void *arg)
{
        struct find_ofld_adapter_rr *fa = arg;
        struct socket *so = fa->so;
        struct tom_data *td = sc->tom_softc;
        struct tcpcb *tp;
        struct inpcb *inp;

        /* Non-TCP sockets were filtered out earlier. */
        MPASS(so->so_proto->pr_protocol == IPPROTO_TCP);

        if (fa->sc != NULL)
                return; /* Found already. */

        if (td == NULL)
                return; /* TOE not enabled on this adapter. */

        inp = sotoinpcb(so);
        INP_WLOCK(inp);
        if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) == 0) {
                tp = intotcpcb(inp);
                if (tp->t_flags & TF_TOE && tp->tod == &td->tod)
                        fa->sc = sc;    /* Found. */
        }
        INP_WUNLOCK(inp);
}

static bool
is_memfree(struct adapter *sc)
{
        uint32_t em;

        em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
        if ((em & F_EXT_MEM_ENABLE) != 0)
                return (false);
        if (is_t5(sc) && (em & F_EXT_MEM1_ENABLE) != 0)
                return (false);
        return (true);
}

/* XXXNP: move this to t4_tom. */
static void
send_iscsi_flowc_wr(struct adapter *sc, struct toepcb *toep, int maxlen)
{
        struct wrqe *wr;
        struct fw_flowc_wr *flowc;
        const u_int nparams = 1;
        u_int flowclen;
        struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];

        flowclen = sizeof(*flowc) + nparams * sizeof(struct fw_flowc_mnemval);

        wr = alloc_wrqe(roundup2(flowclen, 16), &toep->ofld_txq->wrq);
        if (wr == NULL) {
                /* XXX */
                panic("%s: allocation failure.", __func__);
        }
        flowc = wrtod(wr);
        memset(flowc, 0, wr->wr_len);

        flowc->op_to_nparams = htobe32(V_FW_WR_OP(FW_FLOWC_WR) |
            V_FW_FLOWC_WR_NPARAMS(nparams));
        flowc->flowid_len16 = htonl(V_FW_WR_LEN16(howmany(flowclen, 16)) |
            V_FW_WR_FLOWID(toep->tid));

        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_TXDATAPLEN_MAX;
        flowc->mnemval[0].val = htobe32(maxlen);

        txsd->tx_credits = howmany(flowclen, 16);
        txsd->plen = 0;
        KASSERT(toep->tx_credits >= txsd->tx_credits && toep->txsd_avail > 0,
            ("%s: not enough credits (%d)", __func__, toep->tx_credits));
        toep->tx_credits -= txsd->tx_credits;
        if (__predict_false(++toep->txsd_pidx == toep->txsd_total))
                toep->txsd_pidx = 0;
        toep->txsd_avail--;

        t4_wrq_tx(sc, wr);
}
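
/*
 * Switch the connection's TCB to ULP_MODE_ISCSI.  The submode is a
 * bitmask of ULP_CRC_HEADER and ULP_CRC_DATA and tells the hardware
 * which iSCSI digests (header and/or data CRC32c) to process.
 */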
static void
set_ulp_mode_iscsi(struct adapter *sc, struct toepcb *toep, u_int ulp_submode)
{
        uint64_t val;

        CTR3(KTR_CXGBE, "%s: tid %u, ULP_MODE_ISCSI, submode=%#x",
            __func__, toep->tid, ulp_submode);

        val = V_TCB_ULP_TYPE(ULP_MODE_ISCSI) | V_TCB_ULP_RAW(ulp_submode);
        t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_ULP_TYPE,
            V_TCB_ULP_TYPE(M_TCB_ULP_TYPE) | V_TCB_ULP_RAW(M_TCB_ULP_RAW), val,
            0, 0);

        val = V_TF_RX_FLOW_CONTROL_DISABLE(1ULL);
        t4_set_tcb_field(sc, toep->ctrlq, toep, W_TCB_T_FLAGS, val, val, 0, 0);
}

/*
 * XXXNP: Who is responsible for cleaning up the socket if this returns with an
 * error?  Review all error paths.
 *
 * XXXNP: What happens to the socket's fd reference if the operation is
 * successful, and how does that affect the socket's life cycle?
 */
int
icl_cxgbei_conn_handoff(struct icl_conn *ic, int fd)
{
        struct icl_cxgbei_conn *icc = ic_to_icc(ic);
        struct cxgbei_data *ci;
        struct find_ofld_adapter_rr fa;
        struct file *fp;
        struct socket *so;
        struct inpcb *inp;
        struct tcpcb *tp;
        struct toepcb *toep;
        cap_rights_t rights;
        u_int max_rx_pdu_len, max_tx_pdu_len;
        int error, max_iso_pdus;

        MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
        ICL_CONN_LOCK_ASSERT_NOT(ic);

        /*
         * Steal the socket from userland.
         */
        error = fget(curthread, fd,
            cap_rights_init_one(&rights, CAP_SOCK_CLIENT), &fp);
        if (error != 0)
                return (error);
        if (fp->f_type != DTYPE_SOCKET) {
                fdrop(fp, curthread);
                return (EINVAL);
        }
        so = fp->f_data;
        if (so->so_type != SOCK_STREAM ||
            so->so_proto->pr_protocol != IPPROTO_TCP) {
                fdrop(fp, curthread);
                return (EINVAL);
        }

        ICL_CONN_LOCK(ic);
        if (ic->ic_socket != NULL) {
                ICL_CONN_UNLOCK(ic);
                fdrop(fp, curthread);
                return (EBUSY);
        }
        ic->ic_disconnecting = false;
        ic->ic_socket = so;
        fp->f_ops = &badfileops;
        fp->f_data = NULL;
        fdrop(fp, curthread);
        ICL_CONN_UNLOCK(ic);

        /* Find the adapter offloading this socket. */
        fa.sc = NULL;
        fa.so = so;
        t4_iterate(find_offload_adapter, &fa);
        if (fa.sc == NULL)
                return (EINVAL);
        icc->sc = fa.sc;
        ci = icc->sc->iscsi_ulp_softc;

        max_rx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_recv_data_segment_length;
        max_tx_pdu_len = ISCSI_BHS_SIZE + ic->ic_max_send_data_segment_length;
        if (ic->ic_header_crc32c) {
                max_rx_pdu_len += ISCSI_HEADER_DIGEST_SIZE;
                max_tx_pdu_len += ISCSI_HEADER_DIGEST_SIZE;
        }
        if (ic->ic_data_crc32c) {
                max_rx_pdu_len += ISCSI_DATA_DIGEST_SIZE;
                max_tx_pdu_len += ISCSI_DATA_DIGEST_SIZE;
        }

        inp = sotoinpcb(so);
        INP_WLOCK(inp);
        tp = intotcpcb(inp);
        if (inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) {
                INP_WUNLOCK(inp);
                return (EBUSY);
        }

        /*
         * The socket could not have been "unoffloaded" if we got here.
         */
        MPASS(tp->t_flags & TF_TOE);
        MPASS(tp->tod != NULL);
        MPASS(tp->t_toe != NULL);
        toep = tp->t_toe;
        MPASS(toep->vi->adapter == icc->sc);

        if (ulp_mode(toep) != ULP_MODE_NONE) {
                INP_WUNLOCK(inp);
                return (EINVAL);
        }

        icc->toep = toep;
        icc->cwt = cxgbei_select_worker_thread(icc);

        icc->ulp_submode = 0;
        if (ic->ic_header_crc32c)
                icc->ulp_submode |= ULP_CRC_HEADER;
        if (ic->ic_data_crc32c)
                icc->ulp_submode |= ULP_CRC_DATA;
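
        /*
         * Check whether ISO (iSCSI segmentation offload) can be used:
         * the driver then submits a single oversized PDU and the
         * firmware slices it into PDUs carrying at most
         * ic_max_send_data_segment_length bytes of data each (see the
         * CXGBE_ISO handling in finalize_pdu()).  This needs a T5 or
         * newer chip and card memory, so it is not used on memfree
         * adapters.
         */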
        if (icc->sc->tt.iso && chip_id(icc->sc) >= CHELSIO_T5 &&
            !is_memfree(icc->sc)) {
                max_iso_pdus = CXGBEI_MAX_ISO_PAYLOAD / max_tx_pdu_len;
                ic->ic_hw_isomax = max_iso_pdus *
                    ic->ic_max_send_data_segment_length;
        } else
                max_iso_pdus = 1;

        toep->params.ulp_mode = ULP_MODE_ISCSI;
        toep->ulpcb = icc;

        send_iscsi_flowc_wr(icc->sc, toep,
            roundup(max_iso_pdus * max_tx_pdu_len, tp->t_maxseg));
        set_ulp_mode_iscsi(icc->sc, toep, icc->ulp_submode);
        INP_WUNLOCK(inp);

        return (icl_cxgbei_setsockopt(ic, so, max_tx_pdu_len, max_rx_pdu_len));
}

void
icl_cxgbei_conn_close(struct icl_conn *ic)
{
        struct icl_cxgbei_conn *icc = ic_to_icc(ic);
        struct icl_pdu *ip;
        struct socket *so;
        struct sockbuf *sb;
        struct inpcb *inp;
        struct toepcb *toep = icc->toep;

        MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);
        ICL_CONN_LOCK_ASSERT_NOT(ic);

        ICL_CONN_LOCK(ic);
        so = ic->ic_socket;
        if (ic->ic_disconnecting || so == NULL) {
                CTR4(KTR_CXGBE, "%s: icc %p (disconnecting = %d), so %p",
                    __func__, icc, ic->ic_disconnecting, so);
                ICL_CONN_UNLOCK(ic);
                return;
        }
        ic->ic_disconnecting = true;

#ifdef DIAGNOSTIC
        KASSERT(ic->ic_outstanding_pdus == 0,
            ("destroying session with %d outstanding PDUs",
            ic->ic_outstanding_pdus));
#endif
        ICL_CONN_UNLOCK(ic);

        CTR3(KTR_CXGBE, "%s: tid %d, icc %p", __func__, toep ? toep->tid : -1,
            icc);
        inp = sotoinpcb(so);
        sb = &so->so_rcv;
        INP_WLOCK(inp);
        if (toep != NULL) {     /* NULL if connection was never offloaded. */
                toep->ulpcb = NULL;

                /* Discard PDUs queued for TX. */
                mbufq_drain(&toep->ulp_pduq);

                /*
                 * Wait for the cwt threads to stop processing this
                 * connection.
                 */
                SOCKBUF_LOCK(sb);
                if (icc->rx_flags & RXF_ACTIVE) {
                        volatile u_int *p = &icc->rx_flags;

                        SOCKBUF_UNLOCK(sb);
                        INP_WUNLOCK(inp);

                        while (*p & RXF_ACTIVE)
                                pause("conclo", 1);

                        INP_WLOCK(inp);
                        SOCKBUF_LOCK(sb);
                }

                /*
                 * Discard received PDUs not passed to the iSCSI
                 * layer.
                 */
                while (!STAILQ_EMPTY(&icc->rcvd_pdus)) {
                        ip = STAILQ_FIRST(&icc->rcvd_pdus);
                        STAILQ_REMOVE_HEAD(&icc->rcvd_pdus, ip_next);
                        icl_cxgbei_pdu_done(ip, ENOTCONN);
                }
                SOCKBUF_UNLOCK(sb);

                /*
                 * Grab a reference to use when waiting for the final
                 * CPL to be received.  If toep->inp is NULL, then
                 * final_cpl_received() has already been called (e.g.
                 * due to the peer sending a RST).
                 */
                if (toep->inp != NULL) {
                        toep = hold_toepcb(toep);
                        toep->flags |= TPF_WAITING_FOR_FINAL;
                } else
                        toep = NULL;
        }
        INP_WUNLOCK(inp);

        ICL_CONN_LOCK(ic);
        ic->ic_socket = NULL;
        ICL_CONN_UNLOCK(ic);

        /*
         * XXXNP: we should send RST instead of FIN when PDUs held in various
         * queues were purged instead of delivered reliably but soabort isn't
         * really general purpose and wouldn't do the right thing here.
         */
        soclose(so);

        /*
         * Wait for the socket to fully close.  This ensures any
         * pending received data has been received (and in particular,
         * any data that would be received by DDP has been handled).
         * Callers assume that it is safe to free buffers for tasks
         * and transfers after this function returns.
         */
        if (toep != NULL) {
                struct mtx *lock = mtx_pool_find(mtxpool_sleep, toep);

                mtx_lock(lock);
                while ((toep->flags & TPF_WAITING_FOR_FINAL) != 0)
                        mtx_sleep(toep, lock, PSOCK, "conclo2", 0);
                mtx_unlock(lock);
                free_toepcb(toep);
        }
}
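
/*
 * The cmp table tracks in-flight DDP transfers, hashed by the tag (ITT
 * or TTT) carried in data PDUs; cxgbei_find_cmp() is used by the
 * data-receive code elsewhere in the driver to match an incoming PDU
 * to its transfer.
 */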
static void
cxgbei_insert_cmp(struct icl_cxgbei_conn *icc, struct cxgbei_cmp *cmp,
    uint32_t tt)
{
#ifdef INVARIANTS
        struct cxgbei_cmp *cmp2;
#endif

        cmp->tt = tt;

        mtx_lock(&icc->cmp_lock);
#ifdef INVARIANTS
        LIST_FOREACH(cmp2, &icc->cmp_table[TT_HASH(icc, tt)], link) {
                KASSERT(cmp2->tt != tt, ("%s: duplicate cmp", __func__));
        }
#endif
        LIST_INSERT_HEAD(&icc->cmp_table[TT_HASH(icc, tt)], cmp, link);
        mtx_unlock(&icc->cmp_lock);
}

struct cxgbei_cmp *
cxgbei_find_cmp(struct icl_cxgbei_conn *icc, uint32_t tt)
{
        struct cxgbei_cmp *cmp;

        mtx_lock(&icc->cmp_lock);
        LIST_FOREACH(cmp, &icc->cmp_table[TT_HASH(icc, tt)], link) {
                if (cmp->tt == tt)
                        break;
        }
        mtx_unlock(&icc->cmp_lock);
        return (cmp);
}

static void
cxgbei_rm_cmp(struct icl_cxgbei_conn *icc, struct cxgbei_cmp *cmp)
{
#ifdef INVARIANTS
        struct cxgbei_cmp *cmp2;
#endif

        mtx_lock(&icc->cmp_lock);

#ifdef INVARIANTS
        LIST_FOREACH(cmp2, &icc->cmp_table[TT_HASH(icc, cmp->tt)], link) {
                if (cmp2 == cmp)
                        goto found;
        }
        panic("%s: could not find cmp", __func__);
found:
#endif
        LIST_REMOVE(cmp, link);
        mtx_unlock(&icc->cmp_lock);
}
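
/*
 * Set up DDP for an initiator task: reserve page pods describing the
 * I/O buffer, queue the work requests that write them to card memory,
 * and return the page pod tag as the ITT so that Data-In payload
 * arriving with that tag is placed directly into the buffer.
 */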
int
icl_cxgbei_conn_task_setup(struct icl_conn *ic, struct icl_pdu *ip,
    struct ccb_scsiio *csio, uint32_t *ittp, void **arg)
{
        struct icl_cxgbei_conn *icc = ic_to_icc(ic);
        struct toepcb *toep = icc->toep;
        struct adapter *sc = icc->sc;
        struct cxgbei_data *ci = sc->iscsi_ulp_softc;
        struct ppod_region *pr = &ci->pr;
        struct cxgbei_ddp_state *ddp;
        struct ppod_reservation *prsv;
        struct inpcb *inp;
        struct mbufq mq;
        uint32_t itt;
        int rc = 0;

        ICL_CONN_LOCK_ASSERT(ic);

        /* This is for the offload driver's state.  Must not be set already. */
        MPASS(arg != NULL);
        MPASS(*arg == NULL);

        if (ic->ic_disconnecting || ic->ic_socket == NULL)
                return (ECONNRESET);

        if ((csio->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_IN ||
            csio->dxfer_len < ci->ddp_threshold) {
no_ddp:
                /*
                 * No DDP for this I/O.  Allocate an ITT (based on the one
                 * passed in) that cannot be a valid hardware DDP tag in the
                 * iSCSI region.
                 */
                itt = *ittp & M_PPOD_TAG;
                itt = V_PPOD_TAG(itt) | pr->pr_invalid_bit;
                *ittp = htobe32(itt);
                MPASS(*arg == NULL);    /* State is maintained for DDP only. */
                if (rc != 0)
                        counter_u64_add(
                            toep->ofld_rxq->rx_iscsi_ddp_setup_error, 1);
                return (0);
        }

        /*
         * Reserve resources for DDP, update the itt that should be used in the
         * PDU, and save DDP specific state for this I/O in *arg.
         */
        ddp = malloc(sizeof(*ddp), M_CXGBEI, M_NOWAIT | M_ZERO);
        if (ddp == NULL) {
                rc = ENOMEM;
                goto no_ddp;
        }
        prsv = &ddp->prsv;

        /* XXX add support for all CAM_DATA_ types */
        MPASS((csio->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_VADDR);
        rc = t4_alloc_page_pods_for_buf(pr, (vm_offset_t)csio->data_ptr,
            csio->dxfer_len, prsv);
        if (rc != 0) {
                free(ddp, M_CXGBEI);
                goto no_ddp;
        }

        mbufq_init(&mq, INT_MAX);
        rc = t4_write_page_pods_for_buf(sc, toep, prsv,
            (vm_offset_t)csio->data_ptr, csio->dxfer_len, &mq);
        if (__predict_false(rc != 0)) {
                mbufq_drain(&mq);
                t4_free_page_pods(prsv);
                free(ddp, M_CXGBEI);
                goto no_ddp;
        }

        /*
         * Do not get inp from toep->inp as the toepcb might have
         * detached already.
         */
        inp = sotoinpcb(ic->ic_socket);
        INP_WLOCK(inp);
        if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) != 0) {
                INP_WUNLOCK(inp);
                mbufq_drain(&mq);
                t4_free_page_pods(prsv);
                free(ddp, M_CXGBEI);
                return (ECONNRESET);
        }
        mbufq_concat(&toep->ulp_pduq, &mq);
        INP_WUNLOCK(inp);

        ddp->cmp.last_datasn = -1;
        cxgbei_insert_cmp(icc, &ddp->cmp, prsv->prsv_tag);
        *ittp = htobe32(prsv->prsv_tag);
        *arg = prsv;
        counter_u64_add(toep->ofld_rxq->rx_iscsi_ddp_setup_ok, 1);
        return (0);
}

void
icl_cxgbei_conn_task_done(struct icl_conn *ic, void *arg)
{

        if (arg != NULL) {
                struct cxgbei_ddp_state *ddp = arg;

                cxgbei_rm_cmp(ic_to_icc(ic), &ddp->cmp);
                t4_free_page_pods(&ddp->prsv);
                free(ddp, M_CXGBEI);
        }
}
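
/*
 * A scatter/gather list is usable for DDP only if each entry starts on
 * a page boundary and is a whole number of pages long, except for the
 * last entry, which merely needs a 4-byte aligned address.
 */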
static inline bool
ddp_sgl_check(struct ctl_sg_entry *sg, int entries, int xferlen)
{
        int total_len = 0;

        MPASS(entries > 0);
        if (((vm_offset_t)sg[--entries].addr & 3U) != 0)
                return (false);

        total_len += sg[entries].len;

        while (--entries >= 0) {
                if (((vm_offset_t)sg[entries].addr & PAGE_MASK) != 0 ||
                    (sg[entries].len % PAGE_SIZE) != 0)
                        return (false);
                total_len += sg[entries].len;
        }

        MPASS(total_len == xferlen);
        return (true);
}

/* XXXNP: PDU should be passed in as parameter, like on the initiator. */
#define io_to_request_pdu(io)   ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND].ptr)
#define io_to_ddp_state(io)     ((io)->io_hdr.ctl_private[CTL_PRIV_FRONTEND2].ptr)

int
icl_cxgbei_conn_transfer_setup(struct icl_conn *ic, union ctl_io *io,
    uint32_t *tttp, void **arg)
{
        struct icl_cxgbei_conn *icc = ic_to_icc(ic);
        struct toepcb *toep = icc->toep;
        struct ctl_scsiio *ctsio = &io->scsiio;
        struct adapter *sc = icc->sc;
        struct cxgbei_data *ci = sc->iscsi_ulp_softc;
        struct ppod_region *pr = &ci->pr;
        struct cxgbei_ddp_state *ddp;
        struct ppod_reservation *prsv;
        struct ctl_sg_entry *sgl, sg_entry;
        struct inpcb *inp;
        struct mbufq mq;
        int sg_entries = ctsio->kern_sg_entries;
        uint32_t ttt;
        int xferlen, rc = 0, alias;

        /* This is for the offload driver's state.  Must not be set already. */
        MPASS(arg != NULL);
        MPASS(*arg == NULL);

        if (ctsio->ext_data_filled == 0) {
                int first_burst;
                struct icl_pdu *ip = io_to_request_pdu(io);
#ifdef INVARIANTS
                struct icl_cxgbei_pdu *icp = ip_to_icp(ip);

                MPASS(icp->icp_signature == CXGBEI_PDU_SIGNATURE);
                MPASS(ic == ip->ip_conn);
                MPASS(ip->ip_bhs_mbuf != NULL);
#endif
                first_burst = icl_pdu_data_segment_length(ip);

                /*
                 * Note that ICL calls conn_transfer_setup even if the first
                 * burst had everything and there's nothing left to transfer.
                 *
                 * NB: The CTL frontend might have provided a buffer
                 * whose length (kern_data_len) is smaller than the
                 * FirstBurstLength of unsolicited data.  Treat those
                 * as an empty transfer.
                 */
                xferlen = ctsio->kern_data_len;
                if (xferlen < first_burst ||
                    xferlen - first_burst < ci->ddp_threshold) {
no_ddp:
                        /*
                         * No DDP for this transfer.  Allocate a TTT (based on
                         * the one passed in) that cannot be a valid hardware
                         * DDP tag in the iSCSI region.
                         */
                        ttt = *tttp & M_PPOD_TAG;
                        ttt = V_PPOD_TAG(ttt) | pr->pr_invalid_bit;
                        *tttp = htobe32(ttt);
                        MPASS(io_to_ddp_state(io) == NULL);
                        if (rc != 0)
                                counter_u64_add(
                                    toep->ofld_rxq->rx_iscsi_ddp_setup_error, 1);
                        return (0);
                }

                if (sg_entries == 0) {
                        sgl = &sg_entry;
                        sgl->len = xferlen;
                        sgl->addr = (void *)ctsio->kern_data_ptr;
                        sg_entries = 1;
                } else
                        sgl = (void *)ctsio->kern_data_ptr;

                if (!ddp_sgl_check(sgl, sg_entries, xferlen))
                        goto no_ddp;

                /*
                 * Reserve resources for DDP, update the ttt that should be used
                 * in the PDU, and save DDP specific state for this I/O.
                 */
                MPASS(io_to_ddp_state(io) == NULL);
                ddp = malloc(sizeof(*ddp), M_CXGBEI, M_NOWAIT | M_ZERO);
                if (ddp == NULL) {
                        rc = ENOMEM;
                        goto no_ddp;
                }
                prsv = &ddp->prsv;

                rc = t4_alloc_page_pods_for_sgl(pr, sgl, sg_entries, prsv);
                if (rc != 0) {
                        free(ddp, M_CXGBEI);
                        goto no_ddp;
                }

                mbufq_init(&mq, INT_MAX);
                rc = t4_write_page_pods_for_sgl(sc, toep, prsv, sgl, sg_entries,
                    xferlen, &mq);
                if (__predict_false(rc != 0)) {
                        mbufq_drain(&mq);
                        t4_free_page_pods(prsv);
                        free(ddp, M_CXGBEI);
                        goto no_ddp;
                }

                /*
                 * Do not get inp from toep->inp as the toepcb might
                 * have detached already.
                 */
                ICL_CONN_LOCK(ic);
                if (ic->ic_disconnecting || ic->ic_socket == NULL) {
                        ICL_CONN_UNLOCK(ic);
                        mbufq_drain(&mq);
                        t4_free_page_pods(prsv);
                        free(ddp, M_CXGBEI);
                        return (ECONNRESET);
                }
                inp = sotoinpcb(ic->ic_socket);
                INP_WLOCK(inp);
                ICL_CONN_UNLOCK(ic);
                if ((inp->inp_flags & (INP_DROPPED | INP_TIMEWAIT)) != 0) {
                        INP_WUNLOCK(inp);
                        mbufq_drain(&mq);
                        t4_free_page_pods(prsv);
                        free(ddp, M_CXGBEI);
                        return (ECONNRESET);
                }
                mbufq_concat(&toep->ulp_pduq, &mq);
                INP_WUNLOCK(inp);

                ddp->cmp.next_buffer_offset = ctsio->kern_rel_offset +
                    first_burst;
                ddp->cmp.last_datasn = -1;
                cxgbei_insert_cmp(icc, &ddp->cmp, prsv->prsv_tag);
                *tttp = htobe32(prsv->prsv_tag);
                io_to_ddp_state(io) = ddp;
                *arg = ctsio;
                counter_u64_add(toep->ofld_rxq->rx_iscsi_ddp_setup_ok, 1);
                return (0);
        }

        /*
         * In the middle of an I/O.  A non-NULL page pod reservation indicates
         * that a DDP buffer is being used for the I/O.
         */
        ddp = io_to_ddp_state(ctsio);
        if (ddp == NULL)
                goto no_ddp;
        prsv = &ddp->prsv;
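
        /*
         * This transfer continues an earlier one that already has page
         * pods set up.  Reuse them, but bump the alias bits embedded
         * in the tag so the new TTT is distinct from the previous one
         * while still resolving to the same page pods.
         */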
        alias = (prsv->prsv_tag & pr->pr_alias_mask) >> pr->pr_alias_shift;
        alias++;
        prsv->prsv_tag &= ~pr->pr_alias_mask;
        prsv->prsv_tag |= alias << pr->pr_alias_shift & pr->pr_alias_mask;

        ddp->cmp.last_datasn = -1;
        cxgbei_insert_cmp(icc, &ddp->cmp, prsv->prsv_tag);
        *tttp = htobe32(prsv->prsv_tag);
        *arg = ctsio;

        return (0);
}

void
icl_cxgbei_conn_transfer_done(struct icl_conn *ic, void *arg)
{
        struct ctl_scsiio *ctsio = arg;

        if (ctsio != NULL) {
                struct cxgbei_ddp_state *ddp;

                ddp = io_to_ddp_state(ctsio);
                MPASS(ddp != NULL);

                cxgbei_rm_cmp(ic_to_icc(ic), &ddp->cmp);
                if (ctsio->kern_data_len == ctsio->ext_data_filled ||
                    ic->ic_disconnecting) {
                        t4_free_page_pods(&ddp->prsv);
                        free(ddp, M_CXGBEI);
                        io_to_ddp_state(ctsio) = NULL;
                }
        }
}

static void
cxgbei_limits(struct adapter *sc, void *arg)
{
        struct icl_drv_limits *idl = arg;
        struct cxgbei_data *ci;
        int max_dsl;

        if (begin_synchronized_op(sc, NULL, HOLD_LOCK, "t4lims") != 0)
                return;

        if (uld_active(sc, ULD_ISCSI)) {
                ci = sc->iscsi_ulp_softc;
                MPASS(ci != NULL);

                max_dsl = ci->max_rx_data_len;
                if (idl->idl_max_recv_data_segment_length > max_dsl)
                        idl->idl_max_recv_data_segment_length = max_dsl;

                max_dsl = ci->max_tx_data_len;
                if (idl->idl_max_send_data_segment_length > max_dsl)
                        idl->idl_max_send_data_segment_length = max_dsl;
        }

        end_synchronized_op(sc, LOCK_HELD);
}

static int
icl_cxgbei_limits(struct icl_drv_limits *idl)
{

        /* Maximum allowed by the RFC.  cxgbei_limits will clip them. */
        idl->idl_max_recv_data_segment_length = (1 << 24) - 1;
        idl->idl_max_send_data_segment_length = (1 << 24) - 1;

        /* These are somewhat arbitrary. */
        idl->idl_max_burst_length = max_burst_length;
        idl->idl_first_burst_length = first_burst_length;

        t4_iterate(cxgbei_limits, idl);

        return (0);
}

int
icl_cxgbei_mod_load(void)
{
        int rc;

        refcount_init(&icl_cxgbei_ncons, 0);

        rc = icl_register("cxgbei", false, -100, icl_cxgbei_limits,
            icl_cxgbei_new_conn);

        return (rc);
}

int
icl_cxgbei_mod_unload(void)
{

        if (icl_cxgbei_ncons != 0)
                return (EBUSY);

        icl_unregister("cxgbei", false);

        return (0);
}
#endif