/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <netinet/cc/cc.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "t4_clip.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

/*
 * Active open succeeded.
 */
static int
do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_act_establish *cpl = (const void *)(rss + 1);
        u_int tid = GET_TID(cpl);
        u_int atid = G_TID_TID(ntohl(cpl->tos_atid));
        struct toepcb *toep = lookup_atid(sc, atid);
        struct inpcb *inp = toep->inp;

        KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
        KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

        CTR3(KTR_CXGBE, "%s: atid %u, tid %u", __func__, atid, tid);
        free_atid(sc, atid);

        CURVNET_SET(toep->vnet);
        INP_WLOCK(inp);
        toep->tid = tid;
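        /*
         * insert_tid() registers the toepcb under its hardware tid.  The
         * last argument is the number of TID table entries consumed; an
         * IPv6 connection appears to use two entries while IPv4 uses one.
         */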
        insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
        if (inp->inp_flags & INP_DROPPED) {

                /* socket closed by the kernel before hw told us it connected */

                send_flowc_wr(toep, NULL);
                send_reset(sc, toep, be32toh(cpl->snd_isn));
                goto done;
        }

        make_established(toep, cpl->snd_isn, cpl->rcv_isn, cpl->tcp_opt);

        if (toep->ulp_mode == ULP_MODE_TLS)
                tls_establish(toep);

done:
        INP_WUNLOCK(inp);
        CURVNET_RESTORE();
        return (0);
}

void
act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
{
        struct toepcb *toep = lookup_atid(sc, atid);
        struct inpcb *inp = toep->inp;
        struct toedev *tod = &toep->td->tod;
        struct epoch_tracker et;

        free_atid(sc, atid);
        toep->tid = -1;

        CURVNET_SET(toep->vnet);
        if (status != EAGAIN)
                INP_INFO_RLOCK_ET(&V_tcbinfo, et);
        INP_WLOCK(inp);
        toe_connect_failed(tod, inp, status);
        final_cpl_received(toep);       /* unlocks inp */
        if (status != EAGAIN)
                INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
        CURVNET_RESTORE();
}

/*
 * Active open failed.
 */
static int
do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
        struct adapter *sc = iq->adapter;
        const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
        u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
        u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
        struct toepcb *toep = lookup_atid(sc, atid);
        int rc;

        KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
        KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

        CTR3(KTR_CXGBE, "%s: atid %u, status %u ", __func__, atid, status);

        /* Ignore negative advice */
        if (negative_advice(status))
                return (0);

        if (status && act_open_has_tid(status))
                release_tid(sc, GET_TID(cpl), toep->ctrlq);

        rc = act_open_rpl_status_to_errno(status);
        act_open_failure_cleanup(sc, atid, rc);

        return (0);
}

/*
 * Options2 for active open.
 */
static uint32_t
calc_opt2a(struct socket *so, struct toepcb *toep,
    const struct offload_settings *s)
{
        struct tcpcb *tp = so_sototcpcb(so);
        struct port_info *pi = toep->vi->pi;
        struct adapter *sc = pi->adapter;
        uint32_t opt2 = 0;

        /*
         * rx flow control, rx coalesce, congestion control, and tx pace are
         * all explicitly set by the driver.  On T5+ the ISS is also set by
         * the driver to the value picked by the kernel.
         */
        if (is_t4(sc)) {
                opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
                opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
        } else {
                opt2 |= F_T5_OPT_2_VALID;       /* all 4 valid */
                opt2 |= F_T5_ISS;               /* ISS provided in CPL */
        }
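
        /*
         * The offload policy fields checked below follow a tri-state
         * convention: a value > 0 forces the feature on, 0 forces it off,
         * and < 0 defers to what the host stack or driver already decided
         * (e.g. SACK is enabled only if the tcpcb has TF_SACK_PERMIT set).
         */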
        if (s->sack > 0 || (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
                opt2 |= F_SACK_EN;

        if (s->tstamp > 0 || (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
                opt2 |= F_TSTAMPS_EN;

        if (tp->t_flags & TF_REQ_SCALE)
                opt2 |= F_WND_SCALE_EN;

        if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
                opt2 |= F_CCTRL_ECN;

        /* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */

        opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);

        /* These defaults are subject to ULP specific fixups later. */
        opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);

        opt2 |= V_PACE(0);

        if (s->cong_algo >= 0)
                opt2 |= V_CONG_CNTRL(s->cong_algo);
        else if (sc->tt.cong_algorithm >= 0)
                opt2 |= V_CONG_CNTRL(sc->tt.cong_algorithm & M_CONG_CNTRL);
        else {
                struct cc_algo *cc = CC_ALGO(tp);

                if (strcasecmp(cc->name, "reno") == 0)
                        opt2 |= V_CONG_CNTRL(CONG_ALG_RENO);
                else if (strcasecmp(cc->name, "tahoe") == 0)
                        opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
                else if (strcasecmp(cc->name, "newreno") == 0)
                        opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
                else if (strcasecmp(cc->name, "highspeed") == 0)
                        opt2 |= V_CONG_CNTRL(CONG_ALG_HIGHSPEED);
                else {
                        /*
                         * Use newreno in case the algorithm selected by the
                         * host stack is not supported by the hardware.
                         */
                        opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
                }
        }

        if (s->rx_coalesce > 0 || (s->rx_coalesce < 0 && sc->tt.rx_coalesce))
                opt2 |= V_RX_COALESCE(M_RX_COALESCE);

        /* Note that ofld_rxq is already set according to s->rxq. */
        opt2 |= F_RSS_QUEUE_VALID;
        opt2 |= V_RSS_QUEUE(toep->ofld_rxq->iq.abs_id);

#ifdef USE_DDP_RX_FLOW_CONTROL
        if (toep->ulp_mode == ULP_MODE_TCPDDP)
                opt2 |= F_RX_FC_DDP;
#endif

        if (toep->ulp_mode == ULP_MODE_TLS) {
                opt2 &= ~V_RX_COALESCE(M_RX_COALESCE);
                opt2 |= F_RX_FC_DISABLE;
        }

        return (htobe32(opt2));
}

void
t4_init_connect_cpl_handlers(void)
{

        t4_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
        t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl,
            CPL_COOKIE_TOM);
}

void
t4_uninit_connect_cpl_handlers(void)
{

        t4_register_cpl_handler(CPL_ACT_ESTABLISH, NULL);
        t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, NULL, CPL_COOKIE_TOM);
}

#define DONT_OFFLOAD_ACTIVE_OPEN(x)     do { \
        reason = __LINE__; \
        rc = (x); \
        goto failed; \
} while (0)
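
/*
 * Size of the CPL needed for an active open request, indexed by chip
 * generation and address family.  The index is clamped at the T6 entry, so
 * chips newer than T6 are assumed to use the T6 request layout.
 */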
static inline int
act_open_cpl_size(struct adapter *sc, int isipv6)
{
        int idx;
        static const int sz_table[3][2] = {
                {
                        sizeof (struct cpl_act_open_req),
                        sizeof (struct cpl_act_open_req6)
                },
                {
                        sizeof (struct cpl_t5_act_open_req),
                        sizeof (struct cpl_t5_act_open_req6)
                },
                {
                        sizeof (struct cpl_t6_act_open_req),
                        sizeof (struct cpl_t6_act_open_req6)
                },
        };

        MPASS(chip_id(sc) >= CHELSIO_T4);
        idx = min(chip_id(sc) - CHELSIO_T4, 2);

        return (sz_table[idx][!!isipv6]);
}

/*
 * Active open (soconnect).
 *
 * State of affairs on entry:
 * soisconnecting (so_state |= SS_ISCONNECTING)
 * tcbinfo not locked (this has changed - it used to be WLOCKed)
 * inp WLOCKed
 * tp->t_state = TCPS_SYN_SENT
 * rtalloc1() has been called and RT_UNLOCK() has been done on rt.
 */
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
        struct adapter *sc = tod->tod_softc;
        struct toepcb *toep = NULL;
        struct wrqe *wr = NULL;
        struct ifnet *rt_ifp = rt->rt_ifp;
        struct vi_info *vi;
        int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid;
        struct inpcb *inp = sotoinpcb(so);
        struct tcpcb *tp = intotcpcb(inp);
        int reason;
        struct offload_settings settings;
        uint16_t vid = 0xfff, pcp = 0;

        INP_WLOCK_ASSERT(inp);
        KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
            ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

        if (rt_ifp->if_type == IFT_ETHER)
                vi = rt_ifp->if_softc;
        else if (rt_ifp->if_type == IFT_L2VLAN) {
                struct ifnet *ifp = VLAN_TRUNKDEV(rt_ifp);

                vi = ifp->if_softc;
                VLAN_TAG(rt_ifp, &vid);
                VLAN_PCP(rt_ifp, &pcp);
        } else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
                DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS);       /* XXX: implement lagg+TOE */
        else
                DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

        rw_rlock(&sc->policy_lock);
        settings = *lookup_offload_policy(sc, OPEN_TYPE_ACTIVE, NULL,
            EVL_MAKETAG(vid, pcp, 0), inp);
        rw_runlock(&sc->policy_lock);
        if (!settings.offload)
                DONT_OFFLOAD_ACTIVE_OPEN(EPERM);

        if (settings.txq >= 0 && settings.txq < vi->nofldtxq)
                txqid = settings.txq;
        else
                txqid = arc4random() % vi->nofldtxq;
        txqid += vi->first_ofld_txq;
        if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq)
                rxqid = settings.rxq;
        else
                rxqid = arc4random() % vi->nofldrxq;
        rxqid += vi->first_ofld_rxq;

        toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO);
        if (toep == NULL)
                DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

        toep->tid = alloc_atid(sc, toep);
        if (toep->tid < 0)
                DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

        toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
            rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
        if (toep->l2te == NULL)
                DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

        isipv6 = nam->sa_family == AF_INET6;
        wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
        if (wr == NULL)
                DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

        toep->vnet = so->so_vnet;
        set_ulp_mode(toep, select_ulp_mode(so, sc, &settings));
        SOCKBUF_LOCK(&so->so_rcv);
        /* opt0 rcv_bufsiz initially, assumes its normal meaning later */
        toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
        SOCKBUF_UNLOCK(&so->so_rcv);
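
        /*
         * rx_credits above is presumably in 1KB units (hence the >> 10) and
         * is clamped to M_RCV_BUFSIZ, the largest value that the RCV_BUFSIZ
         * field of opt0 can hold.
         */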

        /*
         * The kernel sets request_r_scale based on sb_max whereas we need to
         * take hardware's MAX_RCV_WND into account too.  This is normally a
         * no-op as MAX_RCV_WND is much larger than the default sb_max.
         */
        if (tp->t_flags & TF_REQ_SCALE)
                rscale = tp->request_r_scale = select_rcv_wscale();
        else
                rscale = 0;
        mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings);
        qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
            V_TID_COOKIE(CPL_COOKIE_TOM);

        if (isipv6) {
                struct cpl_act_open_req6 *cpl = wrtod(wr);
                struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
                struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

                if ((inp->inp_vflag & INP_IPV6) == 0)
                        DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

                toep->ce = t4_hold_lip(sc, &inp->in6p_laddr, NULL);
                if (toep->ce == NULL)
                        DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

                switch (chip_id(sc)) {
                case CHELSIO_T4:
                        INIT_TP_WR(cpl, 0);
                        cpl->params = select_ntuple(vi, toep->l2te);
                        break;
                case CHELSIO_T5:
                        INIT_TP_WR(cpl5, 0);
                        cpl5->iss = htobe32(tp->iss);
                        cpl5->params = select_ntuple(vi, toep->l2te);
                        break;
                case CHELSIO_T6:
                default:
                        INIT_TP_WR(cpl6, 0);
                        cpl6->iss = htobe32(tp->iss);
                        cpl6->params = select_ntuple(vi, toep->l2te);
                        break;
                }
                OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
                    qid_atid));
                cpl->local_port = inp->inp_lport;
                cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
                cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
                cpl->peer_port = inp->inp_fport;
                cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
                cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
                cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
                    toep->rx_credits, toep->ulp_mode, &settings);
                cpl->opt2 = calc_opt2a(so, toep, &settings);
        } else {
                struct cpl_act_open_req *cpl = wrtod(wr);
                struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
                struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

                switch (chip_id(sc)) {
                case CHELSIO_T4:
                        INIT_TP_WR(cpl, 0);
                        cpl->params = select_ntuple(vi, toep->l2te);
                        break;
                case CHELSIO_T5:
                        INIT_TP_WR(cpl5, 0);
                        cpl5->iss = htobe32(tp->iss);
                        cpl5->params = select_ntuple(vi, toep->l2te);
                        break;
                case CHELSIO_T6:
                default:
                        INIT_TP_WR(cpl6, 0);
                        cpl6->iss = htobe32(tp->iss);
                        cpl6->params = select_ntuple(vi, toep->l2te);
                        break;
                }
                OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
                    qid_atid));
                inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
                    &cpl->peer_ip, &cpl->peer_port);
                cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
                    toep->rx_credits, toep->ulp_mode, &settings);
                cpl->opt2 = calc_opt2a(so, toep, &settings);
        }

        CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
            toep->tid, tcpstates[tp->t_state], toep, inp);

        offload_socket(so, toep);
        rc = t4_l2t_send(sc, wr, toep->l2te);
        if (rc == 0) {
                toep->flags |= TPF_CPL_PENDING;
                return (0);
        }

        undo_offload_socket(so);
        reason = __LINE__;
failed:
        CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d", __func__, reason, rc);

        if (wr)
                free_wrqe(wr);

        if (toep) {
                if (toep->tid >= 0)
                        free_atid(sc, toep->tid);
                if (toep->l2te)
                        t4_l2t_release(toep->l2te);
                if (toep->ce)
                        t4_release_lip(sc, toep->ce);
                free_toepcb(toep);
        }

        return (rc);
}
#endif