/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <netinet/cc/cc.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"
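
/*
 * Connection setup (active open) for the TOE.  t4_connect() builds and sends
 * the CPL active open request to the chip; the hardware's replies come back
 * to do_act_open_rpl() (open failed) and do_act_establish() (open succeeded).
 */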

/*
 * Active open succeeded.
 */
static int
do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_establish *cpl = (const void *)(rss + 1);
	u_int tid = GET_TID(cpl);
	u_int atid = G_TID_TID(ntohl(cpl->tos_atid));
	struct toepcb *toep = lookup_atid(sc, atid);
	struct inpcb *inp = toep->inp;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, tid %u", __func__, atid, tid);
	free_atid(sc, atid);

	CURVNET_SET(toep->vnet);
	INP_WLOCK(inp);
	toep->tid = tid;
	insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
	if (inp->inp_flags & INP_DROPPED) {

		/* socket closed by the kernel before hw told us it connected */

		send_flowc_wr(toep, NULL);
		send_reset(sc, toep, be32toh(cpl->snd_isn));
		goto done;
	}

	make_established(toep, cpl->snd_isn, cpl->rcv_isn, cpl->tcp_opt);

	if (toep->ulp_mode == ULP_MODE_TLS)
		tls_establish(toep);

done:
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}

void
act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
{
	struct toepcb *toep = lookup_atid(sc, atid);
	struct inpcb *inp = toep->inp;
	struct toedev *tod = &toep->td->tod;
	struct epoch_tracker et;

	free_atid(sc, atid);
	toep->tid = -1;

	CURVNET_SET(toep->vnet);
	if (status != EAGAIN)
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
	INP_WLOCK(inp);
	toe_connect_failed(tod, inp, status);
	final_cpl_received(toep);	/* unlocks inp */
	if (status != EAGAIN)
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
	CURVNET_RESTORE();
}

/*
 * Active open failed.
 */
static int
do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct toepcb *toep = lookup_atid(sc, atid);
	int rc;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, status %u ", __func__, atid, status);

	/* Ignore negative advice */
	if (negative_advice(status))
		return (0);

	if (status && act_open_has_tid(status))
		release_tid(sc, GET_TID(cpl), toep->ctrlq);

	rc = act_open_rpl_status_to_errno(status);
	act_open_failure_cleanup(sc, atid, rc);

	return (0);
}

/*
 * Options2 for active open.
 */
static uint32_t
calc_opt2a(struct socket *so, struct toepcb *toep,
    const struct offload_settings *s)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct port_info *pi = toep->vi->pi;
	struct adapter *sc = pi->adapter;
	uint32_t opt2 = 0;

	/*
	 * rx flow control, rx coalesce, congestion control, and tx pace are
	 * all explicitly set by the driver.  On T5+ the ISS is also set by
	 * the driver to the value picked by the kernel.
	 */
	if (is_t4(sc)) {
		opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
		opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
	} else {
		opt2 |= F_T5_OPT_2_VALID;	/* all 4 valid */
		opt2 |= F_T5_ISS;		/* ISS provided in CPL */
	}

	if (s->sack > 0 || (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
		opt2 |= F_SACK_EN;

	if (s->tstamp > 0 || (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
		opt2 |= F_TSTAMPS_EN;

	if (tp->t_flags & TF_REQ_SCALE)
		opt2 |= F_WND_SCALE_EN;

	if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
		opt2 |= F_CCTRL_ECN;

	/* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */

	opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);

	/* These defaults are subject to ULP specific fixups later. */
	opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);

	opt2 |= V_PACE(0);
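
	/*
	 * Congestion control selection, in decreasing order of precedence:
	 * the per-connection offload policy, the adapter-wide
	 * tt.cong_algorithm setting, and finally the algorithm the host
	 * stack chose for this connection (with newreno as the fallback if
	 * the hardware doesn't support it).
	 */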
	if (s->cong_algo >= 0)
		opt2 |= V_CONG_CNTRL(s->cong_algo);
	else if (sc->tt.cong_algorithm >= 0)
		opt2 |= V_CONG_CNTRL(sc->tt.cong_algorithm & M_CONG_CNTRL);
	else {
		struct cc_algo *cc = CC_ALGO(tp);

		if (strcasecmp(cc->name, "reno") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_RENO);
		else if (strcasecmp(cc->name, "tahoe") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
		else if (strcasecmp(cc->name, "newreno") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
		else if (strcasecmp(cc->name, "highspeed") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_HIGHSPEED);
		else {
			/*
			 * Use newreno in case the algorithm selected by the
			 * host stack is not supported by the hardware.
			 */
			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
		}
	}

	if (s->rx_coalesce > 0 || (s->rx_coalesce < 0 && sc->tt.rx_coalesce))
		opt2 |= V_RX_COALESCE(M_RX_COALESCE);

	/* Note that ofld_rxq is already set according to s->rxq. */
	opt2 |= F_RSS_QUEUE_VALID;
	opt2 |= V_RSS_QUEUE(toep->ofld_rxq->iq.abs_id);

#ifdef USE_DDP_RX_FLOW_CONTROL
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		opt2 |= F_RX_FC_DDP;
#endif

	if (toep->ulp_mode == ULP_MODE_TLS) {
		opt2 &= ~V_RX_COALESCE(M_RX_COALESCE);
		opt2 |= F_RX_FC_DISABLE;
	}

	return (htobe32(opt2));
}

void
t4_init_connect_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl,
	    CPL_COOKIE_TOM);
}

void
t4_uninit_connect_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_ACT_ESTABLISH, NULL);
	t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, NULL, CPL_COOKIE_TOM);
}
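
/*
 * Give up on offloading the connection: record which line bailed out (used
 * only in the CTR trace on the failure path), set the error code, and jump
 * to the cleanup label in t4_connect() where partially allocated resources
 * are released and the error is returned, leaving the connection to be
 * handled by the host stack instead.
 */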
#define DONT_OFFLOAD_ACTIVE_OPEN(x)	do { \
	reason = __LINE__; \
	rc = (x); \
	goto failed; \
} while (0)

static inline int
act_open_cpl_size(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			sizeof (struct cpl_act_open_req),
			sizeof (struct cpl_act_open_req6)
		},
		{
			sizeof (struct cpl_t5_act_open_req),
			sizeof (struct cpl_t5_act_open_req6)
		},
		{
			sizeof (struct cpl_t6_act_open_req),
			sizeof (struct cpl_t6_act_open_req6)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}

/*
 * active open (soconnect).
 *
 * State of affairs on entry:
 * soisconnecting (so_state |= SS_ISCONNECTING)
 * tcbinfo not locked (This has changed - used to be WLOCKed)
 * inp WLOCKed
 * tp->t_state = TCPS_SYN_SENT
 * rtalloc1, RT_UNLOCK on rt.
 */
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
	struct adapter *sc = tod->tod_softc;
	struct tom_data *td = tod_td(tod);
	struct toepcb *toep = NULL;
	struct wrqe *wr = NULL;
	struct ifnet *rt_ifp = rt->rt_ifp;
	struct vi_info *vi;
	int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	int reason;
	struct offload_settings settings;
	uint16_t vid = 0xffff;

	INP_WLOCK_ASSERT(inp);
	KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
	    ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

	if (rt_ifp->if_type == IFT_ETHER)
		vi = rt_ifp->if_softc;
	else if (rt_ifp->if_type == IFT_L2VLAN) {
		struct ifnet *ifp = VLAN_COOKIE(rt_ifp);

		vi = ifp->if_softc;
		VLAN_TAG(rt_ifp, &vid);
	} else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS);	/* XXX: implement lagg+TOE */
	else
		DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

	rw_rlock(&sc->policy_lock);
	settings = *lookup_offload_policy(sc, OPEN_TYPE_ACTIVE, NULL, vid, inp);
	rw_runlock(&sc->policy_lock);
	if (!settings.offload)
		DONT_OFFLOAD_ACTIVE_OPEN(EPERM);

	if (settings.txq >= 0 && settings.txq < vi->nofldtxq)
		txqid = settings.txq;
	else
		txqid = arc4random() % vi->nofldtxq;
	txqid += vi->first_ofld_txq;
	if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq)
		rxqid = settings.rxq;
	else
		rxqid = arc4random() % vi->nofldrxq;
	rxqid += vi->first_ofld_rxq;

	toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO);
	if (toep == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->tid = alloc_atid(sc, toep);
	if (toep->tid < 0)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
	    rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
	if (toep->l2te == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	isipv6 = nam->sa_family == AF_INET6;
	wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
	if (wr == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->vnet = so->so_vnet;
	set_ulp_mode(toep, select_ulp_mode(so, sc, &settings));
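
	/*
	 * The initial receive window, expressed in 1KB units and capped at
	 * the largest value that fits in opt0's RCV_BUFSIZ field.
	 */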
	SOCKBUF_LOCK(&so->so_rcv);
	/* opt0 rcv_bufsiz initially, assumes its normal meaning later */
	toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * The kernel sets request_r_scale based on sb_max whereas we need to
	 * take hardware's MAX_RCV_WND into account too.  This is normally a
	 * no-op as MAX_RCV_WND is much larger than the default sb_max.
	 */
	if (tp->t_flags & TF_REQ_SCALE)
		rscale = tp->request_r_scale = select_rcv_wscale();
	else
		rscale = 0;
	mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings);
	qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
	    V_TID_COOKIE(CPL_COOKIE_TOM);

	if (isipv6) {
		struct cpl_act_open_req6 *cpl = wrtod(wr);
		struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

		if ((inp->inp_vflag & INP_IPV6) == 0)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

		toep->ce = hold_lip(td, &inp->in6p_laddr, NULL);
		if (toep->ce == NULL)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
		    qid_atid));
		cpl->local_port = inp->inp_lport;
		cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
		cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
		cpl->peer_port = inp->inp_fport;
		cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
		cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	} else {
		struct cpl_act_open_req *cpl = wrtod(wr);
		struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
		    qid_atid));
		inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
		    &cpl->peer_ip, &cpl->peer_port);
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	}

	CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
	    toep->tid, tcpstates[tp->t_state], toep, inp);

	offload_socket(so, toep);
	rc = t4_l2t_send(sc, wr, toep->l2te);
	if (rc == 0) {
		toep->flags |= TPF_CPL_PENDING;
		return (0);
	}

	undo_offload_socket(so);
	reason = __LINE__;
failed:
	CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d", __func__, reason, rc);

	if (wr)
		free_wrqe(wr);

	if (toep) {
		if (toep->tid >= 0)
			free_atid(sc, toep->tid);
		if (toep->l2te)
			t4_l2t_release(toep->l2te);
		if (toep->ce)
			release_lip(td, toep->ce);
		free_toepcb(toep);
	}

	return (rc);
}
#endif