/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2012 Chelsio Communications, Inc.
 * All rights reserved.
 * Written by: Navdeep Parhar <np@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"
#include "opt_inet6.h"

#ifdef TCP_OFFLOAD
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#include <net/route.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#define TCPSTATES
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
#include <netinet/cc/cc.h>

#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "common/t4_regs_values.h"
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"

/*
 * Active open succeeded.
 */
static int
do_act_establish(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_establish *cpl = (const void *)(rss + 1);
	u_int tid = GET_TID(cpl);
	u_int atid = G_TID_TID(ntohl(cpl->tos_atid));
	struct toepcb *toep = lookup_atid(sc, atid);
	struct inpcb *inp = toep->inp;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, tid %u", __func__, atid, tid);
	free_atid(sc, atid);
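
	/*
	 * The atid has been released; from here on the connection is tracked
	 * by the hw-assigned tid.  Install it in the tid table (under the inp
	 * lock) so that subsequent CPLs for this connection find the toepcb.
	 */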
	CURVNET_SET(toep->vnet);
	INP_WLOCK(inp);
	toep->tid = tid;
	insert_tid(sc, tid, toep, inp->inp_vflag & INP_IPV6 ? 2 : 1);
	if (inp->inp_flags & INP_DROPPED) {

		/* socket closed by the kernel before hw told us it connected */

		send_flowc_wr(toep, NULL);
		send_reset(sc, toep, be32toh(cpl->snd_isn));
		goto done;
	}

	make_established(toep, cpl->snd_isn, cpl->rcv_isn, cpl->tcp_opt);

	if (toep->ulp_mode == ULP_MODE_TLS)
		tls_establish(toep);

done:
	INP_WUNLOCK(inp);
	CURVNET_RESTORE();
	return (0);
}

void
act_open_failure_cleanup(struct adapter *sc, u_int atid, u_int status)
{
	struct toepcb *toep = lookup_atid(sc, atid);
	struct inpcb *inp = toep->inp;
	struct toedev *tod = &toep->td->tod;
	struct epoch_tracker et;

	free_atid(sc, atid);
	toep->tid = -1;

	CURVNET_SET(toep->vnet);
	if (status != EAGAIN)
		INP_INFO_RLOCK_ET(&V_tcbinfo, et);
	INP_WLOCK(inp);
	toe_connect_failed(tod, inp, status);
	final_cpl_received(toep);	/* unlocks inp */
	if (status != EAGAIN)
		INP_INFO_RUNLOCK_ET(&V_tcbinfo, et);
	CURVNET_RESTORE();
}

/*
 * Active open failed.
 */
static int
do_act_open_rpl(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_act_open_rpl *cpl = (const void *)(rss + 1);
	u_int atid = G_TID_TID(G_AOPEN_ATID(be32toh(cpl->atid_status)));
	u_int status = G_AOPEN_STATUS(be32toh(cpl->atid_status));
	struct toepcb *toep = lookup_atid(sc, atid);
	int rc;

	KASSERT(m == NULL, ("%s: wasn't expecting payload", __func__));
	KASSERT(toep->tid == atid, ("%s: toep tid/atid mismatch", __func__));

	CTR3(KTR_CXGBE, "%s: atid %u, status %u", __func__, atid, status);

	/* Ignore negative advice */
	if (negative_advice(status))
		return (0);

	if (status && act_open_has_tid(status))
		release_tid(sc, GET_TID(cpl), toep->ctrlq);

	rc = act_open_rpl_status_to_errno(status);
	act_open_failure_cleanup(sc, atid, rc);

	return (0);
}

/*
 * Options2 for active open.
 */
static uint32_t
calc_opt2a(struct socket *so, struct toepcb *toep,
    const struct offload_settings *s)
{
	struct tcpcb *tp = so_sototcpcb(so);
	struct port_info *pi = toep->vi->pi;
	struct adapter *sc = pi->adapter;
	uint32_t opt2 = 0;

	/*
	 * rx flow control, rx coalesce, congestion control, and tx pace are
	 * all explicitly set by the driver.  On T5+ the ISS is also set by
	 * the driver to the value picked by the kernel.
	 */
	if (is_t4(sc)) {
		opt2 |= F_RX_FC_VALID | F_RX_COALESCE_VALID;
		opt2 |= F_CONG_CNTRL_VALID | F_PACE_VALID;
	} else {
		opt2 |= F_T5_OPT_2_VALID;	/* all 4 valid */
		opt2 |= F_T5_ISS;		/* ISS provided in CPL */
	}

	if (s->sack > 0 || (s->sack < 0 && (tp->t_flags & TF_SACK_PERMIT)))
		opt2 |= F_SACK_EN;

	if (s->tstamp > 0 || (s->tstamp < 0 && (tp->t_flags & TF_REQ_TSTMP)))
		opt2 |= F_TSTAMPS_EN;

	if (tp->t_flags & TF_REQ_SCALE)
		opt2 |= F_WND_SCALE_EN;

	if (s->ecn > 0 || (s->ecn < 0 && V_tcp_do_ecn == 1))
		opt2 |= F_CCTRL_ECN;

	/* XXX: F_RX_CHANNEL for multiple rx c-chan support goes here. */

	opt2 |= V_TX_QUEUE(sc->params.tp.tx_modq[pi->tx_chan]);

	/* These defaults are subject to ULP specific fixups later. */
	opt2 |= V_RX_FC_DDP(0) | V_RX_FC_DISABLE(0);

	opt2 |= V_PACE(0);
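
	/*
	 * Congestion control for the connection: an explicit algorithm in the
	 * offload policy wins, then the adapter-wide tt.cong_algorithm
	 * setting, and otherwise the algorithm is derived from the name of
	 * the one the host stack is using (newreno if the hardware has no
	 * equivalent).
	 */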
	if (s->cong_algo >= 0)
		opt2 |= V_CONG_CNTRL(s->cong_algo);
	else if (sc->tt.cong_algorithm >= 0)
		opt2 |= V_CONG_CNTRL(sc->tt.cong_algorithm & M_CONG_CNTRL);
	else {
		struct cc_algo *cc = CC_ALGO(tp);

		if (strcasecmp(cc->name, "reno") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_RENO);
		else if (strcasecmp(cc->name, "tahoe") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
		else if (strcasecmp(cc->name, "newreno") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
		else if (strcasecmp(cc->name, "highspeed") == 0)
			opt2 |= V_CONG_CNTRL(CONG_ALG_HIGHSPEED);
		else {
			/*
			 * Use newreno in case the algorithm selected by the
			 * host stack is not supported by the hardware.
			 */
			opt2 |= V_CONG_CNTRL(CONG_ALG_NEWRENO);
		}
	}

	if (s->rx_coalesce > 0 || (s->rx_coalesce < 0 && sc->tt.rx_coalesce))
		opt2 |= V_RX_COALESCE(M_RX_COALESCE);

	/* Note that ofld_rxq is already set according to s->rxq. */
	opt2 |= F_RSS_QUEUE_VALID;
	opt2 |= V_RSS_QUEUE(toep->ofld_rxq->iq.abs_id);

#ifdef USE_DDP_RX_FLOW_CONTROL
	if (toep->ulp_mode == ULP_MODE_TCPDDP)
		opt2 |= F_RX_FC_DDP;
#endif

	if (toep->ulp_mode == ULP_MODE_TLS) {
		opt2 &= ~V_RX_COALESCE(M_RX_COALESCE);
		opt2 |= F_RX_FC_DISABLE;
	}

	return (htobe32(opt2));
}

void
t4_init_connect_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl,
	    CPL_COOKIE_TOM);
}

void
t4_uninit_connect_cpl_handlers(void)
{

	t4_register_cpl_handler(CPL_ACT_ESTABLISH, NULL);
	t4_register_shared_cpl_handler(CPL_ACT_OPEN_RPL, NULL, CPL_COOKIE_TOM);
}

#define DONT_OFFLOAD_ACTIVE_OPEN(x)	do { \
	reason = __LINE__; \
	rc = (x); \
	goto failed; \
} while (0)
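
/*
 * The act_open request CPL differs between chip generations; return the size
 * of the variant that matches this adapter (T4/T5/T6) and the address family.
 */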
static inline int
act_open_cpl_size(struct adapter *sc, int isipv6)
{
	int idx;
	static const int sz_table[3][2] = {
		{
			sizeof (struct cpl_act_open_req),
			sizeof (struct cpl_act_open_req6)
		},
		{
			sizeof (struct cpl_t5_act_open_req),
			sizeof (struct cpl_t5_act_open_req6)
		},
		{
			sizeof (struct cpl_t6_act_open_req),
			sizeof (struct cpl_t6_act_open_req6)
		},
	};

	MPASS(chip_id(sc) >= CHELSIO_T4);
	idx = min(chip_id(sc) - CHELSIO_T4, 2);

	return (sz_table[idx][!!isipv6]);
}

/*
 * active open (soconnect).
 *
 * State of affairs on entry:
 * soisconnecting (so_state |= SS_ISCONNECTING)
 * tcbinfo not locked (This has changed - used to be WLOCKed)
 * inp WLOCKed
 * tp->t_state = TCPS_SYN_SENT
 * rtalloc1, RT_UNLOCK on rt.
 */
int
t4_connect(struct toedev *tod, struct socket *so, struct rtentry *rt,
    struct sockaddr *nam)
{
	struct adapter *sc = tod->tod_softc;
	struct tom_data *td = tod_td(tod);
	struct toepcb *toep = NULL;
	struct wrqe *wr = NULL;
	struct ifnet *rt_ifp = rt->rt_ifp;
	struct vi_info *vi;
	int mtu_idx, rscale, qid_atid, rc, isipv6, txqid, rxqid;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp = intotcpcb(inp);
	int reason;
	struct offload_settings settings;
	uint16_t vid = 0xfff, pcp = 0;

	INP_WLOCK_ASSERT(inp);
	KASSERT(nam->sa_family == AF_INET || nam->sa_family == AF_INET6,
	    ("%s: dest addr %p has family %u", __func__, nam, nam->sa_family));

	if (rt_ifp->if_type == IFT_ETHER)
		vi = rt_ifp->if_softc;
	else if (rt_ifp->if_type == IFT_L2VLAN) {
		struct ifnet *ifp = VLAN_TRUNKDEV(rt_ifp);

		vi = ifp->if_softc;
		VLAN_TAG(rt_ifp, &vid);
		VLAN_PCP(rt_ifp, &pcp);
	} else if (rt_ifp->if_type == IFT_IEEE8023ADLAG)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOSYS); /* XXX: implement lagg+TOE */
	else
		DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

	rw_rlock(&sc->policy_lock);
	settings = *lookup_offload_policy(sc, OPEN_TYPE_ACTIVE, NULL,
	    EVL_MAKETAG(vid, pcp, 0), inp);
	rw_runlock(&sc->policy_lock);
	if (!settings.offload)
		DONT_OFFLOAD_ACTIVE_OPEN(EPERM);

	if (settings.txq >= 0 && settings.txq < vi->nofldtxq)
		txqid = settings.txq;
	else
		txqid = arc4random() % vi->nofldtxq;
	txqid += vi->first_ofld_txq;
	if (settings.rxq >= 0 && settings.rxq < vi->nofldrxq)
		rxqid = settings.rxq;
	else
		rxqid = arc4random() % vi->nofldrxq;
	rxqid += vi->first_ofld_rxq;

	toep = alloc_toepcb(vi, txqid, rxqid, M_NOWAIT | M_ZERO);
	if (toep == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->tid = alloc_atid(sc, toep);
	if (toep->tid < 0)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->l2te = t4_l2t_get(vi->pi, rt_ifp,
	    rt->rt_flags & RTF_GATEWAY ? rt->rt_gateway : nam);
	if (toep->l2te == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	isipv6 = nam->sa_family == AF_INET6;
	wr = alloc_wrqe(act_open_cpl_size(sc, isipv6), toep->ctrlq);
	if (wr == NULL)
		DONT_OFFLOAD_ACTIVE_OPEN(ENOMEM);

	toep->vnet = so->so_vnet;
	set_ulp_mode(toep, select_ulp_mode(so, sc, &settings));
	SOCKBUF_LOCK(&so->so_rcv);
	/* opt0 rcv_bufsiz initially, assumes its normal meaning later */
	toep->rx_credits = min(select_rcv_wnd(so) >> 10, M_RCV_BUFSIZ);
	SOCKBUF_UNLOCK(&so->so_rcv);

	/*
	 * The kernel sets request_r_scale based on sb_max whereas we need to
	 * take hardware's MAX_RCV_WND into account too.  This is normally a
	 * no-op as MAX_RCV_WND is much larger than the default sb_max.
	 */
	if (tp->t_flags & TF_REQ_SCALE)
		rscale = tp->request_r_scale = select_rcv_wscale();
	else
		rscale = 0;
	mtu_idx = find_best_mtu_idx(sc, &inp->inp_inc, &settings);
	qid_atid = V_TID_QID(toep->ofld_rxq->iq.abs_id) | V_TID_TID(toep->tid) |
	    V_TID_COOKIE(CPL_COOKIE_TOM);
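
	/*
	 * Build the act_open request.  The CPL layout depends on the chip
	 * generation (T4/T5/T6) and the address family, but opt0, opt2, and
	 * the qid/atid cookie are filled in the same way for every variant.
	 */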
	if (isipv6) {
		struct cpl_act_open_req6 *cpl = wrtod(wr);
		struct cpl_t5_act_open_req6 *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req6 *cpl6 = (void *)cpl;

		if ((inp->inp_vflag & INP_IPV6) == 0)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOTSUP);

		toep->ce = hold_lip(td, &inp->in6p_laddr, NULL);
		if (toep->ce == NULL)
			DONT_OFFLOAD_ACTIVE_OPEN(ENOENT);

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
		    qid_atid));
		cpl->local_port = inp->inp_lport;
		cpl->local_ip_hi = *(uint64_t *)&inp->in6p_laddr.s6_addr[0];
		cpl->local_ip_lo = *(uint64_t *)&inp->in6p_laddr.s6_addr[8];
		cpl->peer_port = inp->inp_fport;
		cpl->peer_ip_hi = *(uint64_t *)&inp->in6p_faddr.s6_addr[0];
		cpl->peer_ip_lo = *(uint64_t *)&inp->in6p_faddr.s6_addr[8];
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	} else {
		struct cpl_act_open_req *cpl = wrtod(wr);
		struct cpl_t5_act_open_req *cpl5 = (void *)cpl;
		struct cpl_t6_act_open_req *cpl6 = (void *)cpl;

		switch (chip_id(sc)) {
		case CHELSIO_T4:
			INIT_TP_WR(cpl, 0);
			cpl->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T5:
			INIT_TP_WR(cpl5, 0);
			cpl5->iss = htobe32(tp->iss);
			cpl5->params = select_ntuple(vi, toep->l2te);
			break;
		case CHELSIO_T6:
		default:
			INIT_TP_WR(cpl6, 0);
			cpl6->iss = htobe32(tp->iss);
			cpl6->params = select_ntuple(vi, toep->l2te);
			break;
		}
		OPCODE_TID(cpl) = htobe32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
		    qid_atid));
		inp_4tuple_get(inp, &cpl->local_ip, &cpl->local_port,
		    &cpl->peer_ip, &cpl->peer_port);
		cpl->opt0 = calc_opt0(so, vi, toep->l2te, mtu_idx, rscale,
		    toep->rx_credits, toep->ulp_mode, &settings);
		cpl->opt2 = calc_opt2a(so, toep, &settings);
	}

	CTR5(KTR_CXGBE, "%s: atid %u (%s), toep %p, inp %p", __func__,
	    toep->tid, tcpstates[tp->t_state], toep, inp);

	offload_socket(so, toep);
	rc = t4_l2t_send(sc, wr, toep->l2te);
	if (rc == 0) {
		toep->flags |= TPF_CPL_PENDING;
		return (0);
	}

	undo_offload_socket(so);
	reason = __LINE__;
failed:
	CTR3(KTR_CXGBE, "%s: not offloading (%d), rc %d", __func__, reason, rc);

	if (wr)
		free_wrqe(wr);

	if (toep) {
		if (toep->tid >= 0)
			free_atid(sc, toep->tid);
		if (toep->l2te)
			t4_l2t_release(toep->l2te);
		if (toep->ce)
			release_lip(td, toep->ce);
		free_toepcb(toep);
	}

	return (rc);
}
#endif