/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
struct cpl_set_tcb_rpl;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))

#include "iw_cxgbe.h"
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <rdma/rdma_cm.h>

static spinlock_t req_lock;
static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
static struct work_struct c4iw_task;
static struct workqueue_struct *c4iw_taskq;
static LIST_HEAD(err_cqe_list);
static spinlock_t err_cqe_lock;
static LIST_HEAD(listen_port_list);
static DEFINE_MUTEX(listen_port_mutex);

static void process_req(struct work_struct *ctx);
static void start_ep_timer(struct c4iw_ep *ep);
static int stop_ep_timer(struct c4iw_ep *ep);
static int set_tcpinfo(struct c4iw_ep *ep);
static void process_timeout(struct c4iw_ep *ep);
static void process_err_cqes(void);
static void *alloc_ep(int size, gfp_t flags);
static void close_socket(struct socket *so);
static int send_mpa_req(struct c4iw_ep *ep);
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
static void close_complete_upcall(struct c4iw_ep *ep, int status);
static int send_abort(struct c4iw_ep *ep);
static void peer_close_upcall(struct c4iw_ep *ep);
static void peer_abort_upcall(struct c4iw_ep *ep);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int connect_request_upcall(struct c4iw_ep *ep);
static void established_upcall(struct c4iw_ep *ep);
static int process_mpa_reply(struct c4iw_ep *ep);
static int process_mpa_request(struct c4iw_ep *ep);
static void process_peer_close(struct c4iw_ep *ep);
static void process_conn_error(struct c4iw_ep *ep);
static void process_close_complete(struct c4iw_ep *ep);
static void ep_timeout(unsigned long arg);
static void setiwsockopt(struct socket *so);
static void init_iwarp_socket(struct socket *so, void *arg);
static void uninit_iwarp_socket(struct socket *so);
static void process_data(struct c4iw_ep *ep);
static void process_connected(struct c4iw_ep *ep);
static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
static void process_socket_event(struct c4iw_ep *ep);
static void release_ep_resources(struct c4iw_ep *ep);
static int process_terminate(struct c4iw_ep *ep);
static int terminate(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m);
static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep *lep);
static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep);
static struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so);
static int get_ifnet_from_raddr(struct sockaddr_storage *raddr,
    struct ifnet **ifp);
static void process_newconn(struct c4iw_listen_ep *master_lep,
    struct socket *new_so);

#define START_EP_TIMER(ep) \
    do { \
	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    start_ep_timer(ep); \
    } while (0)

#define STOP_EP_TIMER(ep) \
    ({ \
	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    stop_ep_timer(ep); \
    })

#define GET_LOCAL_ADDR(pladdr, so) \
	do { \
		struct sockaddr_storage *__a = NULL; \
		struct inpcb *__inp = sotoinpcb(so); \
		KASSERT(__inp != NULL, \
		    ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
		if (__inp->inp_vflag & INP_IPV4) \
			in_getsockaddr(so, (struct sockaddr **)&__a); \
		else \
			in6_getsockaddr(so, (struct sockaddr **)&__a); \
		*(pladdr) = *__a; \
		free(__a, M_SONAME); \
	} while (0)

#define GET_REMOTE_ADDR(praddr, so) \
	do { \
		struct sockaddr_storage *__a = NULL; \
		struct inpcb *__inp = sotoinpcb(so); \
		KASSERT(__inp != NULL, \
		    ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
		if (__inp->inp_vflag & INP_IPV4) \
			in_getpeeraddr(so, (struct sockaddr **)&__a); \
		else \
			in6_getpeeraddr(so, (struct sockaddr **)&__a); \
		*(praddr) = *__a; \
		free(__a, M_SONAME); \
	} while (0)

#ifdef KTR
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
#endif

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

/* allocated per TCP port while listening */
struct listen_port_info {
	uint16_t port_num;		/* TCP port address */
	struct list_head list;		/* belongs to listen_port_list */
	struct list_head lep_list;	/* per port lep list */
	uint32_t refcnt;		/* number of lep's listening */
};

/*
 * The following two lists are used to manage INADDR_ANY listeners:
 * 1) listen_port_list
 * 2) lep_list
 *
 * Below is the INADDR_ANY listener lists overview on a system with a two port
 * adapter:
 *
 *   |------------------|
 *   | listen_port_list |
 *   |------------------|
 *            |
 *            |   |-----------|       |-----------|
 *            |   | port_num:X|       | port_num:X|
 *            |---|-list------|-------|-list------|-------....
 *                | lep_list----|     | lep_list----|
 *                | refcnt    | |     | refcnt    | |
 *                |           | |     |           | |
 *                |-----------| |     |-----------| |
 *                              |                   |
 *                              |                   |
 *                              |                   |
 *                              |                   |   lep1                 lep2
 *                              |                   |   |----------------|   |----------------|
 *                              |                   |---| listen_ep_list |---| listen_ep_list |
 *                              |                       |----------------|   |----------------|
 *                              |
 *                              |
 *                              |   lep1                 lep2
 *                              |   |----------------|   |----------------|
 *                              |---| listen_ep_list |---| listen_ep_list |
 *                                  |----------------|   |----------------|
 *
 * Because the adapter has two ports, there are two leps (lep1 & lep2) for
 * each TCP port number.
 *
 * Here 'lep1' is always marked as the master lep, because solisten() is
 * always called through the first lep.
 */
static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep *lep)
{
	uint16_t port;
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;

	port = (laddr->ss_family == AF_INET) ?
	    ((struct sockaddr_in *)laddr)->sin_port :
	    ((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port)
			goto found_port;

	port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
	port_info->port_num = port;
	port_info->refcnt = 0;

	list_add_tail(&port_info->list, &listen_port_list);
	INIT_LIST_HEAD(&port_info->lep_list);

found_port:
	port_info->refcnt++;
	list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
	mutex_unlock(&listen_port_mutex);
	return port_info;
}

static int
rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
{
	uint16_t port;
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;
	int refcnt = 0;

	port = (laddr->ss_family == AF_INET) ?
	    ((struct sockaddr_in *)laddr)->sin_port :
	    ((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	/* get the port_info structure based on the lep's port address */
	list_for_each_entry(port_info, &listen_port_list, list) {
		if (port_info->port_num == port) {
			port_info->refcnt--;
			refcnt = port_info->refcnt;
			/* remove the current lep from the listen list */
			list_del(&lep->listen_ep_list);
			if (port_info->refcnt == 0) {
				/* Remove this entry from the list as there
				 * are no more listeners for this port_num.
				 */
				list_del(&port_info->list);
				kfree(port_info);
			}
			break;
		}
	}
	mutex_unlock(&listen_port_mutex);
	return refcnt;
}

/*
 * Find the lep that belongs to the ifnet on which the SYN frame was received.
 */
struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
{
	struct adapter *adap = NULL;
	struct c4iw_listen_ep *lep = NULL;
	struct ifnet *ifp = NULL, *hw_ifp = NULL;
	struct listen_port_info *port_info = NULL;
	int i = 0, found_portinfo = 0, found_lep = 0;
	uint16_t port;

	/*
	 * STEP 1: Figure out 'ifp' of the physical interface, not pseudo
	 * interfaces like vlan, lagg, etc..
	 * TBD: lagg support, lagg + vlan support.
	 */
	ifp = TOEPCB(so)->l2te->ifp;
	if (ifp->if_type == IFT_L2VLAN) {
		hw_ifp = VLAN_TRUNKDEV(ifp);
		if (hw_ifp == NULL) {
			CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of "
			    "vlan ifnet %p, sock %p, master_lep %p",
			    __func__, ifp, so, master_lep);
			return (NULL);
		}
	} else
		hw_ifp = ifp;

	/* STEP 2: Find 'port_info' with listener local port address. */
	port = (master_lep->com.local_addr.ss_family == AF_INET) ?
	    ((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
	    ((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;


	mutex_lock(&listen_port_mutex);
	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port) {
			found_portinfo = 1;
			break;
		}
	if (!found_portinfo)
		goto out;

	/* STEP 3: Traverse through list of lep's that are bound to the current
	 * TCP port address and find the lep that belongs to the ifnet on which
	 * the SYN frame was received.
	 */
	list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
		adap = lep->com.dev->rdev.adap;
		for_each_port(adap, i) {
			if (hw_ifp == adap->port[i]->vi[0].ifp) {
				found_lep = 1;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&listen_port_mutex);
	return found_lep ? lep : (NULL);
}

static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int abort = 1;

	CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
	    ep, ep->hwtid, ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
			    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u\n",
		    __func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return;
}

struct cqe_list_entry {
	struct list_head entry;
	struct c4iw_dev *rhp;
	struct t4_cqe err_cqe;
};

static void
process_err_cqes(void)
{
	unsigned long flag;
	struct cqe_list_entry *cle;

	spin_lock_irqsave(&err_cqe_lock, flag);
	while (!list_empty(&err_cqe_list)) {
		struct list_head *tmp;
		tmp = err_cqe_list.next;
		list_del(tmp);
		tmp->next = tmp->prev = NULL;
		spin_unlock_irqrestore(&err_cqe_lock, flag);
		cle = list_entry(tmp, struct cqe_list_entry, entry);
		c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
		free(cle, M_CXGBE);
		spin_lock_irqsave(&err_cqe_lock, flag);
	}
	spin_unlock_irqrestore(&err_cqe_lock, flag);

	return;
}

static void
process_req(struct work_struct *ctx)
{
	struct c4iw_ep_common *epc;
	unsigned long flag;
	int ep_events;

	process_err_cqes();
	spin_lock_irqsave(&req_lock, flag);
	while (!TAILQ_EMPTY(&req_list)) {
		epc = TAILQ_FIRST(&req_list);
		TAILQ_REMOVE(&req_list, epc, entry);
		epc->entry.tqe_prev = NULL;
		ep_events = epc->ep_events;
		epc->ep_events = 0;
		spin_unlock_irqrestore(&req_lock, flag);
		mutex_lock(&epc->mutex);
		CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x",
		    __func__, epc->so, epc, states[epc->state], ep_events);
		if (ep_events & C4IW_EVENT_TERM)
			process_terminate((struct c4iw_ep *)epc);
		if (ep_events & C4IW_EVENT_TIMEOUT)
			process_timeout((struct c4iw_ep *)epc);
		if (ep_events & C4IW_EVENT_SOCKET)
			process_socket_event((struct c4iw_ep *)epc);
		mutex_unlock(&epc->mutex);
		c4iw_put_ep(epc);
		process_err_cqes();
		spin_lock_irqsave(&req_lock, flag);
	}
	spin_unlock_irqrestore(&req_lock, flag);
}

/*
 * XXX: doesn't belong here in the iWARP driver.
 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
 * set.  Is this a valid assumption for active open?
 */
static int
set_tcpinfo(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct toepcb *toep;
	int rc = 0;

	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if ((tp->t_flags & TF_TOE) == 0) {
		rc = EINVAL;
		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
		    __func__, so, ep);
		goto done;
	}
	toep = TOEPCB(so);

	ep->hwtid = toep->tid;
	ep->snd_seq = tp->snd_nxt;
	ep->rcv_seq = tp->rcv_nxt;
	ep->emss = max(tp->t_maxseg, 128);
done:
	INP_WUNLOCK(inp);
	return (rc);

}

static int
get_ifnet_from_raddr(struct sockaddr_storage *raddr, struct ifnet **ifp)
{
	int err = 0;

	if (raddr->ss_family == AF_INET) {
		struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr;
		struct nhop4_extended nh4 = {0};

		err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, raddr4->sin_addr,
		    NHR_REF, 0, &nh4);
		*ifp = nh4.nh_ifp;
		if (err)
			fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);
	} else {
		struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr;
		struct nhop6_extended nh6 = {0};
		struct in6_addr addr6;
		uint32_t scopeid;

		memset(&addr6, 0, sizeof(addr6));
		in6_splitscope((struct in6_addr *)&raddr6->sin6_addr,
		    &addr6, &scopeid);
		err = fib6_lookup_nh_ext(RT_DEFAULT_FIB, &addr6, scopeid,
		    NHR_REF, 0, &nh6);
		*ifp = nh6.nh_ifp;
		if (err)
			fib6_free_nh_ext(RT_DEFAULT_FIB, &nh6);
	}

	CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err);
	return err;
}

static void
close_socket(struct socket *so)
{
	uninit_iwarp_socket(so);
	soclose(so);
}

static void
process_peer_close(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int disconnect = 1;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	switch (ep->com.state) {

	case MPA_REQ_WAIT:
		CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD",
		    __func__, ep);
		/* Fallthrough */
	case MPA_REQ_SENT:
		CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
		    __func__, ep);
		ep->com.state = DEAD;
		connect_reply_upcall(ep, -ECONNABORTED);

		disconnect = 0;
		STOP_EP_TIMER(ep);
		close_socket(ep->com.so);
		deref_cm_id(&ep->com);
		release = 1;
		break;

	case MPA_REQ_RCVD:

		/*
		 * We're gonna mark this puppy DEAD, but keep
		 * the reference on it until the ULP accepts or
		 * rejects the CR.
		 */
		CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
		    __func__, ep);
		ep->com.state = CLOSING;
		break;

	case MPA_REP_SENT:
		CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
		    __func__, ep);
		ep->com.state = CLOSING;
		break;

	case FPDU_MODE:
		CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
		    __func__, ep);
		START_EP_TIMER(ep);
		ep->com.state = CLOSING;
		attrs.next_state = C4IW_QP_STATE_CLOSING;
		c4iw_modify_qp(ep->com.dev, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		peer_close_upcall(ep);
		break;

	case ABORTING:
		CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
		    __func__, ep);
		disconnect = 0;
		break;

	case CLOSING:
		CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
		    __func__, ep);
		ep->com.state = MORIBUND;
		disconnect = 0;
		break;

	case MORIBUND:
		CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
		    ep);
		STOP_EP_TIMER(ep);
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_socket(ep->com.so);
		close_complete_upcall(ep, 0);
		ep->com.state = DEAD;
		release = 1;
		disconnect = 0;
		break;

	case DEAD:
		CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
		    __func__, ep);
		disconnect = 0;
		break;

	default:
		panic("%s: ep %p state %d", __func__, ep,
		    ep->com.state);
		break;
	}


	if (disconnect) {

		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
	}
	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
	return;
}

static void
process_conn_error(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int ret;
	int state;

	state = ep->com.state;
	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
	    __func__, ep, ep->com.so, ep->com.so->so_error,
	    states[ep->com.state]);

	switch (state) {

	case MPA_REQ_WAIT:
		STOP_EP_TIMER(ep);
		c4iw_put_ep(&ep->parent_ep->com);
		break;

	case MPA_REQ_SENT:
		STOP_EP_TIMER(ep);
		connect_reply_upcall(ep, -ECONNRESET);
		break;

	case MPA_REP_SENT:
		ep->com.rpl_err = ECONNRESET;
		CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
		break;

	case MPA_REQ_RCVD:
		break;

	case MORIBUND:
	case CLOSING:
		STOP_EP_TIMER(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:

		if (ep->com.cm_id && ep->com.qp) {

			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
			    ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
			    &attrs, 1);
			if (ret)
				log(LOG_ERR,
				    "%s - qp <- error failed!\n",
				    __func__);
		}
		peer_abort_upcall(ep);
		break;

	case ABORTING:
		break;

	case DEAD:
		CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
		    __func__, ep->com.so->so_error);
		return;

	default:
		panic("%s: ep %p state %d", __func__, ep, state);
		break;
	}

	if (state != ABORTING) {
		close_socket(ep->com.so);
		ep->com.state = DEAD;
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
	return;
}

static void
process_close_complete(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	/* The cm_id may be null if we failed to connect */
	set_bit(CLOSE_CON_RPL, &ep->com.history);

	switch (ep->com.state) {

	case CLOSING:
		CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
		    __func__, ep);
		ep->com.state = MORIBUND;
		break;

	case MORIBUND:
		CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
		    ep);
		STOP_EP_TIMER(ep);

		if ((ep->com.cm_id) && (ep->com.qp)) {

			CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
			    __func__, ep);
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.dev,
			    ep->com.qp,
			    C4IW_QP_ATTR_NEXT_STATE,
			    &attrs, 1);
		}

		close_socket(ep->com.so);
		close_complete_upcall(ep, 0);
		ep->com.state = DEAD;
		release = 1;
		break;

	case ABORTING:
		CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
		break;

	case DEAD:
		CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
		break;
	default:
		CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
		    __func__, ep);
		panic("%s:pcc6 %p unknown ep state", __func__, ep);
		break;
	}

	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
		release_ep_resources(ep);
	}
	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
	return;
}

static void
setiwsockopt(struct socket *so)
{
	int rc;
	struct sockopt sopt;
	int on = 1;

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;
	sopt.sopt_td = NULL;
	rc = sosetopt(so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
		    __func__, so, rc);
	}
}

static void
init_iwarp_socket(struct socket *so, void *arg)
{
	if (SOLISTENING(so)) {
		SOLISTEN_LOCK(so);
		solisten_upcall_set(so, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;
		SOLISTEN_UNLOCK(so);
	} else {
		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
}

static void
uninit_iwarp_socket(struct socket *so)
{
	if (SOLISTENING(so)) {
		SOLISTEN_LOCK(so);
		solisten_upcall_set(so, NULL, NULL);
		SOLISTEN_UNLOCK(so);
	} else {
		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_clear(so, SO_RCV);
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
}

static void
process_data(struct c4iw_ep *ep)
{
	int disconnect = 0;

	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		disconnect = process_mpa_reply(ep);
		break;
	case MPA_REQ_WAIT:
		disconnect = process_mpa_request(ep);
		if (disconnect)
			/* Referenced in process_newconn() */
			c4iw_put_ep(&ep->parent_ep->com);
		break;
	default:
		if (sbused(&ep->com.so->so_rcv))
			log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
			    "state %d, so %p, so_state 0x%x, sbused %u\n",
			    __func__, ep, ep->com.state, ep->com.so,
			    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
		break;
	}
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);

}

static void
process_connected(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;

	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
		if (send_mpa_req(ep))
			goto err;
	} else {
		connect_reply_upcall(ep, -so->so_error);
		goto err;
	}
	return;
err:
	close_socket(so);
	ep->com.state = DEAD;
	c4iw_put_ep(&ep->com);
	return;
}

static inline int c4iw_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return IN_ZERONET(
		    ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
		    ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int c4iw_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return IN_LOOPBACK(
		    ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
	else
		return IN6_IS_ADDR_LOOPBACK(
		    &((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int c4iw_any_addr(struct sockaddr *addr)
{
	return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr);
}

static void
process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
{
	struct c4iw_listen_ep *real_lep = NULL;
	struct c4iw_ep *new_ep = NULL;
	struct sockaddr_in *remote = NULL;
	int ret = 0;

	MPASS(new_so != NULL);

	if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr)) {
		/* Here we need to find the 'real_lep' that belongs to the
		 * incoming socket's network interface, such that the newly
		 * created 'ep' can be attached to the real 'lep'.
		 */
		real_lep = find_real_listen_ep(master_lep, new_so);
		if (real_lep == NULL) {
			CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
			    "ep for sock: %p", __func__, new_so);
			log(LOG_ERR, "%s: Could not find the real listen ep for "
			    "sock: %p\n", __func__, new_so);
			/* FIXME: properly free the 'new_so' in the failure
			 * case.  Use of soabort() and soclose() is not legal
			 * here (before soaccept()).
			 */
			return;
		}
	} else /* for Non-Wildcard address, master_lep is always the real_lep */
		real_lep = master_lep;

	new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);

	CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
	    "listening so %p, new so %p", __func__, master_lep, real_lep,
	    new_ep, master_lep->com.so, new_so);

	new_ep->com.dev = real_lep->com.dev;
	new_ep->com.so = new_so;
	new_ep->com.cm_id = NULL;
	new_ep->com.thread = real_lep->com.thread;
	new_ep->parent_ep = real_lep;

	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
	c4iw_get_ep(&real_lep->com);
	init_timer(&new_ep->timer);
	new_ep->com.state = MPA_REQ_WAIT;
	START_EP_TIMER(new_ep);

	setiwsockopt(new_so);
	ret = soaccept(new_so, (struct sockaddr **)&remote);
	if (ret != 0) {
		CTR4(KTR_IW_CXGBE,
		    "%s:listen sock:%p, new sock:%p, ret:%d\n",
		    __func__, master_lep->com.so, new_so, ret);
		if (remote != NULL)
			free(remote, M_SONAME);
		uninit_iwarp_socket(new_so);
		soclose(new_so);
		c4iw_put_ep(&new_ep->com);
		c4iw_put_ep(&real_lep->com);
		return;
	}
	free(remote, M_SONAME);

	/* An MPA request might have been queued up on the socket already, so
	 * we initialize the socket/upcall_handler under the lock to prevent
	 * processing the MPA request on another thread (via process_req())
	 * simultaneously.
	 */
	c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below, this is to
				      avoid freeing of ep before ep unlock. */
	mutex_lock(&new_ep->com.mutex);
	init_iwarp_socket(new_so, &new_ep->com);

	ret = process_mpa_request(new_ep);
	if (ret) {
		/* ABORT */
		c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
		c4iw_put_ep(&real_lep->com);
	}
	mutex_unlock(&new_ep->com.mutex);
	c4iw_put_ep(&new_ep->com);
	return;
}

static int
add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
{
	unsigned long flag;

	spin_lock_irqsave(&req_lock, flag);
	if (ep && ep->com.so) {
		ep->com.ep_events |= new_ep_event;
		if (!ep->com.entry.tqe_prev) {
			c4iw_get_ep(&ep->com);
			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
			queue_work(c4iw_taskq, &c4iw_task);
		}
	}
	spin_unlock_irqrestore(&req_lock, flag);

	return (0);
}

static int
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
	struct c4iw_ep *ep = arg;

	CTR6(KTR_IW_CXGBE,
	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
	    __func__, so, so->so_state, ep, states[ep->com.state],
	    ep->com.entry.tqe_prev);

	MPASS(ep->com.so == so);
	/*
	 * Wake up any threads waiting in rdma_init()/rdma_fini(),
	 * with locks held.
	 */
	if (so->so_error)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);

	return (SU_OK);
}


static int
terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct socket *so;
	struct c4iw_ep *ep;

	INP_WLOCK(toep->inp);
	so = inp_inpcbtosocket(toep->inp);
	ep = so->so_rcv.sb_upcallarg;
	INP_WUNLOCK(toep->inp);

	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
	add_ep_to_req_list(ep, C4IW_EVENT_TERM);

	return 0;
}

static void
process_socket_event(struct c4iw_ep *ep)
{
	int state = ep->com.state;
	struct socket *so = ep->com.so;

	if (ep->com.state == DEAD) {
		CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
		    "ep %p ep_state %s", __func__, ep, states[state]);
		return;
	}

	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
	    so->so_error, so->so_rcv.sb_state, ep, states[state]);

	if (state == CONNECTING) {
		process_connected(ep);
		return;
	}

	if (state == LISTEN) {
		struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
		struct socket *listen_so = so, *new_so = NULL;
		int error = 0;

		SOLISTEN_LOCK(listen_so);
		do {
			error = solisten_dequeue(listen_so, &new_so,
			    SOCK_NONBLOCK);
			if (error) {
				CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
				    "error %d", __func__, lep, listen_so,
				    error);
				return;
			}
			process_newconn(lep, new_so);

			/* solisten_dequeue() unlocks on return, so acquire
			 * the lock again for sol_qlen and also for the next
			 * iteration.
			 */
			SOLISTEN_LOCK(listen_so);
		} while (listen_so->sol_qlen);
		SOLISTEN_UNLOCK(listen_so);

		return;
	}

	/* connection error */
	if (so->so_error) {
		process_conn_error(ep);
		return;
	}

	/* peer close */
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
		process_peer_close(ep);
		/*
		 * check whether a socket disconnect event is pending before
		 * returning.  Fall through if yes.
		 */
		if (!(so->so_state & SS_ISDISCONNECTED))
			return;
	}

	/* close complete */
	if (so->so_state & SS_ISDISCONNECTED) {
		process_close_complete(ep);
		return;
	}

	/* rx data */
	process_data(ep);
}

SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");

static int dack_mode = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
    "Delayed ack mode (default = 0)");

int c4iw_max_read_depth = 8;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
    "Per-connection max ORD/IRD (default = 8)");

static int enable_tcp_timestamps;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
    "Enable tcp timestamps (default = 0)");

static int enable_tcp_sack;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
    "Enable tcp SACK (default = 0)");

static int enable_tcp_window_scaling = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
    "Enable tcp window scaling (default = 1)");

int c4iw_debug = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
    "Enable debug logging (default = 0)");

static int peer2peer = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
    "Support peer2peer ULPs (default = 1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
    "RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
    "CM Endpoint operation timeout in seconds (default = 60)");

static int mpa_rev = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
    "MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");

static int markers_enabled;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
    "Enable MPA MARKERS (default(0) = disabled)");

static int crc_enabled = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
    "Enable MPA CRC (default(1) = enabled)");

static int rcv_win = 256 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
    "TCP receive window in bytes (default = 256KB)");

static int snd_win = 128 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
    "TCP send window in bytes (default = 128KB)");

static void
start_ep_timer(struct c4iw_ep *ep)
{

	if (timer_pending(&ep->timer)) {
		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
		    ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int
stop_ep_timer(struct c4iw_ep *ep)
{

	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static void *
alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc == NULL)
		return (NULL);

	kref_init(&epc->kref);
	mutex_init(&epc->mutex);
	c4iw_init_wr_wait(&epc->wr_wait);

	return (epc);
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;
	struct c4iw_ep_common *epc;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	epc = &ep->com;
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
	    __func__, epc));
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx",
	    __func__, ep, epc->history, epc->flags);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
}

static int
send_mpa_req(struct c4iw_ep *ep)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	char mpa_rev_to_use = mpa_rev;
	int err = 0;

	if (ep->retry_with_mpa_v1)
		mpa_rev_to_use = 1;
	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL) {
		err = -ENOMEM;
		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",
		    __func__, ep, err);
		goto err;
	}

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0) |
	    (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;

	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
		    sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_READ_RTR);
			}
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (ep->plen) {

			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params),
			    ep->mpa_pkt + sizeof(*mpa), ep->plen);
		}
	} else {

		if (ep->plen)
			memcpy(mpa->private_data,
			    ep->mpa_pkt + sizeof(*mpa), ep->plen);
		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
	}

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		err = -ENOMEM;
		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",
		    __func__, ep, err);
		free(mpa, M_CXGBE);
		goto err;
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
	    ep->com.thread);
	if (err) {
		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",
		    __func__, ep, err);
		goto err;
	}

	START_EP_TIMER(ep);
	ep->com.state = MPA_REQ_SENT;
	ep->mpa_attr.initiator = 1;
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
	return 0;
err:
	connect_reply_upcall(ep, err);
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
	return err;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	int err;

	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
	    ep->plen);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpalen += sizeof(struct mpa_v2_conn_params);
		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
		    ep->mpa_attr.version, mpalen);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
		    sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
		    (peer2peer ? MPA_V2_PEER2PEER_MODEL : 0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
		    (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
		     MPA_V2_RDMA_WRITE_RTR : p2p_type ==
		     FW_RI_INIT_P2PTYPE_READ_REQ ?
		     MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params), pdata, plen);
		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
	if (!err)
		ep->snd_seq += mpalen;
	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
	return err;
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mbuf *m;
	struct mpa_v2_conn_params mpa_v2_params;
	int err;

	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
		    ep->mpa_attr.version);
		mpalen += sizeof(struct mpa_v2_conn_params);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
		    htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);

		if (peer2peer && (ep->mpa_attr.p2p_type !=
		    FW_RI_INIT_P2PTYPE_DISABLED)) {

			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {

				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {

				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_READ_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);


	ep->com.state = MPA_REP_SENT;
	ep->snd_seq += mpalen;
	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
	    ep->com.thread);
	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
	return err;
}



static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
}

static int
send_abort(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct sockopt sopt;
	int rc;
	struct linger l;

	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
	    states[ep->com.state], ep->hwtid);

	l.l_onoff = 1;
	l.l_linger = 0;

	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	sopt.sopt_td = NULL;
	rc = sosetopt(so, &sopt);
	if (rc != 0) {
		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
		    __func__, so, rc);
	}

	uninit_iwarp_socket(so);
	soclose(so);
	set_bit(ABORT_CONN, &ep->com.history);

	/*
	 * TBD: the iw_cxgbe driver should receive an ABORT reply for every
	 * ABORT request it has sent.  But the current TOE driver is not
	 * propagating this ABORT reply event (via do_abort_rpl) to iw_cxgbe.
	 * So as a work-around, de-reference 'ep' here instead of doing it in
	 * the abort_rpl() handler (not yet implemented) of the iw_cxgbe
	 * driver.
	 */
	release_ep_resources(ep);

	return (0);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
	    -ECONNRESET : status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {

		if (!ep->tried_with_mpa_v1) {

			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
			    sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message) +
			    sizeof(struct mpa_v2_conn_params);
		} else {

			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
			/* this means MPA_v1 is used */
			event.ord = c4iw_max_read_depth;
			event.ird = c4iw_max_read_depth;
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message);
		}
	}

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
		set_bit(CONN_RPL_UPCALL, &ep->com.history);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}

	if (status == -ECONNABORTED) {

		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
		return;
	}

	if (status < 0) {

		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
		deref_cm_id(&ep->com);
	}

	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
	    ep->tried_with_mpa_v1);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;

	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
		    sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
		    sizeof(struct mpa_v2_conn_params);
	} else {

		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}

	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
	    &event);
	if (ret) {
		CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
		    " IWCM, err:%d", __func__, ep, ret);
		c4iw_put_ep(&ep->com);
	} else
		/* Dereference parent_ep only in success case.
		 * In case of failure, parent_ep is dereferenced by the caller
		 * of process_mpa_request().
		 */
		c4iw_put_ep(&ep->parent_ep->com);

	set_bit(CONNREQ_UPCALL, &ep->com.history);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
}


#define RELAXED_IRD_NEGOTIATION 1

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs = {0};
	enum c4iw_qp_attr_mask mask;
	int err;
	struct mbuf *top, *m;
	int flags = MSG_DONTWAIT;
	struct uio uio;
	int disconnect = 0;

	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (STOP_EP_TIMER(ep))
		return 0;

	uio.uio_resid = 1000000;
	uio.uio_td = ep->com.thread;
	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);

	if (err) {

		if (err == EWOULDBLOCK) {

			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
			START_EP_TIMER(ep);
			return 0;
		}
		err = -err;
		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
		goto err;
	}

	if (ep->com.so->so_rcv.sb_mb) {

		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
		    __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
	}

	m = top;

	do {

		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
		/*
		 * If we get more than the supported amount of private data
		 * then we must fail this connection.
		 */
		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {

			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
			    ep->mpa_pkt_len + m->m_len);
			err = (-EINVAL);
			goto err_stop_timer;
		}

		/*
		 * copy the new data into our accumulation buffer.
		 */
		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
		ep->mpa_pkt_len += m->m_len;
		if (!m->m_next)
			m = m->m_nextpkt;
		else
			m = m->m_next;
	} while (m);

	m_freem(top);
	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa)) {
		return 0;
	}
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {

		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
		    mpa->revision, mpa_rev);
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {

		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {

		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
		STOP_EP_TIMER(ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
		return 0;
	}

	if (mpa->flags & MPA_REJECT) {

		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	ep->com.state = FPDU_MODE;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {

		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
		ep->mpa_attr.enhanced_rdma_conn =
		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;

		if (ep->mpa_attr.enhanced_rdma_conn) {

			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
			mpa_v2_params = (struct mpa_v2_conn_params *)
			    (ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
			    MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
			    MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check.
Ideally, below checks are 1994 * not required since ird/ord stuff has been taken 1995 * care of in c4iw_accept_cr 1996 */ 1997 if (ep->ird < resp_ord) { 1998 if (RELAXED_IRD_NEGOTIATION && resp_ord <= 1999 ep->com.dev->rdev.adap->params.max_ordird_qp) 2000 ep->ird = resp_ord; 2001 else 2002 insuff_ird = 1; 2003 } else if (ep->ird > resp_ord) { 2004 ep->ird = resp_ord; 2005 } 2006 if (ep->ord > resp_ird) { 2007 if (RELAXED_IRD_NEGOTIATION) 2008 ep->ord = resp_ird; 2009 else 2010 insuff_ird = 1; 2011 } 2012 if (insuff_ird) { 2013 err = -ENOMEM; 2014 ep->ird = resp_ord; 2015 ep->ord = resp_ird; 2016 } 2017 2018 if (ntohs(mpa_v2_params->ird) & 2019 MPA_V2_PEER2PEER_MODEL) { 2020 2021 CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep); 2022 if (ntohs(mpa_v2_params->ord) & 2023 MPA_V2_RDMA_WRITE_RTR) { 2024 2025 CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep); 2026 ep->mpa_attr.p2p_type = 2027 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 2028 } 2029 else if (ntohs(mpa_v2_params->ord) & 2030 MPA_V2_RDMA_READ_RTR) { 2031 2032 CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep); 2033 ep->mpa_attr.p2p_type = 2034 FW_RI_INIT_P2PTYPE_READ_REQ; 2035 } 2036 } 2037 } 2038 } else { 2039 2040 CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep); 2041 2042 if (mpa->revision == 1) { 2043 2044 CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep); 2045 2046 if (peer2peer) { 2047 2048 CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep); 2049 ep->mpa_attr.p2p_type = p2p_type; 2050 } 2051 } 2052 } 2053 2054 if (set_tcpinfo(ep)) { 2055 2056 CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep); 2057 printf("%s set_tcpinfo error\n", __func__); 2058 err = -ECONNRESET; 2059 goto err; 2060 } 2061 2062 CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, " 2063 "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__, 2064 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 2065 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 2066 ep->mpa_attr.p2p_type); 2067 2068 /* 2069 * If responder's RTR does not match with that of initiator, assign 2070 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not 2071 * generated when moving QP to RTS state. 
2072 * A TERM message will be sent after QP has moved to RTS state 2073 */ 2074 if ((ep->mpa_attr.version == 2) && peer2peer && 2075 (ep->mpa_attr.p2p_type != p2p_type)) { 2076 2077 CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep); 2078 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 2079 rtr_mismatch = 1; 2080 } 2081 2082 2083 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; 2084 attrs.mpa_attr = ep->mpa_attr; 2085 attrs.max_ird = ep->ird; 2086 attrs.max_ord = ep->ord; 2087 attrs.llp_stream_handle = ep; 2088 attrs.next_state = C4IW_QP_STATE_RTS; 2089 2090 mask = C4IW_QP_ATTR_NEXT_STATE | 2091 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 2092 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 2093 2094 /* bind QP and TID with INIT_WR */ 2095 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); 2096 2097 if (err) { 2098 2099 CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep); 2100 goto err; 2101 } 2102 2103 /* 2104 * If responder's RTR requirement did not match with what initiator 2105 * supports, generate TERM message 2106 */ 2107 if (rtr_mismatch) { 2108 2109 CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep); 2110 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); 2111 attrs.layer_etype = LAYER_MPA | DDP_LLP; 2112 attrs.ecode = MPA_NOMATCH_RTR; 2113 attrs.next_state = C4IW_QP_STATE_TERMINATE; 2114 attrs.send_term = 1; 2115 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2116 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2117 err = -ENOMEM; 2118 disconnect = 1; 2119 goto out; 2120 } 2121 2122 /* 2123 * Generate TERM if initiator IRD is not sufficient for responder 2124 * provided ORD. Currently, we do the same behaviour even when 2125 * responder provided IRD is also not sufficient as regards to 2126 * initiator ORD. 2127 */ 2128 if (insuff_ird) { 2129 2130 CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep); 2131 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", 2132 __func__); 2133 attrs.layer_etype = LAYER_MPA | DDP_LLP; 2134 attrs.ecode = MPA_INSUFF_IRD; 2135 attrs.next_state = C4IW_QP_STATE_TERMINATE; 2136 attrs.send_term = 1; 2137 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2138 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2139 err = -ENOMEM; 2140 disconnect = 1; 2141 goto out; 2142 } 2143 goto out; 2144 err_stop_timer: 2145 STOP_EP_TIMER(ep); 2146 err: 2147 disconnect = 2; 2148 out: 2149 connect_reply_upcall(ep, err); 2150 CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep); 2151 return disconnect; 2152 } 2153 2154 /* 2155 * process_mpa_request - process streaming mode MPA request 2156 * 2157 * Returns: 2158 * 2159 * 0 upon success indicating a connect request was delivered to the ULP 2160 * or the mpa request is incomplete but valid so far. 2161 * 2162 * 1 if a failure requires the caller to close the connection. 2163 * 2164 * 2 if a failure requires the caller to abort the connection. 
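 * The incoming request is accumulated in ep->mpa_pkt across soreceive() calls; until the full MPA header and private data have arrived, the function simply returns 0 and waits for more data.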
2165 */ 2166 static int 2167 process_mpa_request(struct c4iw_ep *ep) 2168 { 2169 struct mpa_message *mpa; 2170 struct mpa_v2_conn_params *mpa_v2_params; 2171 u16 plen; 2172 int flags = MSG_DONTWAIT; 2173 int rc; 2174 struct iovec iov; 2175 struct uio uio; 2176 enum c4iw_ep_state state = ep->com.state; 2177 2178 CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]); 2179 2180 if (state != MPA_REQ_WAIT) 2181 return 0; 2182 2183 iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len]; 2184 iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; 2185 uio.uio_iov = &iov; 2186 uio.uio_iovcnt = 1; 2187 uio.uio_offset = 0; 2188 uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; 2189 uio.uio_segflg = UIO_SYSSPACE; 2190 uio.uio_rw = UIO_READ; 2191 uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */ 2192 2193 rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags); 2194 if (rc == EAGAIN) 2195 return 0; 2196 else if (rc) 2197 goto err_stop_timer; 2198 2199 KASSERT(uio.uio_offset > 0, ("%s: sorecieve on so %p read no data", 2200 __func__, ep->com.so)); 2201 ep->mpa_pkt_len += uio.uio_offset; 2202 2203 /* 2204 * If we get more than the supported amount of private data then we must 2205 * fail this connection. XXX: check so_rcv->sb_cc, or peek with another 2206 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last 2207 * byte is filled by the soreceive above. 2208 */ 2209 2210 /* Don't even have the MPA message. Wait for more data to arrive. */ 2211 if (ep->mpa_pkt_len < sizeof(*mpa)) 2212 return 0; 2213 mpa = (struct mpa_message *) ep->mpa_pkt; 2214 2215 /* 2216 * Validate MPA Header. 2217 */ 2218 if (mpa->revision > mpa_rev) { 2219 log(LOG_ERR, "%s: MPA version mismatch. Local = %d," 2220 " Received = %d\n", __func__, mpa_rev, mpa->revision); 2221 goto err_stop_timer; 2222 } 2223 2224 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) 2225 goto err_stop_timer; 2226 2227 /* 2228 * Fail if there's too much private data. 2229 */ 2230 plen = ntohs(mpa->private_data_size); 2231 if (plen > MPA_MAX_PRIVATE_DATA) 2232 goto err_stop_timer; 2233 2234 /* 2235 * If plen does not account for pkt size 2236 */ 2237 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) 2238 goto err_stop_timer; 2239 2240 ep->plen = (u8) plen; 2241 2242 /* 2243 * If we don't have all the pdata yet, then bail. 2244 */ 2245 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 2246 return 0; 2247 2248 /* 2249 * If we get here we have accumulated the entire mpa 2250 * start reply message including private data. 2251 */ 2252 ep->mpa_attr.initiator = 0; 2253 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 2254 ep->mpa_attr.recv_marker_enabled = markers_enabled; 2255 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 2256 ep->mpa_attr.version = mpa->revision; 2257 if (mpa->revision == 1) 2258 ep->tried_with_mpa_v1 = 1; 2259 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 2260 2261 if (mpa->revision == 2) { 2262 ep->mpa_attr.enhanced_rdma_conn = 2263 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 
1 : 0; 2264 if (ep->mpa_attr.enhanced_rdma_conn) { 2265 mpa_v2_params = (struct mpa_v2_conn_params *) 2266 (ep->mpa_pkt + sizeof(*mpa)); 2267 ep->ird = ntohs(mpa_v2_params->ird) & 2268 MPA_V2_IRD_ORD_MASK; 2269 ep->ird = min_t(u32, ep->ird, 2270 cur_max_read_depth(ep->com.dev)); 2271 ep->ord = ntohs(mpa_v2_params->ord) & 2272 MPA_V2_IRD_ORD_MASK; 2273 ep->ord = min_t(u32, ep->ord, 2274 cur_max_read_depth(ep->com.dev)); 2275 CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u\n", 2276 __func__, ep->ird, ep->ord); 2277 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) 2278 if (peer2peer) { 2279 if (ntohs(mpa_v2_params->ord) & 2280 MPA_V2_RDMA_WRITE_RTR) 2281 ep->mpa_attr.p2p_type = 2282 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 2283 else if (ntohs(mpa_v2_params->ord) & 2284 MPA_V2_RDMA_READ_RTR) 2285 ep->mpa_attr.p2p_type = 2286 FW_RI_INIT_P2PTYPE_READ_REQ; 2287 } 2288 } 2289 } else if (mpa->revision == 1 && peer2peer) 2290 ep->mpa_attr.p2p_type = p2p_type; 2291 2292 if (set_tcpinfo(ep)) 2293 goto err_stop_timer; 2294 2295 CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, " 2296 "xmit_marker_enabled = %d, version = %d", __func__, 2297 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 2298 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); 2299 2300 ep->com.state = MPA_REQ_RCVD; 2301 STOP_EP_TIMER(ep); 2302 2303 /* drive upcall */ 2304 if (ep->parent_ep->com.state != DEAD) 2305 if (connect_request_upcall(ep)) 2306 goto err_out; 2307 return 0; 2308 2309 err_stop_timer: 2310 STOP_EP_TIMER(ep); 2311 err_out: 2312 return 2; 2313 } 2314 2315 /* 2316 * Upcall from the adapter indicating data has been transmitted. 2317 * For us its just the single MPA request or reply. We can now free 2318 * the skb holding the mpa message. 2319 */ 2320 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 2321 { 2322 int err; 2323 struct c4iw_ep *ep = to_ep(cm_id); 2324 int abort = 0; 2325 2326 mutex_lock(&ep->com.mutex); 2327 CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep); 2328 2329 if ((ep->com.state == DEAD) || 2330 (ep->com.state != MPA_REQ_RCVD)) { 2331 2332 CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep); 2333 mutex_unlock(&ep->com.mutex); 2334 c4iw_put_ep(&ep->com); 2335 return -ECONNRESET; 2336 } 2337 set_bit(ULP_REJECT, &ep->com.history); 2338 2339 if (mpa_rev == 0) { 2340 2341 CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep); 2342 abort = 1; 2343 } 2344 else { 2345 2346 CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep); 2347 abort = send_mpa_reject(ep, pdata, pdata_len); 2348 } 2349 STOP_EP_TIMER(ep); 2350 err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); 2351 mutex_unlock(&ep->com.mutex); 2352 c4iw_put_ep(&ep->com); 2353 CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err); 2354 return 0; 2355 } 2356 2357 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2358 { 2359 int err; 2360 struct c4iw_qp_attributes attrs = {0}; 2361 enum c4iw_qp_attr_mask mask; 2362 struct c4iw_ep *ep = to_ep(cm_id); 2363 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 2364 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 2365 int abort = 0; 2366 2367 mutex_lock(&ep->com.mutex); 2368 CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep); 2369 2370 if ((ep->com.state == DEAD) || 2371 (ep->com.state != MPA_REQ_RCVD)) { 2372 2373 CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep); 2374 err = -ECONNRESET; 2375 goto err_out; 2376 } 2377 2378 BUG_ON(!qp); 2379 2380 set_bit(ULP_ACCEPT, &ep->com.history); 2381 2382 if ((conn_param->ord > c4iw_max_read_depth) || 2383 
(conn_param->ird > c4iw_max_read_depth)) { 2384 2385 CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep); 2386 err = -EINVAL; 2387 goto err_abort; 2388 } 2389 2390 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 2391 2392 CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep); 2393 2394 if (conn_param->ord > ep->ird) { 2395 if (RELAXED_IRD_NEGOTIATION) { 2396 conn_param->ord = ep->ird; 2397 } else { 2398 ep->ird = conn_param->ird; 2399 ep->ord = conn_param->ord; 2400 send_mpa_reject(ep, conn_param->private_data, 2401 conn_param->private_data_len); 2402 err = -ENOMEM; 2403 goto err_abort; 2404 } 2405 } 2406 if (conn_param->ird < ep->ord) { 2407 if (RELAXED_IRD_NEGOTIATION && 2408 ep->ord <= h->rdev.adap->params.max_ordird_qp) { 2409 conn_param->ird = ep->ord; 2410 } else { 2411 err = -ENOMEM; 2412 goto err_abort; 2413 } 2414 } 2415 } 2416 ep->ird = conn_param->ird; 2417 ep->ord = conn_param->ord; 2418 2419 if (ep->mpa_attr.version == 1) { 2420 if (peer2peer && ep->ird == 0) 2421 ep->ird = 1; 2422 } else { 2423 if (peer2peer && 2424 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && 2425 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0) 2426 ep->ird = 1; 2427 } 2428 2429 CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d\n", __func__, __LINE__, 2430 ep->ird, ep->ord); 2431 2432 ep->com.cm_id = cm_id; 2433 ref_cm_id(&ep->com); 2434 ep->com.qp = qp; 2435 ref_qp(ep); 2436 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; 2437 2438 /* bind QP to EP and move to RTS */ 2439 attrs.mpa_attr = ep->mpa_attr; 2440 attrs.max_ird = ep->ird; 2441 attrs.max_ord = ep->ord; 2442 attrs.llp_stream_handle = ep; 2443 attrs.next_state = C4IW_QP_STATE_RTS; 2444 2445 /* bind QP and TID with INIT_WR */ 2446 mask = C4IW_QP_ATTR_NEXT_STATE | 2447 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 2448 C4IW_QP_ATTR_MPA_ATTR | 2449 C4IW_QP_ATTR_MAX_IRD | 2450 C4IW_QP_ATTR_MAX_ORD; 2451 2452 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); 2453 if (err) { 2454 CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err); 2455 goto err_defef_cm_id; 2456 } 2457 2458 err = send_mpa_reply(ep, conn_param->private_data, 2459 conn_param->private_data_len); 2460 if (err) { 2461 CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err); 2462 goto err_defef_cm_id; 2463 } 2464 2465 ep->com.state = FPDU_MODE; 2466 established_upcall(ep); 2467 mutex_unlock(&ep->com.mutex); 2468 c4iw_put_ep(&ep->com); 2469 CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep); 2470 return 0; 2471 err_defef_cm_id: 2472 deref_cm_id(&ep->com); 2473 err_abort: 2474 abort = 1; 2475 err_out: 2476 if (abort) 2477 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 2478 mutex_unlock(&ep->com.mutex); 2479 c4iw_put_ep(&ep->com); 2480 CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep); 2481 return err; 2482 } 2483 2484 static int 2485 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so) 2486 { 2487 int ret; 2488 int size; 2489 struct socket *sock = NULL; 2490 2491 ret = sock_create_kern(laddr->ss_family, 2492 SOCK_STREAM, IPPROTO_TCP, &sock); 2493 if (ret) { 2494 CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d", 2495 __func__, ret); 2496 return ret; 2497 } 2498 2499 ret = sobind(sock, (struct sockaddr *)laddr, curthread); 2500 if (ret) { 2501 CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %p", 2502 __func__, ret); 2503 sock_release(sock); 2504 return ret; 2505 } 2506 2507 size = laddr->ss_family == AF_INET6 ? 
2508 sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in); 2509 ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0); 2510 if (ret) { 2511 CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %p", 2512 __func__, ret); 2513 sock_release(sock); 2514 return ret; 2515 } 2516 2517 *so = sock; 2518 return 0; 2519 } 2520 2521 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2522 { 2523 int err = 0; 2524 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2525 struct c4iw_ep *ep = NULL; 2526 struct ifnet *nh_ifp; /* Logical egress interface */ 2527 #ifdef VIMAGE 2528 struct rdma_cm_id *rdma_id = (struct rdma_cm_id*)cm_id->context; 2529 struct vnet *vnet = rdma_id->route.addr.dev_addr.net; 2530 #endif 2531 2532 CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id); 2533 2534 2535 if ((conn_param->ord > c4iw_max_read_depth) || 2536 (conn_param->ird > c4iw_max_read_depth)) { 2537 2538 CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id); 2539 err = -EINVAL; 2540 goto out; 2541 } 2542 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2543 2544 init_timer(&ep->timer); 2545 ep->plen = conn_param->private_data_len; 2546 2547 if (ep->plen) { 2548 2549 CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep); 2550 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 2551 conn_param->private_data, ep->plen); 2552 } 2553 ep->ird = conn_param->ird; 2554 ep->ord = conn_param->ord; 2555 2556 if (peer2peer && ep->ord == 0) { 2557 2558 CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep); 2559 ep->ord = 1; 2560 } 2561 2562 ep->com.dev = dev; 2563 ep->com.cm_id = cm_id; 2564 ref_cm_id(&ep->com); 2565 ep->com.qp = get_qhp(dev, conn_param->qpn); 2566 2567 if (!ep->com.qp) { 2568 2569 CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep); 2570 err = -EINVAL; 2571 goto fail; 2572 } 2573 ref_qp(ep); 2574 ep->com.thread = curthread; 2575 2576 CURVNET_SET(vnet); 2577 err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp); 2578 CURVNET_RESTORE(); 2579 2580 if (err) { 2581 2582 CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep); 2583 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 2584 err = EHOSTUNREACH; 2585 return err; 2586 } 2587 2588 if (!(nh_ifp->if_capenable & IFCAP_TOE) || 2589 TOEDEV(nh_ifp) == NULL) { 2590 err = -ENOPROTOOPT; 2591 goto fail; 2592 } 2593 ep->com.state = CONNECTING; 2594 ep->tos = 0; 2595 ep->com.local_addr = cm_id->local_addr; 2596 ep->com.remote_addr = cm_id->remote_addr; 2597 2598 err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so); 2599 if (err) 2600 goto fail; 2601 2602 setiwsockopt(ep->com.so); 2603 err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr, 2604 ep->com.thread); 2605 if (!err) { 2606 init_iwarp_socket(ep->com.so, &ep->com); 2607 goto out; 2608 } else 2609 goto fail_free_so; 2610 2611 fail_free_so: 2612 sock_release(ep->com.so); 2613 fail: 2614 deref_cm_id(&ep->com); 2615 c4iw_put_ep(&ep->com); 2616 ep = NULL; 2617 out: 2618 CTR2(KTR_IW_CXGBE, "%s:ccE ret:%d", __func__, err); 2619 return err; 2620 } 2621 2622 /* 2623 * iwcm->create_listen. Returns -errno on failure. 
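 * For wildcard (INADDR_ANY) listens, the underlying listening socket is created once and shared across adapters; see the comment in the function body below.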
2624 */ 2625 int 2626 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 2627 { 2628 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2629 struct c4iw_listen_ep *lep = NULL; 2630 struct listen_port_info *port_info = NULL; 2631 int rc = 0; 2632 2633 CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id, 2634 backlog); 2635 lep = alloc_ep(sizeof(*lep), GFP_KERNEL); 2636 lep->com.cm_id = cm_id; 2637 ref_cm_id(&lep->com); 2638 lep->com.dev = dev; 2639 lep->backlog = backlog; 2640 lep->com.local_addr = cm_id->local_addr; 2641 lep->com.thread = curthread; 2642 cm_id->provider_data = lep; 2643 lep->com.state = LISTEN; 2644 2645 /* In case of INADDR_ANY, ibcore creates a cm_id for each device and 2646 * invokes the iw_cxgbe listener callbacks assuming that iw_cxgbe creates 2647 * HW listeners for each device separately. But toecore expects a single 2648 * solisten() call with the INADDR_ANY address to create HW listeners on 2649 * all devices for a given port number. So the iw_cxgbe driver calls 2650 * solisten() only once for INADDR_ANY (usually at the first 2651 * listener callback from ibcore). All subsequent INADDR_ANY 2652 * listener callbacks from ibcore (for the same port address) do not 2653 * invoke solisten(), as the first listener callback has already created 2654 * listeners for all the other devices (via solisten). 2655 */ 2656 if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) { 2657 port_info = add_ep_to_listenlist(lep); 2658 /* skip solisten() if refcnt > 1, as the listeners were 2659 * already created by the 'Master lep' 2660 */ 2661 if (port_info->refcnt > 1) { 2662 /* As there will be only one listener socket for a TCP 2663 * port, copy the Master lep's socket pointer to the other leps 2664 * that belong to the same TCP port. 2665 */ 2666 struct c4iw_listen_ep *head_lep = 2667 container_of(port_info->lep_list.next, 2668 struct c4iw_listen_ep, listen_ep_list); 2669 lep->com.so = head_lep->com.so; 2670 goto out; 2671 } 2672 } 2673 rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so); 2674 if (rc) { 2675 CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d", 2676 __func__, rc); 2677 goto fail; 2678 } 2679 2680 rc = solisten(lep->com.so, backlog, curthread); 2681 if (rc) { 2682 CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. 
err %d", 2683 __func__, lep->com.so, rc); 2684 goto fail_free_so; 2685 } 2686 init_iwarp_socket(lep->com.so, &lep->com); 2687 out: 2688 return 0; 2689 2690 fail_free_so: 2691 sock_release(lep->com.so); 2692 fail: 2693 if (port_info) 2694 rem_ep_from_listenlist(lep); 2695 deref_cm_id(&lep->com); 2696 c4iw_put_ep(&lep->com); 2697 return rc; 2698 } 2699 2700 int 2701 c4iw_destroy_listen(struct iw_cm_id *cm_id) 2702 { 2703 struct c4iw_listen_ep *lep = to_listen_ep(cm_id); 2704 2705 mutex_lock(&lep->com.mutex); 2706 CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id, 2707 states[lep->com.state]); 2708 2709 lep->com.state = DEAD; 2710 if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) { 2711 /* if no refcount then close listen socket */ 2712 if (!rem_ep_from_listenlist(lep)) 2713 close_socket(lep->com.so); 2714 } else 2715 close_socket(lep->com.so); 2716 deref_cm_id(&lep->com); 2717 mutex_unlock(&lep->com.mutex); 2718 c4iw_put_ep(&lep->com); 2719 return 0; 2720 } 2721 2722 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2723 { 2724 int ret; 2725 mutex_lock(&ep->com.mutex); 2726 ret = c4iw_ep_disconnect(ep, abrupt, gfp); 2727 mutex_unlock(&ep->com.mutex); 2728 return ret; 2729 } 2730 2731 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2732 { 2733 int ret = 0; 2734 int close = 0; 2735 int fatal = 0; 2736 struct c4iw_rdev *rdev; 2737 2738 2739 CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep); 2740 2741 rdev = &ep->com.dev->rdev; 2742 2743 if (c4iw_fatal_error(rdev)) { 2744 2745 CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep); 2746 fatal = 1; 2747 close_complete_upcall(ep, -ECONNRESET); 2748 send_abort(ep); 2749 ep->com.state = DEAD; 2750 } 2751 CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep, 2752 states[ep->com.state]); 2753 2754 /* 2755 * Ref the ep here in case we have fatal errors causing the 2756 * ep to be released and freed. 
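 * The reference taken below is dropped by the c4iw_put_ep() at the end of this function.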
2757 */ 2758 c4iw_get_ep(&ep->com); 2759 switch (ep->com.state) { 2760 2761 case MPA_REQ_WAIT: 2762 case MPA_REQ_SENT: 2763 case MPA_REQ_RCVD: 2764 case MPA_REP_SENT: 2765 case FPDU_MODE: 2766 close = 1; 2767 if (abrupt) 2768 ep->com.state = ABORTING; 2769 else { 2770 ep->com.state = CLOSING; 2771 START_EP_TIMER(ep); 2772 } 2773 set_bit(CLOSE_SENT, &ep->com.flags); 2774 break; 2775 2776 case CLOSING: 2777 2778 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 2779 2780 close = 1; 2781 if (abrupt) { 2782 STOP_EP_TIMER(ep); 2783 ep->com.state = ABORTING; 2784 } else 2785 ep->com.state = MORIBUND; 2786 } 2787 break; 2788 2789 case MORIBUND: 2790 case ABORTING: 2791 case DEAD: 2792 CTR3(KTR_IW_CXGBE, 2793 "%s ignoring disconnect ep %p state %u", __func__, 2794 ep, ep->com.state); 2795 break; 2796 2797 default: 2798 BUG(); 2799 break; 2800 } 2801 2802 2803 if (close) { 2804 2805 CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep); 2806 2807 if (abrupt) { 2808 2809 CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep); 2810 set_bit(EP_DISC_ABORT, &ep->com.history); 2811 close_complete_upcall(ep, -ECONNRESET); 2812 ret = send_abort(ep); 2813 if (ret) 2814 fatal = 1; 2815 } else { 2816 2817 CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep); 2818 set_bit(EP_DISC_CLOSE, &ep->com.history); 2819 2820 if (!ep->parent_ep) 2821 ep->com.state = MORIBUND; 2822 2823 CURVNET_SET(ep->com.so->so_vnet); 2824 sodisconnect(ep->com.so); 2825 CURVNET_RESTORE(); 2826 } 2827 2828 } 2829 2830 if (fatal) { 2831 set_bit(EP_DISC_FAIL, &ep->com.history); 2832 if (!abrupt) { 2833 STOP_EP_TIMER(ep); 2834 close_complete_upcall(ep, -EIO); 2835 } 2836 if (ep->com.qp) { 2837 struct c4iw_qp_attributes attrs = {0}; 2838 2839 attrs.next_state = C4IW_QP_STATE_ERROR; 2840 ret = c4iw_modify_qp(ep->com.dev, ep->com.qp, 2841 C4IW_QP_ATTR_NEXT_STATE, 2842 &attrs, 1); 2843 if (ret) { 2844 CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep); 2845 printf("%s - qp <- error failed!\n", __func__); 2846 } 2847 } 2848 release_ep_resources(ep); 2849 ep->com.state = DEAD; 2850 CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep); 2851 } 2852 c4iw_put_ep(&ep->com); 2853 CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep); 2854 return ret; 2855 } 2856 2857 #ifdef C4IW_EP_REDIRECT 2858 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, 2859 struct l2t_entry *l2t) 2860 { 2861 struct c4iw_ep *ep = ctx; 2862 2863 if (ep->dst != old) 2864 return 0; 2865 2866 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, 2867 l2t); 2868 dst_hold(new); 2869 cxgb4_l2t_release(ep->l2t); 2870 ep->l2t = l2t; 2871 dst_release(old); 2872 ep->dst = new; 2873 return 1; 2874 } 2875 #endif 2876 2877 2878 2879 static void ep_timeout(unsigned long arg) 2880 { 2881 struct c4iw_ep *ep = (struct c4iw_ep *)arg; 2882 2883 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 2884 2885 /* 2886 * Only insert if it is not already on the list. 2887 */ 2888 if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) { 2889 CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep); 2890 add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT); 2891 } 2892 } 2893 } 2894 2895 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl) 2896 { 2897 uint64_t val = be64toh(*rpl); 2898 int ret; 2899 struct c4iw_wr_wait *wr_waitp; 2900 2901 ret = (int)((val >> 8) & 0xff); 2902 wr_waitp = (struct c4iw_wr_wait *)rpl[1]; 2903 CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret); 2904 if (wr_waitp) 2905 c4iw_wake_up(wr_waitp, ret ? 
-ret : 0); 2906 2907 return (0); 2908 } 2909 2910 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl) 2911 { 2912 struct cqe_list_entry *cle; 2913 unsigned long flag; 2914 2915 cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT); 2916 cle->rhp = sc->iwarp_softc; 2917 cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]); 2918 2919 spin_lock_irqsave(&err_cqe_lock, flag); 2920 list_add_tail(&cle->entry, &err_cqe_list); 2921 queue_work(c4iw_taskq, &c4iw_task); 2922 spin_unlock_irqrestore(&err_cqe_lock, flag); 2923 2924 return (0); 2925 } 2926 2927 static int 2928 process_terminate(struct c4iw_ep *ep) 2929 { 2930 struct c4iw_qp_attributes attrs = {0}; 2931 2932 CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep); 2933 2934 if (ep && ep->com.qp) { 2935 2936 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", 2937 ep->hwtid, ep->com.qp->wq.sq.qid); 2938 attrs.next_state = C4IW_QP_STATE_TERMINATE; 2939 c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 2940 1); 2941 } else 2942 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", 2943 ep->hwtid); 2944 CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep); 2945 2946 return 0; 2947 } 2948 2949 int __init c4iw_cm_init(void) 2950 { 2951 2952 t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate); 2953 t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl); 2954 t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler); 2955 t4_register_an_handler(c4iw_ev_handler); 2956 2957 TAILQ_INIT(&req_list); 2958 spin_lock_init(&req_lock); 2959 INIT_LIST_HEAD(&err_cqe_list); 2960 spin_lock_init(&err_cqe_lock); 2961 2962 INIT_WORK(&c4iw_task, process_req); 2963 2964 c4iw_taskq = create_singlethread_workqueue("iw_cxgbe"); 2965 if (!c4iw_taskq) 2966 return -ENOMEM; 2967 2968 return 0; 2969 } 2970 2971 void __exit c4iw_cm_term(void) 2972 { 2973 WARN_ON(!TAILQ_EMPTY(&req_list)); 2974 WARN_ON(!list_empty(&err_cqe_list)); 2975 flush_workqueue(c4iw_taskq); 2976 destroy_workqueue(c4iw_taskq); 2977 2978 t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL); 2979 t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL); 2980 t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL); 2981 t4_register_an_handler(NULL); 2982 } 2983 #endif 2984