1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_inet.h" 38 39 #ifdef TCP_OFFLOAD 40 #include <sys/types.h> 41 #include <sys/malloc.h> 42 #include <sys/socket.h> 43 #include <sys/socketvar.h> 44 #include <sys/sockio.h> 45 #include <sys/taskqueue.h> 46 #include <netinet/in.h> 47 #include <net/route.h> 48 49 #include <netinet/in_systm.h> 50 #include <netinet/in_pcb.h> 51 #include <netinet6/in6_pcb.h> 52 #include <netinet/ip.h> 53 #include <netinet/in_fib.h> 54 #include <netinet6/in6_fib.h> 55 #include <netinet6/scope6_var.h> 56 #include <netinet/ip_var.h> 57 #include <netinet/tcp_var.h> 58 #include <netinet/tcp.h> 59 #include <netinet/tcpip.h> 60 61 #include <netinet/toecore.h> 62 63 struct sge_iq; 64 struct rss_header; 65 struct cpl_set_tcb_rpl; 66 #include <linux/types.h> 67 #include "offload.h" 68 #include "tom/t4_tom.h" 69 70 #define TOEPCB(so) ((struct toepcb *)(so_sototcpcb((so))->t_toe)) 71 72 #include "iw_cxgbe.h" 73 #include <linux/module.h> 74 #include <linux/workqueue.h> 75 #include <linux/notifier.h> 76 #include <linux/inetdevice.h> 77 #include <linux/if_vlan.h> 78 #include <net/netevent.h> 79 #include <rdma/rdma_cm.h> 80 81 static spinlock_t req_lock; 82 static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list; 83 static struct work_struct c4iw_task; 84 static struct workqueue_struct *c4iw_taskq; 85 static LIST_HEAD(err_cqe_list); 86 static spinlock_t err_cqe_lock; 87 static LIST_HEAD(listen_port_list); 88 static DEFINE_MUTEX(listen_port_mutex); 89 90 static void process_req(struct work_struct *ctx); 91 static void start_ep_timer(struct c4iw_ep *ep); 92 static int stop_ep_timer(struct c4iw_ep *ep); 93 static int set_tcpinfo(struct c4iw_ep *ep); 94 static void process_timeout(struct c4iw_ep *ep); 95 static void process_err_cqes(void); 96 static void *alloc_ep(int size, gfp_t flags); 97 static void close_socket(struct socket *so); 98 static int send_mpa_req(struct c4iw_ep *ep); 99 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen); 100 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen); 101 static void close_complete_upcall(struct c4iw_ep *ep, int status); 102 static int send_abort(struct c4iw_ep *ep); 103 static void peer_close_upcall(struct c4iw_ep *ep); 104 static void peer_abort_upcall(struct c4iw_ep *ep); 105 static void connect_reply_upcall(struct c4iw_ep *ep, int status); 106 static int connect_request_upcall(struct c4iw_ep *ep); 107 static void established_upcall(struct c4iw_ep *ep); 108 static int process_mpa_reply(struct c4iw_ep *ep); 109 static int process_mpa_request(struct c4iw_ep *ep); 110 static void process_peer_close(struct c4iw_ep *ep); 111 static void process_conn_error(struct c4iw_ep *ep); 112 static void process_close_complete(struct c4iw_ep *ep); 113 static void ep_timeout(unsigned long arg); 114 static void setiwsockopt(struct socket *so); 115 static void init_iwarp_socket(struct socket *so, void *arg); 116 static void uninit_iwarp_socket(struct socket *so); 117 static void process_data(struct c4iw_ep *ep); 118 static void process_connected(struct c4iw_ep *ep); 119 static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag); 120 static void process_socket_event(struct c4iw_ep *ep); 121 static void release_ep_resources(struct c4iw_ep *ep); 122 static int process_terminate(struct c4iw_ep *ep); 123 static int terminate(struct sge_iq *iq, const struct rss_header *rss, 124 struct mbuf *m); 125 static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events); 126 static struct listen_port_info * 127 add_ep_to_listenlist(struct c4iw_listen_ep *lep); 128 static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep); 129 static struct c4iw_listen_ep * 130 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so); 131 static int get_ifnet_from_raddr(struct sockaddr_storage *raddr, 132 struct ifnet **ifp); 133 static void process_newconn(struct c4iw_listen_ep *master_lep, 134 struct socket *new_so); 135 #define START_EP_TIMER(ep) \ 136 do { \ 137 CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \ 138 __func__, __LINE__, (ep)); \ 139 start_ep_timer(ep); \ 140 } while (0) 141 142 #define STOP_EP_TIMER(ep) \ 143 ({ \ 144 CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \ 145 __func__, __LINE__, (ep)); \ 146 stop_ep_timer(ep); \ 147 }) 148 149 #define GET_LOCAL_ADDR(pladdr, so) \ 150 do { \ 151 struct sockaddr_storage *__a = NULL; \ 152 struct inpcb *__inp = sotoinpcb(so); \ 153 KASSERT(__inp != NULL, \ 154 ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \ 155 if (__inp->inp_vflag & INP_IPV4) \ 156 in_getsockaddr(so, (struct sockaddr **)&__a); \ 157 else \ 158 in6_getsockaddr(so, (struct sockaddr **)&__a); \ 159 *(pladdr) = *__a; \ 160 free(__a, M_SONAME); \ 161 } while (0) 162 163 #define GET_REMOTE_ADDR(praddr, so) \ 164 do { \ 165 struct sockaddr_storage *__a = NULL; \ 166 struct inpcb *__inp = sotoinpcb(so); \ 167 KASSERT(__inp != NULL, \ 168 ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \ 169 if (__inp->inp_vflag & INP_IPV4) \ 170 in_getpeeraddr(so, (struct sockaddr **)&__a); \ 171 else \ 172 in6_getpeeraddr(so, (struct sockaddr **)&__a); \ 173 *(praddr) = *__a; \ 174 free(__a, M_SONAME); \ 175 } while (0) 176 177 static char *states[] = { 178 "idle", 179 "listen", 180 "connecting", 181 "mpa_wait_req", 182 "mpa_req_sent", 183 "mpa_req_rcvd", 184 "mpa_rep_sent", 185 "fpdu_mode", 186 "aborting", 187 "closing", 188 "moribund", 189 "dead", 190 NULL, 191 }; 192 193 static void deref_cm_id(struct c4iw_ep_common *epc) 194 { 195 epc->cm_id->rem_ref(epc->cm_id); 196 epc->cm_id = NULL; 197 set_bit(CM_ID_DEREFED, &epc->history); 198 } 199 200 static void ref_cm_id(struct c4iw_ep_common *epc) 201 { 202 set_bit(CM_ID_REFED, &epc->history); 203 epc->cm_id->add_ref(epc->cm_id); 204 } 205 206 static void deref_qp(struct c4iw_ep *ep) 207 { 208 c4iw_qp_rem_ref(&ep->com.qp->ibqp); 209 clear_bit(QP_REFERENCED, &ep->com.flags); 210 set_bit(QP_DEREFED, &ep->com.history); 211 } 212 213 static void ref_qp(struct c4iw_ep *ep) 214 { 215 set_bit(QP_REFERENCED, &ep->com.flags); 216 set_bit(QP_REFED, &ep->com.history); 217 c4iw_qp_add_ref(&ep->com.qp->ibqp); 218 } 219 /* allocated per TCP port while listening */ 220 struct listen_port_info { 221 uint16_t port_num; /* TCP port address */ 222 struct list_head list; /* belongs to listen_port_list */ 223 struct list_head lep_list; /* per port lep list */ 224 uint32_t refcnt; /* number of lep's listening */ 225 }; 226 227 /* 228 * Following two lists are used to manage INADDR_ANY listeners: 229 * 1)listen_port_list 230 * 2)lep_list 231 * 232 * Below is the INADDR_ANY listener lists overview on a system with a two port 233 * adapter: 234 * |------------------| 235 * |listen_port_list | 236 * |------------------| 237 * | 238 * | |-----------| |-----------| 239 * | | port_num:X| | port_num:X| 240 * |--------------|-list------|-------|-list------|-------.... 241 * | lep_list----| | lep_list----| 242 * | refcnt | | | refcnt | | 243 * | | | | | | 244 * | | | | | | 245 * |-----------| | |-----------| | 246 * | | 247 * | | 248 * | | 249 * | | lep1 lep2 250 * | | |----------------| |----------------| 251 * | |----| listen_ep_list |----| listen_ep_list | 252 * | |----------------| |----------------| 253 * | 254 * | 255 * | lep1 lep2 256 * | |----------------| |----------------| 257 * |---| listen_ep_list |----| listen_ep_list | 258 * |----------------| |----------------| 259 * 260 * Because of two port adapter, the number of lep's are two(lep1 & lep2) for 261 * each TCP port number. 262 * 263 * Here 'lep1' is always marked as Master lep, because solisten() is always 264 * called through first lep. 265 * 266 */ 267 static struct listen_port_info * 268 add_ep_to_listenlist(struct c4iw_listen_ep *lep) 269 { 270 uint16_t port; 271 struct listen_port_info *port_info = NULL; 272 struct sockaddr_storage *laddr = &lep->com.local_addr; 273 274 port = (laddr->ss_family == AF_INET) ? 275 ((struct sockaddr_in *)laddr)->sin_port : 276 ((struct sockaddr_in6 *)laddr)->sin6_port; 277 278 mutex_lock(&listen_port_mutex); 279 280 list_for_each_entry(port_info, &listen_port_list, list) 281 if (port_info->port_num == port) 282 goto found_port; 283 284 port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK); 285 port_info->port_num = port; 286 port_info->refcnt = 0; 287 288 list_add_tail(&port_info->list, &listen_port_list); 289 INIT_LIST_HEAD(&port_info->lep_list); 290 291 found_port: 292 port_info->refcnt++; 293 list_add_tail(&lep->listen_ep_list, &port_info->lep_list); 294 mutex_unlock(&listen_port_mutex); 295 return port_info; 296 } 297 298 static int 299 rem_ep_from_listenlist(struct c4iw_listen_ep *lep) 300 { 301 uint16_t port; 302 struct listen_port_info *port_info = NULL; 303 struct sockaddr_storage *laddr = &lep->com.local_addr; 304 int refcnt = 0; 305 306 port = (laddr->ss_family == AF_INET) ? 307 ((struct sockaddr_in *)laddr)->sin_port : 308 ((struct sockaddr_in6 *)laddr)->sin6_port; 309 310 mutex_lock(&listen_port_mutex); 311 312 /* get the port_info structure based on the lep's port address */ 313 list_for_each_entry(port_info, &listen_port_list, list) { 314 if (port_info->port_num == port) { 315 port_info->refcnt--; 316 refcnt = port_info->refcnt; 317 /* remove the current lep from the listen list */ 318 list_del(&lep->listen_ep_list); 319 if (port_info->refcnt == 0) { 320 /* Remove this entry from the list as there 321 * are no more listeners for this port_num. 322 */ 323 list_del(&port_info->list); 324 kfree(port_info); 325 } 326 break; 327 } 328 } 329 mutex_unlock(&listen_port_mutex); 330 return refcnt; 331 } 332 333 /* 334 * Find the lep that belongs to the ifnet on which the SYN frame was received. 335 */ 336 struct c4iw_listen_ep * 337 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so) 338 { 339 struct adapter *adap = NULL; 340 struct c4iw_listen_ep *lep = NULL; 341 struct ifnet *ifp = NULL, *hw_ifp = NULL; 342 struct listen_port_info *port_info = NULL; 343 int i = 0, found_portinfo = 0, found_lep = 0; 344 uint16_t port; 345 346 /* 347 * STEP 1: Figure out 'ifp' of the physical interface, not pseudo 348 * interfaces like vlan, lagg, etc.. 349 * TBD: lagg support, lagg + vlan support. 350 */ 351 ifp = TOEPCB(so)->l2te->ifp; 352 if (ifp->if_type == IFT_L2VLAN) { 353 hw_ifp = VLAN_TRUNKDEV(ifp); 354 if (hw_ifp == NULL) { 355 CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of " 356 "vlan ifnet %p, sock %p, master_lep %p", 357 __func__, ifp, so, master_lep); 358 return (NULL); 359 } 360 } else 361 hw_ifp = ifp; 362 363 /* STEP 2: Find 'port_info' with listener local port address. */ 364 port = (master_lep->com.local_addr.ss_family == AF_INET) ? 365 ((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port : 366 ((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port; 367 368 369 mutex_lock(&listen_port_mutex); 370 list_for_each_entry(port_info, &listen_port_list, list) 371 if (port_info->port_num == port) { 372 found_portinfo =1; 373 break; 374 } 375 if (!found_portinfo) 376 goto out; 377 378 /* STEP 3: Traverse through list of lep's that are bound to the current 379 * TCP port address and find the lep that belongs to the ifnet on which 380 * the SYN frame was received. 381 */ 382 list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) { 383 adap = lep->com.dev->rdev.adap; 384 for_each_port(adap, i) { 385 if (hw_ifp == adap->port[i]->vi[0].ifp) { 386 found_lep =1; 387 goto out; 388 } 389 } 390 } 391 out: 392 mutex_unlock(&listen_port_mutex); 393 return found_lep ? lep : (NULL); 394 } 395 396 static void process_timeout(struct c4iw_ep *ep) 397 { 398 struct c4iw_qp_attributes attrs = {0}; 399 int abort = 1; 400 401 CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__, 402 ep, ep->hwtid, ep->com.state); 403 set_bit(TIMEDOUT, &ep->com.history); 404 switch (ep->com.state) { 405 case MPA_REQ_SENT: 406 connect_reply_upcall(ep, -ETIMEDOUT); 407 break; 408 case MPA_REQ_WAIT: 409 case MPA_REQ_RCVD: 410 case MPA_REP_SENT: 411 case FPDU_MODE: 412 break; 413 case CLOSING: 414 case MORIBUND: 415 if (ep->com.cm_id && ep->com.qp) { 416 attrs.next_state = C4IW_QP_STATE_ERROR; 417 c4iw_modify_qp(ep->com.dev, ep->com.qp, 418 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 419 } 420 close_complete_upcall(ep, -ETIMEDOUT); 421 break; 422 case ABORTING: 423 case DEAD: 424 /* 425 * These states are expected if the ep timed out at the same 426 * time as another thread was calling stop_ep_timer(). 427 * So we silently do nothing for these states. 428 */ 429 abort = 0; 430 break; 431 default: 432 CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u" 433 , __func__, ep, ep->hwtid, ep->com.state); 434 abort = 0; 435 } 436 if (abort) 437 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 438 c4iw_put_ep(&ep->com); 439 return; 440 } 441 442 struct cqe_list_entry { 443 struct list_head entry; 444 struct c4iw_dev *rhp; 445 struct t4_cqe err_cqe; 446 }; 447 448 static void 449 process_err_cqes(void) 450 { 451 unsigned long flag; 452 struct cqe_list_entry *cle; 453 454 spin_lock_irqsave(&err_cqe_lock, flag); 455 while (!list_empty(&err_cqe_list)) { 456 struct list_head *tmp; 457 tmp = err_cqe_list.next; 458 list_del(tmp); 459 tmp->next = tmp->prev = NULL; 460 spin_unlock_irqrestore(&err_cqe_lock, flag); 461 cle = list_entry(tmp, struct cqe_list_entry, entry); 462 c4iw_ev_dispatch(cle->rhp, &cle->err_cqe); 463 free(cle, M_CXGBE); 464 spin_lock_irqsave(&err_cqe_lock, flag); 465 } 466 spin_unlock_irqrestore(&err_cqe_lock, flag); 467 468 return; 469 } 470 471 static void 472 process_req(struct work_struct *ctx) 473 { 474 struct c4iw_ep_common *epc; 475 unsigned long flag; 476 int ep_events; 477 478 process_err_cqes(); 479 spin_lock_irqsave(&req_lock, flag); 480 while (!TAILQ_EMPTY(&req_list)) { 481 epc = TAILQ_FIRST(&req_list); 482 TAILQ_REMOVE(&req_list, epc, entry); 483 epc->entry.tqe_prev = NULL; 484 ep_events = epc->ep_events; 485 epc->ep_events = 0; 486 spin_unlock_irqrestore(&req_lock, flag); 487 mutex_lock(&epc->mutex); 488 CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x", 489 __func__, epc->so, epc, states[epc->state], ep_events); 490 if (ep_events & C4IW_EVENT_TERM) 491 process_terminate((struct c4iw_ep *)epc); 492 if (ep_events & C4IW_EVENT_TIMEOUT) 493 process_timeout((struct c4iw_ep *)epc); 494 if (ep_events & C4IW_EVENT_SOCKET) 495 process_socket_event((struct c4iw_ep *)epc); 496 mutex_unlock(&epc->mutex); 497 c4iw_put_ep(epc); 498 process_err_cqes(); 499 spin_lock_irqsave(&req_lock, flag); 500 } 501 spin_unlock_irqrestore(&req_lock, flag); 502 } 503 504 /* 505 * XXX: doesn't belong here in the iWARP driver. 506 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is 507 * set. Is this a valid assumption for active open? 508 */ 509 static int 510 set_tcpinfo(struct c4iw_ep *ep) 511 { 512 struct socket *so = ep->com.so; 513 struct inpcb *inp = sotoinpcb(so); 514 struct tcpcb *tp; 515 struct toepcb *toep; 516 int rc = 0; 517 518 INP_WLOCK(inp); 519 tp = intotcpcb(inp); 520 if ((tp->t_flags & TF_TOE) == 0) { 521 rc = EINVAL; 522 log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n", 523 __func__, so, ep); 524 goto done; 525 } 526 toep = TOEPCB(so); 527 528 ep->hwtid = toep->tid; 529 ep->snd_seq = tp->snd_nxt; 530 ep->rcv_seq = tp->rcv_nxt; 531 done: 532 INP_WUNLOCK(inp); 533 return (rc); 534 535 } 536 static int 537 get_ifnet_from_raddr(struct sockaddr_storage *raddr, struct ifnet **ifp) 538 { 539 int err = 0; 540 541 if (raddr->ss_family == AF_INET) { 542 struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr; 543 struct nhop4_extended nh4 = {0}; 544 545 err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, raddr4->sin_addr, 546 NHR_REF, 0, &nh4); 547 *ifp = nh4.nh_ifp; 548 if (err) 549 fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4); 550 } else { 551 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr; 552 struct nhop6_extended nh6 = {0}; 553 struct in6_addr addr6; 554 uint32_t scopeid; 555 556 memset(&addr6, 0, sizeof(addr6)); 557 in6_splitscope((struct in6_addr *)&raddr6->sin6_addr, 558 &addr6, &scopeid); 559 err = fib6_lookup_nh_ext(RT_DEFAULT_FIB, &addr6, scopeid, 560 NHR_REF, 0, &nh6); 561 *ifp = nh6.nh_ifp; 562 if (err) 563 fib6_free_nh_ext(RT_DEFAULT_FIB, &nh6); 564 } 565 566 CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err); 567 return err; 568 } 569 570 static void 571 close_socket(struct socket *so) 572 { 573 uninit_iwarp_socket(so); 574 soclose(so); 575 } 576 577 static void 578 process_peer_close(struct c4iw_ep *ep) 579 { 580 struct c4iw_qp_attributes attrs = {0}; 581 int disconnect = 1; 582 int release = 0; 583 584 CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep, 585 ep->com.so, states[ep->com.state]); 586 587 switch (ep->com.state) { 588 589 case MPA_REQ_WAIT: 590 CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD", 591 __func__, ep); 592 /* Fallthrough */ 593 case MPA_REQ_SENT: 594 CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD", 595 __func__, ep); 596 ep->com.state = DEAD; 597 connect_reply_upcall(ep, -ECONNABORTED); 598 599 disconnect = 0; 600 STOP_EP_TIMER(ep); 601 close_socket(ep->com.so); 602 deref_cm_id(&ep->com); 603 release = 1; 604 break; 605 606 case MPA_REQ_RCVD: 607 608 /* 609 * We're gonna mark this puppy DEAD, but keep 610 * the reference on it until the ULP accepts or 611 * rejects the CR. 612 */ 613 CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING", 614 __func__, ep); 615 ep->com.state = CLOSING; 616 break; 617 618 case MPA_REP_SENT: 619 CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING", 620 __func__, ep); 621 ep->com.state = CLOSING; 622 break; 623 624 case FPDU_MODE: 625 CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING", 626 __func__, ep); 627 START_EP_TIMER(ep); 628 ep->com.state = CLOSING; 629 attrs.next_state = C4IW_QP_STATE_CLOSING; 630 c4iw_modify_qp(ep->com.dev, ep->com.qp, 631 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 632 peer_close_upcall(ep); 633 break; 634 635 case ABORTING: 636 CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)", 637 __func__, ep); 638 disconnect = 0; 639 break; 640 641 case CLOSING: 642 CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND", 643 __func__, ep); 644 ep->com.state = MORIBUND; 645 disconnect = 0; 646 break; 647 648 case MORIBUND: 649 CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__, 650 ep); 651 STOP_EP_TIMER(ep); 652 if (ep->com.cm_id && ep->com.qp) { 653 attrs.next_state = C4IW_QP_STATE_IDLE; 654 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 655 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 656 } 657 close_socket(ep->com.so); 658 close_complete_upcall(ep, 0); 659 ep->com.state = DEAD; 660 release = 1; 661 disconnect = 0; 662 break; 663 664 case DEAD: 665 CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)", 666 __func__, ep); 667 disconnect = 0; 668 break; 669 670 default: 671 panic("%s: ep %p state %d", __func__, ep, 672 ep->com.state); 673 break; 674 } 675 676 677 if (disconnect) { 678 679 CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep); 680 c4iw_ep_disconnect(ep, 0, M_NOWAIT); 681 } 682 if (release) { 683 684 CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep); 685 c4iw_put_ep(&ep->com); 686 } 687 CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep); 688 return; 689 } 690 691 static void 692 process_conn_error(struct c4iw_ep *ep) 693 { 694 struct c4iw_qp_attributes attrs = {0}; 695 int ret; 696 int state; 697 698 state = ep->com.state; 699 CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s", 700 __func__, ep, ep->com.so, ep->com.so->so_error, 701 states[ep->com.state]); 702 703 switch (state) { 704 705 case MPA_REQ_WAIT: 706 STOP_EP_TIMER(ep); 707 c4iw_put_ep(&ep->parent_ep->com); 708 break; 709 710 case MPA_REQ_SENT: 711 STOP_EP_TIMER(ep); 712 connect_reply_upcall(ep, -ECONNRESET); 713 break; 714 715 case MPA_REP_SENT: 716 ep->com.rpl_err = ECONNRESET; 717 CTR1(KTR_IW_CXGBE, "waking up ep %p", ep); 718 break; 719 720 case MPA_REQ_RCVD: 721 break; 722 723 case MORIBUND: 724 case CLOSING: 725 STOP_EP_TIMER(ep); 726 /*FALLTHROUGH*/ 727 case FPDU_MODE: 728 729 if (ep->com.cm_id && ep->com.qp) { 730 731 attrs.next_state = C4IW_QP_STATE_ERROR; 732 ret = c4iw_modify_qp(ep->com.qp->rhp, 733 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 734 &attrs, 1); 735 if (ret) 736 log(LOG_ERR, 737 "%s - qp <- error failed!\n", 738 __func__); 739 } 740 peer_abort_upcall(ep); 741 break; 742 743 case ABORTING: 744 break; 745 746 case DEAD: 747 CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!", 748 __func__, ep->com.so->so_error); 749 return; 750 751 default: 752 panic("%s: ep %p state %d", __func__, ep, state); 753 break; 754 } 755 756 if (state != ABORTING) { 757 close_socket(ep->com.so); 758 ep->com.state = DEAD; 759 c4iw_put_ep(&ep->com); 760 } 761 CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep); 762 return; 763 } 764 765 static void 766 process_close_complete(struct c4iw_ep *ep) 767 { 768 struct c4iw_qp_attributes attrs = {0}; 769 int release = 0; 770 771 CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep, 772 ep->com.so, states[ep->com.state]); 773 774 /* The cm_id may be null if we failed to connect */ 775 set_bit(CLOSE_CON_RPL, &ep->com.history); 776 777 switch (ep->com.state) { 778 779 case CLOSING: 780 CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND", 781 __func__, ep); 782 ep->com.state = MORIBUND; 783 break; 784 785 case MORIBUND: 786 CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__, 787 ep); 788 STOP_EP_TIMER(ep); 789 790 if ((ep->com.cm_id) && (ep->com.qp)) { 791 792 CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE", 793 __func__, ep); 794 attrs.next_state = C4IW_QP_STATE_IDLE; 795 c4iw_modify_qp(ep->com.dev, 796 ep->com.qp, 797 C4IW_QP_ATTR_NEXT_STATE, 798 &attrs, 1); 799 } 800 801 close_socket(ep->com.so); 802 close_complete_upcall(ep, 0); 803 ep->com.state = DEAD; 804 release = 1; 805 break; 806 807 case ABORTING: 808 CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep); 809 break; 810 811 case DEAD: 812 CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep); 813 break; 814 default: 815 CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state", 816 __func__, ep); 817 panic("%s:pcc6 %p unknown ep state", __func__, ep); 818 break; 819 } 820 821 if (release) { 822 823 CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep); 824 release_ep_resources(ep); 825 } 826 CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep); 827 return; 828 } 829 830 static void 831 setiwsockopt(struct socket *so) 832 { 833 int rc; 834 struct sockopt sopt; 835 int on = 1; 836 837 sopt.sopt_dir = SOPT_SET; 838 sopt.sopt_level = IPPROTO_TCP; 839 sopt.sopt_name = TCP_NODELAY; 840 sopt.sopt_val = (caddr_t)&on; 841 sopt.sopt_valsize = sizeof on; 842 sopt.sopt_td = NULL; 843 rc = -sosetopt(so, &sopt); 844 if (rc) { 845 log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n", 846 __func__, so, rc); 847 } 848 } 849 850 static void 851 init_iwarp_socket(struct socket *so, void *arg) 852 { 853 if (SOLISTENING(so)) { 854 SOLISTEN_LOCK(so); 855 solisten_upcall_set(so, c4iw_so_upcall, arg); 856 so->so_state |= SS_NBIO; 857 SOLISTEN_UNLOCK(so); 858 } else { 859 SOCKBUF_LOCK(&so->so_rcv); 860 soupcall_set(so, SO_RCV, c4iw_so_upcall, arg); 861 so->so_state |= SS_NBIO; 862 SOCKBUF_UNLOCK(&so->so_rcv); 863 } 864 } 865 866 static void 867 uninit_iwarp_socket(struct socket *so) 868 { 869 if (SOLISTENING(so)) { 870 SOLISTEN_LOCK(so); 871 solisten_upcall_set(so, NULL, NULL); 872 SOLISTEN_UNLOCK(so); 873 } else { 874 SOCKBUF_LOCK(&so->so_rcv); 875 soupcall_clear(so, SO_RCV); 876 SOCKBUF_UNLOCK(&so->so_rcv); 877 } 878 } 879 880 static void 881 process_data(struct c4iw_ep *ep) 882 { 883 int ret = 0; 884 int disconnect = 0; 885 struct c4iw_qp_attributes attrs = {0}; 886 887 CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__, 888 ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv)); 889 890 switch (ep->com.state) { 891 case MPA_REQ_SENT: 892 disconnect = process_mpa_reply(ep); 893 break; 894 case MPA_REQ_WAIT: 895 disconnect = process_mpa_request(ep); 896 if (disconnect) 897 /* Refered in process_newconn() */ 898 c4iw_put_ep(&ep->parent_ep->com); 899 break; 900 case FPDU_MODE: 901 MPASS(ep->com.qp != NULL); 902 attrs.next_state = C4IW_QP_STATE_TERMINATE; 903 ret = c4iw_modify_qp(ep->com.dev, ep->com.qp, 904 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 905 if (ret != -EINPROGRESS) 906 disconnect = 1; 907 break; 908 default: 909 log(LOG_ERR, "%s: Unexpected streaming data. ep %p, " 910 "state %d, so %p, so_state 0x%x, sbused %u\n", 911 __func__, ep, ep->com.state, ep->com.so, 912 ep->com.so->so_state, sbused(&ep->com.so->so_rcv)); 913 break; 914 } 915 if (disconnect) 916 c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL); 917 918 } 919 920 static void 921 process_connected(struct c4iw_ep *ep) 922 { 923 struct socket *so = ep->com.so; 924 925 if ((so->so_state & SS_ISCONNECTED) && !so->so_error) { 926 if (send_mpa_req(ep)) 927 goto err; 928 } else { 929 connect_reply_upcall(ep, -so->so_error); 930 goto err; 931 } 932 return; 933 err: 934 close_socket(so); 935 ep->com.state = DEAD; 936 c4iw_put_ep(&ep->com); 937 return; 938 } 939 940 static inline int c4iw_zero_addr(struct sockaddr *addr) 941 { 942 struct in6_addr *ip6; 943 944 if (addr->sa_family == AF_INET) 945 return IN_ZERONET( 946 ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr)); 947 else { 948 ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr; 949 return (ip6->s6_addr32[0] | ip6->s6_addr32[1] | 950 ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0; 951 } 952 } 953 954 static inline int c4iw_loopback_addr(struct sockaddr *addr) 955 { 956 if (addr->sa_family == AF_INET) 957 return IN_LOOPBACK( 958 ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr)); 959 else 960 return IN6_IS_ADDR_LOOPBACK( 961 &((struct sockaddr_in6 *) addr)->sin6_addr); 962 } 963 964 static inline int c4iw_any_addr(struct sockaddr *addr) 965 { 966 return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr); 967 } 968 969 static void 970 process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so) 971 { 972 struct c4iw_listen_ep *real_lep = NULL; 973 struct c4iw_ep *new_ep = NULL; 974 struct sockaddr_in *remote = NULL; 975 int ret = 0; 976 977 MPASS(new_so != NULL); 978 979 if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr)) { 980 /* Here we need to find the 'real_lep' that belongs to the 981 * incomming socket's network interface, such that the newly 982 * created 'ep' can be attached to the real 'lep'. 983 */ 984 real_lep = find_real_listen_ep(master_lep, new_so); 985 if (real_lep == NULL) { 986 CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen " 987 "ep for sock: %p", __func__, new_so); 988 log(LOG_ERR,"%s: Could not find the real listen ep for " 989 "sock: %p\n", __func__, new_so); 990 /* FIXME: properly free the 'new_so' in failure case. 991 * Use of soabort() and soclose() are not legal 992 * here(before soaccept()). 993 */ 994 return; 995 } 996 } else /* for Non-Wildcard address, master_lep is always the real_lep */ 997 real_lep = master_lep; 998 999 new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL); 1000 1001 CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, " 1002 "listening so %p, new so %p", __func__, master_lep, real_lep, 1003 new_ep, master_lep->com.so, new_so); 1004 1005 new_ep->com.dev = real_lep->com.dev; 1006 new_ep->com.so = new_so; 1007 new_ep->com.cm_id = NULL; 1008 new_ep->com.thread = real_lep->com.thread; 1009 new_ep->parent_ep = real_lep; 1010 1011 GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so); 1012 GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so); 1013 c4iw_get_ep(&real_lep->com); 1014 init_timer(&new_ep->timer); 1015 new_ep->com.state = MPA_REQ_WAIT; 1016 START_EP_TIMER(new_ep); 1017 1018 setiwsockopt(new_so); 1019 ret = soaccept(new_so, (struct sockaddr **)&remote); 1020 if (ret != 0) { 1021 CTR4(KTR_IW_CXGBE, 1022 "%s:listen sock:%p, new sock:%p, ret:%d", 1023 __func__, master_lep->com.so, new_so, ret); 1024 if (remote != NULL) 1025 free(remote, M_SONAME); 1026 uninit_iwarp_socket(new_so); 1027 soclose(new_so); 1028 c4iw_put_ep(&new_ep->com); 1029 c4iw_put_ep(&real_lep->com); 1030 return; 1031 } 1032 free(remote, M_SONAME); 1033 1034 /* MPA request might have been queued up on the socket already, so we 1035 * initialize the socket/upcall_handler under lock to prevent processing 1036 * MPA request on another thread(via process_req()) simultaniously. 1037 */ 1038 c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below, this is to 1039 avoid freeing of ep before ep unlock. */ 1040 mutex_lock(&new_ep->com.mutex); 1041 init_iwarp_socket(new_so, &new_ep->com); 1042 1043 ret = process_mpa_request(new_ep); 1044 if (ret) { 1045 /* ABORT */ 1046 c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL); 1047 c4iw_put_ep(&real_lep->com); 1048 } 1049 mutex_unlock(&new_ep->com.mutex); 1050 c4iw_put_ep(&new_ep->com); 1051 return; 1052 } 1053 1054 static int 1055 add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event) 1056 { 1057 unsigned long flag; 1058 1059 spin_lock_irqsave(&req_lock, flag); 1060 if (ep && ep->com.so) { 1061 ep->com.ep_events |= new_ep_event; 1062 if (!ep->com.entry.tqe_prev) { 1063 c4iw_get_ep(&ep->com); 1064 TAILQ_INSERT_TAIL(&req_list, &ep->com, entry); 1065 queue_work(c4iw_taskq, &c4iw_task); 1066 } 1067 } 1068 spin_unlock_irqrestore(&req_lock, flag); 1069 1070 return (0); 1071 } 1072 1073 static int 1074 c4iw_so_upcall(struct socket *so, void *arg, int waitflag) 1075 { 1076 struct c4iw_ep *ep = arg; 1077 1078 CTR6(KTR_IW_CXGBE, 1079 "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p", 1080 __func__, so, so->so_state, ep, states[ep->com.state], 1081 ep->com.entry.tqe_prev); 1082 1083 MPASS(ep->com.so == so); 1084 /* 1085 * Wake up any threads waiting in rdma_init()/rdma_fini(), 1086 * with locks held. 1087 */ 1088 if (so->so_error) 1089 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 1090 add_ep_to_req_list(ep, C4IW_EVENT_SOCKET); 1091 1092 return (SU_OK); 1093 } 1094 1095 1096 static int 1097 terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 1098 { 1099 struct adapter *sc = iq->adapter; 1100 const struct cpl_rdma_terminate *cpl = mtod(m, const void *); 1101 unsigned int tid = GET_TID(cpl); 1102 struct toepcb *toep = lookup_tid(sc, tid); 1103 struct socket *so; 1104 struct c4iw_ep *ep; 1105 1106 INP_WLOCK(toep->inp); 1107 so = inp_inpcbtosocket(toep->inp); 1108 ep = so->so_rcv.sb_upcallarg; 1109 INP_WUNLOCK(toep->inp); 1110 1111 CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep); 1112 add_ep_to_req_list(ep, C4IW_EVENT_TERM); 1113 1114 return 0; 1115 } 1116 1117 static void 1118 process_socket_event(struct c4iw_ep *ep) 1119 { 1120 int state = ep->com.state; 1121 struct socket *so = ep->com.so; 1122 1123 if (ep->com.state == DEAD) { 1124 CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded " 1125 "ep %p ep_state %s", __func__, ep, states[state]); 1126 return; 1127 } 1128 1129 CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, " 1130 "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state, 1131 so->so_error, so->so_rcv.sb_state, ep, states[state]); 1132 1133 if (state == CONNECTING) { 1134 process_connected(ep); 1135 return; 1136 } 1137 1138 if (state == LISTEN) { 1139 struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep; 1140 struct socket *listen_so = so, *new_so = NULL; 1141 int error = 0; 1142 1143 SOLISTEN_LOCK(listen_so); 1144 do { 1145 error = solisten_dequeue(listen_so, &new_so, 1146 SOCK_NONBLOCK); 1147 if (error) { 1148 CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p " 1149 "error %d", __func__, lep, listen_so, 1150 error); 1151 return; 1152 } 1153 process_newconn(lep, new_so); 1154 1155 /* solisten_dequeue() unlocks while return, so aquire 1156 * lock again for sol_qlen and also for next iteration. 1157 */ 1158 SOLISTEN_LOCK(listen_so); 1159 } while (listen_so->sol_qlen); 1160 SOLISTEN_UNLOCK(listen_so); 1161 1162 return; 1163 } 1164 1165 /* connection error */ 1166 if (so->so_error) { 1167 process_conn_error(ep); 1168 return; 1169 } 1170 1171 /* peer close */ 1172 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) { 1173 process_peer_close(ep); 1174 /* 1175 * check whether socket disconnect event is pending before 1176 * returning. Fallthrough if yes. 1177 */ 1178 if (!(so->so_state & SS_ISDISCONNECTED)) 1179 return; 1180 } 1181 1182 /* close complete */ 1183 if (so->so_state & SS_ISDISCONNECTED) { 1184 process_close_complete(ep); 1185 return; 1186 } 1187 1188 /* rx data */ 1189 if (sbused(&ep->com.so->so_rcv)) { 1190 process_data(ep); 1191 return; 1192 } 1193 1194 /* Socket events for 'MPA Request Received' and 'Close Complete' 1195 * were already processed earlier in their previous events handlers. 1196 * Hence, these socket events are skipped. 1197 * And any other socket events must have handled above. 1198 */ 1199 MPASS((ep->com.state == MPA_REQ_RCVD) || (ep->com.state == MORIBUND)); 1200 1201 if ((ep->com.state != MPA_REQ_RCVD) && (ep->com.state != MORIBUND)) 1202 log(LOG_ERR, "%s: Unprocessed socket event so %p, " 1203 "so_state 0x%x, so_err %d, sb_state 0x%x, ep %p, ep_state %s\n", 1204 __func__, so, so->so_state, so->so_error, so->so_rcv.sb_state, 1205 ep, states[state]); 1206 1207 } 1208 1209 SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters"); 1210 1211 static int dack_mode = 0; 1212 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0, 1213 "Delayed ack mode (default = 0)"); 1214 1215 int c4iw_max_read_depth = 8; 1216 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0, 1217 "Per-connection max ORD/IRD (default = 8)"); 1218 1219 static int enable_tcp_timestamps; 1220 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0, 1221 "Enable tcp timestamps (default = 0)"); 1222 1223 static int enable_tcp_sack; 1224 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0, 1225 "Enable tcp SACK (default = 0)"); 1226 1227 static int enable_tcp_window_scaling = 1; 1228 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0, 1229 "Enable tcp window scaling (default = 1)"); 1230 1231 int c4iw_debug = 0; 1232 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0, 1233 "Enable debug logging (default = 0)"); 1234 1235 static int peer2peer = 1; 1236 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0, 1237 "Support peer2peer ULPs (default = 1)"); 1238 1239 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; 1240 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0, 1241 "RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)"); 1242 1243 static int ep_timeout_secs = 60; 1244 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0, 1245 "CM Endpoint operation timeout in seconds (default = 60)"); 1246 1247 static int mpa_rev = 1; 1248 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0, 1249 "MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)"); 1250 1251 static int markers_enabled; 1252 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0, 1253 "Enable MPA MARKERS (default(0) = disabled)"); 1254 1255 static int crc_enabled = 1; 1256 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0, 1257 "Enable MPA CRC (default(1) = enabled)"); 1258 1259 static int rcv_win = 256 * 1024; 1260 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0, 1261 "TCP receive window in bytes (default = 256KB)"); 1262 1263 static int snd_win = 128 * 1024; 1264 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0, 1265 "TCP send window in bytes (default = 128KB)"); 1266 1267 int use_dsgl = 1; 1268 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, use_dsgl, CTLFLAG_RWTUN, &use_dsgl, 0, 1269 "Use DSGL for PBL/FastReg (default=1)"); 1270 1271 int inline_threshold = 128; 1272 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, inline_threshold, CTLFLAG_RWTUN, &inline_threshold, 0, 1273 "inline vs dsgl threshold (default=128)"); 1274 1275 static int reuseaddr = 0; 1276 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, reuseaddr, CTLFLAG_RWTUN, &reuseaddr, 0, 1277 "Enable SO_REUSEADDR & SO_REUSEPORT socket options on all iWARP client connections(default = 0)"); 1278 1279 static void 1280 start_ep_timer(struct c4iw_ep *ep) 1281 { 1282 1283 if (timer_pending(&ep->timer)) { 1284 CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep); 1285 printk(KERN_ERR "%s timer already started! ep %p\n", __func__, 1286 ep); 1287 return; 1288 } 1289 clear_bit(TIMEOUT, &ep->com.flags); 1290 c4iw_get_ep(&ep->com); 1291 ep->timer.expires = jiffies + ep_timeout_secs * HZ; 1292 ep->timer.data = (unsigned long)ep; 1293 ep->timer.function = ep_timeout; 1294 add_timer(&ep->timer); 1295 } 1296 1297 static int 1298 stop_ep_timer(struct c4iw_ep *ep) 1299 { 1300 1301 del_timer_sync(&ep->timer); 1302 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 1303 c4iw_put_ep(&ep->com); 1304 return 0; 1305 } 1306 return 1; 1307 } 1308 1309 static void * 1310 alloc_ep(int size, gfp_t gfp) 1311 { 1312 struct c4iw_ep_common *epc; 1313 1314 epc = kzalloc(size, gfp); 1315 if (epc == NULL) 1316 return (NULL); 1317 1318 kref_init(&epc->kref); 1319 mutex_init(&epc->mutex); 1320 c4iw_init_wr_wait(&epc->wr_wait); 1321 1322 return (epc); 1323 } 1324 1325 void _c4iw_free_ep(struct kref *kref) 1326 { 1327 struct c4iw_ep *ep; 1328 struct c4iw_ep_common *epc; 1329 1330 ep = container_of(kref, struct c4iw_ep, com.kref); 1331 epc = &ep->com; 1332 KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list", 1333 __func__, epc)); 1334 if (test_bit(QP_REFERENCED, &ep->com.flags)) 1335 deref_qp(ep); 1336 CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx", 1337 __func__, ep, epc->history, epc->flags); 1338 kfree(ep); 1339 } 1340 1341 static void release_ep_resources(struct c4iw_ep *ep) 1342 { 1343 CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep); 1344 set_bit(RELEASE_RESOURCES, &ep->com.flags); 1345 c4iw_put_ep(&ep->com); 1346 CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep); 1347 } 1348 1349 static int 1350 send_mpa_req(struct c4iw_ep *ep) 1351 { 1352 int mpalen; 1353 struct mpa_message *mpa; 1354 struct mpa_v2_conn_params mpa_v2_params; 1355 struct mbuf *m; 1356 char mpa_rev_to_use = mpa_rev; 1357 int err = 0; 1358 1359 if (ep->retry_with_mpa_v1) 1360 mpa_rev_to_use = 1; 1361 mpalen = sizeof(*mpa) + ep->plen; 1362 if (mpa_rev_to_use == 2) 1363 mpalen += sizeof(struct mpa_v2_conn_params); 1364 1365 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); 1366 if (mpa == NULL) { 1367 err = -ENOMEM; 1368 CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d", 1369 __func__, ep, err); 1370 goto err; 1371 } 1372 1373 memset(mpa, 0, mpalen); 1374 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 1375 mpa->flags = (crc_enabled ? MPA_CRC : 0) | 1376 (markers_enabled ? MPA_MARKERS : 0) | 1377 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); 1378 mpa->private_data_size = htons(ep->plen); 1379 mpa->revision = mpa_rev_to_use; 1380 1381 if (mpa_rev_to_use == 1) { 1382 ep->tried_with_mpa_v1 = 1; 1383 ep->retry_with_mpa_v1 = 0; 1384 } 1385 1386 if (mpa_rev_to_use == 2) { 1387 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1388 sizeof(struct mpa_v2_conn_params)); 1389 mpa_v2_params.ird = htons((u16)ep->ird); 1390 mpa_v2_params.ord = htons((u16)ep->ord); 1391 1392 if (peer2peer) { 1393 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1394 1395 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) { 1396 mpa_v2_params.ord |= 1397 htons(MPA_V2_RDMA_WRITE_RTR); 1398 } else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) { 1399 mpa_v2_params.ord |= 1400 htons(MPA_V2_RDMA_READ_RTR); 1401 } 1402 } 1403 memcpy(mpa->private_data, &mpa_v2_params, 1404 sizeof(struct mpa_v2_conn_params)); 1405 1406 if (ep->plen) { 1407 1408 memcpy(mpa->private_data + 1409 sizeof(struct mpa_v2_conn_params), 1410 ep->mpa_pkt + sizeof(*mpa), ep->plen); 1411 } 1412 } else { 1413 1414 if (ep->plen) 1415 memcpy(mpa->private_data, 1416 ep->mpa_pkt + sizeof(*mpa), ep->plen); 1417 CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep); 1418 } 1419 1420 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); 1421 if (m == NULL) { 1422 err = -ENOMEM; 1423 CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d", 1424 __func__, ep, err); 1425 free(mpa, M_CXGBE); 1426 goto err; 1427 } 1428 m_copyback(m, 0, mpalen, (void *)mpa); 1429 free(mpa, M_CXGBE); 1430 1431 err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, 1432 ep->com.thread); 1433 if (err) { 1434 CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d", 1435 __func__, ep, err); 1436 goto err; 1437 } 1438 1439 START_EP_TIMER(ep); 1440 ep->com.state = MPA_REQ_SENT; 1441 ep->mpa_attr.initiator = 1; 1442 CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err); 1443 return 0; 1444 err: 1445 connect_reply_upcall(ep, err); 1446 CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err); 1447 return err; 1448 } 1449 1450 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 1451 { 1452 int mpalen ; 1453 struct mpa_message *mpa; 1454 struct mpa_v2_conn_params mpa_v2_params; 1455 struct mbuf *m; 1456 int err; 1457 1458 CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid, 1459 ep->plen); 1460 1461 mpalen = sizeof(*mpa) + plen; 1462 1463 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1464 1465 mpalen += sizeof(struct mpa_v2_conn_params); 1466 CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep, 1467 ep->mpa_attr.version, mpalen); 1468 } 1469 1470 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); 1471 if (mpa == NULL) 1472 return (-ENOMEM); 1473 1474 memset(mpa, 0, mpalen); 1475 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1476 mpa->flags = MPA_REJECT; 1477 mpa->revision = mpa_rev; 1478 mpa->private_data_size = htons(plen); 1479 1480 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1481 1482 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1483 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1484 sizeof(struct mpa_v2_conn_params)); 1485 mpa_v2_params.ird = htons(((u16)ep->ird) | 1486 (peer2peer ? MPA_V2_PEER2PEER_MODEL : 1487 0)); 1488 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? 1489 (p2p_type == 1490 FW_RI_INIT_P2PTYPE_RDMA_WRITE ? 1491 MPA_V2_RDMA_WRITE_RTR : p2p_type == 1492 FW_RI_INIT_P2PTYPE_READ_REQ ? 1493 MPA_V2_RDMA_READ_RTR : 0) : 0)); 1494 memcpy(mpa->private_data, &mpa_v2_params, 1495 sizeof(struct mpa_v2_conn_params)); 1496 1497 if (ep->plen) 1498 memcpy(mpa->private_data + 1499 sizeof(struct mpa_v2_conn_params), pdata, plen); 1500 CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep, 1501 mpa_v2_params.ird, mpa_v2_params.ord, ep->plen); 1502 } else 1503 if (plen) 1504 memcpy(mpa->private_data, pdata, plen); 1505 1506 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); 1507 if (m == NULL) { 1508 free(mpa, M_CXGBE); 1509 return (-ENOMEM); 1510 } 1511 m_copyback(m, 0, mpalen, (void *)mpa); 1512 free(mpa, M_CXGBE); 1513 1514 err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread); 1515 if (!err) 1516 ep->snd_seq += mpalen; 1517 CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err); 1518 return err; 1519 } 1520 1521 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 1522 { 1523 int mpalen; 1524 struct mpa_message *mpa; 1525 struct mbuf *m; 1526 struct mpa_v2_conn_params mpa_v2_params; 1527 int err; 1528 1529 CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep); 1530 1531 mpalen = sizeof(*mpa) + plen; 1532 1533 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1534 1535 CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep, 1536 ep->mpa_attr.version); 1537 mpalen += sizeof(struct mpa_v2_conn_params); 1538 } 1539 1540 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); 1541 if (mpa == NULL) 1542 return (-ENOMEM); 1543 1544 memset(mpa, 0, sizeof(*mpa)); 1545 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1546 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 1547 (markers_enabled ? MPA_MARKERS : 0); 1548 mpa->revision = ep->mpa_attr.version; 1549 mpa->private_data_size = htons(plen); 1550 1551 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1552 1553 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1554 mpa->private_data_size += 1555 htons(sizeof(struct mpa_v2_conn_params)); 1556 mpa_v2_params.ird = htons((u16)ep->ird); 1557 mpa_v2_params.ord = htons((u16)ep->ord); 1558 CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep, 1559 ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord); 1560 1561 if (peer2peer && (ep->mpa_attr.p2p_type != 1562 FW_RI_INIT_P2PTYPE_DISABLED)) { 1563 1564 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1565 1566 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) { 1567 1568 mpa_v2_params.ord |= 1569 htons(MPA_V2_RDMA_WRITE_RTR); 1570 CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d", 1571 __func__, ep, p2p_type, mpa_v2_params.ird, 1572 mpa_v2_params.ord); 1573 } 1574 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) { 1575 1576 mpa_v2_params.ord |= 1577 htons(MPA_V2_RDMA_READ_RTR); 1578 CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d", 1579 __func__, ep, p2p_type, mpa_v2_params.ird, 1580 mpa_v2_params.ord); 1581 } 1582 } 1583 1584 memcpy(mpa->private_data, &mpa_v2_params, 1585 sizeof(struct mpa_v2_conn_params)); 1586 1587 if (ep->plen) 1588 memcpy(mpa->private_data + 1589 sizeof(struct mpa_v2_conn_params), pdata, plen); 1590 } else 1591 if (plen) 1592 memcpy(mpa->private_data, pdata, plen); 1593 1594 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); 1595 if (m == NULL) { 1596 free(mpa, M_CXGBE); 1597 return (-ENOMEM); 1598 } 1599 m_copyback(m, 0, mpalen, (void *)mpa); 1600 free(mpa, M_CXGBE); 1601 1602 1603 ep->com.state = MPA_REP_SENT; 1604 ep->snd_seq += mpalen; 1605 err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, 1606 ep->com.thread); 1607 CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err); 1608 return err; 1609 } 1610 1611 1612 1613 static void close_complete_upcall(struct c4iw_ep *ep, int status) 1614 { 1615 struct iw_cm_event event; 1616 1617 CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep); 1618 memset(&event, 0, sizeof(event)); 1619 event.event = IW_CM_EVENT_CLOSE; 1620 event.status = status; 1621 1622 if (ep->com.cm_id) { 1623 1624 CTR2(KTR_IW_CXGBE, "%s:ccu1 %1", __func__, ep); 1625 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1626 deref_cm_id(&ep->com); 1627 set_bit(CLOSE_UPCALL, &ep->com.history); 1628 } 1629 CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep); 1630 } 1631 1632 static int 1633 send_abort(struct c4iw_ep *ep) 1634 { 1635 struct socket *so = ep->com.so; 1636 struct sockopt sopt; 1637 int rc; 1638 struct linger l; 1639 1640 CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so, 1641 states[ep->com.state], ep->hwtid); 1642 1643 l.l_onoff = 1; 1644 l.l_linger = 0; 1645 1646 /* linger_time of 0 forces RST to be sent */ 1647 sopt.sopt_dir = SOPT_SET; 1648 sopt.sopt_level = SOL_SOCKET; 1649 sopt.sopt_name = SO_LINGER; 1650 sopt.sopt_val = (caddr_t)&l; 1651 sopt.sopt_valsize = sizeof l; 1652 sopt.sopt_td = NULL; 1653 rc = -sosetopt(so, &sopt); 1654 if (rc != 0) { 1655 log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n", 1656 __func__, so, rc); 1657 } 1658 1659 uninit_iwarp_socket(so); 1660 soclose(so); 1661 set_bit(ABORT_CONN, &ep->com.history); 1662 1663 /* 1664 * TBD: iw_cxgbe driver should receive ABORT reply for every ABORT 1665 * request it has sent. But the current TOE driver is not propagating 1666 * this ABORT reply event (via do_abort_rpl) to iw_cxgbe. So as a work- 1667 * around de-refererece 'ep' here instead of doing it in abort_rpl() 1668 * handler(not yet implemented) of iw_cxgbe driver. 1669 */ 1670 release_ep_resources(ep); 1671 ep->com.state = DEAD; 1672 1673 return (0); 1674 } 1675 1676 static void peer_close_upcall(struct c4iw_ep *ep) 1677 { 1678 struct iw_cm_event event; 1679 1680 CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep); 1681 memset(&event, 0, sizeof(event)); 1682 event.event = IW_CM_EVENT_DISCONNECT; 1683 1684 if (ep->com.cm_id) { 1685 1686 CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep); 1687 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1688 set_bit(DISCONN_UPCALL, &ep->com.history); 1689 } 1690 CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep); 1691 } 1692 1693 static void peer_abort_upcall(struct c4iw_ep *ep) 1694 { 1695 struct iw_cm_event event; 1696 1697 CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep); 1698 memset(&event, 0, sizeof(event)); 1699 event.event = IW_CM_EVENT_CLOSE; 1700 event.status = -ECONNRESET; 1701 1702 if (ep->com.cm_id) { 1703 1704 CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep); 1705 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1706 deref_cm_id(&ep->com); 1707 set_bit(ABORT_UPCALL, &ep->com.history); 1708 } 1709 CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep); 1710 } 1711 1712 static void connect_reply_upcall(struct c4iw_ep *ep, int status) 1713 { 1714 struct iw_cm_event event; 1715 1716 CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status); 1717 memset(&event, 0, sizeof(event)); 1718 event.event = IW_CM_EVENT_CONNECT_REPLY; 1719 event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ? 1720 -ECONNRESET : status; 1721 event.local_addr = ep->com.local_addr; 1722 event.remote_addr = ep->com.remote_addr; 1723 1724 if ((status == 0) || (status == -ECONNREFUSED)) { 1725 1726 if (!ep->tried_with_mpa_v1) { 1727 1728 CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep); 1729 /* this means MPA_v2 is used */ 1730 event.ord = ep->ird; 1731 event.ird = ep->ord; 1732 event.private_data_len = ep->plen - 1733 sizeof(struct mpa_v2_conn_params); 1734 event.private_data = ep->mpa_pkt + 1735 sizeof(struct mpa_message) + 1736 sizeof(struct mpa_v2_conn_params); 1737 } else { 1738 1739 CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep); 1740 /* this means MPA_v1 is used */ 1741 event.ord = c4iw_max_read_depth; 1742 event.ird = c4iw_max_read_depth; 1743 event.private_data_len = ep->plen; 1744 event.private_data = ep->mpa_pkt + 1745 sizeof(struct mpa_message); 1746 } 1747 } 1748 1749 if (ep->com.cm_id) { 1750 1751 CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep); 1752 set_bit(CONN_RPL_UPCALL, &ep->com.history); 1753 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1754 } 1755 1756 if(status == -ECONNABORTED) { 1757 1758 CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status); 1759 return; 1760 } 1761 1762 if (status < 0) { 1763 1764 CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status); 1765 deref_cm_id(&ep->com); 1766 } 1767 1768 CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep); 1769 } 1770 1771 static int connect_request_upcall(struct c4iw_ep *ep) 1772 { 1773 struct iw_cm_event event; 1774 int ret; 1775 1776 CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep, 1777 ep->tried_with_mpa_v1); 1778 1779 memset(&event, 0, sizeof(event)); 1780 event.event = IW_CM_EVENT_CONNECT_REQUEST; 1781 event.local_addr = ep->com.local_addr; 1782 event.remote_addr = ep->com.remote_addr; 1783 event.provider_data = ep; 1784 1785 if (!ep->tried_with_mpa_v1) { 1786 /* this means MPA_v2 is used */ 1787 event.ord = ep->ord; 1788 event.ird = ep->ird; 1789 event.private_data_len = ep->plen - 1790 sizeof(struct mpa_v2_conn_params); 1791 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + 1792 sizeof(struct mpa_v2_conn_params); 1793 } else { 1794 1795 /* this means MPA_v1 is used. Send max supported */ 1796 event.ord = c4iw_max_read_depth; 1797 event.ird = c4iw_max_read_depth; 1798 event.private_data_len = ep->plen; 1799 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 1800 } 1801 1802 c4iw_get_ep(&ep->com); 1803 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, 1804 &event); 1805 if(ret) { 1806 CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to" 1807 " IWCM, err:%d", __func__, ep, ret); 1808 c4iw_put_ep(&ep->com); 1809 } else 1810 /* Dereference parent_ep only in success case. 1811 * In case of failure, parent_ep is dereferenced by the caller 1812 * of process_mpa_request(). 1813 */ 1814 c4iw_put_ep(&ep->parent_ep->com); 1815 1816 set_bit(CONNREQ_UPCALL, &ep->com.history); 1817 return ret; 1818 } 1819 1820 static void established_upcall(struct c4iw_ep *ep) 1821 { 1822 struct iw_cm_event event; 1823 1824 CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep); 1825 memset(&event, 0, sizeof(event)); 1826 event.event = IW_CM_EVENT_ESTABLISHED; 1827 event.ird = ep->ord; 1828 event.ord = ep->ird; 1829 1830 if (ep->com.cm_id) { 1831 1832 CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep); 1833 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1834 set_bit(ESTAB_UPCALL, &ep->com.history); 1835 } 1836 CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep); 1837 } 1838 1839 1840 #define RELAXED_IRD_NEGOTIATION 1 1841 1842 /* 1843 * process_mpa_reply - process streaming mode MPA reply 1844 * 1845 * Returns: 1846 * 1847 * 0 upon success indicating a connect request was delivered to the ULP 1848 * or the mpa request is incomplete but valid so far. 1849 * 1850 * 1 if a failure requires the caller to close the connection. 1851 * 1852 * 2 if a failure requires the caller to abort the connection. 1853 */ 1854 static int process_mpa_reply(struct c4iw_ep *ep) 1855 { 1856 struct mpa_message *mpa; 1857 struct mpa_v2_conn_params *mpa_v2_params; 1858 u16 plen; 1859 u16 resp_ird, resp_ord; 1860 u8 rtr_mismatch = 0, insuff_ird = 0; 1861 struct c4iw_qp_attributes attrs = {0}; 1862 enum c4iw_qp_attr_mask mask; 1863 int err; 1864 struct mbuf *top, *m; 1865 int flags = MSG_DONTWAIT; 1866 struct uio uio; 1867 int disconnect = 0; 1868 1869 CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep); 1870 1871 /* 1872 * Stop mpa timer. If it expired, then 1873 * we ignore the MPA reply. process_timeout() 1874 * will abort the connection. 1875 */ 1876 if (STOP_EP_TIMER(ep)) 1877 return 0; 1878 1879 uio.uio_resid = 1000000; 1880 uio.uio_td = ep->com.thread; 1881 err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags); 1882 1883 if (err) { 1884 1885 if (err == EWOULDBLOCK) { 1886 1887 CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep); 1888 START_EP_TIMER(ep); 1889 return 0; 1890 } 1891 err = -err; 1892 CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep); 1893 goto err; 1894 } 1895 1896 if (ep->com.so->so_rcv.sb_mb) { 1897 1898 CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep); 1899 printf("%s data after soreceive called! so %p sb_mb %p top %p\n", 1900 __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top); 1901 } 1902 1903 m = top; 1904 1905 do { 1906 1907 CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep); 1908 /* 1909 * If we get more than the supported amount of private data 1910 * then we must fail this connection. 1911 */ 1912 if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) { 1913 1914 CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep, 1915 ep->mpa_pkt_len + m->m_len); 1916 err = (-EINVAL); 1917 goto err_stop_timer; 1918 } 1919 1920 /* 1921 * copy the new data into our accumulation buffer. 1922 */ 1923 m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len])); 1924 ep->mpa_pkt_len += m->m_len; 1925 if (!m->m_next) 1926 m = m->m_nextpkt; 1927 else 1928 m = m->m_next; 1929 } while (m); 1930 1931 m_freem(top); 1932 /* 1933 * if we don't even have the mpa message, then bail. 1934 */ 1935 if (ep->mpa_pkt_len < sizeof(*mpa)) { 1936 return 0; 1937 } 1938 mpa = (struct mpa_message *) ep->mpa_pkt; 1939 1940 /* Validate MPA header. */ 1941 if (mpa->revision > mpa_rev) { 1942 1943 CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep, 1944 mpa->revision, mpa_rev); 1945 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, " 1946 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1947 err = -EPROTO; 1948 goto err_stop_timer; 1949 } 1950 1951 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 1952 1953 CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep); 1954 err = -EPROTO; 1955 goto err_stop_timer; 1956 } 1957 1958 plen = ntohs(mpa->private_data_size); 1959 1960 /* 1961 * Fail if there's too much private data. 1962 */ 1963 if (plen > MPA_MAX_PRIVATE_DATA) { 1964 1965 CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep); 1966 err = -EPROTO; 1967 goto err_stop_timer; 1968 } 1969 1970 /* 1971 * If plen does not account for pkt size 1972 */ 1973 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1974 1975 CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep); 1976 STOP_EP_TIMER(ep); 1977 err = -EPROTO; 1978 goto err_stop_timer; 1979 } 1980 1981 ep->plen = (u8) plen; 1982 1983 /* 1984 * If we don't have all the pdata yet, then bail. 1985 * We'll continue process when more data arrives. 1986 */ 1987 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) { 1988 1989 CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep); 1990 return 0; 1991 } 1992 1993 if (mpa->flags & MPA_REJECT) { 1994 1995 CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep); 1996 err = -ECONNREFUSED; 1997 goto err_stop_timer; 1998 } 1999 2000 /* 2001 * If we get here we have accumulated the entire mpa 2002 * start reply message including private data. And 2003 * the MPA header is valid. 2004 */ 2005 ep->com.state = FPDU_MODE; 2006 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 2007 ep->mpa_attr.recv_marker_enabled = markers_enabled; 2008 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 2009 ep->mpa_attr.version = mpa->revision; 2010 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 2011 2012 if (mpa->revision == 2) { 2013 2014 CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep); 2015 ep->mpa_attr.enhanced_rdma_conn = 2016 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 2017 2018 if (ep->mpa_attr.enhanced_rdma_conn) { 2019 2020 CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep); 2021 mpa_v2_params = (struct mpa_v2_conn_params *) 2022 (ep->mpa_pkt + sizeof(*mpa)); 2023 resp_ird = ntohs(mpa_v2_params->ird) & 2024 MPA_V2_IRD_ORD_MASK; 2025 resp_ord = ntohs(mpa_v2_params->ord) & 2026 MPA_V2_IRD_ORD_MASK; 2027 2028 /* 2029 * This is a double-check. Ideally, below checks are 2030 * not required since ird/ord stuff has been taken 2031 * care of in c4iw_accept_cr 2032 */ 2033 if (ep->ird < resp_ord) { 2034 if (RELAXED_IRD_NEGOTIATION && resp_ord <= 2035 ep->com.dev->rdev.adap->params.max_ordird_qp) 2036 ep->ird = resp_ord; 2037 else 2038 insuff_ird = 1; 2039 } else if (ep->ird > resp_ord) { 2040 ep->ird = resp_ord; 2041 } 2042 if (ep->ord > resp_ird) { 2043 if (RELAXED_IRD_NEGOTIATION) 2044 ep->ord = resp_ird; 2045 else 2046 insuff_ird = 1; 2047 } 2048 if (insuff_ird) { 2049 err = -ENOMEM; 2050 ep->ird = resp_ord; 2051 ep->ord = resp_ird; 2052 } 2053 2054 if (ntohs(mpa_v2_params->ird) & 2055 MPA_V2_PEER2PEER_MODEL) { 2056 2057 CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep); 2058 if (ntohs(mpa_v2_params->ord) & 2059 MPA_V2_RDMA_WRITE_RTR) { 2060 2061 CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep); 2062 ep->mpa_attr.p2p_type = 2063 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 2064 } 2065 else if (ntohs(mpa_v2_params->ord) & 2066 MPA_V2_RDMA_READ_RTR) { 2067 2068 CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep); 2069 ep->mpa_attr.p2p_type = 2070 FW_RI_INIT_P2PTYPE_READ_REQ; 2071 } 2072 } 2073 } 2074 } else { 2075 2076 CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep); 2077 2078 if (mpa->revision == 1) { 2079 2080 CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep); 2081 2082 if (peer2peer) { 2083 2084 CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep); 2085 ep->mpa_attr.p2p_type = p2p_type; 2086 } 2087 } 2088 } 2089 2090 if (set_tcpinfo(ep)) { 2091 2092 CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep); 2093 printf("%s set_tcpinfo error\n", __func__); 2094 err = -ECONNRESET; 2095 goto err; 2096 } 2097 2098 CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, " 2099 "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__, 2100 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 2101 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 2102 ep->mpa_attr.p2p_type); 2103 2104 /* 2105 * If responder's RTR does not match with that of initiator, assign 2106 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not 2107 * generated when moving QP to RTS state. 2108 * A TERM message will be sent after QP has moved to RTS state 2109 */ 2110 if ((ep->mpa_attr.version == 2) && peer2peer && 2111 (ep->mpa_attr.p2p_type != p2p_type)) { 2112 2113 CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep); 2114 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 2115 rtr_mismatch = 1; 2116 } 2117 2118 2119 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; 2120 attrs.mpa_attr = ep->mpa_attr; 2121 attrs.max_ird = ep->ird; 2122 attrs.max_ord = ep->ord; 2123 attrs.llp_stream_handle = ep; 2124 attrs.next_state = C4IW_QP_STATE_RTS; 2125 2126 mask = C4IW_QP_ATTR_NEXT_STATE | 2127 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 2128 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 2129 2130 /* bind QP and TID with INIT_WR */ 2131 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); 2132 2133 if (err) { 2134 2135 CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep); 2136 goto err; 2137 } 2138 2139 /* 2140 * If responder's RTR requirement did not match with what initiator 2141 * supports, generate TERM message 2142 */ 2143 if (rtr_mismatch) { 2144 2145 CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep); 2146 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); 2147 attrs.layer_etype = LAYER_MPA | DDP_LLP; 2148 attrs.ecode = MPA_NOMATCH_RTR; 2149 attrs.next_state = C4IW_QP_STATE_TERMINATE; 2150 attrs.send_term = 1; 2151 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2152 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2153 err = -ENOMEM; 2154 disconnect = 1; 2155 goto out; 2156 } 2157 2158 /* 2159 * Generate TERM if initiator IRD is not sufficient for responder 2160 * provided ORD. Currently, we do the same behaviour even when 2161 * responder provided IRD is also not sufficient as regards to 2162 * initiator ORD. 2163 */ 2164 if (insuff_ird) { 2165 2166 CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep); 2167 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", 2168 __func__); 2169 attrs.layer_etype = LAYER_MPA | DDP_LLP; 2170 attrs.ecode = MPA_INSUFF_IRD; 2171 attrs.next_state = C4IW_QP_STATE_TERMINATE; 2172 attrs.send_term = 1; 2173 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2174 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2175 err = -ENOMEM; 2176 disconnect = 1; 2177 goto out; 2178 } 2179 goto out; 2180 err_stop_timer: 2181 STOP_EP_TIMER(ep); 2182 err: 2183 disconnect = 2; 2184 out: 2185 connect_reply_upcall(ep, err); 2186 CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep); 2187 return disconnect; 2188 } 2189 2190 /* 2191 * process_mpa_request - process streaming mode MPA request 2192 * 2193 * Returns: 2194 * 2195 * 0 upon success indicating a connect request was delivered to the ULP 2196 * or the mpa request is incomplete but valid so far. 2197 * 2198 * 1 if a failure requires the caller to close the connection. 2199 * 2200 * 2 if a failure requires the caller to abort the connection. 2201 */ 2202 static int 2203 process_mpa_request(struct c4iw_ep *ep) 2204 { 2205 struct mpa_message *mpa; 2206 struct mpa_v2_conn_params *mpa_v2_params; 2207 u16 plen; 2208 int flags = MSG_DONTWAIT; 2209 int rc; 2210 struct iovec iov; 2211 struct uio uio; 2212 enum c4iw_ep_state state = ep->com.state; 2213 2214 CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]); 2215 2216 if (state != MPA_REQ_WAIT) 2217 return 0; 2218 2219 iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len]; 2220 iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; 2221 uio.uio_iov = &iov; 2222 uio.uio_iovcnt = 1; 2223 uio.uio_offset = 0; 2224 uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; 2225 uio.uio_segflg = UIO_SYSSPACE; 2226 uio.uio_rw = UIO_READ; 2227 uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */ 2228 2229 rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags); 2230 if (rc == EAGAIN) 2231 return 0; 2232 else if (rc) 2233 goto err_stop_timer; 2234 2235 KASSERT(uio.uio_offset > 0, ("%s: sorecieve on so %p read no data", 2236 __func__, ep->com.so)); 2237 ep->mpa_pkt_len += uio.uio_offset; 2238 2239 /* 2240 * If we get more than the supported amount of private data then we must 2241 * fail this connection. XXX: check so_rcv->sb_cc, or peek with another 2242 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last 2243 * byte is filled by the soreceive above. 2244 */ 2245 2246 /* Don't even have the MPA message. Wait for more data to arrive. */ 2247 if (ep->mpa_pkt_len < sizeof(*mpa)) 2248 return 0; 2249 mpa = (struct mpa_message *) ep->mpa_pkt; 2250 2251 /* 2252 * Validate MPA Header. 2253 */ 2254 if (mpa->revision > mpa_rev) { 2255 log(LOG_ERR, "%s: MPA version mismatch. Local = %d," 2256 " Received = %d\n", __func__, mpa_rev, mpa->revision); 2257 goto err_stop_timer; 2258 } 2259 2260 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) 2261 goto err_stop_timer; 2262 2263 /* 2264 * Fail if there's too much private data. 2265 */ 2266 plen = ntohs(mpa->private_data_size); 2267 if (plen > MPA_MAX_PRIVATE_DATA) 2268 goto err_stop_timer; 2269 2270 /* 2271 * If plen does not account for pkt size 2272 */ 2273 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) 2274 goto err_stop_timer; 2275 2276 ep->plen = (u8) plen; 2277 2278 /* 2279 * If we don't have all the pdata yet, then bail. 2280 */ 2281 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 2282 return 0; 2283 2284 /* 2285 * If we get here we have accumulated the entire mpa 2286 * start reply message including private data. 2287 */ 2288 ep->mpa_attr.initiator = 0; 2289 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 2290 ep->mpa_attr.recv_marker_enabled = markers_enabled; 2291 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 2292 ep->mpa_attr.version = mpa->revision; 2293 if (mpa->revision == 1) 2294 ep->tried_with_mpa_v1 = 1; 2295 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 2296 2297 if (mpa->revision == 2) { 2298 ep->mpa_attr.enhanced_rdma_conn = 2299 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 2300 if (ep->mpa_attr.enhanced_rdma_conn) { 2301 mpa_v2_params = (struct mpa_v2_conn_params *) 2302 (ep->mpa_pkt + sizeof(*mpa)); 2303 ep->ird = ntohs(mpa_v2_params->ird) & 2304 MPA_V2_IRD_ORD_MASK; 2305 ep->ird = min_t(u32, ep->ird, 2306 cur_max_read_depth(ep->com.dev)); 2307 ep->ord = ntohs(mpa_v2_params->ord) & 2308 MPA_V2_IRD_ORD_MASK; 2309 ep->ord = min_t(u32, ep->ord, 2310 cur_max_read_depth(ep->com.dev)); 2311 CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u", 2312 __func__, ep->ird, ep->ord); 2313 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) 2314 if (peer2peer) { 2315 if (ntohs(mpa_v2_params->ord) & 2316 MPA_V2_RDMA_WRITE_RTR) 2317 ep->mpa_attr.p2p_type = 2318 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 2319 else if (ntohs(mpa_v2_params->ord) & 2320 MPA_V2_RDMA_READ_RTR) 2321 ep->mpa_attr.p2p_type = 2322 FW_RI_INIT_P2PTYPE_READ_REQ; 2323 } 2324 } 2325 } else if (mpa->revision == 1 && peer2peer) 2326 ep->mpa_attr.p2p_type = p2p_type; 2327 2328 if (set_tcpinfo(ep)) 2329 goto err_stop_timer; 2330 2331 CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, " 2332 "xmit_marker_enabled = %d, version = %d", __func__, 2333 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 2334 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); 2335 2336 ep->com.state = MPA_REQ_RCVD; 2337 STOP_EP_TIMER(ep); 2338 2339 /* drive upcall */ 2340 if (ep->parent_ep->com.state != DEAD) 2341 if (connect_request_upcall(ep)) 2342 goto err_out; 2343 return 0; 2344 2345 err_stop_timer: 2346 STOP_EP_TIMER(ep); 2347 err_out: 2348 return 2; 2349 } 2350 2351 /* 2352 * Upcall from the adapter indicating data has been transmitted. 2353 * For us its just the single MPA request or reply. We can now free 2354 * the skb holding the mpa message. 2355 */ 2356 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 2357 { 2358 int err; 2359 struct c4iw_ep *ep = to_ep(cm_id); 2360 int abort = 0; 2361 2362 mutex_lock(&ep->com.mutex); 2363 CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep); 2364 2365 if ((ep->com.state == DEAD) || 2366 (ep->com.state != MPA_REQ_RCVD)) { 2367 2368 CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep); 2369 mutex_unlock(&ep->com.mutex); 2370 c4iw_put_ep(&ep->com); 2371 return -ECONNRESET; 2372 } 2373 set_bit(ULP_REJECT, &ep->com.history); 2374 2375 if (mpa_rev == 0) { 2376 2377 CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep); 2378 abort = 1; 2379 } 2380 else { 2381 2382 CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep); 2383 abort = send_mpa_reject(ep, pdata, pdata_len); 2384 } 2385 STOP_EP_TIMER(ep); 2386 err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); 2387 mutex_unlock(&ep->com.mutex); 2388 c4iw_put_ep(&ep->com); 2389 CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err); 2390 return 0; 2391 } 2392 2393 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2394 { 2395 int err; 2396 struct c4iw_qp_attributes attrs = {0}; 2397 enum c4iw_qp_attr_mask mask; 2398 struct c4iw_ep *ep = to_ep(cm_id); 2399 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 2400 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 2401 int abort = 0; 2402 2403 mutex_lock(&ep->com.mutex); 2404 CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep); 2405 2406 if ((ep->com.state == DEAD) || 2407 (ep->com.state != MPA_REQ_RCVD)) { 2408 2409 CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep); 2410 err = -ECONNRESET; 2411 goto err_out; 2412 } 2413 2414 BUG_ON(!qp); 2415 2416 set_bit(ULP_ACCEPT, &ep->com.history); 2417 2418 if ((conn_param->ord > c4iw_max_read_depth) || 2419 (conn_param->ird > c4iw_max_read_depth)) { 2420 2421 CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep); 2422 err = -EINVAL; 2423 goto err_abort; 2424 } 2425 2426 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 2427 2428 CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep); 2429 2430 if (conn_param->ord > ep->ird) { 2431 if (RELAXED_IRD_NEGOTIATION) { 2432 conn_param->ord = ep->ird; 2433 } else { 2434 ep->ird = conn_param->ird; 2435 ep->ord = conn_param->ord; 2436 send_mpa_reject(ep, conn_param->private_data, 2437 conn_param->private_data_len); 2438 err = -ENOMEM; 2439 goto err_abort; 2440 } 2441 } 2442 if (conn_param->ird < ep->ord) { 2443 if (RELAXED_IRD_NEGOTIATION && 2444 ep->ord <= h->rdev.adap->params.max_ordird_qp) { 2445 conn_param->ird = ep->ord; 2446 } else { 2447 err = -ENOMEM; 2448 goto err_abort; 2449 } 2450 } 2451 } 2452 ep->ird = conn_param->ird; 2453 ep->ord = conn_param->ord; 2454 2455 if (ep->mpa_attr.version == 1) { 2456 if (peer2peer && ep->ird == 0) 2457 ep->ird = 1; 2458 } else { 2459 if (peer2peer && 2460 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && 2461 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0) 2462 ep->ird = 1; 2463 } 2464 2465 CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d", __func__, __LINE__, 2466 ep->ird, ep->ord); 2467 2468 ep->com.cm_id = cm_id; 2469 ref_cm_id(&ep->com); 2470 ep->com.qp = qp; 2471 ref_qp(ep); 2472 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; 2473 2474 /* bind QP to EP and move to RTS */ 2475 attrs.mpa_attr = ep->mpa_attr; 2476 attrs.max_ird = ep->ird; 2477 attrs.max_ord = ep->ord; 2478 attrs.llp_stream_handle = ep; 2479 attrs.next_state = C4IW_QP_STATE_RTS; 2480 2481 /* bind QP and TID with INIT_WR */ 2482 mask = C4IW_QP_ATTR_NEXT_STATE | 2483 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 2484 C4IW_QP_ATTR_MPA_ATTR | 2485 C4IW_QP_ATTR_MAX_IRD | 2486 C4IW_QP_ATTR_MAX_ORD; 2487 2488 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); 2489 if (err) { 2490 CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err); 2491 goto err_defef_cm_id; 2492 } 2493 2494 err = send_mpa_reply(ep, conn_param->private_data, 2495 conn_param->private_data_len); 2496 if (err) { 2497 CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err); 2498 goto err_defef_cm_id; 2499 } 2500 2501 ep->com.state = FPDU_MODE; 2502 established_upcall(ep); 2503 mutex_unlock(&ep->com.mutex); 2504 c4iw_put_ep(&ep->com); 2505 CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep); 2506 return 0; 2507 err_defef_cm_id: 2508 deref_cm_id(&ep->com); 2509 err_abort: 2510 abort = 1; 2511 err_out: 2512 if (abort) 2513 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 2514 mutex_unlock(&ep->com.mutex); 2515 c4iw_put_ep(&ep->com); 2516 CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep); 2517 return err; 2518 } 2519 2520 static int 2521 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so) 2522 { 2523 int ret; 2524 int size, on; 2525 struct socket *sock = NULL; 2526 struct sockopt sopt; 2527 2528 ret = sock_create_kern(laddr->ss_family, 2529 SOCK_STREAM, IPPROTO_TCP, &sock); 2530 if (ret) { 2531 CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d", 2532 __func__, ret); 2533 return ret; 2534 } 2535 2536 if (reuseaddr) { 2537 bzero(&sopt, sizeof(struct sockopt)); 2538 sopt.sopt_dir = SOPT_SET; 2539 sopt.sopt_level = SOL_SOCKET; 2540 sopt.sopt_name = SO_REUSEADDR; 2541 on = 1; 2542 sopt.sopt_val = &on; 2543 sopt.sopt_valsize = sizeof(on); 2544 ret = -sosetopt(sock, &sopt); 2545 if (ret != 0) { 2546 log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEADDR) " 2547 "failed with %d.\n", __func__, sock, ret); 2548 } 2549 bzero(&sopt, sizeof(struct sockopt)); 2550 sopt.sopt_dir = SOPT_SET; 2551 sopt.sopt_level = SOL_SOCKET; 2552 sopt.sopt_name = SO_REUSEPORT; 2553 on = 1; 2554 sopt.sopt_val = &on; 2555 sopt.sopt_valsize = sizeof(on); 2556 ret = -sosetopt(sock, &sopt); 2557 if (ret != 0) { 2558 log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEPORT) " 2559 "failed with %d.\n", __func__, sock, ret); 2560 } 2561 } 2562 2563 ret = -sobind(sock, (struct sockaddr *)laddr, curthread); 2564 if (ret) { 2565 CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %p", 2566 __func__, ret); 2567 sock_release(sock); 2568 return ret; 2569 } 2570 2571 size = laddr->ss_family == AF_INET6 ? 2572 sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in); 2573 ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0); 2574 if (ret) { 2575 CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %p", 2576 __func__, ret); 2577 sock_release(sock); 2578 return ret; 2579 } 2580 2581 *so = sock; 2582 return 0; 2583 } 2584 2585 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2586 { 2587 int err = 0; 2588 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2589 struct c4iw_ep *ep = NULL; 2590 struct ifnet *nh_ifp; /* Logical egress interface */ 2591 #ifdef VIMAGE 2592 struct rdma_cm_id *rdma_id = (struct rdma_cm_id*)cm_id->context; 2593 struct vnet *vnet = rdma_id->route.addr.dev_addr.net; 2594 #endif 2595 2596 CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id); 2597 2598 2599 if ((conn_param->ord > c4iw_max_read_depth) || 2600 (conn_param->ird > c4iw_max_read_depth)) { 2601 2602 CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id); 2603 err = -EINVAL; 2604 goto out; 2605 } 2606 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2607 cm_id->provider_data = ep; 2608 2609 init_timer(&ep->timer); 2610 ep->plen = conn_param->private_data_len; 2611 2612 if (ep->plen) { 2613 2614 CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep); 2615 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 2616 conn_param->private_data, ep->plen); 2617 } 2618 ep->ird = conn_param->ird; 2619 ep->ord = conn_param->ord; 2620 2621 if (peer2peer && ep->ord == 0) { 2622 2623 CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep); 2624 ep->ord = 1; 2625 } 2626 2627 ep->com.dev = dev; 2628 ep->com.cm_id = cm_id; 2629 ref_cm_id(&ep->com); 2630 ep->com.qp = get_qhp(dev, conn_param->qpn); 2631 2632 if (!ep->com.qp) { 2633 2634 CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep); 2635 err = -EINVAL; 2636 goto fail; 2637 } 2638 ref_qp(ep); 2639 ep->com.thread = curthread; 2640 2641 CURVNET_SET(vnet); 2642 err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp); 2643 CURVNET_RESTORE(); 2644 2645 if (err) { 2646 2647 CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep); 2648 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 2649 err = EHOSTUNREACH; 2650 return err; 2651 } 2652 2653 if (!(nh_ifp->if_capenable & IFCAP_TOE) || 2654 TOEDEV(nh_ifp) == NULL) { 2655 err = -ENOPROTOOPT; 2656 goto fail; 2657 } 2658 ep->com.state = CONNECTING; 2659 ep->tos = 0; 2660 ep->com.local_addr = cm_id->local_addr; 2661 ep->com.remote_addr = cm_id->remote_addr; 2662 2663 err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so); 2664 if (err) 2665 goto fail; 2666 2667 setiwsockopt(ep->com.so); 2668 init_iwarp_socket(ep->com.so, &ep->com); 2669 err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr, 2670 ep->com.thread); 2671 if (err) 2672 goto fail_free_so; 2673 CTR2(KTR_IW_CXGBE, "%s:ccE, ep %p", __func__, ep); 2674 return 0; 2675 2676 fail_free_so: 2677 uninit_iwarp_socket(ep->com.so); 2678 ep->com.state = DEAD; 2679 sock_release(ep->com.so); 2680 fail: 2681 deref_cm_id(&ep->com); 2682 c4iw_put_ep(&ep->com); 2683 ep = NULL; 2684 out: 2685 CTR2(KTR_IW_CXGBE, "%s:ccE Error %d", __func__, err); 2686 return err; 2687 } 2688 2689 /* 2690 * iwcm->create_listen. Returns -errno on failure. 2691 */ 2692 int 2693 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 2694 { 2695 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2696 struct c4iw_listen_ep *lep = NULL; 2697 struct listen_port_info *port_info = NULL; 2698 int rc = 0; 2699 2700 CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %s", __func__, cm_id, 2701 backlog); 2702 lep = alloc_ep(sizeof(*lep), GFP_KERNEL); 2703 lep->com.cm_id = cm_id; 2704 ref_cm_id(&lep->com); 2705 lep->com.dev = dev; 2706 lep->backlog = backlog; 2707 lep->com.local_addr = cm_id->local_addr; 2708 lep->com.thread = curthread; 2709 cm_id->provider_data = lep; 2710 lep->com.state = LISTEN; 2711 2712 /* In case of INDADDR_ANY, ibcore creates cmid for each device and 2713 * invokes iw_cxgbe listener callbacks assuming that iw_cxgbe creates 2714 * HW listeners for each device seperately. But toecore expects single 2715 * solisten() call with INADDR_ANY address to create HW listeners on 2716 * all devices for a given port number. So iw_cxgbe driver calls 2717 * solisten() only once for INADDR_ANY(usually done at first time 2718 * listener callback from ibcore). And all the subsequent INADDR_ANY 2719 * listener callbacks from ibcore(for the same port address) do not 2720 * invoke solisten() as first listener callback has already created 2721 * listeners for all other devices(via solisten). 2722 */ 2723 if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) { 2724 port_info = add_ep_to_listenlist(lep); 2725 /* skip solisten() if refcnt > 1, as the listeners were 2726 * alredy created by 'Master lep' 2727 */ 2728 if (port_info->refcnt > 1) { 2729 /* As there will be only one listener socket for a TCP 2730 * port, copy Master lep's socket pointer to other lep's 2731 * that are belonging to same TCP port. 2732 */ 2733 struct c4iw_listen_ep *head_lep = 2734 container_of(port_info->lep_list.next, 2735 struct c4iw_listen_ep, listen_ep_list); 2736 lep->com.so = head_lep->com.so; 2737 goto out; 2738 } 2739 } 2740 rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so); 2741 if (rc) { 2742 CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d", 2743 __func__, rc); 2744 goto fail; 2745 } 2746 2747 rc = -solisten(lep->com.so, backlog, curthread); 2748 if (rc) { 2749 CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d", 2750 __func__, lep->com.so, rc); 2751 goto fail_free_so; 2752 } 2753 init_iwarp_socket(lep->com.so, &lep->com); 2754 out: 2755 return 0; 2756 2757 fail_free_so: 2758 sock_release(lep->com.so); 2759 fail: 2760 if (port_info) 2761 rem_ep_from_listenlist(lep); 2762 deref_cm_id(&lep->com); 2763 c4iw_put_ep(&lep->com); 2764 return rc; 2765 } 2766 2767 int 2768 c4iw_destroy_listen(struct iw_cm_id *cm_id) 2769 { 2770 struct c4iw_listen_ep *lep = to_listen_ep(cm_id); 2771 2772 mutex_lock(&lep->com.mutex); 2773 CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id, 2774 states[lep->com.state]); 2775 2776 lep->com.state = DEAD; 2777 if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) { 2778 /* if no refcount then close listen socket */ 2779 if (!rem_ep_from_listenlist(lep)) 2780 close_socket(lep->com.so); 2781 } else 2782 close_socket(lep->com.so); 2783 deref_cm_id(&lep->com); 2784 mutex_unlock(&lep->com.mutex); 2785 c4iw_put_ep(&lep->com); 2786 return 0; 2787 } 2788 2789 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2790 { 2791 int ret; 2792 mutex_lock(&ep->com.mutex); 2793 ret = c4iw_ep_disconnect(ep, abrupt, gfp); 2794 mutex_unlock(&ep->com.mutex); 2795 return ret; 2796 } 2797 2798 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2799 { 2800 int ret = 0; 2801 int close = 0; 2802 int fatal = 0; 2803 struct c4iw_rdev *rdev; 2804 2805 2806 CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep); 2807 2808 rdev = &ep->com.dev->rdev; 2809 2810 if (c4iw_fatal_error(rdev)) { 2811 2812 CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep); 2813 fatal = 1; 2814 close_complete_upcall(ep, -ECONNRESET); 2815 send_abort(ep); 2816 ep->com.state = DEAD; 2817 } 2818 CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep, 2819 states[ep->com.state]); 2820 2821 /* 2822 * Ref the ep here in case we have fatal errors causing the 2823 * ep to be released and freed. 2824 */ 2825 c4iw_get_ep(&ep->com); 2826 switch (ep->com.state) { 2827 2828 case MPA_REQ_WAIT: 2829 case MPA_REQ_SENT: 2830 case MPA_REQ_RCVD: 2831 case MPA_REP_SENT: 2832 case FPDU_MODE: 2833 close = 1; 2834 if (abrupt) 2835 ep->com.state = ABORTING; 2836 else { 2837 ep->com.state = CLOSING; 2838 START_EP_TIMER(ep); 2839 } 2840 set_bit(CLOSE_SENT, &ep->com.flags); 2841 break; 2842 2843 case CLOSING: 2844 2845 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 2846 2847 close = 1; 2848 if (abrupt) { 2849 STOP_EP_TIMER(ep); 2850 ep->com.state = ABORTING; 2851 } else 2852 ep->com.state = MORIBUND; 2853 } 2854 break; 2855 2856 case MORIBUND: 2857 case ABORTING: 2858 case DEAD: 2859 CTR3(KTR_IW_CXGBE, 2860 "%s ignoring disconnect ep %p state %u", __func__, 2861 ep, ep->com.state); 2862 break; 2863 2864 default: 2865 BUG(); 2866 break; 2867 } 2868 2869 2870 if (close) { 2871 2872 CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep); 2873 2874 if (abrupt) { 2875 2876 CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep); 2877 set_bit(EP_DISC_ABORT, &ep->com.history); 2878 close_complete_upcall(ep, -ECONNRESET); 2879 ret = send_abort(ep); 2880 if (ret) 2881 fatal = 1; 2882 } else { 2883 2884 CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep); 2885 set_bit(EP_DISC_CLOSE, &ep->com.history); 2886 2887 if (!ep->parent_ep) 2888 ep->com.state = MORIBUND; 2889 2890 CURVNET_SET(ep->com.so->so_vnet); 2891 sodisconnect(ep->com.so); 2892 CURVNET_RESTORE(); 2893 } 2894 2895 } 2896 2897 if (fatal) { 2898 set_bit(EP_DISC_FAIL, &ep->com.history); 2899 if (!abrupt) { 2900 STOP_EP_TIMER(ep); 2901 close_complete_upcall(ep, -EIO); 2902 } 2903 if (ep->com.qp) { 2904 struct c4iw_qp_attributes attrs = {0}; 2905 2906 attrs.next_state = C4IW_QP_STATE_ERROR; 2907 ret = c4iw_modify_qp(ep->com.dev, ep->com.qp, 2908 C4IW_QP_ATTR_NEXT_STATE, 2909 &attrs, 1); 2910 if (ret) { 2911 CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep); 2912 printf("%s - qp <- error failed!\n", __func__); 2913 } 2914 } 2915 release_ep_resources(ep); 2916 ep->com.state = DEAD; 2917 CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep); 2918 } 2919 c4iw_put_ep(&ep->com); 2920 CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep); 2921 return ret; 2922 } 2923 2924 #ifdef C4IW_EP_REDIRECT 2925 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, 2926 struct l2t_entry *l2t) 2927 { 2928 struct c4iw_ep *ep = ctx; 2929 2930 if (ep->dst != old) 2931 return 0; 2932 2933 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, 2934 l2t); 2935 dst_hold(new); 2936 cxgb4_l2t_release(ep->l2t); 2937 ep->l2t = l2t; 2938 dst_release(old); 2939 ep->dst = new; 2940 return 1; 2941 } 2942 #endif 2943 2944 2945 2946 static void ep_timeout(unsigned long arg) 2947 { 2948 struct c4iw_ep *ep = (struct c4iw_ep *)arg; 2949 2950 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 2951 2952 /* 2953 * Only insert if it is not already on the list. 2954 */ 2955 if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) { 2956 CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep); 2957 add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT); 2958 } 2959 } 2960 } 2961 2962 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl) 2963 { 2964 uint64_t val = be64toh(*rpl); 2965 int ret; 2966 struct c4iw_wr_wait *wr_waitp; 2967 2968 ret = (int)((val >> 8) & 0xff); 2969 wr_waitp = (struct c4iw_wr_wait *)rpl[1]; 2970 CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret); 2971 if (wr_waitp) 2972 c4iw_wake_up(wr_waitp, ret ? -ret : 0); 2973 2974 return (0); 2975 } 2976 2977 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl) 2978 { 2979 struct cqe_list_entry *cle; 2980 unsigned long flag; 2981 2982 cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT); 2983 cle->rhp = sc->iwarp_softc; 2984 cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]); 2985 2986 spin_lock_irqsave(&err_cqe_lock, flag); 2987 list_add_tail(&cle->entry, &err_cqe_list); 2988 queue_work(c4iw_taskq, &c4iw_task); 2989 spin_unlock_irqrestore(&err_cqe_lock, flag); 2990 2991 return (0); 2992 } 2993 2994 static int 2995 process_terminate(struct c4iw_ep *ep) 2996 { 2997 struct c4iw_qp_attributes attrs = {0}; 2998 2999 CTR2(KTR_IW_CXGBE, "%s:tB %p %d", __func__, ep); 3000 3001 if (ep && ep->com.qp) { 3002 3003 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", 3004 ep->hwtid, ep->com.qp->wq.sq.qid); 3005 attrs.next_state = C4IW_QP_STATE_TERMINATE; 3006 c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 3007 1); 3008 } else 3009 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", 3010 ep->hwtid); 3011 CTR2(KTR_IW_CXGBE, "%s:tE %p %d", __func__, ep); 3012 3013 return 0; 3014 } 3015 3016 int __init c4iw_cm_init(void) 3017 { 3018 3019 t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate); 3020 t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl); 3021 t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler); 3022 t4_register_an_handler(c4iw_ev_handler); 3023 3024 TAILQ_INIT(&req_list); 3025 spin_lock_init(&req_lock); 3026 INIT_LIST_HEAD(&err_cqe_list); 3027 spin_lock_init(&err_cqe_lock); 3028 3029 INIT_WORK(&c4iw_task, process_req); 3030 3031 c4iw_taskq = create_singlethread_workqueue("iw_cxgbe"); 3032 if (!c4iw_taskq) 3033 return -ENOMEM; 3034 3035 return 0; 3036 } 3037 3038 void __exit c4iw_cm_term(void) 3039 { 3040 WARN_ON(!TAILQ_EMPTY(&req_list)); 3041 WARN_ON(!list_empty(&err_cqe_list)); 3042 flush_workqueue(c4iw_taskq); 3043 destroy_workqueue(c4iw_taskq); 3044 3045 t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL); 3046 t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL); 3047 t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL); 3048 t4_register_an_handler(NULL); 3049 } 3050 #endif 3051