1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved. 5 * 6 * This software is available to you under a choice of one of two 7 * licenses. You may choose to be licensed under the terms of the GNU 8 * General Public License (GPL) Version 2, available from the file 9 * COPYING in the main directory of this source tree, or the 10 * OpenIB.org BSD license below: 11 * 12 * Redistribution and use in source and binary forms, with or 13 * without modification, are permitted provided that the following 14 * conditions are met: 15 * 16 * - Redistributions of source code must retain the above 17 * copyright notice, this list of conditions and the following 18 * disclaimer. 19 * 20 * - Redistributions in binary form must reproduce the above 21 * copyright notice, this list of conditions and the following 22 * disclaimer in the documentation and/or other materials 23 * provided with the distribution. 24 * 25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 32 * SOFTWARE. 33 */ 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_inet.h" 38 39 #ifdef TCP_OFFLOAD 40 #include <sys/types.h> 41 #include <sys/malloc.h> 42 #include <sys/socket.h> 43 #include <sys/socketvar.h> 44 #include <sys/sockio.h> 45 #include <sys/taskqueue.h> 46 #include <netinet/in.h> 47 #include <net/route.h> 48 49 #include <netinet/in_systm.h> 50 #include <netinet/in_pcb.h> 51 #include <netinet6/in6_pcb.h> 52 #include <netinet/ip.h> 53 #include <netinet/in_fib.h> 54 #include <netinet6/in6_fib.h> 55 #include <netinet6/scope6_var.h> 56 #include <netinet/ip_var.h> 57 #include <netinet/tcp_var.h> 58 #include <netinet/tcp.h> 59 #include <netinet/tcpip.h> 60 61 #include <netinet/toecore.h> 62 63 struct sge_iq; 64 struct rss_header; 65 struct cpl_set_tcb_rpl; 66 #include <linux/types.h> 67 #include "offload.h" 68 #include "tom/t4_tom.h" 69 70 #define TOEPCB(so) ((struct toepcb *)(so_sototcpcb((so))->t_toe)) 71 72 #include "iw_cxgbe.h" 73 #include <linux/module.h> 74 #include <linux/workqueue.h> 75 #include <linux/notifier.h> 76 #include <linux/inetdevice.h> 77 #include <linux/if_vlan.h> 78 #include <net/netevent.h> 79 #include <rdma/rdma_cm.h> 80 81 static spinlock_t req_lock; 82 static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list; 83 static struct work_struct c4iw_task; 84 static struct workqueue_struct *c4iw_taskq; 85 static LIST_HEAD(err_cqe_list); 86 static spinlock_t err_cqe_lock; 87 static LIST_HEAD(listen_port_list); 88 static DEFINE_MUTEX(listen_port_mutex); 89 90 static void process_req(struct work_struct *ctx); 91 static void start_ep_timer(struct c4iw_ep *ep); 92 static int stop_ep_timer(struct c4iw_ep *ep); 93 static int set_tcpinfo(struct c4iw_ep *ep); 94 static void process_timeout(struct c4iw_ep *ep); 95 static void process_err_cqes(void); 96 static void *alloc_ep(int size, gfp_t flags); 97 static void close_socket(struct socket *so); 98 static int send_mpa_req(struct c4iw_ep *ep); 99 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen); 100 static int send_mpa_reply(struct c4iw_ep 
*ep, const void *pdata, u8 plen); 101 static void close_complete_upcall(struct c4iw_ep *ep, int status); 102 static int send_abort(struct c4iw_ep *ep); 103 static void peer_close_upcall(struct c4iw_ep *ep); 104 static void peer_abort_upcall(struct c4iw_ep *ep); 105 static void connect_reply_upcall(struct c4iw_ep *ep, int status); 106 static int connect_request_upcall(struct c4iw_ep *ep); 107 static void established_upcall(struct c4iw_ep *ep); 108 static int process_mpa_reply(struct c4iw_ep *ep); 109 static int process_mpa_request(struct c4iw_ep *ep); 110 static void process_peer_close(struct c4iw_ep *ep); 111 static void process_conn_error(struct c4iw_ep *ep); 112 static void process_close_complete(struct c4iw_ep *ep); 113 static void ep_timeout(unsigned long arg); 114 static void setiwsockopt(struct socket *so); 115 static void init_iwarp_socket(struct socket *so, void *arg); 116 static void uninit_iwarp_socket(struct socket *so); 117 static void process_data(struct c4iw_ep *ep); 118 static void process_connected(struct c4iw_ep *ep); 119 static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag); 120 static void process_socket_event(struct c4iw_ep *ep); 121 static void release_ep_resources(struct c4iw_ep *ep); 122 static int process_terminate(struct c4iw_ep *ep); 123 static int terminate(struct sge_iq *iq, const struct rss_header *rss, 124 struct mbuf *m); 125 static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events); 126 static struct listen_port_info * 127 add_ep_to_listenlist(struct c4iw_listen_ep *lep); 128 static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep); 129 static struct c4iw_listen_ep * 130 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so); 131 static int get_ifnet_from_raddr(struct sockaddr_storage *raddr, 132 struct ifnet **ifp); 133 static void process_newconn(struct c4iw_listen_ep *master_lep, 134 struct socket *new_so); 135 #define START_EP_TIMER(ep) \ 136 do { \ 137 CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \ 138 __func__, __LINE__, (ep)); \ 139 start_ep_timer(ep); \ 140 } while (0) 141 142 #define STOP_EP_TIMER(ep) \ 143 ({ \ 144 CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \ 145 __func__, __LINE__, (ep)); \ 146 stop_ep_timer(ep); \ 147 }) 148 149 #define GET_LOCAL_ADDR(pladdr, so) \ 150 do { \ 151 struct sockaddr_storage *__a = NULL; \ 152 struct inpcb *__inp = sotoinpcb(so); \ 153 KASSERT(__inp != NULL, \ 154 ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \ 155 if (__inp->inp_vflag & INP_IPV4) \ 156 in_getsockaddr(so, (struct sockaddr **)&__a); \ 157 else \ 158 in6_getsockaddr(so, (struct sockaddr **)&__a); \ 159 *(pladdr) = *__a; \ 160 free(__a, M_SONAME); \ 161 } while (0) 162 163 #define GET_REMOTE_ADDR(praddr, so) \ 164 do { \ 165 struct sockaddr_storage *__a = NULL; \ 166 struct inpcb *__inp = sotoinpcb(so); \ 167 KASSERT(__inp != NULL, \ 168 ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \ 169 if (__inp->inp_vflag & INP_IPV4) \ 170 in_getpeeraddr(so, (struct sockaddr **)&__a); \ 171 else \ 172 in6_getpeeraddr(so, (struct sockaddr **)&__a); \ 173 *(praddr) = *__a; \ 174 free(__a, M_SONAME); \ 175 } while (0) 176 177 static char *states[] = { 178 "idle", 179 "listen", 180 "connecting", 181 "mpa_wait_req", 182 "mpa_req_sent", 183 "mpa_req_rcvd", 184 "mpa_rep_sent", 185 "fpdu_mode", 186 "aborting", 187 "closing", 188 "moribund", 189 "dead", 190 NULL, 191 }; 192 193 static void deref_cm_id(struct c4iw_ep_common *epc) 194 { 195 epc->cm_id->rem_ref(epc->cm_id); 196 epc->cm_id = NULL; 
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

/* allocated per TCP port while listening */
struct listen_port_info {
	uint16_t port_num;		/* TCP port address */
	struct list_head list;		/* belongs to listen_port_list */
	struct list_head lep_list;	/* per port lep list */
	uint32_t refcnt;		/* number of lep's listening */
};

/*
 * The following two lists are used to manage INADDR_ANY listeners:
 * 1) listen_port_list
 * 2) lep_list
 *
 * Below is an overview of the INADDR_ANY listener lists on a system with a
 * two port adapter:
 *
 *   |------------------|
 *   | listen_port_list |
 *   |------------------|
 *            |
 *            |   |-------------|        |-------------|
 *            |   | port_num: X |        | port_num: X |
 *            |---| list        |--------| list        |-------....
 *                | lep_list----+--|     | lep_list----+--|
 *                | refcnt      |  |     | refcnt      |  |
 *                |-------------|  |     |-------------|  |
 *                                 |                      |
 *                                 |                      |     lep1                lep2
 *                                 |                      | |----------------|  |----------------|
 *                                 |                      |-| listen_ep_list |--| listen_ep_list |
 *                                 |                        |----------------|  |----------------|
 *                                 |
 *                                 |     lep1                lep2
 *                                 | |----------------|  |----------------|
 *                                 |-| listen_ep_list |--| listen_ep_list |
 *                                   |----------------|  |----------------|
 *
 * With a two port adapter there are two leps (lep1 & lep2) for each TCP port
 * number, one per port.
 *
 * 'lep1' is always marked as the master lep, because solisten() is always
 * called through the first lep.
 */
static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep *lep)
{
	uint16_t port;
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;

	port = (laddr->ss_family == AF_INET) ?
		((struct sockaddr_in *)laddr)->sin_port :
		((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port)
			goto found_port;

	port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
	port_info->port_num = port;
	port_info->refcnt = 0;

	list_add_tail(&port_info->list, &listen_port_list);
	INIT_LIST_HEAD(&port_info->lep_list);

found_port:
	port_info->refcnt++;
	list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
	mutex_unlock(&listen_port_mutex);
	return port_info;
}

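/*
 * Drop 'lep' from its per-port lep list.  The returned refcount tells the
 * caller how many other leps are still listening on the same TCP port; once
 * it reaches zero the listen_port_info entry itself is removed and freed.
 */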
static int
rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
{
	uint16_t port;
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;
	int refcnt = 0;

	port = (laddr->ss_family == AF_INET) ?
		((struct sockaddr_in *)laddr)->sin_port :
		((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	/* get the port_info structure based on the lep's port address */
	list_for_each_entry(port_info, &listen_port_list, list) {
		if (port_info->port_num == port) {
			port_info->refcnt--;
			refcnt = port_info->refcnt;
			/* remove the current lep from the listen list */
			list_del(&lep->listen_ep_list);
			if (port_info->refcnt == 0) {
				/* Remove this entry from the list as there
				 * are no more listeners for this port_num.
				 */
				list_del(&port_info->list);
				kfree(port_info);
			}
			break;
		}
	}
	mutex_unlock(&listen_port_mutex);
	return refcnt;
}

/*
 * Find the lep that belongs to the ifnet on which the SYN frame was received.
 */
struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
{
	struct adapter *adap = NULL;
	struct c4iw_listen_ep *lep = NULL;
	struct ifnet *ifp = NULL, *hw_ifp = NULL;
	struct listen_port_info *port_info = NULL;
	int i = 0, found_portinfo = 0, found_lep = 0;
	uint16_t port;

	/*
	 * STEP 1: Figure out 'ifp' of the physical interface, not pseudo
	 * interfaces like vlan, lagg, etc.
	 * TBD: lagg support, lagg + vlan support.
	 */
	ifp = TOEPCB(so)->l2te->ifp;
	if (ifp->if_type == IFT_L2VLAN) {
		hw_ifp = VLAN_TRUNKDEV(ifp);
		if (hw_ifp == NULL) {
			CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of "
			    "vlan ifnet %p, sock %p, master_lep %p",
			    __func__, ifp, so, master_lep);
			return (NULL);
		}
	} else
		hw_ifp = ifp;

	/* STEP 2: Find 'port_info' with listener local port address. */
	port = (master_lep->com.local_addr.ss_family == AF_INET) ?
		((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
		((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;

	mutex_lock(&listen_port_mutex);
	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port) {
			found_portinfo = 1;
			break;
		}
	if (!found_portinfo)
		goto out;

	/* STEP 3: Traverse through the list of lep's that are bound to the
	 * current TCP port address and find the lep that belongs to the ifnet
	 * on which the SYN frame was received.
	 */
	list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
		adap = lep->com.dev->rdev.adap;
		for_each_port(adap, i) {
			if (hw_ifp == adap->port[i]->vi[0].ifp) {
				found_lep = 1;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&listen_port_mutex);
	return found_lep ?
lep : (NULL); 394 } 395 396 static void process_timeout(struct c4iw_ep *ep) 397 { 398 struct c4iw_qp_attributes attrs = {0}; 399 int abort = 1; 400 401 CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__, 402 ep, ep->hwtid, ep->com.state); 403 set_bit(TIMEDOUT, &ep->com.history); 404 switch (ep->com.state) { 405 case MPA_REQ_SENT: 406 connect_reply_upcall(ep, -ETIMEDOUT); 407 break; 408 case MPA_REQ_WAIT: 409 case MPA_REQ_RCVD: 410 case MPA_REP_SENT: 411 case FPDU_MODE: 412 break; 413 case CLOSING: 414 case MORIBUND: 415 if (ep->com.cm_id && ep->com.qp) { 416 attrs.next_state = C4IW_QP_STATE_ERROR; 417 c4iw_modify_qp(ep->com.dev, ep->com.qp, 418 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 419 } 420 close_complete_upcall(ep, -ETIMEDOUT); 421 break; 422 case ABORTING: 423 case DEAD: 424 /* 425 * These states are expected if the ep timed out at the same 426 * time as another thread was calling stop_ep_timer(). 427 * So we silently do nothing for these states. 428 */ 429 abort = 0; 430 break; 431 default: 432 CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u" 433 , __func__, ep, ep->hwtid, ep->com.state); 434 abort = 0; 435 } 436 if (abort) 437 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 438 c4iw_put_ep(&ep->com); 439 return; 440 } 441 442 struct cqe_list_entry { 443 struct list_head entry; 444 struct c4iw_dev *rhp; 445 struct t4_cqe err_cqe; 446 }; 447 448 static void 449 process_err_cqes(void) 450 { 451 unsigned long flag; 452 struct cqe_list_entry *cle; 453 454 spin_lock_irqsave(&err_cqe_lock, flag); 455 while (!list_empty(&err_cqe_list)) { 456 struct list_head *tmp; 457 tmp = err_cqe_list.next; 458 list_del(tmp); 459 tmp->next = tmp->prev = NULL; 460 spin_unlock_irqrestore(&err_cqe_lock, flag); 461 cle = list_entry(tmp, struct cqe_list_entry, entry); 462 c4iw_ev_dispatch(cle->rhp, &cle->err_cqe); 463 free(cle, M_CXGBE); 464 spin_lock_irqsave(&err_cqe_lock, flag); 465 } 466 spin_unlock_irqrestore(&err_cqe_lock, flag); 467 468 return; 469 } 470 471 static void 472 process_req(struct work_struct *ctx) 473 { 474 struct c4iw_ep_common *epc; 475 unsigned long flag; 476 int ep_events; 477 478 process_err_cqes(); 479 spin_lock_irqsave(&req_lock, flag); 480 while (!TAILQ_EMPTY(&req_list)) { 481 epc = TAILQ_FIRST(&req_list); 482 TAILQ_REMOVE(&req_list, epc, entry); 483 epc->entry.tqe_prev = NULL; 484 ep_events = epc->ep_events; 485 epc->ep_events = 0; 486 spin_unlock_irqrestore(&req_lock, flag); 487 mutex_lock(&epc->mutex); 488 CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x", 489 __func__, epc->so, epc, states[epc->state], ep_events); 490 if (ep_events & C4IW_EVENT_TERM) 491 process_terminate((struct c4iw_ep *)epc); 492 if (ep_events & C4IW_EVENT_TIMEOUT) 493 process_timeout((struct c4iw_ep *)epc); 494 if (ep_events & C4IW_EVENT_SOCKET) 495 process_socket_event((struct c4iw_ep *)epc); 496 mutex_unlock(&epc->mutex); 497 c4iw_put_ep(epc); 498 process_err_cqes(); 499 spin_lock_irqsave(&req_lock, flag); 500 } 501 spin_unlock_irqrestore(&req_lock, flag); 502 } 503 504 /* 505 * XXX: doesn't belong here in the iWARP driver. 506 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is 507 * set. Is this a valid assumption for active open? 
508 */ 509 static int 510 set_tcpinfo(struct c4iw_ep *ep) 511 { 512 struct socket *so = ep->com.so; 513 struct inpcb *inp = sotoinpcb(so); 514 struct tcpcb *tp; 515 struct toepcb *toep; 516 int rc = 0; 517 518 INP_WLOCK(inp); 519 tp = intotcpcb(inp); 520 if ((tp->t_flags & TF_TOE) == 0) { 521 rc = EINVAL; 522 log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n", 523 __func__, so, ep); 524 goto done; 525 } 526 toep = TOEPCB(so); 527 528 ep->hwtid = toep->tid; 529 ep->snd_seq = tp->snd_nxt; 530 ep->rcv_seq = tp->rcv_nxt; 531 done: 532 INP_WUNLOCK(inp); 533 return (rc); 534 535 } 536 static int 537 get_ifnet_from_raddr(struct sockaddr_storage *raddr, struct ifnet **ifp) 538 { 539 int err = 0; 540 541 if (raddr->ss_family == AF_INET) { 542 struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr; 543 struct nhop4_extended nh4 = {0}; 544 545 err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, raddr4->sin_addr, 546 NHR_REF, 0, &nh4); 547 *ifp = nh4.nh_ifp; 548 if (err) 549 fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4); 550 } else { 551 struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr; 552 struct nhop6_extended nh6 = {0}; 553 struct in6_addr addr6; 554 uint32_t scopeid; 555 556 memset(&addr6, 0, sizeof(addr6)); 557 in6_splitscope((struct in6_addr *)&raddr6->sin6_addr, 558 &addr6, &scopeid); 559 err = fib6_lookup_nh_ext(RT_DEFAULT_FIB, &addr6, scopeid, 560 NHR_REF, 0, &nh6); 561 *ifp = nh6.nh_ifp; 562 if (err) 563 fib6_free_nh_ext(RT_DEFAULT_FIB, &nh6); 564 } 565 566 CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err); 567 return err; 568 } 569 570 static void 571 close_socket(struct socket *so) 572 { 573 uninit_iwarp_socket(so); 574 soclose(so); 575 } 576 577 static void 578 process_peer_close(struct c4iw_ep *ep) 579 { 580 struct c4iw_qp_attributes attrs = {0}; 581 int disconnect = 1; 582 int release = 0; 583 584 CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep, 585 ep->com.so, states[ep->com.state]); 586 587 switch (ep->com.state) { 588 589 case MPA_REQ_WAIT: 590 CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD", 591 __func__, ep); 592 /* Fallthrough */ 593 case MPA_REQ_SENT: 594 CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD", 595 __func__, ep); 596 ep->com.state = DEAD; 597 connect_reply_upcall(ep, -ECONNABORTED); 598 599 disconnect = 0; 600 STOP_EP_TIMER(ep); 601 close_socket(ep->com.so); 602 deref_cm_id(&ep->com); 603 release = 1; 604 break; 605 606 case MPA_REQ_RCVD: 607 608 /* 609 * We're gonna mark this puppy DEAD, but keep 610 * the reference on it until the ULP accepts or 611 * rejects the CR. 
612 */ 613 CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING", 614 __func__, ep); 615 ep->com.state = CLOSING; 616 break; 617 618 case MPA_REP_SENT: 619 CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING", 620 __func__, ep); 621 ep->com.state = CLOSING; 622 break; 623 624 case FPDU_MODE: 625 CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING", 626 __func__, ep); 627 START_EP_TIMER(ep); 628 ep->com.state = CLOSING; 629 attrs.next_state = C4IW_QP_STATE_CLOSING; 630 c4iw_modify_qp(ep->com.dev, ep->com.qp, 631 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 632 peer_close_upcall(ep); 633 break; 634 635 case ABORTING: 636 CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)", 637 __func__, ep); 638 disconnect = 0; 639 break; 640 641 case CLOSING: 642 CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND", 643 __func__, ep); 644 ep->com.state = MORIBUND; 645 disconnect = 0; 646 break; 647 648 case MORIBUND: 649 CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__, 650 ep); 651 STOP_EP_TIMER(ep); 652 if (ep->com.cm_id && ep->com.qp) { 653 attrs.next_state = C4IW_QP_STATE_IDLE; 654 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 655 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 656 } 657 close_socket(ep->com.so); 658 close_complete_upcall(ep, 0); 659 ep->com.state = DEAD; 660 release = 1; 661 disconnect = 0; 662 break; 663 664 case DEAD: 665 CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)", 666 __func__, ep); 667 disconnect = 0; 668 break; 669 670 default: 671 panic("%s: ep %p state %d", __func__, ep, 672 ep->com.state); 673 break; 674 } 675 676 677 if (disconnect) { 678 679 CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep); 680 c4iw_ep_disconnect(ep, 0, M_NOWAIT); 681 } 682 if (release) { 683 684 CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep); 685 c4iw_put_ep(&ep->com); 686 } 687 CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep); 688 return; 689 } 690 691 static void 692 process_conn_error(struct c4iw_ep *ep) 693 { 694 struct c4iw_qp_attributes attrs = {0}; 695 int ret; 696 int state; 697 698 state = ep->com.state; 699 CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s", 700 __func__, ep, ep->com.so, ep->com.so->so_error, 701 states[ep->com.state]); 702 703 switch (state) { 704 705 case MPA_REQ_WAIT: 706 STOP_EP_TIMER(ep); 707 c4iw_put_ep(&ep->parent_ep->com); 708 break; 709 710 case MPA_REQ_SENT: 711 STOP_EP_TIMER(ep); 712 connect_reply_upcall(ep, -ECONNRESET); 713 break; 714 715 case MPA_REP_SENT: 716 ep->com.rpl_err = ECONNRESET; 717 CTR1(KTR_IW_CXGBE, "waking up ep %p", ep); 718 break; 719 720 case MPA_REQ_RCVD: 721 break; 722 723 case MORIBUND: 724 case CLOSING: 725 STOP_EP_TIMER(ep); 726 /*FALLTHROUGH*/ 727 case FPDU_MODE: 728 729 if (ep->com.cm_id && ep->com.qp) { 730 731 attrs.next_state = C4IW_QP_STATE_ERROR; 732 ret = c4iw_modify_qp(ep->com.qp->rhp, 733 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 734 &attrs, 1); 735 if (ret) 736 log(LOG_ERR, 737 "%s - qp <- error failed!\n", 738 __func__); 739 } 740 peer_abort_upcall(ep); 741 break; 742 743 case ABORTING: 744 break; 745 746 case DEAD: 747 CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!", 748 __func__, ep->com.so->so_error); 749 return; 750 751 default: 752 panic("%s: ep %p state %d", __func__, ep, state); 753 break; 754 } 755 756 if (state != ABORTING) { 757 close_socket(ep->com.so); 758 ep->com.state = DEAD; 759 c4iw_put_ep(&ep->com); 760 } 761 CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep); 762 return; 763 } 764 765 static void 766 process_close_complete(struct c4iw_ep *ep) 767 { 768 struct c4iw_qp_attributes attrs = {0}; 769 int release = 0; 770 771 
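	/*
	 * The transport-level close has fully completed on the socket.  Walk
	 * the endpoint state machine: a CLOSING endpoint becomes MORIBUND,
	 * while a MORIBUND endpoint is moved to DEAD, its QP idled and its
	 * resources released.
	 */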
	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	/* The cm_id may be null if we failed to connect */
	set_bit(CLOSE_CON_RPL, &ep->com.history);

	switch (ep->com.state) {

	case CLOSING:
		CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
		    __func__, ep);
		ep->com.state = MORIBUND;
		break;

	case MORIBUND:
		CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
		    ep);
		STOP_EP_TIMER(ep);

		if ((ep->com.cm_id) && (ep->com.qp)) {

			CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
			    __func__, ep);
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.dev,
					ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE,
					&attrs, 1);
		}

		close_socket(ep->com.so);
		close_complete_upcall(ep, 0);
		ep->com.state = DEAD;
		release = 1;
		break;

	case ABORTING:
		CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
		break;

	case DEAD:
		CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
		break;
	default:
		CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
		    __func__, ep);
		panic("%s:pcc6 %p unknown ep state", __func__, ep);
		break;
	}

	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
		release_ep_resources(ep);
	}
	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
	return;
}

static void
setiwsockopt(struct socket *so)
{
	int rc;
	struct sockopt sopt;
	int on = 1;

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;
	sopt.sopt_td = NULL;
	rc = -sosetopt(so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
		    __func__, so, rc);
	}
}

static void
init_iwarp_socket(struct socket *so, void *arg)
{
	if (SOLISTENING(so)) {
		SOLISTEN_LOCK(so);
		solisten_upcall_set(so, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;
		SOLISTEN_UNLOCK(so);
	} else {
		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
}

static void
uninit_iwarp_socket(struct socket *so)
{
	if (SOLISTENING(so)) {
		SOLISTEN_LOCK(so);
		solisten_upcall_set(so, NULL, NULL);
		SOLISTEN_UNLOCK(so);
	} else {
		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_clear(so, SO_RCV);
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
}

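/*
 * Streaming data arrived on the socket.  Before FPDU_MODE this is MPA
 * negotiation traffic (request or reply); once in FPDU_MODE any further
 * streaming data is unexpected and the QP is moved to TERMINATE.
 */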
static void
process_data(struct c4iw_ep *ep)
{
	int ret = 0;
	int disconnect = 0;
	struct c4iw_qp_attributes attrs = {0};

	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		disconnect = process_mpa_reply(ep);
		break;
	case MPA_REQ_WAIT:
		disconnect = process_mpa_request(ep);
		if (disconnect)
			/* Referenced in process_newconn() */
			c4iw_put_ep(&ep->parent_ep->com);
		break;
	case FPDU_MODE:
		MPASS(ep->com.qp != NULL);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -EINPROGRESS)
			disconnect = 1;
		break;
	default:
		log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
		    "state %d, so %p, so_state 0x%x, sbused %u\n",
		    __func__, ep, ep->com.state, ep->com.so,
		    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
		break;
	}
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);

}

static void
process_connected(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;

	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
		if (send_mpa_req(ep))
			goto err;
	} else {
		connect_reply_upcall(ep, -so->so_error);
		goto err;
	}
	return;
err:
	close_socket(so);
	ep->com.state = DEAD;
	c4iw_put_ep(&ep->com);
	return;
}

static inline int c4iw_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return IN_ZERONET(
			ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
				ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

static inline int c4iw_loopback_addr(struct sockaddr *addr)
{
	if (addr->sa_family == AF_INET)
		return IN_LOOPBACK(
			ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
	else
		return IN6_IS_ADDR_LOOPBACK(
				&((struct sockaddr_in6 *) addr)->sin6_addr);
}

static inline int c4iw_any_addr(struct sockaddr *addr)
{
	return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr);
}

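/*
 * Set up a connection accepted on one of our listening sockets: for wildcard
 * listeners resolve the per-ifnet 'real' lep, allocate and initialize a new
 * c4iw_ep for the child socket, accept the connection and kick off MPA
 * request processing.
 */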
static void
process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
{
	struct c4iw_listen_ep *real_lep = NULL;
	struct c4iw_ep *new_ep = NULL;
	struct sockaddr_in *remote = NULL;
	int ret = 0;

	MPASS(new_so != NULL);

	if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr)) {
		/* Here we need to find the 'real_lep' that belongs to the
		 * incoming socket's network interface, such that the newly
		 * created 'ep' can be attached to the real 'lep'.
		 */
		real_lep = find_real_listen_ep(master_lep, new_so);
		if (real_lep == NULL) {
			CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
			    "ep for sock: %p", __func__, new_so);
			log(LOG_ERR, "%s: Could not find the real listen ep for "
			    "sock: %p\n", __func__, new_so);
			/* FIXME: properly free 'new_so' in the failure case.
			 * Use of soabort() and soclose() is not legal
			 * here (before soaccept()).
			 */
			return;
		}
	} else /* for a non-wildcard address, master_lep is always the real_lep */
		real_lep = master_lep;

	new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);

	CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
	    "listening so %p, new so %p", __func__, master_lep, real_lep,
	    new_ep, master_lep->com.so, new_so);

	new_ep->com.dev = real_lep->com.dev;
	new_ep->com.so = new_so;
	new_ep->com.cm_id = NULL;
	new_ep->com.thread = real_lep->com.thread;
	new_ep->parent_ep = real_lep;

	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
	c4iw_get_ep(&real_lep->com);
	init_timer(&new_ep->timer);
	new_ep->com.state = MPA_REQ_WAIT;

	setiwsockopt(new_so);
	ret = soaccept(new_so, (struct sockaddr **)&remote);
	if (ret != 0) {
		CTR4(KTR_IW_CXGBE,
		    "%s:listen sock:%p, new sock:%p, ret:%d",
		    __func__, master_lep->com.so, new_so, ret);
		if (remote != NULL)
			free(remote, M_SONAME);
		soclose(new_so);
		c4iw_put_ep(&new_ep->com);
		c4iw_put_ep(&real_lep->com);
		return;
	}
	free(remote, M_SONAME);

	START_EP_TIMER(new_ep);

	/* An MPA request might already have been queued up on the socket, so
	 * we initialize the socket/upcall_handler under lock to prevent the
	 * MPA request from being processed on another thread (via
	 * process_req()) simultaneously.
	 */
	c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below; this is
				      to avoid the ep being freed before it is
				      unlocked. */
	mutex_lock(&new_ep->com.mutex);
	init_iwarp_socket(new_so, &new_ep->com);

	ret = process_mpa_request(new_ep);
	if (ret) {
		/* ABORT */
		c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
		c4iw_put_ep(&real_lep->com);
	}
	mutex_unlock(&new_ep->com.mutex);
	c4iw_put_ep(&new_ep->com);
	return;
}

static int
add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
{
	unsigned long flag;

	spin_lock_irqsave(&req_lock, flag);
	if (ep && ep->com.so) {
		ep->com.ep_events |= new_ep_event;
		if (!ep->com.entry.tqe_prev) {
			c4iw_get_ep(&ep->com);
			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
			queue_work(c4iw_taskq, &c4iw_task);
		}
	}
	spin_unlock_irqrestore(&req_lock, flag);

	return (0);
}

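/*
 * Socket upcall.  This runs in the socket layer with socket buffer locks
 * held, so it only wakes any waiters on a fatal error and queues the event
 * for the c4iw taskqueue; all real work happens later in process_req().
 */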
static int
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
	struct c4iw_ep *ep = arg;

	CTR6(KTR_IW_CXGBE,
	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
	    __func__, so, so->so_state, ep, states[ep->com.state],
	    ep->com.entry.tqe_prev);

	MPASS(ep->com.so == so);
	/*
	 * Wake up any threads waiting in rdma_init()/rdma_fini(),
	 * with locks held.
	 */
	if (so->so_error || (ep->com.dev->rdev.flags & T4_FATAL_ERROR))
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);

	return (SU_OK);
}

static int
terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct socket *so;
	struct c4iw_ep *ep;

	INP_WLOCK(toep->inp);
	so = inp_inpcbtosocket(toep->inp);
	ep = so->so_rcv.sb_upcallarg;
	INP_WUNLOCK(toep->inp);

	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
	add_ep_to_req_list(ep, C4IW_EVENT_TERM);

	return 0;
}

static void
process_socket_event(struct c4iw_ep *ep)
{
	int state = ep->com.state;
	struct socket *so = ep->com.so;

	if (ep->com.state == DEAD) {
		CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
		    "ep %p ep_state %s", __func__, ep, states[state]);
		return;
	}

	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
	    so->so_error, so->so_rcv.sb_state, ep, states[state]);

	if (state == CONNECTING) {
		process_connected(ep);
		return;
	}

	if (state == LISTEN) {
		struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
		struct socket *listen_so = so, *new_so = NULL;
		int error = 0;

		SOLISTEN_LOCK(listen_so);
		do {
			error = solisten_dequeue(listen_so, &new_so,
			    SOCK_NONBLOCK);
			if (error) {
				CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
				    "error %d", __func__, lep, listen_so,
				    error);
				return;
			}
			process_newconn(lep, new_so);

			/* solisten_dequeue() drops the listen lock on return,
			 * so reacquire it for the sol_qlen check and for the
			 * next iteration.
			 */
			SOLISTEN_LOCK(listen_so);
		} while (listen_so->sol_qlen);
		SOLISTEN_UNLOCK(listen_so);

		return;
	}

	/* connection error */
	if (so->so_error) {
		process_conn_error(ep);
		return;
	}

	/* peer close */
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
		process_peer_close(ep);
		/*
		 * check whether a socket disconnect event is pending before
		 * returning.  Fall through if yes.
		 */
		if (!(so->so_state & SS_ISDISCONNECTED))
			return;
	}

	/* close complete */
	if (so->so_state & SS_ISDISCONNECTED) {
		process_close_complete(ep);
		return;
	}

	/* rx data */
	if (sbused(&ep->com.so->so_rcv)) {
		process_data(ep);
		return;
	}

	/* Socket events for 'MPA Request Received' and 'Close Complete'
	 * were already processed earlier in their previous event handlers.
	 * Hence, these socket events are skipped here.
	 * Any other socket event should have been handled above.
1198 */ 1199 MPASS((ep->com.state == MPA_REQ_RCVD) || (ep->com.state == MORIBUND)); 1200 1201 if ((ep->com.state != MPA_REQ_RCVD) && (ep->com.state != MORIBUND)) 1202 log(LOG_ERR, "%s: Unprocessed socket event so %p, " 1203 "so_state 0x%x, so_err %d, sb_state 0x%x, ep %p, ep_state %s\n", 1204 __func__, so, so->so_state, so->so_error, so->so_rcv.sb_state, 1205 ep, states[state]); 1206 1207 } 1208 1209 SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, 1210 "iw_cxgbe driver parameters"); 1211 1212 static int dack_mode = 0; 1213 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0, 1214 "Delayed ack mode (default = 0)"); 1215 1216 int c4iw_max_read_depth = 8; 1217 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0, 1218 "Per-connection max ORD/IRD (default = 8)"); 1219 1220 static int enable_tcp_timestamps; 1221 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0, 1222 "Enable tcp timestamps (default = 0)"); 1223 1224 static int enable_tcp_sack; 1225 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0, 1226 "Enable tcp SACK (default = 0)"); 1227 1228 static int enable_tcp_window_scaling = 1; 1229 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0, 1230 "Enable tcp window scaling (default = 1)"); 1231 1232 int c4iw_debug = 0; 1233 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0, 1234 "Enable debug logging (default = 0)"); 1235 1236 static int peer2peer = 1; 1237 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0, 1238 "Support peer2peer ULPs (default = 1)"); 1239 1240 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; 1241 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0, 1242 "RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)"); 1243 1244 static int ep_timeout_secs = 60; 1245 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0, 1246 "CM Endpoint operation timeout in seconds (default = 60)"); 1247 1248 static int mpa_rev = 1; 1249 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0, 1250 "MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)"); 1251 1252 static int markers_enabled; 1253 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0, 1254 "Enable MPA MARKERS (default(0) = disabled)"); 1255 1256 static int crc_enabled = 1; 1257 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0, 1258 "Enable MPA CRC (default(1) = enabled)"); 1259 1260 static int rcv_win = 256 * 1024; 1261 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0, 1262 "TCP receive window in bytes (default = 256KB)"); 1263 1264 static int snd_win = 128 * 1024; 1265 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0, 1266 "TCP send window in bytes (default = 128KB)"); 1267 1268 int use_dsgl = 1; 1269 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, use_dsgl, CTLFLAG_RWTUN, &use_dsgl, 0, 1270 "Use DSGL for PBL/FastReg (default=1)"); 1271 1272 int inline_threshold = 128; 1273 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, inline_threshold, CTLFLAG_RWTUN, &inline_threshold, 0, 1274 "inline vs dsgl threshold (default=128)"); 1275 1276 static int reuseaddr = 0; 1277 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, reuseaddr, 
CTLFLAG_RWTUN, &reuseaddr, 0, 1278 "Enable SO_REUSEADDR & SO_REUSEPORT socket options on all iWARP client connections(default = 0)"); 1279 1280 static void 1281 start_ep_timer(struct c4iw_ep *ep) 1282 { 1283 1284 if (timer_pending(&ep->timer)) { 1285 CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep); 1286 printk(KERN_ERR "%s timer already started! ep %p\n", __func__, 1287 ep); 1288 return; 1289 } 1290 clear_bit(TIMEOUT, &ep->com.flags); 1291 c4iw_get_ep(&ep->com); 1292 ep->timer.expires = jiffies + ep_timeout_secs * HZ; 1293 ep->timer.data = (unsigned long)ep; 1294 ep->timer.function = ep_timeout; 1295 add_timer(&ep->timer); 1296 } 1297 1298 static int 1299 stop_ep_timer(struct c4iw_ep *ep) 1300 { 1301 1302 del_timer_sync(&ep->timer); 1303 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 1304 c4iw_put_ep(&ep->com); 1305 return 0; 1306 } 1307 return 1; 1308 } 1309 1310 static void * 1311 alloc_ep(int size, gfp_t gfp) 1312 { 1313 struct c4iw_ep_common *epc; 1314 1315 epc = kzalloc(size, gfp); 1316 if (epc == NULL) 1317 return (NULL); 1318 1319 kref_init(&epc->kref); 1320 mutex_init(&epc->mutex); 1321 c4iw_init_wr_wait(&epc->wr_wait); 1322 1323 return (epc); 1324 } 1325 1326 void _c4iw_free_ep(struct kref *kref) 1327 { 1328 struct c4iw_ep *ep; 1329 struct c4iw_ep_common *epc; 1330 1331 ep = container_of(kref, struct c4iw_ep, com.kref); 1332 epc = &ep->com; 1333 KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list", 1334 __func__, epc)); 1335 if (test_bit(QP_REFERENCED, &ep->com.flags)) 1336 deref_qp(ep); 1337 CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx", 1338 __func__, ep, epc->history, epc->flags); 1339 kfree(ep); 1340 } 1341 1342 static void release_ep_resources(struct c4iw_ep *ep) 1343 { 1344 CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep); 1345 set_bit(RELEASE_RESOURCES, &ep->com.flags); 1346 c4iw_put_ep(&ep->com); 1347 CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep); 1348 } 1349 1350 static int 1351 send_mpa_req(struct c4iw_ep *ep) 1352 { 1353 int mpalen; 1354 struct mpa_message *mpa; 1355 struct mpa_v2_conn_params mpa_v2_params; 1356 struct mbuf *m; 1357 char mpa_rev_to_use = mpa_rev; 1358 int err = 0; 1359 1360 if (ep->retry_with_mpa_v1) 1361 mpa_rev_to_use = 1; 1362 mpalen = sizeof(*mpa) + ep->plen; 1363 if (mpa_rev_to_use == 2) 1364 mpalen += sizeof(struct mpa_v2_conn_params); 1365 1366 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); 1367 if (mpa == NULL) { 1368 err = -ENOMEM; 1369 CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d", 1370 __func__, ep, err); 1371 goto err; 1372 } 1373 1374 memset(mpa, 0, mpalen); 1375 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 1376 mpa->flags = (crc_enabled ? MPA_CRC : 0) | 1377 (markers_enabled ? MPA_MARKERS : 0) | 1378 (mpa_rev_to_use == 2 ? 
MPA_ENHANCED_RDMA_CONN : 0); 1379 mpa->private_data_size = htons(ep->plen); 1380 mpa->revision = mpa_rev_to_use; 1381 1382 if (mpa_rev_to_use == 1) { 1383 ep->tried_with_mpa_v1 = 1; 1384 ep->retry_with_mpa_v1 = 0; 1385 } 1386 1387 if (mpa_rev_to_use == 2) { 1388 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1389 sizeof(struct mpa_v2_conn_params)); 1390 mpa_v2_params.ird = htons((u16)ep->ird); 1391 mpa_v2_params.ord = htons((u16)ep->ord); 1392 1393 if (peer2peer) { 1394 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1395 1396 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) { 1397 mpa_v2_params.ord |= 1398 htons(MPA_V2_RDMA_WRITE_RTR); 1399 } else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) { 1400 mpa_v2_params.ord |= 1401 htons(MPA_V2_RDMA_READ_RTR); 1402 } 1403 } 1404 memcpy(mpa->private_data, &mpa_v2_params, 1405 sizeof(struct mpa_v2_conn_params)); 1406 1407 if (ep->plen) { 1408 1409 memcpy(mpa->private_data + 1410 sizeof(struct mpa_v2_conn_params), 1411 ep->mpa_pkt + sizeof(*mpa), ep->plen); 1412 } 1413 } else { 1414 1415 if (ep->plen) 1416 memcpy(mpa->private_data, 1417 ep->mpa_pkt + sizeof(*mpa), ep->plen); 1418 CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep); 1419 } 1420 1421 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); 1422 if (m == NULL) { 1423 err = -ENOMEM; 1424 CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d", 1425 __func__, ep, err); 1426 free(mpa, M_CXGBE); 1427 goto err; 1428 } 1429 m_copyback(m, 0, mpalen, (void *)mpa); 1430 free(mpa, M_CXGBE); 1431 1432 err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, 1433 ep->com.thread); 1434 if (err) { 1435 CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d", 1436 __func__, ep, err); 1437 goto err; 1438 } 1439 1440 START_EP_TIMER(ep); 1441 ep->com.state = MPA_REQ_SENT; 1442 ep->mpa_attr.initiator = 1; 1443 CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err); 1444 return 0; 1445 err: 1446 connect_reply_upcall(ep, err); 1447 CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err); 1448 return err; 1449 } 1450 1451 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 1452 { 1453 int mpalen ; 1454 struct mpa_message *mpa; 1455 struct mpa_v2_conn_params mpa_v2_params; 1456 struct mbuf *m; 1457 int err; 1458 1459 CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid, 1460 ep->plen); 1461 1462 mpalen = sizeof(*mpa) + plen; 1463 1464 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1465 1466 mpalen += sizeof(struct mpa_v2_conn_params); 1467 CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep, 1468 ep->mpa_attr.version, mpalen); 1469 } 1470 1471 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); 1472 if (mpa == NULL) 1473 return (-ENOMEM); 1474 1475 memset(mpa, 0, mpalen); 1476 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1477 mpa->flags = MPA_REJECT; 1478 mpa->revision = mpa_rev; 1479 mpa->private_data_size = htons(plen); 1480 1481 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1482 1483 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1484 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1485 sizeof(struct mpa_v2_conn_params)); 1486 mpa_v2_params.ird = htons(((u16)ep->ird) | 1487 (peer2peer ? MPA_V2_PEER2PEER_MODEL : 1488 0)); 1489 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? 1490 (p2p_type == 1491 FW_RI_INIT_P2PTYPE_RDMA_WRITE ? 1492 MPA_V2_RDMA_WRITE_RTR : p2p_type == 1493 FW_RI_INIT_P2PTYPE_READ_REQ ? 
1494 MPA_V2_RDMA_READ_RTR : 0) : 0)); 1495 memcpy(mpa->private_data, &mpa_v2_params, 1496 sizeof(struct mpa_v2_conn_params)); 1497 1498 if (ep->plen) 1499 memcpy(mpa->private_data + 1500 sizeof(struct mpa_v2_conn_params), pdata, plen); 1501 CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep, 1502 mpa_v2_params.ird, mpa_v2_params.ord, ep->plen); 1503 } else 1504 if (plen) 1505 memcpy(mpa->private_data, pdata, plen); 1506 1507 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); 1508 if (m == NULL) { 1509 free(mpa, M_CXGBE); 1510 return (-ENOMEM); 1511 } 1512 m_copyback(m, 0, mpalen, (void *)mpa); 1513 free(mpa, M_CXGBE); 1514 1515 err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread); 1516 if (!err) 1517 ep->snd_seq += mpalen; 1518 CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err); 1519 return err; 1520 } 1521 1522 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 1523 { 1524 int mpalen; 1525 struct mpa_message *mpa; 1526 struct mbuf *m; 1527 struct mpa_v2_conn_params mpa_v2_params; 1528 int err; 1529 1530 CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep); 1531 1532 mpalen = sizeof(*mpa) + plen; 1533 1534 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1535 1536 CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep, 1537 ep->mpa_attr.version); 1538 mpalen += sizeof(struct mpa_v2_conn_params); 1539 } 1540 1541 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); 1542 if (mpa == NULL) 1543 return (-ENOMEM); 1544 1545 memset(mpa, 0, sizeof(*mpa)); 1546 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1547 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 1548 (markers_enabled ? MPA_MARKERS : 0); 1549 mpa->revision = ep->mpa_attr.version; 1550 mpa->private_data_size = htons(plen); 1551 1552 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1553 1554 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1555 mpa->private_data_size += 1556 htons(sizeof(struct mpa_v2_conn_params)); 1557 mpa_v2_params.ird = htons((u16)ep->ird); 1558 mpa_v2_params.ord = htons((u16)ep->ord); 1559 CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep, 1560 ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord); 1561 1562 if (peer2peer && (ep->mpa_attr.p2p_type != 1563 FW_RI_INIT_P2PTYPE_DISABLED)) { 1564 1565 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1566 1567 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) { 1568 1569 mpa_v2_params.ord |= 1570 htons(MPA_V2_RDMA_WRITE_RTR); 1571 CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d", 1572 __func__, ep, p2p_type, mpa_v2_params.ird, 1573 mpa_v2_params.ord); 1574 } 1575 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) { 1576 1577 mpa_v2_params.ord |= 1578 htons(MPA_V2_RDMA_READ_RTR); 1579 CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d", 1580 __func__, ep, p2p_type, mpa_v2_params.ird, 1581 mpa_v2_params.ord); 1582 } 1583 } 1584 1585 memcpy(mpa->private_data, &mpa_v2_params, 1586 sizeof(struct mpa_v2_conn_params)); 1587 1588 if (ep->plen) 1589 memcpy(mpa->private_data + 1590 sizeof(struct mpa_v2_conn_params), pdata, plen); 1591 } else 1592 if (plen) 1593 memcpy(mpa->private_data, pdata, plen); 1594 1595 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); 1596 if (m == NULL) { 1597 free(mpa, M_CXGBE); 1598 return (-ENOMEM); 1599 } 1600 m_copyback(m, 0, mpalen, (void *)mpa); 1601 free(mpa, M_CXGBE); 1602 1603 1604 ep->com.state = MPA_REP_SENT; 1605 ep->snd_seq += mpalen; 1606 err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, 1607 ep->com.thread); 1608 CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", 
	    __func__, ep, err);
	return err;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
}

static int
send_abort(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct sockopt sopt;
	int rc;
	struct linger l;

	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
	    states[ep->com.state], ep->hwtid);

	l.l_onoff = 1;
	l.l_linger = 0;

	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	sopt.sopt_td = NULL;
	rc = -sosetopt(so, &sopt);
	if (rc != 0) {
		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
		    __func__, so, rc);
	}

	uninit_iwarp_socket(so);
	soclose(so);
	set_bit(ABORT_CONN, &ep->com.history);

	/*
	 * TBD: the iw_cxgbe driver should receive an ABORT reply for every
	 * ABORT request it has sent.  But the current TOE driver does not
	 * propagate this ABORT reply event (via do_abort_rpl) to iw_cxgbe.
	 * So as a workaround, dereference 'ep' here instead of doing it in
	 * the abort_rpl() handler (not yet implemented) of the iw_cxgbe
	 * driver.
	 */
	release_ep_resources(ep);
	ep->com.state = DEAD;

	return (0);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
}

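/*
 * Report the outcome of an active connection attempt to the IWCM.  For a
 * successful or refused MPA exchange the negotiated IRD/ORD values and any
 * private data are passed up as well; on most failures the cm_id reference
 * taken at connect time is dropped here.
 */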
static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
	    -ECONNRESET : status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {

		if (!ep->tried_with_mpa_v1) {

			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
			    sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message) +
			    sizeof(struct mpa_v2_conn_params);
		} else {

			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
			/* this means MPA_v1 is used */
			event.ord = c4iw_max_read_depth;
			event.ird = c4iw_max_read_depth;
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message);
		}
	}

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
		set_bit(CONN_RPL_UPCALL, &ep->com.history);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}

	if (status == -ECONNABORTED) {

		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
		return;
	}

	if (status < 0) {

		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
		deref_cm_id(&ep->com);
	}

	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
	    ep->tried_with_mpa_v1);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;

	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
		    sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
		    sizeof(struct mpa_v2_conn_params);
	} else {

		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}

	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
	    &event);
	if (ret) {
		CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
		    " IWCM, err:%d", __func__, ep, ret);
		c4iw_put_ep(&ep->com);
	} else
		/* Dereference parent_ep only in success case.
		 * In case of failure, parent_ep is dereferenced by the caller
		 * of process_mpa_request().
1814 */ 1815 c4iw_put_ep(&ep->parent_ep->com); 1816 1817 set_bit(CONNREQ_UPCALL, &ep->com.history); 1818 return ret; 1819 } 1820 1821 static void established_upcall(struct c4iw_ep *ep) 1822 { 1823 struct iw_cm_event event; 1824 1825 CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep); 1826 memset(&event, 0, sizeof(event)); 1827 event.event = IW_CM_EVENT_ESTABLISHED; 1828 event.ird = ep->ord; 1829 event.ord = ep->ird; 1830 1831 if (ep->com.cm_id) { 1832 1833 CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep); 1834 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1835 set_bit(ESTAB_UPCALL, &ep->com.history); 1836 } 1837 CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep); 1838 } 1839 1840 1841 #define RELAXED_IRD_NEGOTIATION 1 1842 1843 /* 1844 * process_mpa_reply - process streaming mode MPA reply 1845 * 1846 * Returns: 1847 * 1848 * 0 upon success indicating a connect request was delivered to the ULP 1849 * or the mpa request is incomplete but valid so far. 1850 * 1851 * 1 if a failure requires the caller to close the connection. 1852 * 1853 * 2 if a failure requires the caller to abort the connection. 1854 */ 1855 static int process_mpa_reply(struct c4iw_ep *ep) 1856 { 1857 struct mpa_message *mpa; 1858 struct mpa_v2_conn_params *mpa_v2_params; 1859 u16 plen; 1860 u16 resp_ird, resp_ord; 1861 u8 rtr_mismatch = 0, insuff_ird = 0; 1862 struct c4iw_qp_attributes attrs = {0}; 1863 enum c4iw_qp_attr_mask mask; 1864 int err; 1865 struct mbuf *top, *m; 1866 int flags = MSG_DONTWAIT; 1867 struct uio uio; 1868 int disconnect = 0; 1869 1870 CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep); 1871 1872 /* 1873 * Stop mpa timer. If it expired, then 1874 * we ignore the MPA reply. process_timeout() 1875 * will abort the connection. 1876 */ 1877 if (STOP_EP_TIMER(ep)) 1878 return 0; 1879 1880 uio.uio_resid = 1000000; 1881 uio.uio_td = ep->com.thread; 1882 err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags); 1883 1884 if (err) { 1885 1886 if (err == EWOULDBLOCK) { 1887 1888 CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep); 1889 START_EP_TIMER(ep); 1890 return 0; 1891 } 1892 err = -err; 1893 CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep); 1894 goto err; 1895 } 1896 1897 if (ep->com.so->so_rcv.sb_mb) { 1898 1899 CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep); 1900 printf("%s data after soreceive called! so %p sb_mb %p top %p\n", 1901 __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top); 1902 } 1903 1904 m = top; 1905 1906 do { 1907 1908 CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep); 1909 /* 1910 * If we get more than the supported amount of private data 1911 * then we must fail this connection. 1912 */ 1913 if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) { 1914 1915 CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep, 1916 ep->mpa_pkt_len + m->m_len); 1917 err = (-EINVAL); 1918 goto err_stop_timer; 1919 } 1920 1921 /* 1922 * copy the new data into our accumulation buffer. 1923 */ 1924 m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len])); 1925 ep->mpa_pkt_len += m->m_len; 1926 if (!m->m_next) 1927 m = m->m_nextpkt; 1928 else 1929 m = m->m_next; 1930 } while (m); 1931 1932 m_freem(top); 1933 /* 1934 * if we don't even have the mpa message, then bail. 1935 */ 1936 if (ep->mpa_pkt_len < sizeof(*mpa)) { 1937 return 0; 1938 } 1939 mpa = (struct mpa_message *) ep->mpa_pkt; 1940 1941 /* Validate MPA header. */ 1942 if (mpa->revision > mpa_rev) { 1943 1944 CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep, 1945 mpa->revision, mpa_rev); 1946 printk(KERN_ERR MOD "%s MPA version mismatch. 
Local = %d, " 1947 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1948 err = -EPROTO; 1949 goto err_stop_timer; 1950 } 1951 1952 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 1953 1954 CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep); 1955 err = -EPROTO; 1956 goto err_stop_timer; 1957 } 1958 1959 plen = ntohs(mpa->private_data_size); 1960 1961 /* 1962 * Fail if there's too much private data. 1963 */ 1964 if (plen > MPA_MAX_PRIVATE_DATA) { 1965 1966 CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep); 1967 err = -EPROTO; 1968 goto err_stop_timer; 1969 } 1970 1971 /* 1972 * If plen does not account for pkt size 1973 */ 1974 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1975 1976 CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep); 1977 STOP_EP_TIMER(ep); 1978 err = -EPROTO; 1979 goto err_stop_timer; 1980 } 1981 1982 ep->plen = (u8) plen; 1983 1984 /* 1985 * If we don't have all the pdata yet, then bail. 1986 * We'll continue process when more data arrives. 1987 */ 1988 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) { 1989 1990 CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep); 1991 return 0; 1992 } 1993 1994 if (mpa->flags & MPA_REJECT) { 1995 1996 CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep); 1997 err = -ECONNREFUSED; 1998 goto err_stop_timer; 1999 } 2000 2001 /* 2002 * If we get here we have accumulated the entire mpa 2003 * start reply message including private data. And 2004 * the MPA header is valid. 2005 */ 2006 ep->com.state = FPDU_MODE; 2007 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 2008 ep->mpa_attr.recv_marker_enabled = markers_enabled; 2009 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 2010 ep->mpa_attr.version = mpa->revision; 2011 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 2012 2013 if (mpa->revision == 2) { 2014 2015 CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep); 2016 ep->mpa_attr.enhanced_rdma_conn = 2017 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 2018 2019 if (ep->mpa_attr.enhanced_rdma_conn) { 2020 2021 CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep); 2022 mpa_v2_params = (struct mpa_v2_conn_params *) 2023 (ep->mpa_pkt + sizeof(*mpa)); 2024 resp_ird = ntohs(mpa_v2_params->ird) & 2025 MPA_V2_IRD_ORD_MASK; 2026 resp_ord = ntohs(mpa_v2_params->ord) & 2027 MPA_V2_IRD_ORD_MASK; 2028 2029 /* 2030 * This is a double-check. 
Ideally, below checks are 2031 * not required since ird/ord stuff has been taken 2032 * care of in c4iw_accept_cr 2033 */ 2034 if (ep->ird < resp_ord) { 2035 if (RELAXED_IRD_NEGOTIATION && resp_ord <= 2036 ep->com.dev->rdev.adap->params.max_ordird_qp) 2037 ep->ird = resp_ord; 2038 else 2039 insuff_ird = 1; 2040 } else if (ep->ird > resp_ord) { 2041 ep->ird = resp_ord; 2042 } 2043 if (ep->ord > resp_ird) { 2044 if (RELAXED_IRD_NEGOTIATION) 2045 ep->ord = resp_ird; 2046 else 2047 insuff_ird = 1; 2048 } 2049 if (insuff_ird) { 2050 err = -ENOMEM; 2051 ep->ird = resp_ord; 2052 ep->ord = resp_ird; 2053 } 2054 2055 if (ntohs(mpa_v2_params->ird) & 2056 MPA_V2_PEER2PEER_MODEL) { 2057 2058 CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep); 2059 if (ntohs(mpa_v2_params->ord) & 2060 MPA_V2_RDMA_WRITE_RTR) { 2061 2062 CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep); 2063 ep->mpa_attr.p2p_type = 2064 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 2065 } 2066 else if (ntohs(mpa_v2_params->ord) & 2067 MPA_V2_RDMA_READ_RTR) { 2068 2069 CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep); 2070 ep->mpa_attr.p2p_type = 2071 FW_RI_INIT_P2PTYPE_READ_REQ; 2072 } 2073 } 2074 } 2075 } else { 2076 2077 CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep); 2078 2079 if (mpa->revision == 1) { 2080 2081 CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep); 2082 2083 if (peer2peer) { 2084 2085 CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep); 2086 ep->mpa_attr.p2p_type = p2p_type; 2087 } 2088 } 2089 } 2090 2091 if (set_tcpinfo(ep)) { 2092 2093 CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep); 2094 printf("%s set_tcpinfo error\n", __func__); 2095 err = -ECONNRESET; 2096 goto err; 2097 } 2098 2099 CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, " 2100 "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__, 2101 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 2102 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 2103 ep->mpa_attr.p2p_type); 2104 2105 /* 2106 * If responder's RTR does not match with that of initiator, assign 2107 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not 2108 * generated when moving QP to RTS state. 
2109 * A TERM message will be sent after QP has moved to RTS state 2110 */ 2111 if ((ep->mpa_attr.version == 2) && peer2peer && 2112 (ep->mpa_attr.p2p_type != p2p_type)) { 2113 2114 CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep); 2115 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 2116 rtr_mismatch = 1; 2117 } 2118 2119 2120 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; 2121 attrs.mpa_attr = ep->mpa_attr; 2122 attrs.max_ird = ep->ird; 2123 attrs.max_ord = ep->ord; 2124 attrs.llp_stream_handle = ep; 2125 attrs.next_state = C4IW_QP_STATE_RTS; 2126 2127 mask = C4IW_QP_ATTR_NEXT_STATE | 2128 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 2129 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 2130 2131 /* bind QP and TID with INIT_WR */ 2132 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); 2133 2134 if (err) { 2135 2136 CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep); 2137 goto err; 2138 } 2139 2140 /* 2141 * If responder's RTR requirement did not match with what initiator 2142 * supports, generate TERM message 2143 */ 2144 if (rtr_mismatch) { 2145 2146 CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep); 2147 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); 2148 attrs.layer_etype = LAYER_MPA | DDP_LLP; 2149 attrs.ecode = MPA_NOMATCH_RTR; 2150 attrs.next_state = C4IW_QP_STATE_TERMINATE; 2151 attrs.send_term = 1; 2152 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2153 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2154 err = -ENOMEM; 2155 disconnect = 1; 2156 goto out; 2157 } 2158 2159 /* 2160 * Generate TERM if initiator IRD is not sufficient for responder 2161 * provided ORD. Currently, we do the same behaviour even when 2162 * responder provided IRD is also not sufficient as regards to 2163 * initiator ORD. 2164 */ 2165 if (insuff_ird) { 2166 2167 CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep); 2168 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", 2169 __func__); 2170 attrs.layer_etype = LAYER_MPA | DDP_LLP; 2171 attrs.ecode = MPA_INSUFF_IRD; 2172 attrs.next_state = C4IW_QP_STATE_TERMINATE; 2173 attrs.send_term = 1; 2174 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2175 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2176 err = -ENOMEM; 2177 disconnect = 1; 2178 goto out; 2179 } 2180 goto out; 2181 err_stop_timer: 2182 STOP_EP_TIMER(ep); 2183 err: 2184 disconnect = 2; 2185 out: 2186 connect_reply_upcall(ep, err); 2187 CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep); 2188 return disconnect; 2189 } 2190 2191 /* 2192 * process_mpa_request - process streaming mode MPA request 2193 * 2194 * Returns: 2195 * 2196 * 0 upon success indicating a connect request was delivered to the ULP 2197 * or the mpa request is incomplete but valid so far. 2198 * 2199 * 1 if a failure requires the caller to close the connection. 2200 * 2201 * 2 if a failure requires the caller to abort the connection. 
2202 */ 2203 static int 2204 process_mpa_request(struct c4iw_ep *ep) 2205 { 2206 struct mpa_message *mpa; 2207 struct mpa_v2_conn_params *mpa_v2_params; 2208 u16 plen; 2209 int flags = MSG_DONTWAIT; 2210 int rc; 2211 struct iovec iov; 2212 struct uio uio; 2213 enum c4iw_ep_state state = ep->com.state; 2214 2215 CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]); 2216 2217 if (state != MPA_REQ_WAIT) 2218 return 0; 2219 2220 iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len]; 2221 iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; 2222 uio.uio_iov = &iov; 2223 uio.uio_iovcnt = 1; 2224 uio.uio_offset = 0; 2225 uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; 2226 uio.uio_segflg = UIO_SYSSPACE; 2227 uio.uio_rw = UIO_READ; 2228 uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */ 2229 2230 rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags); 2231 if (rc == EAGAIN) 2232 return 0; 2233 else if (rc) 2234 goto err_stop_timer; 2235 2236 KASSERT(uio.uio_offset > 0, ("%s: sorecieve on so %p read no data", 2237 __func__, ep->com.so)); 2238 ep->mpa_pkt_len += uio.uio_offset; 2239 2240 /* 2241 * If we get more than the supported amount of private data then we must 2242 * fail this connection. XXX: check so_rcv->sb_cc, or peek with another 2243 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last 2244 * byte is filled by the soreceive above. 2245 */ 2246 2247 /* Don't even have the MPA message. Wait for more data to arrive. */ 2248 if (ep->mpa_pkt_len < sizeof(*mpa)) 2249 return 0; 2250 mpa = (struct mpa_message *) ep->mpa_pkt; 2251 2252 /* 2253 * Validate MPA Header. 2254 */ 2255 if (mpa->revision > mpa_rev) { 2256 log(LOG_ERR, "%s: MPA version mismatch. Local = %d," 2257 " Received = %d\n", __func__, mpa_rev, mpa->revision); 2258 goto err_stop_timer; 2259 } 2260 2261 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) 2262 goto err_stop_timer; 2263 2264 /* 2265 * Fail if there's too much private data. 2266 */ 2267 plen = ntohs(mpa->private_data_size); 2268 if (plen > MPA_MAX_PRIVATE_DATA) 2269 goto err_stop_timer; 2270 2271 /* 2272 * If plen does not account for pkt size 2273 */ 2274 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) 2275 goto err_stop_timer; 2276 2277 ep->plen = (u8) plen; 2278 2279 /* 2280 * If we don't have all the pdata yet, then bail. 2281 */ 2282 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 2283 return 0; 2284 2285 /* 2286 * If we get here we have accumulated the entire mpa 2287 * start reply message including private data. 2288 */ 2289 ep->mpa_attr.initiator = 0; 2290 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 2291 ep->mpa_attr.recv_marker_enabled = markers_enabled; 2292 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 2293 ep->mpa_attr.version = mpa->revision; 2294 if (mpa->revision == 1) 2295 ep->tried_with_mpa_v1 = 1; 2296 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 2297 2298 if (mpa->revision == 2) { 2299 ep->mpa_attr.enhanced_rdma_conn = 2300 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 
1 : 0; 2301 if (ep->mpa_attr.enhanced_rdma_conn) { 2302 mpa_v2_params = (struct mpa_v2_conn_params *) 2303 (ep->mpa_pkt + sizeof(*mpa)); 2304 ep->ird = ntohs(mpa_v2_params->ird) & 2305 MPA_V2_IRD_ORD_MASK; 2306 ep->ird = min_t(u32, ep->ird, 2307 cur_max_read_depth(ep->com.dev)); 2308 ep->ord = ntohs(mpa_v2_params->ord) & 2309 MPA_V2_IRD_ORD_MASK; 2310 ep->ord = min_t(u32, ep->ord, 2311 cur_max_read_depth(ep->com.dev)); 2312 CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u", 2313 __func__, ep->ird, ep->ord); 2314 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) 2315 if (peer2peer) { 2316 if (ntohs(mpa_v2_params->ord) & 2317 MPA_V2_RDMA_WRITE_RTR) 2318 ep->mpa_attr.p2p_type = 2319 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 2320 else if (ntohs(mpa_v2_params->ord) & 2321 MPA_V2_RDMA_READ_RTR) 2322 ep->mpa_attr.p2p_type = 2323 FW_RI_INIT_P2PTYPE_READ_REQ; 2324 } 2325 } 2326 } else if (mpa->revision == 1 && peer2peer) 2327 ep->mpa_attr.p2p_type = p2p_type; 2328 2329 if (set_tcpinfo(ep)) 2330 goto err_stop_timer; 2331 2332 CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, " 2333 "xmit_marker_enabled = %d, version = %d", __func__, 2334 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 2335 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); 2336 2337 ep->com.state = MPA_REQ_RCVD; 2338 STOP_EP_TIMER(ep); 2339 2340 /* drive upcall */ 2341 if (ep->parent_ep->com.state != DEAD) 2342 if (connect_request_upcall(ep)) 2343 goto err_out; 2344 return 0; 2345 2346 err_stop_timer: 2347 STOP_EP_TIMER(ep); 2348 err_out: 2349 return 2; 2350 } 2351 2352 /* 2353 * Upcall from the adapter indicating data has been transmitted. 2354 * For us its just the single MPA request or reply. We can now free 2355 * the skb holding the mpa message. 2356 */ 2357 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 2358 { 2359 int err; 2360 struct c4iw_ep *ep = to_ep(cm_id); 2361 int abort = 0; 2362 2363 mutex_lock(&ep->com.mutex); 2364 CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep); 2365 2366 if ((ep->com.state == DEAD) || 2367 (ep->com.state != MPA_REQ_RCVD)) { 2368 2369 CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep); 2370 mutex_unlock(&ep->com.mutex); 2371 c4iw_put_ep(&ep->com); 2372 return -ECONNRESET; 2373 } 2374 set_bit(ULP_REJECT, &ep->com.history); 2375 2376 if (mpa_rev == 0) { 2377 2378 CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep); 2379 abort = 1; 2380 } 2381 else { 2382 2383 CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep); 2384 abort = send_mpa_reject(ep, pdata, pdata_len); 2385 } 2386 STOP_EP_TIMER(ep); 2387 err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); 2388 mutex_unlock(&ep->com.mutex); 2389 c4iw_put_ep(&ep->com); 2390 CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err); 2391 return 0; 2392 } 2393 2394 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2395 { 2396 int err; 2397 struct c4iw_qp_attributes attrs = {0}; 2398 enum c4iw_qp_attr_mask mask; 2399 struct c4iw_ep *ep = to_ep(cm_id); 2400 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 2401 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 2402 int abort = 0; 2403 2404 mutex_lock(&ep->com.mutex); 2405 CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep); 2406 2407 if ((ep->com.state == DEAD) || 2408 (ep->com.state != MPA_REQ_RCVD)) { 2409 2410 CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep); 2411 err = -ECONNRESET; 2412 goto err_out; 2413 } 2414 2415 BUG_ON(!qp); 2416 2417 set_bit(ULP_ACCEPT, &ep->com.history); 2418 2419 if ((conn_param->ord > c4iw_max_read_depth) || 2420 
    (conn_param->ird > c4iw_max_read_depth)) {
2421
2422         CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
2423         err = -EINVAL;
2424         goto err_abort;
2425     }
2426
2427     if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2428
2429         CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
2430
2431         if (conn_param->ord > ep->ird) {
2432             if (RELAXED_IRD_NEGOTIATION) {
2433                 conn_param->ord = ep->ird;
2434             } else {
2435                 ep->ird = conn_param->ird;
2436                 ep->ord = conn_param->ord;
2437                 send_mpa_reject(ep, conn_param->private_data,
2438                     conn_param->private_data_len);
2439                 err = -ENOMEM;
2440                 goto err_abort;
2441             }
2442         }
2443         if (conn_param->ird < ep->ord) {
2444             if (RELAXED_IRD_NEGOTIATION &&
2445                 ep->ord <= h->rdev.adap->params.max_ordird_qp) {
2446                 conn_param->ird = ep->ord;
2447             } else {
2448                 err = -ENOMEM;
2449                 goto err_abort;
2450             }
2451         }
2452     }
2453     ep->ird = conn_param->ird;
2454     ep->ord = conn_param->ord;
2455
2456     if (ep->mpa_attr.version == 1) {
2457         if (peer2peer && ep->ird == 0)
2458             ep->ird = 1;
2459     } else {
2460         if (peer2peer &&
2461             (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
2462             (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
2463             ep->ird = 1;
2464     }
2465
2466     CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d", __func__, __LINE__,
2467         ep->ird, ep->ord);
2468
2469     ep->com.cm_id = cm_id;
2470     ref_cm_id(&ep->com);
2471     ep->com.qp = qp;
2472     ref_qp(ep);
2473     //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2474
2475     /* bind QP to EP and move to RTS */
2476     attrs.mpa_attr = ep->mpa_attr;
2477     attrs.max_ird = ep->ird;
2478     attrs.max_ord = ep->ord;
2479     attrs.llp_stream_handle = ep;
2480     attrs.next_state = C4IW_QP_STATE_RTS;
2481
2482     /* bind QP and TID with INIT_WR */
2483     mask = C4IW_QP_ATTR_NEXT_STATE |
2484         C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2485         C4IW_QP_ATTR_MPA_ATTR |
2486         C4IW_QP_ATTR_MAX_IRD |
2487         C4IW_QP_ATTR_MAX_ORD;
2488
2489     err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2490     if (err) {
2491         CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err);
2492         goto err_deref_cm_id;
2493     }
2494
2495     err = send_mpa_reply(ep, conn_param->private_data,
2496         conn_param->private_data_len);
2497     if (err) {
2498         CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err);
2499         goto err_deref_cm_id;
2500     }
2501
2502     ep->com.state = FPDU_MODE;
2503     established_upcall(ep);
2504     mutex_unlock(&ep->com.mutex);
2505     c4iw_put_ep(&ep->com);
2506     CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2507     return 0;
2508 err_deref_cm_id:
2509     deref_cm_id(&ep->com);
2510 err_abort:
2511     abort = 1;
2512 err_out:
2513     if (abort)
2514         c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2515     mutex_unlock(&ep->com.mutex);
2516     c4iw_put_ep(&ep->com);
2517     CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2518     return err;
2519 }
2520
2521 static int
2522 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
2523 {
2524     int ret;
2525     int size, on;
2526     struct socket *sock = NULL;
2527     struct sockopt sopt;
2528
2529     ret = sock_create_kern(laddr->ss_family,
2530         SOCK_STREAM, IPPROTO_TCP, &sock);
2531     if (ret) {
2532         CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket.
err %d", 2533 __func__, ret); 2534 return ret; 2535 } 2536 2537 if (reuseaddr) { 2538 bzero(&sopt, sizeof(struct sockopt)); 2539 sopt.sopt_dir = SOPT_SET; 2540 sopt.sopt_level = SOL_SOCKET; 2541 sopt.sopt_name = SO_REUSEADDR; 2542 on = 1; 2543 sopt.sopt_val = &on; 2544 sopt.sopt_valsize = sizeof(on); 2545 ret = -sosetopt(sock, &sopt); 2546 if (ret != 0) { 2547 log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEADDR) " 2548 "failed with %d.\n", __func__, sock, ret); 2549 } 2550 bzero(&sopt, sizeof(struct sockopt)); 2551 sopt.sopt_dir = SOPT_SET; 2552 sopt.sopt_level = SOL_SOCKET; 2553 sopt.sopt_name = SO_REUSEPORT; 2554 on = 1; 2555 sopt.sopt_val = &on; 2556 sopt.sopt_valsize = sizeof(on); 2557 ret = -sosetopt(sock, &sopt); 2558 if (ret != 0) { 2559 log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEPORT) " 2560 "failed with %d.\n", __func__, sock, ret); 2561 } 2562 } 2563 2564 ret = -sobind(sock, (struct sockaddr *)laddr, curthread); 2565 if (ret) { 2566 CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %p", 2567 __func__, ret); 2568 sock_release(sock); 2569 return ret; 2570 } 2571 2572 size = laddr->ss_family == AF_INET6 ? 2573 sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in); 2574 ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0); 2575 if (ret) { 2576 CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %p", 2577 __func__, ret); 2578 sock_release(sock); 2579 return ret; 2580 } 2581 2582 *so = sock; 2583 return 0; 2584 } 2585 2586 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2587 { 2588 int err = 0; 2589 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2590 struct c4iw_ep *ep = NULL; 2591 struct ifnet *nh_ifp; /* Logical egress interface */ 2592 #ifdef VIMAGE 2593 struct rdma_cm_id *rdma_id = (struct rdma_cm_id*)cm_id->context; 2594 struct vnet *vnet = rdma_id->route.addr.dev_addr.net; 2595 #endif 2596 2597 CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id); 2598 2599 2600 if ((conn_param->ord > c4iw_max_read_depth) || 2601 (conn_param->ird > c4iw_max_read_depth)) { 2602 2603 CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id); 2604 err = -EINVAL; 2605 goto out; 2606 } 2607 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2608 cm_id->provider_data = ep; 2609 2610 init_timer(&ep->timer); 2611 ep->plen = conn_param->private_data_len; 2612 2613 if (ep->plen) { 2614 2615 CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep); 2616 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 2617 conn_param->private_data, ep->plen); 2618 } 2619 ep->ird = conn_param->ird; 2620 ep->ord = conn_param->ord; 2621 2622 if (peer2peer && ep->ord == 0) { 2623 2624 CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep); 2625 ep->ord = 1; 2626 } 2627 2628 ep->com.dev = dev; 2629 ep->com.cm_id = cm_id; 2630 ref_cm_id(&ep->com); 2631 ep->com.qp = get_qhp(dev, conn_param->qpn); 2632 2633 if (!ep->com.qp) { 2634 2635 CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep); 2636 err = -EINVAL; 2637 goto fail; 2638 } 2639 ref_qp(ep); 2640 ep->com.thread = curthread; 2641 2642 CURVNET_SET(vnet); 2643 err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp); 2644 CURVNET_RESTORE(); 2645 2646 if (err) { 2647 2648 CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep); 2649 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 2650 err = EHOSTUNREACH; 2651 return err; 2652 } 2653 2654 if (!(nh_ifp->if_capenable & IFCAP_TOE) || 2655 TOEDEV(nh_ifp) == NULL) { 2656 err = -ENOPROTOOPT; 2657 goto fail; 2658 } 2659 ep->com.state = CONNECTING; 2660 ep->tos = 0; 2661 ep->com.local_addr = cm_id->local_addr; 2662 ep->com.remote_addr = cm_id->remote_addr; 
2663
2664     err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
2665     if (err)
2666         goto fail;
2667
2668     setiwsockopt(ep->com.so);
2669     init_iwarp_socket(ep->com.so, &ep->com);
2670     err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2671         ep->com.thread);
2672     if (err)
2673         goto fail_free_so;
2674     CTR2(KTR_IW_CXGBE, "%s:ccE, ep %p", __func__, ep);
2675     return 0;
2676
2677 fail_free_so:
2678     uninit_iwarp_socket(ep->com.so);
2679     ep->com.state = DEAD;
2680     sock_release(ep->com.so);
2681 fail:
2682     deref_cm_id(&ep->com);
2683     c4iw_put_ep(&ep->com);
2684     ep = NULL;
2685 out:
2686     CTR2(KTR_IW_CXGBE, "%s:ccE Error %d", __func__, err);
2687     return err;
2688 }
2689
2690 /*
2691  * iwcm->create_listen. Returns -errno on failure.
2692  */
2693 int
2694 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2695 {
2696     struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2697     struct c4iw_listen_ep *lep = NULL;
2698     struct listen_port_info *port_info = NULL;
2699     int rc = 0;
2700
2701     CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id,
2702         backlog);
2703     if (c4iw_fatal_error(&dev->rdev)) {
2704         CTR2(KTR_IW_CXGBE, "%s: cm_id %p, fatal error", __func__,
2705             cm_id);
2706         return -EIO;
2707     }
2708     lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
2709     lep->com.cm_id = cm_id;
2710     ref_cm_id(&lep->com);
2711     lep->com.dev = dev;
2712     lep->backlog = backlog;
2713     lep->com.local_addr = cm_id->local_addr;
2714     lep->com.thread = curthread;
2715     cm_id->provider_data = lep;
2716     lep->com.state = LISTEN;
2717
2718     /* In case of INADDR_ANY, ibcore creates a cmid for each device and
2719      * invokes the iw_cxgbe listener callbacks assuming that iw_cxgbe
2720      * creates HW listeners for each device separately. But toecore expects
2721      * a single solisten() call with the INADDR_ANY address to create HW
2722      * listeners on all devices for a given port number. So the iw_cxgbe
2723      * driver calls solisten() only once for INADDR_ANY (usually on the
2724      * first listener callback from ibcore). All subsequent INADDR_ANY
2725      * listener callbacks from ibcore (for the same port address) do not
2726      * invoke solisten(), as the first listener callback has already
2727      * created listeners for all other devices (via solisten).
2728      */
2729     if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2730         port_info = add_ep_to_listenlist(lep);
2731         /* skip solisten() if refcnt > 1, as the listeners were
2732          * already created by the 'Master lep'
2733          */
2734         if (port_info->refcnt > 1) {
2735             /* As there will be only one listener socket for a TCP
2736              * port, copy the Master lep's socket pointer to the
2737              * other leps belonging to the same TCP port.
2738              */
2739             struct c4iw_listen_ep *head_lep =
2740                 container_of(port_info->lep_list.next,
2741                 struct c4iw_listen_ep, listen_ep_list);
2742             lep->com.so = head_lep->com.so;
2743             goto out;
2744         }
2745     }
2746     rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so);
2747     if (rc) {
2748         CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d",
2749             __func__, rc);
2750         goto fail;
2751     }
2752
2753     rc = -solisten(lep->com.so, backlog, curthread);
2754     if (rc) {
2755         CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p.
err %d", 2756 __func__, lep->com.so, rc); 2757 goto fail_free_so; 2758 } 2759 init_iwarp_socket(lep->com.so, &lep->com); 2760 out: 2761 return 0; 2762 2763 fail_free_so: 2764 sock_release(lep->com.so); 2765 fail: 2766 if (port_info) 2767 rem_ep_from_listenlist(lep); 2768 deref_cm_id(&lep->com); 2769 c4iw_put_ep(&lep->com); 2770 return rc; 2771 } 2772 2773 int 2774 c4iw_destroy_listen(struct iw_cm_id *cm_id) 2775 { 2776 struct c4iw_listen_ep *lep = to_listen_ep(cm_id); 2777 2778 mutex_lock(&lep->com.mutex); 2779 CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id, 2780 states[lep->com.state]); 2781 2782 lep->com.state = DEAD; 2783 if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) { 2784 /* if no refcount then close listen socket */ 2785 if (!rem_ep_from_listenlist(lep)) 2786 close_socket(lep->com.so); 2787 } else 2788 close_socket(lep->com.so); 2789 deref_cm_id(&lep->com); 2790 mutex_unlock(&lep->com.mutex); 2791 c4iw_put_ep(&lep->com); 2792 return 0; 2793 } 2794 2795 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2796 { 2797 int ret; 2798 mutex_lock(&ep->com.mutex); 2799 ret = c4iw_ep_disconnect(ep, abrupt, gfp); 2800 mutex_unlock(&ep->com.mutex); 2801 return ret; 2802 } 2803 2804 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2805 { 2806 int ret = 0; 2807 int close = 0; 2808 struct c4iw_rdev *rdev; 2809 2810 2811 CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep); 2812 2813 rdev = &ep->com.dev->rdev; 2814 2815 if (c4iw_fatal_error(rdev)) { 2816 CTR3(KTR_IW_CXGBE, "%s:ced1 fatal error %p %s", __func__, ep, 2817 states[ep->com.state]); 2818 if (ep->com.state != DEAD) { 2819 send_abort(ep); 2820 ep->com.state = DEAD; 2821 } 2822 close_complete_upcall(ep, -ECONNRESET); 2823 return ECONNRESET; 2824 } 2825 CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep, 2826 states[ep->com.state]); 2827 2828 /* 2829 * Ref the ep here in case we have fatal errors causing the 2830 * ep to be released and freed. 
2831 */ 2832 c4iw_get_ep(&ep->com); 2833 switch (ep->com.state) { 2834 2835 case MPA_REQ_WAIT: 2836 case MPA_REQ_SENT: 2837 case MPA_REQ_RCVD: 2838 case MPA_REP_SENT: 2839 case FPDU_MODE: 2840 close = 1; 2841 if (abrupt) 2842 ep->com.state = ABORTING; 2843 else { 2844 ep->com.state = CLOSING; 2845 START_EP_TIMER(ep); 2846 } 2847 set_bit(CLOSE_SENT, &ep->com.flags); 2848 break; 2849 2850 case CLOSING: 2851 2852 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 2853 2854 close = 1; 2855 if (abrupt) { 2856 STOP_EP_TIMER(ep); 2857 ep->com.state = ABORTING; 2858 } else 2859 ep->com.state = MORIBUND; 2860 } 2861 break; 2862 2863 case MORIBUND: 2864 case ABORTING: 2865 case DEAD: 2866 CTR3(KTR_IW_CXGBE, 2867 "%s ignoring disconnect ep %p state %u", __func__, 2868 ep, ep->com.state); 2869 break; 2870 2871 default: 2872 BUG(); 2873 break; 2874 } 2875 2876 2877 if (close) { 2878 2879 CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep); 2880 2881 if (abrupt) { 2882 2883 CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep); 2884 set_bit(EP_DISC_ABORT, &ep->com.history); 2885 close_complete_upcall(ep, -ECONNRESET); 2886 send_abort(ep); 2887 } else { 2888 2889 CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep); 2890 set_bit(EP_DISC_CLOSE, &ep->com.history); 2891 2892 if (!ep->parent_ep) 2893 ep->com.state = MORIBUND; 2894 2895 CURVNET_SET(ep->com.so->so_vnet); 2896 ret = sodisconnect(ep->com.so); 2897 CURVNET_RESTORE(); 2898 if (ret) { 2899 CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep); 2900 STOP_EP_TIMER(ep); 2901 send_abort(ep); 2902 ep->com.state = DEAD; 2903 close_complete_upcall(ep, -ECONNRESET); 2904 set_bit(EP_DISC_FAIL, &ep->com.history); 2905 if (ep->com.qp) { 2906 struct c4iw_qp_attributes attrs = {0}; 2907 2908 attrs.next_state = C4IW_QP_STATE_ERROR; 2909 ret = c4iw_modify_qp( 2910 ep->com.dev, ep->com.qp, 2911 C4IW_QP_ATTR_NEXT_STATE, 2912 &attrs, 1); 2913 CTR3(KTR_IW_CXGBE, "%s:ced7 %p ret %d", 2914 __func__, ep, ret); 2915 } 2916 } 2917 } 2918 } 2919 c4iw_put_ep(&ep->com); 2920 CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep); 2921 return ret; 2922 } 2923 2924 #ifdef C4IW_EP_REDIRECT 2925 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, 2926 struct l2t_entry *l2t) 2927 { 2928 struct c4iw_ep *ep = ctx; 2929 2930 if (ep->dst != old) 2931 return 0; 2932 2933 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, 2934 l2t); 2935 dst_hold(new); 2936 cxgb4_l2t_release(ep->l2t); 2937 ep->l2t = l2t; 2938 dst_release(old); 2939 ep->dst = new; 2940 return 1; 2941 } 2942 #endif 2943 2944 2945 2946 static void ep_timeout(unsigned long arg) 2947 { 2948 struct c4iw_ep *ep = (struct c4iw_ep *)arg; 2949 2950 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 2951 2952 /* 2953 * Only insert if it is not already on the list. 2954 */ 2955 if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) { 2956 CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep); 2957 add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT); 2958 } 2959 } 2960 } 2961 2962 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl) 2963 { 2964 uint64_t val = be64toh(*rpl); 2965 int ret; 2966 struct c4iw_wr_wait *wr_waitp; 2967 2968 ret = (int)((val >> 8) & 0xff); 2969 wr_waitp = (struct c4iw_wr_wait *)rpl[1]; 2970 CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret); 2971 if (wr_waitp) 2972 c4iw_wake_up(wr_waitp, ret ? 
            -ret : 0);
2973
2974     return (0);
2975 }
2976
2977 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2978 {
2979     struct cqe_list_entry *cle;
2980     unsigned long flag;
2981
2982     cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
2983     cle->rhp = sc->iwarp_softc;
2984     cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
2985
2986     spin_lock_irqsave(&err_cqe_lock, flag);
2987     list_add_tail(&cle->entry, &err_cqe_list);
2988     queue_work(c4iw_taskq, &c4iw_task);
2989     spin_unlock_irqrestore(&err_cqe_lock, flag);
2990
2991     return (0);
2992 }
2993
2994 static int
2995 process_terminate(struct c4iw_ep *ep)
2996 {
2997     struct c4iw_qp_attributes attrs = {0};
2998
2999     CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);
3000
3001     if (ep && ep->com.qp) {
3002
3003         printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
3004             ep->hwtid, ep->com.qp->wq.sq.qid);
3005         attrs.next_state = C4IW_QP_STATE_TERMINATE;
3006         c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
3007             1);
3008     } else
3009         printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
3010             ep->hwtid);
3011     CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
3012
3013     return 0;
3014 }
3015
3016 int __init c4iw_cm_init(void)
3017 {
3018
3019     t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
3020     t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
3021     t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
3022     t4_register_an_handler(c4iw_ev_handler);
3023
3024     TAILQ_INIT(&req_list);
3025     spin_lock_init(&req_lock);
3026     INIT_LIST_HEAD(&err_cqe_list);
3027     spin_lock_init(&err_cqe_lock);
3028
3029     INIT_WORK(&c4iw_task, process_req);
3030
3031     c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
3032     if (!c4iw_taskq)
3033         return -ENOMEM;
3034
3035     return 0;
3036 }
3037
3038 void __exit c4iw_cm_term(void)
3039 {
3040     WARN_ON(!TAILQ_EMPTY(&req_list));
3041     WARN_ON(!list_empty(&err_cqe_list));
3042     flush_workqueue(c4iw_taskq);
3043     destroy_workqueue(c4iw_taskq);
3044
3045     t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
3046     t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
3047     t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
3048     t4_register_an_handler(NULL);
3049 }
3050 #endif
3051