1 /* 2 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_inet.h" 36 37 #ifdef TCP_OFFLOAD 38 #include <sys/types.h> 39 #include <sys/malloc.h> 40 #include <sys/socket.h> 41 #include <sys/socketvar.h> 42 #include <sys/sockio.h> 43 #include <sys/taskqueue.h> 44 #include <netinet/in.h> 45 #include <net/neighbour.h> 46 #include <net/route.h> 47 48 #include <netinet/in_systm.h> 49 #include <netinet/in_pcb.h> 50 #include <netinet/ip.h> 51 #include <netinet/ip_var.h> 52 #include <netinet/tcp_var.h> 53 #include <netinet/tcp.h> 54 #include <netinet/tcpip.h> 55 56 #include <netinet/toecore.h> 57 58 struct sge_iq; 59 struct rss_header; 60 #include <linux/types.h> 61 #include "offload.h" 62 #include "tom/t4_tom.h" 63 64 #define TOEPCB(so) ((struct toepcb *)(so_sototcpcb((so))->t_toe)) 65 66 #include "iw_cxgbe.h" 67 #include <linux/module.h> 68 #include <linux/workqueue.h> 69 #include <linux/notifier.h> 70 #include <linux/inetdevice.h> 71 #include <linux/if_vlan.h> 72 #include <net/netevent.h> 73 74 static spinlock_t req_lock; 75 static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list; 76 static struct work_struct c4iw_task; 77 static struct workqueue_struct *c4iw_taskq; 78 static LIST_HEAD(timeout_list); 79 static spinlock_t timeout_lock; 80 81 static void process_req(struct work_struct *ctx); 82 static void start_ep_timer(struct c4iw_ep *ep); 83 static void stop_ep_timer(struct c4iw_ep *ep); 84 static int set_tcpinfo(struct c4iw_ep *ep); 85 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc); 86 static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate); 87 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate); 88 static void *alloc_ep(int size, gfp_t flags); 89 void __free_ep(struct c4iw_ep_common *epc); 90 static struct rtentry * find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port, 91 __be16 peer_port, u8 tos); 92 static int close_socket(struct c4iw_ep_common *epc, int close); 93 static int shutdown_socket(struct c4iw_ep_common *epc); 94 static void abort_socket(struct c4iw_ep *ep); 95 static void send_mpa_req(struct 
c4iw_ep *ep); 96 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen); 97 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen); 98 static void close_complete_upcall(struct c4iw_ep *ep); 99 static int abort_connection(struct c4iw_ep *ep); 100 static void peer_close_upcall(struct c4iw_ep *ep); 101 static void peer_abort_upcall(struct c4iw_ep *ep); 102 static void connect_reply_upcall(struct c4iw_ep *ep, int status); 103 static void connect_request_upcall(struct c4iw_ep *ep); 104 static void established_upcall(struct c4iw_ep *ep); 105 static void process_mpa_reply(struct c4iw_ep *ep); 106 static void process_mpa_request(struct c4iw_ep *ep); 107 static void process_peer_close(struct c4iw_ep *ep); 108 static void process_conn_error(struct c4iw_ep *ep); 109 static void process_close_complete(struct c4iw_ep *ep); 110 static void ep_timeout(unsigned long arg); 111 static void init_sock(struct c4iw_ep_common *epc); 112 static void process_data(struct c4iw_ep *ep); 113 static void process_connected(struct c4iw_ep *ep); 114 static struct socket * dequeue_socket(struct socket *head, struct sockaddr_in **remote, struct c4iw_ep *child_ep); 115 static void process_newconn(struct c4iw_ep *parent_ep); 116 static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag); 117 static void process_socket_event(struct c4iw_ep *ep); 118 static void release_ep_resources(struct c4iw_ep *ep); 119 120 #define START_EP_TIMER(ep) \ 121 do { \ 122 CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \ 123 __func__, __LINE__, (ep)); \ 124 start_ep_timer(ep); \ 125 } while (0) 126 127 #define STOP_EP_TIMER(ep) \ 128 do { \ 129 CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \ 130 __func__, __LINE__, (ep)); \ 131 stop_ep_timer(ep); \ 132 } while (0) 133 134 #ifdef KTR 135 static char *states[] = { 136 "idle", 137 "listen", 138 "connecting", 139 "mpa_wait_req", 140 "mpa_req_sent", 141 "mpa_req_rcvd", 142 "mpa_rep_sent", 143 "fpdu_mode", 144 "aborting", 145 "closing", 146 "moribund", 147 "dead", 148 NULL, 149 }; 150 #endif 151 152 static void 153 process_req(struct work_struct *ctx) 154 { 155 struct c4iw_ep_common *epc; 156 157 spin_lock(&req_lock); 158 while (!TAILQ_EMPTY(&req_list)) { 159 epc = TAILQ_FIRST(&req_list); 160 TAILQ_REMOVE(&req_list, epc, entry); 161 epc->entry.tqe_prev = NULL; 162 spin_unlock(&req_lock); 163 if (epc->so) 164 process_socket_event((struct c4iw_ep *)epc); 165 c4iw_put_ep(epc); 166 spin_lock(&req_lock); 167 } 168 spin_unlock(&req_lock); 169 } 170 171 /* 172 * XXX: doesn't belong here in the iWARP driver. 173 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is 174 * set. Is this a valid assumption for active open? 
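 *
 * set_tcpinfo() snapshots the offloaded connection's state for the CM:
 * the hardware tid from the toepcb, the current snd_nxt/rcv_nxt sequence
 * numbers, and an emss derived from t_maxseg (never below 128).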
175 */ 176 static int 177 set_tcpinfo(struct c4iw_ep *ep) 178 { 179 struct socket *so = ep->com.so; 180 struct inpcb *inp = sotoinpcb(so); 181 struct tcpcb *tp; 182 struct toepcb *toep; 183 int rc = 0; 184 185 INP_WLOCK(inp); 186 tp = intotcpcb(inp); 187 if ((tp->t_flags & TF_TOE) == 0) { 188 rc = EINVAL; 189 log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n", 190 __func__, so, ep); 191 goto done; 192 } 193 toep = TOEPCB(so); 194 195 ep->hwtid = toep->tid; 196 ep->snd_seq = tp->snd_nxt; 197 ep->rcv_seq = tp->rcv_nxt; 198 ep->emss = max(tp->t_maxseg, 128); 199 done: 200 INP_WUNLOCK(inp); 201 return (rc); 202 203 } 204 205 static struct rtentry * 206 find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port, 207 __be16 peer_port, u8 tos) 208 { 209 struct route iproute; 210 struct sockaddr_in *dst = (struct sockaddr_in *)&iproute.ro_dst; 211 212 CTR5(KTR_IW_CXGBE, "%s:frtB %x, %x, %d, %d", __func__, local_ip, 213 peer_ip, ntohs(local_port), ntohs(peer_port)); 214 bzero(&iproute, sizeof iproute); 215 dst->sin_family = AF_INET; 216 dst->sin_len = sizeof *dst; 217 dst->sin_addr.s_addr = peer_ip; 218 219 rtalloc(&iproute); 220 CTR2(KTR_IW_CXGBE, "%s:frtE %p", __func__, (uint64_t)iproute.ro_rt); 221 return iproute.ro_rt; 222 } 223 224 static int 225 close_socket(struct c4iw_ep_common *epc, int close) 226 { 227 struct socket *so = epc->so; 228 int rc; 229 230 CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, epc, so, 231 states[epc->state]); 232 233 SOCK_LOCK(so); 234 soupcall_clear(so, SO_RCV); 235 SOCK_UNLOCK(so); 236 237 if (close) 238 rc = soclose(so); 239 else 240 rc = soshutdown(so, SHUT_WR | SHUT_RD); 241 epc->so = NULL; 242 243 return (rc); 244 } 245 246 static int 247 shutdown_socket(struct c4iw_ep_common *epc) 248 { 249 250 CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, epc->so, epc, 251 states[epc->state]); 252 253 return (soshutdown(epc->so, SHUT_WR)); 254 } 255 256 static void 257 abort_socket(struct c4iw_ep *ep) 258 { 259 struct sockopt sopt; 260 int rc; 261 struct linger l; 262 263 CTR4(KTR_IW_CXGBE, "%s ep %p so %p state %s", __func__, ep, ep->com.so, 264 states[ep->com.state]); 265 266 l.l_onoff = 1; 267 l.l_linger = 0; 268 269 /* linger_time of 0 forces RST to be sent */ 270 sopt.sopt_dir = SOPT_SET; 271 sopt.sopt_level = SOL_SOCKET; 272 sopt.sopt_name = SO_LINGER; 273 sopt.sopt_val = (caddr_t)&l; 274 sopt.sopt_valsize = sizeof l; 275 sopt.sopt_td = NULL; 276 rc = sosetopt(ep->com.so, &sopt); 277 if (rc) { 278 log(LOG_ERR, "%s: can't set linger to 0, no RST! 
err %d\n", 279 __func__, rc); 280 } 281 } 282 283 static void 284 process_peer_close(struct c4iw_ep *ep) 285 { 286 struct c4iw_qp_attributes attrs; 287 int disconnect = 1; 288 int release = 0; 289 290 CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep, 291 ep->com.so, states[ep->com.state]); 292 293 mutex_lock(&ep->com.mutex); 294 switch (ep->com.state) { 295 296 case MPA_REQ_WAIT: 297 CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT CLOSING", 298 __func__, ep); 299 __state_set(&ep->com, CLOSING); 300 break; 301 302 case MPA_REQ_SENT: 303 CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT CLOSING", 304 __func__, ep); 305 __state_set(&ep->com, DEAD); 306 connect_reply_upcall(ep, -ECONNABORTED); 307 308 disconnect = 0; 309 STOP_EP_TIMER(ep); 310 close_socket(&ep->com, 0); 311 ep->com.cm_id->rem_ref(ep->com.cm_id); 312 ep->com.cm_id = NULL; 313 ep->com.qp = NULL; 314 release = 1; 315 break; 316 317 case MPA_REQ_RCVD: 318 319 /* 320 * We're gonna mark this puppy DEAD, but keep 321 * the reference on it until the ULP accepts or 322 * rejects the CR. 323 */ 324 CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING", 325 __func__, ep); 326 __state_set(&ep->com, CLOSING); 327 c4iw_get_ep(&ep->com); 328 break; 329 330 case MPA_REP_SENT: 331 CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING", 332 __func__, ep); 333 __state_set(&ep->com, CLOSING); 334 break; 335 336 case FPDU_MODE: 337 CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING", 338 __func__, ep); 339 START_EP_TIMER(ep); 340 __state_set(&ep->com, CLOSING); 341 attrs.next_state = C4IW_QP_STATE_CLOSING; 342 c4iw_modify_qp(ep->com.dev, ep->com.qp, 343 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 344 peer_close_upcall(ep); 345 break; 346 347 case ABORTING: 348 CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)", 349 __func__, ep); 350 disconnect = 0; 351 break; 352 353 case CLOSING: 354 CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND", 355 __func__, ep); 356 __state_set(&ep->com, MORIBUND); 357 disconnect = 0; 358 break; 359 360 case MORIBUND: 361 CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__, 362 ep); 363 STOP_EP_TIMER(ep); 364 if (ep->com.cm_id && ep->com.qp) { 365 attrs.next_state = C4IW_QP_STATE_IDLE; 366 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 367 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 368 } 369 close_socket(&ep->com, 0); 370 close_complete_upcall(ep); 371 __state_set(&ep->com, DEAD); 372 release = 1; 373 disconnect = 0; 374 break; 375 376 case DEAD: 377 CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)", 378 __func__, ep); 379 disconnect = 0; 380 break; 381 382 default: 383 panic("%s: ep %p state %d", __func__, ep, 384 ep->com.state); 385 break; 386 } 387 388 mutex_unlock(&ep->com.mutex); 389 390 if (disconnect) { 391 392 CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep); 393 c4iw_ep_disconnect(ep, 0, M_NOWAIT); 394 } 395 if (release) { 396 397 CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep); 398 c4iw_put_ep(&ep->com); 399 } 400 CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep); 401 return; 402 } 403 404 static void 405 process_conn_error(struct c4iw_ep *ep) 406 { 407 struct c4iw_qp_attributes attrs; 408 int ret; 409 int state; 410 411 state = state_read(&ep->com); 412 CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s", 413 __func__, ep, ep->com.so, ep->com.so->so_error, 414 states[ep->com.state]); 415 416 switch (state) { 417 418 case MPA_REQ_WAIT: 419 STOP_EP_TIMER(ep); 420 break; 421 422 case MPA_REQ_SENT: 423 STOP_EP_TIMER(ep); 424 connect_reply_upcall(ep, -ECONNRESET); 425 break; 426 427 case MPA_REP_SENT: 428 ep->com.rpl_err = 
ECONNRESET; 429 CTR1(KTR_IW_CXGBE, "waking up ep %p", ep); 430 break; 431 432 case MPA_REQ_RCVD: 433 434 /* 435 * We're gonna mark this puppy DEAD, but keep 436 * the reference on it until the ULP accepts or 437 * rejects the CR. 438 */ 439 c4iw_get_ep(&ep->com); 440 break; 441 442 case MORIBUND: 443 case CLOSING: 444 STOP_EP_TIMER(ep); 445 /*FALLTHROUGH*/ 446 case FPDU_MODE: 447 448 if (ep->com.cm_id && ep->com.qp) { 449 450 attrs.next_state = C4IW_QP_STATE_ERROR; 451 ret = c4iw_modify_qp(ep->com.qp->rhp, 452 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 453 &attrs, 1); 454 if (ret) 455 log(LOG_ERR, 456 "%s - qp <- error failed!\n", 457 __func__); 458 } 459 peer_abort_upcall(ep); 460 break; 461 462 case ABORTING: 463 break; 464 465 case DEAD: 466 CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!", 467 __func__, ep->com.so->so_error); 468 return; 469 470 default: 471 panic("%s: ep %p state %d", __func__, ep, state); 472 break; 473 } 474 475 if (state != ABORTING) { 476 477 CTR2(KTR_IW_CXGBE, "%s:pce1 %p", __func__, ep); 478 close_socket(&ep->com, 0); 479 state_set(&ep->com, DEAD); 480 c4iw_put_ep(&ep->com); 481 } 482 CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep); 483 return; 484 } 485 486 static void 487 process_close_complete(struct c4iw_ep *ep) 488 { 489 struct c4iw_qp_attributes attrs; 490 int release = 0; 491 492 CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep, 493 ep->com.so, states[ep->com.state]); 494 495 /* The cm_id may be null if we failed to connect */ 496 mutex_lock(&ep->com.mutex); 497 498 switch (ep->com.state) { 499 500 case CLOSING: 501 CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND", 502 __func__, ep); 503 __state_set(&ep->com, MORIBUND); 504 break; 505 506 case MORIBUND: 507 CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__, 508 ep); 509 STOP_EP_TIMER(ep); 510 511 if ((ep->com.cm_id) && (ep->com.qp)) { 512 513 CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE", 514 __func__, ep); 515 attrs.next_state = C4IW_QP_STATE_IDLE; 516 c4iw_modify_qp(ep->com.dev, 517 ep->com.qp, 518 C4IW_QP_ATTR_NEXT_STATE, 519 &attrs, 1); 520 } 521 522 if (ep->parent_ep) { 523 524 CTR2(KTR_IW_CXGBE, "%s:pcc3 %p", __func__, ep); 525 close_socket(&ep->com, 1); 526 } 527 else { 528 529 CTR2(KTR_IW_CXGBE, "%s:pcc4 %p", __func__, ep); 530 close_socket(&ep->com, 0); 531 } 532 close_complete_upcall(ep); 533 __state_set(&ep->com, DEAD); 534 release = 1; 535 break; 536 537 case ABORTING: 538 CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep); 539 break; 540 541 case DEAD: 542 default: 543 CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep); 544 panic("%s:pcc6 %p DEAD", __func__, ep); 545 break; 546 } 547 mutex_unlock(&ep->com.mutex); 548 549 if (release) { 550 551 CTR2(KTR_IW_CXGBE, "%s:pcc7 %p", __func__, ep); 552 c4iw_put_ep(&ep->com); 553 } 554 CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep); 555 return; 556 } 557 558 static void 559 init_sock(struct c4iw_ep_common *epc) 560 { 561 int rc; 562 struct sockopt sopt; 563 struct socket *so = epc->so; 564 int on = 1; 565 566 SOCK_LOCK(so); 567 soupcall_set(so, SO_RCV, c4iw_so_upcall, epc); 568 so->so_state |= SS_NBIO; 569 SOCK_UNLOCK(so); 570 sopt.sopt_dir = SOPT_SET; 571 sopt.sopt_level = IPPROTO_TCP; 572 sopt.sopt_name = TCP_NODELAY; 573 sopt.sopt_val = (caddr_t)&on; 574 sopt.sopt_valsize = sizeof on; 575 sopt.sopt_td = NULL; 576 rc = sosetopt(so, &sopt); 577 if (rc) { 578 log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n", 579 __func__, so, rc); 580 } 581 } 582 583 static void 584 process_data(struct c4iw_ep *ep) 585 { 586 struct 
sockaddr_in *local, *remote; 587 588 CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sb_cc %d", __func__, 589 ep->com.so, ep, states[ep->com.state], ep->com.so->so_rcv.sb_cc); 590 591 switch (state_read(&ep->com)) { 592 case MPA_REQ_SENT: 593 process_mpa_reply(ep); 594 break; 595 case MPA_REQ_WAIT: 596 in_getsockaddr(ep->com.so, (struct sockaddr **)&local); 597 in_getpeeraddr(ep->com.so, (struct sockaddr **)&remote); 598 ep->com.local_addr = *local; 599 ep->com.remote_addr = *remote; 600 free(local, M_SONAME); 601 free(remote, M_SONAME); 602 process_mpa_request(ep); 603 break; 604 default: 605 if (ep->com.so->so_rcv.sb_cc) 606 log(LOG_ERR, "%s: Unexpected streaming data. " 607 "ep %p, state %d, so %p, so_state 0x%x, sb_cc %u\n", 608 __func__, ep, state_read(&ep->com), ep->com.so, 609 ep->com.so->so_state, ep->com.so->so_rcv.sb_cc); 610 break; 611 } 612 } 613 614 static void 615 process_connected(struct c4iw_ep *ep) 616 { 617 618 if ((ep->com.so->so_state & SS_ISCONNECTED) && !ep->com.so->so_error) 619 send_mpa_req(ep); 620 else { 621 connect_reply_upcall(ep, -ep->com.so->so_error); 622 close_socket(&ep->com, 0); 623 state_set(&ep->com, DEAD); 624 c4iw_put_ep(&ep->com); 625 } 626 } 627 628 static struct socket * 629 dequeue_socket(struct socket *head, struct sockaddr_in **remote, 630 struct c4iw_ep *child_ep) 631 { 632 struct socket *so; 633 634 ACCEPT_LOCK(); 635 so = TAILQ_FIRST(&head->so_comp); 636 if (!so) { 637 ACCEPT_UNLOCK(); 638 return (NULL); 639 } 640 TAILQ_REMOVE(&head->so_comp, so, so_list); 641 head->so_qlen--; 642 SOCK_LOCK(so); 643 so->so_qstate &= ~SQ_COMP; 644 so->so_head = NULL; 645 soref(so); 646 soupcall_set(so, SO_RCV, c4iw_so_upcall, child_ep); 647 so->so_state |= SS_NBIO; 648 SOCK_UNLOCK(so); 649 ACCEPT_UNLOCK(); 650 soaccept(so, (struct sockaddr **)remote); 651 652 return (so); 653 } 654 655 static void 656 process_newconn(struct c4iw_ep *parent_ep) 657 { 658 struct socket *child_so; 659 struct c4iw_ep *child_ep; 660 struct sockaddr_in *remote; 661 662 child_ep = alloc_ep(sizeof(*child_ep), M_NOWAIT); 663 if (!child_ep) { 664 CTR3(KTR_IW_CXGBE, "%s: parent so %p, parent ep %p, ENOMEM", 665 __func__, parent_ep->com.so, parent_ep); 666 log(LOG_ERR, "%s: failed to allocate ep entry\n", __func__); 667 return; 668 } 669 670 child_so = dequeue_socket(parent_ep->com.so, &remote, child_ep); 671 if (!child_so) { 672 CTR4(KTR_IW_CXGBE, 673 "%s: parent so %p, parent ep %p, child ep %p, dequeue err", 674 __func__, parent_ep->com.so, parent_ep, child_ep); 675 log(LOG_ERR, "%s: failed to dequeue child socket\n", __func__); 676 __free_ep(&child_ep->com); 677 return; 678 679 } 680 681 CTR5(KTR_IW_CXGBE, 682 "%s: parent so %p, parent ep %p, child so %p, child ep %p", 683 __func__, parent_ep->com.so, parent_ep, child_so, child_ep); 684 685 child_ep->com.local_addr = parent_ep->com.local_addr; 686 child_ep->com.remote_addr = *remote; 687 child_ep->com.dev = parent_ep->com.dev; 688 child_ep->com.so = child_so; 689 child_ep->com.cm_id = NULL; 690 child_ep->com.thread = parent_ep->com.thread; 691 child_ep->parent_ep = parent_ep; 692 693 free(remote, M_SONAME); 694 c4iw_get_ep(&parent_ep->com); 695 child_ep->parent_ep = parent_ep; 696 init_timer(&child_ep->timer); 697 state_set(&child_ep->com, MPA_REQ_WAIT); 698 START_EP_TIMER(child_ep); 699 700 /* maybe the request has already been queued up on the socket... 
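 * (the MPA request may have arrived before the receive upcall was
 * installed on the child socket, in which case no upcall will fire
 * for it), so poll for it once here.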
*/ 701 process_mpa_request(child_ep); 702 } 703 704 static int 705 c4iw_so_upcall(struct socket *so, void *arg, int waitflag) 706 { 707 struct c4iw_ep *ep = arg; 708 709 spin_lock(&req_lock); 710 711 CTR6(KTR_IW_CXGBE, 712 "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p", 713 __func__, so, so->so_state, ep, states[ep->com.state], 714 ep->com.entry.tqe_prev); 715 716 if (ep && ep->com.so && !ep->com.entry.tqe_prev) { 717 KASSERT(ep->com.so == so, ("%s: XXX review.", __func__)); 718 c4iw_get_ep(&ep->com); 719 TAILQ_INSERT_TAIL(&req_list, &ep->com, entry); 720 queue_work(c4iw_taskq, &c4iw_task); 721 } 722 723 spin_unlock(&req_lock); 724 return (SU_OK); 725 } 726 727 static void 728 process_socket_event(struct c4iw_ep *ep) 729 { 730 int state = state_read(&ep->com); 731 struct socket *so = ep->com.so; 732 733 CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, " 734 "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state, 735 so->so_error, so->so_rcv.sb_state, ep, states[state]); 736 737 if (state == CONNECTING) { 738 process_connected(ep); 739 return; 740 } 741 742 if (state == LISTEN) { 743 process_newconn(ep); 744 return; 745 } 746 747 /* connection error */ 748 if (so->so_error) { 749 process_conn_error(ep); 750 return; 751 } 752 753 /* peer close */ 754 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state < CLOSING) { 755 process_peer_close(ep); 756 return; 757 } 758 759 /* close complete */ 760 if (so->so_state & SS_ISDISCONNECTED) { 761 process_close_complete(ep); 762 return; 763 } 764 765 /* rx data */ 766 process_data(ep); 767 } 768 769 SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters"); 770 771 int db_delay_usecs = 1; 772 TUNABLE_INT("hw.iw_cxgbe.db_delay_usecs", &db_delay_usecs); 773 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_delay_usecs, CTLFLAG_RW, &db_delay_usecs, 0, 774 "Usecs to delay awaiting db fifo to drain"); 775 776 static int dack_mode = 1; 777 TUNABLE_INT("hw.iw_cxgbe.dack_mode", &dack_mode); 778 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RW, &dack_mode, 0, 779 "Delayed ack mode (default = 1)"); 780 781 int c4iw_max_read_depth = 8; 782 TUNABLE_INT("hw.iw_cxgbe.c4iw_max_read_depth", &c4iw_max_read_depth); 783 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RW, &c4iw_max_read_depth, 0, 784 "Per-connection max ORD/IRD (default = 8)"); 785 786 static int enable_tcp_timestamps; 787 TUNABLE_INT("hw.iw_cxgbe.enable_tcp_timestamps", &enable_tcp_timestamps); 788 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RW, &enable_tcp_timestamps, 0, 789 "Enable tcp timestamps (default = 0)"); 790 791 static int enable_tcp_sack; 792 TUNABLE_INT("hw.iw_cxgbe.enable_tcp_sack", &enable_tcp_sack); 793 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RW, &enable_tcp_sack, 0, 794 "Enable tcp SACK (default = 0)"); 795 796 static int enable_tcp_window_scaling = 1; 797 TUNABLE_INT("hw.iw_cxgbe.enable_tcp_window_scaling", &enable_tcp_window_scaling); 798 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RW, &enable_tcp_window_scaling, 0, 799 "Enable tcp window scaling (default = 1)"); 800 801 int c4iw_debug = 1; 802 TUNABLE_INT("hw.iw_cxgbe.c4iw_debug", &c4iw_debug); 803 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RW, &c4iw_debug, 0, 804 "Enable debug logging (default = 0)"); 805 806 static int peer2peer; 807 TUNABLE_INT("hw.iw_cxgbe.peer2peer", &peer2peer); 808 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RW, &peer2peer, 0, 809 "Support peer2peer 
ULPs (default = 0)"); 810 811 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; 812 TUNABLE_INT("hw.iw_cxgbe.p2p_type", &p2p_type); 813 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RW, &p2p_type, 0, 814 "RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)"); 815 816 static int ep_timeout_secs = 60; 817 TUNABLE_INT("hw.iw_cxgbe.ep_timeout_secs", &ep_timeout_secs); 818 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RW, &ep_timeout_secs, 0, 819 "CM Endpoint operation timeout in seconds (default = 60)"); 820 821 static int mpa_rev = 1; 822 TUNABLE_INT("hw.iw_cxgbe.mpa_rev", &mpa_rev); 823 #ifdef IW_CM_MPAV2 824 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0, 825 "MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)"); 826 #else 827 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RW, &mpa_rev, 0, 828 "MPA Revision, 0 supports amso1100, 1 is RFC0544 spec compliant (default = 1)"); 829 #endif 830 831 static int markers_enabled; 832 TUNABLE_INT("hw.iw_cxgbe.markers_enabled", &markers_enabled); 833 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RW, &markers_enabled, 0, 834 "Enable MPA MARKERS (default(0) = disabled)"); 835 836 static int crc_enabled = 1; 837 TUNABLE_INT("hw.iw_cxgbe.crc_enabled", &crc_enabled); 838 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RW, &crc_enabled, 0, 839 "Enable MPA CRC (default(1) = enabled)"); 840 841 static int rcv_win = 256 * 1024; 842 TUNABLE_INT("hw.iw_cxgbe.rcv_win", &rcv_win); 843 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RW, &rcv_win, 0, 844 "TCP receive window in bytes (default = 256KB)"); 845 846 static int snd_win = 128 * 1024; 847 TUNABLE_INT("hw.iw_cxgbe.snd_win", &snd_win); 848 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RW, &snd_win, 0, 849 "TCP send window in bytes (default = 128KB)"); 850 851 int db_fc_threshold = 2000; 852 TUNABLE_INT("hw.iw_cxgbe.db_fc_threshold", &db_fc_threshold); 853 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_fc_threshold, CTLFLAG_RW, &db_fc_threshold, 0, 854 "QP count/threshold that triggers automatic"); 855 856 static void 857 start_ep_timer(struct c4iw_ep *ep) 858 { 859 860 if (timer_pending(&ep->timer)) { 861 CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep); 862 printk(KERN_ERR "%s timer already started! 
ep %p\n", __func__, 863 ep); 864 return; 865 } 866 clear_bit(TIMEOUT, &ep->com.flags); 867 c4iw_get_ep(&ep->com); 868 ep->timer.expires = jiffies + ep_timeout_secs * HZ; 869 ep->timer.data = (unsigned long)ep; 870 ep->timer.function = ep_timeout; 871 add_timer(&ep->timer); 872 } 873 874 static void 875 stop_ep_timer(struct c4iw_ep *ep) 876 { 877 878 del_timer_sync(&ep->timer); 879 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 880 c4iw_put_ep(&ep->com); 881 } 882 } 883 884 static enum 885 c4iw_ep_state state_read(struct c4iw_ep_common *epc) 886 { 887 enum c4iw_ep_state state; 888 889 mutex_lock(&epc->mutex); 890 state = epc->state; 891 mutex_unlock(&epc->mutex); 892 893 return (state); 894 } 895 896 static void 897 __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 898 { 899 900 epc->state = new; 901 } 902 903 static void 904 state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 905 { 906 907 mutex_lock(&epc->mutex); 908 __state_set(epc, new); 909 mutex_unlock(&epc->mutex); 910 } 911 912 static void * 913 alloc_ep(int size, gfp_t gfp) 914 { 915 struct c4iw_ep_common *epc; 916 917 epc = kzalloc(size, gfp); 918 if (epc == NULL) 919 return (NULL); 920 921 kref_init(&epc->kref); 922 mutex_init(&epc->mutex); 923 c4iw_init_wr_wait(&epc->wr_wait); 924 925 return (epc); 926 } 927 928 void 929 __free_ep(struct c4iw_ep_common *epc) 930 { 931 CTR2(KTR_IW_CXGBE, "%s:feB %p", __func__, epc); 932 KASSERT(!epc->so, ("%s warning ep->so %p \n", __func__, epc->so)); 933 KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list!\n", __func__, epc)); 934 free(epc, M_DEVBUF); 935 CTR2(KTR_IW_CXGBE, "%s:feE %p", __func__, epc); 936 } 937 938 void _c4iw_free_ep(struct kref *kref) 939 { 940 struct c4iw_ep *ep; 941 struct c4iw_ep_common *epc; 942 943 ep = container_of(kref, struct c4iw_ep, com.kref); 944 epc = &ep->com; 945 KASSERT(!epc->so, ("%s ep->so %p", __func__, epc->so)); 946 KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list", 947 __func__, epc)); 948 kfree(ep); 949 } 950 951 static void release_ep_resources(struct c4iw_ep *ep) 952 { 953 CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep); 954 set_bit(RELEASE_RESOURCES, &ep->com.flags); 955 c4iw_put_ep(&ep->com); 956 CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep); 957 } 958 959 static void 960 send_mpa_req(struct c4iw_ep *ep) 961 { 962 int mpalen; 963 struct mpa_message *mpa; 964 struct mpa_v2_conn_params mpa_v2_params; 965 struct mbuf *m; 966 char mpa_rev_to_use = mpa_rev; 967 int err; 968 969 if (ep->retry_with_mpa_v1) 970 mpa_rev_to_use = 1; 971 mpalen = sizeof(*mpa) + ep->plen; 972 if (mpa_rev_to_use == 2) 973 mpalen += sizeof(struct mpa_v2_conn_params); 974 975 if (mpalen > MHLEN) 976 CXGBE_UNIMPLEMENTED(__func__); 977 978 m = m_gethdr(M_NOWAIT, MT_DATA); 979 if (m == NULL) { 980 connect_reply_upcall(ep, -ENOMEM); 981 return; 982 } 983 984 mpa = mtod(m, struct mpa_message *); 985 m->m_len = mpalen; 986 m->m_pkthdr.len = mpalen; 987 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 988 mpa->flags = (crc_enabled ? MPA_CRC : 0) | 989 (markers_enabled ? MPA_MARKERS : 0) | 990 (mpa_rev_to_use == 2 ? 
MPA_ENHANCED_RDMA_CONN : 0); 991 mpa->private_data_size = htons(ep->plen); 992 mpa->revision = mpa_rev_to_use; 993 994 if (mpa_rev_to_use == 1) { 995 ep->tried_with_mpa_v1 = 1; 996 ep->retry_with_mpa_v1 = 0; 997 } 998 999 if (mpa_rev_to_use == 2) { 1000 mpa->private_data_size += 1001 htons(sizeof(struct mpa_v2_conn_params)); 1002 mpa_v2_params.ird = htons((u16)ep->ird); 1003 mpa_v2_params.ord = htons((u16)ep->ord); 1004 1005 if (peer2peer) { 1006 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1007 1008 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) { 1009 mpa_v2_params.ord |= 1010 htons(MPA_V2_RDMA_WRITE_RTR); 1011 } else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) { 1012 mpa_v2_params.ord |= 1013 htons(MPA_V2_RDMA_READ_RTR); 1014 } 1015 } 1016 memcpy(mpa->private_data, &mpa_v2_params, 1017 sizeof(struct mpa_v2_conn_params)); 1018 1019 if (ep->plen) { 1020 1021 memcpy(mpa->private_data + 1022 sizeof(struct mpa_v2_conn_params), 1023 ep->mpa_pkt + sizeof(*mpa), ep->plen); 1024 } 1025 } else { 1026 1027 if (ep->plen) 1028 memcpy(mpa->private_data, 1029 ep->mpa_pkt + sizeof(*mpa), ep->plen); 1030 CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep); 1031 } 1032 1033 err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread); 1034 if (err) { 1035 connect_reply_upcall(ep, -ENOMEM); 1036 return; 1037 } 1038 1039 START_EP_TIMER(ep); 1040 state_set(&ep->com, MPA_REQ_SENT); 1041 ep->mpa_attr.initiator = 1; 1042 } 1043 1044 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 1045 { 1046 int mpalen ; 1047 struct mpa_message *mpa; 1048 struct mpa_v2_conn_params mpa_v2_params; 1049 struct mbuf *m; 1050 int err; 1051 1052 CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid, 1053 ep->plen); 1054 1055 mpalen = sizeof(*mpa) + plen; 1056 1057 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1058 1059 mpalen += sizeof(struct mpa_v2_conn_params); 1060 CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep, 1061 ep->mpa_attr.version, mpalen); 1062 } 1063 1064 if (mpalen > MHLEN) 1065 CXGBE_UNIMPLEMENTED(__func__); 1066 1067 m = m_gethdr(M_NOWAIT, MT_DATA); 1068 if (m == NULL) { 1069 1070 printf("%s - cannot alloc mbuf!\n", __func__); 1071 CTR2(KTR_IW_CXGBE, "%s:smrej2 %p", __func__, ep); 1072 return (-ENOMEM); 1073 } 1074 1075 1076 mpa = mtod(m, struct mpa_message *); 1077 m->m_len = mpalen; 1078 m->m_pkthdr.len = mpalen; 1079 memset(mpa, 0, sizeof(*mpa)); 1080 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1081 mpa->flags = MPA_REJECT; 1082 mpa->revision = mpa_rev; 1083 mpa->private_data_size = htons(plen); 1084 1085 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1086 1087 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1088 mpa->private_data_size += 1089 htons(sizeof(struct mpa_v2_conn_params)); 1090 mpa_v2_params.ird = htons(((u16)ep->ird) | 1091 (peer2peer ? MPA_V2_PEER2PEER_MODEL : 1092 0)); 1093 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? 1094 (p2p_type == 1095 FW_RI_INIT_P2PTYPE_RDMA_WRITE ? 1096 MPA_V2_RDMA_WRITE_RTR : p2p_type == 1097 FW_RI_INIT_P2PTYPE_READ_REQ ? 
1098 MPA_V2_RDMA_READ_RTR : 0) : 0)); 1099 memcpy(mpa->private_data, &mpa_v2_params, 1100 sizeof(struct mpa_v2_conn_params)); 1101 1102 if (ep->plen) 1103 memcpy(mpa->private_data + 1104 sizeof(struct mpa_v2_conn_params), pdata, plen); 1105 CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep, 1106 mpa_v2_params.ird, mpa_v2_params.ord, ep->plen); 1107 } else 1108 if (plen) 1109 memcpy(mpa->private_data, pdata, plen); 1110 1111 err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread); 1112 if (!err) 1113 ep->snd_seq += mpalen; 1114 CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err); 1115 return err; 1116 } 1117 1118 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 1119 { 1120 int mpalen; 1121 struct mpa_message *mpa; 1122 struct mbuf *m; 1123 struct mpa_v2_conn_params mpa_v2_params; 1124 int err; 1125 1126 CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep); 1127 1128 mpalen = sizeof(*mpa) + plen; 1129 1130 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1131 1132 CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep, 1133 ep->mpa_attr.version); 1134 mpalen += sizeof(struct mpa_v2_conn_params); 1135 } 1136 1137 if (mpalen > MHLEN) 1138 CXGBE_UNIMPLEMENTED(__func__); 1139 1140 m = m_gethdr(M_NOWAIT, MT_DATA); 1141 if (m == NULL) { 1142 1143 CTR2(KTR_IW_CXGBE, "%s:smrep2 %p", __func__, ep); 1144 printf("%s - cannot alloc mbuf!\n", __func__); 1145 return (-ENOMEM); 1146 } 1147 1148 1149 mpa = mtod(m, struct mpa_message *); 1150 m->m_len = mpalen; 1151 m->m_pkthdr.len = mpalen; 1152 memset(mpa, 0, sizeof(*mpa)); 1153 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1154 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 1155 (markers_enabled ? MPA_MARKERS : 0); 1156 mpa->revision = ep->mpa_attr.version; 1157 mpa->private_data_size = htons(plen); 1158 1159 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1160 1161 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1162 mpa->private_data_size += 1163 htons(sizeof(struct mpa_v2_conn_params)); 1164 mpa_v2_params.ird = htons((u16)ep->ird); 1165 mpa_v2_params.ord = htons((u16)ep->ord); 1166 CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep, 1167 ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord); 1168 1169 if (peer2peer && (ep->mpa_attr.p2p_type != 1170 FW_RI_INIT_P2PTYPE_DISABLED)) { 1171 1172 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1173 1174 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) { 1175 1176 mpa_v2_params.ord |= 1177 htons(MPA_V2_RDMA_WRITE_RTR); 1178 CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d", 1179 __func__, ep, p2p_type, mpa_v2_params.ird, 1180 mpa_v2_params.ord); 1181 } 1182 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) { 1183 1184 mpa_v2_params.ord |= 1185 htons(MPA_V2_RDMA_READ_RTR); 1186 CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d", 1187 __func__, ep, p2p_type, mpa_v2_params.ird, 1188 mpa_v2_params.ord); 1189 } 1190 } 1191 1192 memcpy(mpa->private_data, &mpa_v2_params, 1193 sizeof(struct mpa_v2_conn_params)); 1194 1195 if (ep->plen) 1196 memcpy(mpa->private_data + 1197 sizeof(struct mpa_v2_conn_params), pdata, plen); 1198 } else 1199 if (plen) 1200 memcpy(mpa->private_data, pdata, plen); 1201 1202 state_set(&ep->com, MPA_REP_SENT); 1203 ep->snd_seq += mpalen; 1204 err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, 1205 ep->com.thread); 1206 CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err); 1207 return err; 1208 } 1209 1210 1211 1212 static void close_complete_upcall(struct c4iw_ep *ep) 1213 { 1214 
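    /*
     * Deliver IW_CM_EVENT_CLOSE to the ULP and drop the endpoint's
     * references to the cm_id and qp; used once the connection is fully
     * closed or is being torn down.
     */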
    struct iw_cm_event event;

    CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
    memset(&event, 0, sizeof(event));
    event.event = IW_CM_EVENT_CLOSE;

    if (ep->com.cm_id) {
        CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
        ep->com.cm_id->event_handler(ep->com.cm_id, &event);
        ep->com.cm_id->rem_ref(ep->com.cm_id);
        ep->com.cm_id = NULL;
        ep->com.qp = NULL;
        set_bit(CLOSE_UPCALL, &ep->com.history);
    }
    CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
}

static int abort_connection(struct c4iw_ep *ep)
{
    int err;

    CTR2(KTR_IW_CXGBE, "%s:abB %p", __func__, ep);
    close_complete_upcall(ep);
    state_set(&ep->com, ABORTING);
    abort_socket(ep);
    err = close_socket(&ep->com, 0);
    set_bit(ABORT_CONN, &ep->com.history);
    CTR2(KTR_IW_CXGBE, "%s:abE %p", __func__, ep);
    return err;
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
    struct iw_cm_event event;

    CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
    memset(&event, 0, sizeof(event));
    event.event = IW_CM_EVENT_DISCONNECT;

    if (ep->com.cm_id) {
        CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
        ep->com.cm_id->event_handler(ep->com.cm_id, &event);
        set_bit(DISCONN_UPCALL, &ep->com.history);
    }
    CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
    struct iw_cm_event event;

    CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
    memset(&event, 0, sizeof(event));
    event.event = IW_CM_EVENT_CLOSE;
    event.status = -ECONNRESET;

    if (ep->com.cm_id) {
        CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
        ep->com.cm_id->event_handler(ep->com.cm_id, &event);
        ep->com.cm_id->rem_ref(ep->com.cm_id);
        ep->com.cm_id = NULL;
        ep->com.qp = NULL;
        set_bit(ABORT_UPCALL, &ep->com.history);
    }
    CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
    struct iw_cm_event event;

    CTR3(KTR_IW_CXGBE, "%s:cruB %p %d", __func__, ep, status);
    memset(&event, 0, sizeof(event));
    event.event = IW_CM_EVENT_CONNECT_REPLY;
    event.status = (status == -ECONNABORTED) ? -ECONNRESET : status;
    event.local_addr = ep->com.local_addr;
    event.remote_addr = ep->com.remote_addr;

    if ((status == 0) || (status == -ECONNREFUSED)) {

        if (!ep->tried_with_mpa_v1) {
            CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
            /* this means MPA_v2 is used */
            event.private_data_len = ep->plen -
                sizeof(struct mpa_v2_conn_params);
            event.private_data = ep->mpa_pkt +
                sizeof(struct mpa_message) +
                sizeof(struct mpa_v2_conn_params);
        } else {
            CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
            /* this means MPA_v1 is used */
            event.private_data_len = ep->plen;
            event.private_data = ep->mpa_pkt +
                sizeof(struct mpa_message);
        }
    }

    if (ep->com.cm_id) {
        CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
        set_bit(CONN_RPL_UPCALL, &ep->com.history);
        ep->com.cm_id->event_handler(ep->com.cm_id, &event);
    }

    if (status == -ECONNABORTED) {
        CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
        return;
    }

    if (status < 0) {
        CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
        ep->com.cm_id->rem_ref(ep->com.cm_id);
ep->com.cm_id = NULL; 1334 ep->com.qp = NULL; 1335 } 1336 1337 CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep); 1338 } 1339 1340 static void connect_request_upcall(struct c4iw_ep *ep) 1341 { 1342 struct iw_cm_event event; 1343 1344 CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep, 1345 ep->tried_with_mpa_v1); 1346 1347 memset(&event, 0, sizeof(event)); 1348 event.event = IW_CM_EVENT_CONNECT_REQUEST; 1349 event.local_addr = ep->com.local_addr; 1350 event.remote_addr = ep->com.remote_addr; 1351 event.provider_data = ep; 1352 event.so = ep->com.so; 1353 1354 if (!ep->tried_with_mpa_v1) { 1355 /* this means MPA_v2 is used */ 1356 #ifdef IW_CM_MPAV2 1357 event.ord = ep->ord; 1358 event.ird = ep->ird; 1359 #endif 1360 event.private_data_len = ep->plen - 1361 sizeof(struct mpa_v2_conn_params); 1362 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + 1363 sizeof(struct mpa_v2_conn_params); 1364 } else { 1365 1366 /* this means MPA_v1 is used. Send max supported */ 1367 #ifdef IW_CM_MPAV2 1368 event.ord = c4iw_max_read_depth; 1369 event.ird = c4iw_max_read_depth; 1370 #endif 1371 event.private_data_len = ep->plen; 1372 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 1373 } 1374 1375 c4iw_get_ep(&ep->com); 1376 ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, 1377 &event); 1378 set_bit(CONNREQ_UPCALL, &ep->com.history); 1379 c4iw_put_ep(&ep->parent_ep->com); 1380 } 1381 1382 static void established_upcall(struct c4iw_ep *ep) 1383 { 1384 struct iw_cm_event event; 1385 1386 CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep); 1387 memset(&event, 0, sizeof(event)); 1388 event.event = IW_CM_EVENT_ESTABLISHED; 1389 #ifdef IW_CM_MPAV2 1390 event.ird = ep->ird; 1391 event.ord = ep->ord; 1392 #endif 1393 if (ep->com.cm_id) { 1394 1395 CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep); 1396 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1397 set_bit(ESTAB_UPCALL, &ep->com.history); 1398 } 1399 CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep); 1400 } 1401 1402 1403 1404 static void process_mpa_reply(struct c4iw_ep *ep) 1405 { 1406 struct mpa_message *mpa; 1407 struct mpa_v2_conn_params *mpa_v2_params; 1408 u16 plen; 1409 u16 resp_ird, resp_ord; 1410 u8 rtr_mismatch = 0, insuff_ird = 0; 1411 struct c4iw_qp_attributes attrs; 1412 enum c4iw_qp_attr_mask mask; 1413 int err; 1414 struct mbuf *top, *m; 1415 int flags = MSG_DONTWAIT; 1416 struct uio uio; 1417 1418 CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep); 1419 1420 /* 1421 * Stop mpa timer. If it expired, then the state has 1422 * changed and we bail since ep_timeout already aborted 1423 * the connection. 1424 */ 1425 STOP_EP_TIMER(ep); 1426 if (state_read(&ep->com) != MPA_REQ_SENT) 1427 return; 1428 1429 uio.uio_resid = 1000000; 1430 uio.uio_td = ep->com.thread; 1431 err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags); 1432 1433 if (err) { 1434 1435 if (err == EWOULDBLOCK) { 1436 1437 CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep); 1438 START_EP_TIMER(ep); 1439 return; 1440 } 1441 err = -err; 1442 CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep); 1443 goto err; 1444 } 1445 1446 if (ep->com.so->so_rcv.sb_mb) { 1447 1448 CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep); 1449 printf("%s data after soreceive called! so %p sb_mb %p top %p\n", 1450 __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top); 1451 } 1452 1453 m = top; 1454 1455 do { 1456 1457 CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep); 1458 /* 1459 * If we get more than the supported amount of private data 1460 * then we must fail this connection. 
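 * (i.e. the accumulated bytes would overflow the ep->mpa_pkt buffer).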
1461 */ 1462 if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) { 1463 1464 CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep, 1465 ep->mpa_pkt_len + m->m_len); 1466 err = (-EINVAL); 1467 goto err; 1468 } 1469 1470 /* 1471 * copy the new data into our accumulation buffer. 1472 */ 1473 m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len])); 1474 ep->mpa_pkt_len += m->m_len; 1475 if (!m->m_next) 1476 m = m->m_nextpkt; 1477 else 1478 m = m->m_next; 1479 } while (m); 1480 1481 m_freem(top); 1482 /* 1483 * if we don't even have the mpa message, then bail. 1484 */ 1485 if (ep->mpa_pkt_len < sizeof(*mpa)) 1486 return; 1487 mpa = (struct mpa_message *) ep->mpa_pkt; 1488 1489 /* Validate MPA header. */ 1490 if (mpa->revision > mpa_rev) { 1491 1492 CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep, 1493 mpa->revision, mpa_rev); 1494 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, " 1495 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1496 err = -EPROTO; 1497 goto err; 1498 } 1499 1500 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 1501 1502 CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep); 1503 err = -EPROTO; 1504 goto err; 1505 } 1506 1507 plen = ntohs(mpa->private_data_size); 1508 1509 /* 1510 * Fail if there's too much private data. 1511 */ 1512 if (plen > MPA_MAX_PRIVATE_DATA) { 1513 1514 CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep); 1515 err = -EPROTO; 1516 goto err; 1517 } 1518 1519 /* 1520 * If plen does not account for pkt size 1521 */ 1522 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1523 1524 CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep); 1525 err = -EPROTO; 1526 goto err; 1527 } 1528 1529 ep->plen = (u8) plen; 1530 1531 /* 1532 * If we don't have all the pdata yet, then bail. 1533 * We'll continue process when more data arrives. 1534 */ 1535 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) { 1536 1537 CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep); 1538 return; 1539 } 1540 1541 if (mpa->flags & MPA_REJECT) { 1542 1543 CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep); 1544 err = -ECONNREFUSED; 1545 goto err; 1546 } 1547 1548 /* 1549 * If we get here we have accumulated the entire mpa 1550 * start reply message including private data. And 1551 * the MPA header is valid. 1552 */ 1553 state_set(&ep->com, FPDU_MODE); 1554 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1555 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1556 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1557 ep->mpa_attr.version = mpa->revision; 1558 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1559 1560 if (mpa->revision == 2) { 1561 1562 CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep); 1563 ep->mpa_attr.enhanced_rdma_conn = 1564 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1565 1566 if (ep->mpa_attr.enhanced_rdma_conn) { 1567 1568 CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep); 1569 mpa_v2_params = (struct mpa_v2_conn_params *) 1570 (ep->mpa_pkt + sizeof(*mpa)); 1571 resp_ird = ntohs(mpa_v2_params->ird) & 1572 MPA_V2_IRD_ORD_MASK; 1573 resp_ord = ntohs(mpa_v2_params->ord) & 1574 MPA_V2_IRD_ORD_MASK; 1575 1576 /* 1577 * This is a double-check. 
Ideally, below checks are 1578 * not required since ird/ord stuff has been taken 1579 * care of in c4iw_accept_cr 1580 */ 1581 if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) { 1582 1583 CTR2(KTR_IW_CXGBE, "%s:pmre %p", __func__, ep); 1584 err = -ENOMEM; 1585 ep->ird = resp_ord; 1586 ep->ord = resp_ird; 1587 insuff_ird = 1; 1588 } 1589 1590 if (ntohs(mpa_v2_params->ird) & 1591 MPA_V2_PEER2PEER_MODEL) { 1592 1593 CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep); 1594 if (ntohs(mpa_v2_params->ord) & 1595 MPA_V2_RDMA_WRITE_RTR) { 1596 1597 CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep); 1598 ep->mpa_attr.p2p_type = 1599 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1600 } 1601 else if (ntohs(mpa_v2_params->ord) & 1602 MPA_V2_RDMA_READ_RTR) { 1603 1604 CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep); 1605 ep->mpa_attr.p2p_type = 1606 FW_RI_INIT_P2PTYPE_READ_REQ; 1607 } 1608 } 1609 } 1610 } else { 1611 1612 CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep); 1613 1614 if (mpa->revision == 1) { 1615 1616 CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep); 1617 1618 if (peer2peer) { 1619 1620 CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep); 1621 ep->mpa_attr.p2p_type = p2p_type; 1622 } 1623 } 1624 } 1625 1626 if (set_tcpinfo(ep)) { 1627 1628 CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep); 1629 printf("%s set_tcpinfo error\n", __func__); 1630 goto err; 1631 } 1632 1633 CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, " 1634 "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__, 1635 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1636 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1637 ep->mpa_attr.p2p_type); 1638 1639 /* 1640 * If responder's RTR does not match with that of initiator, assign 1641 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not 1642 * generated when moving QP to RTS state. 1643 * A TERM message will be sent after QP has moved to RTS state 1644 */ 1645 if ((ep->mpa_attr.version == 2) && peer2peer && 1646 (ep->mpa_attr.p2p_type != p2p_type)) { 1647 1648 CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep); 1649 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1650 rtr_mismatch = 1; 1651 } 1652 1653 1654 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; 1655 attrs.mpa_attr = ep->mpa_attr; 1656 attrs.max_ird = ep->ird; 1657 attrs.max_ord = ep->ord; 1658 attrs.llp_stream_handle = ep; 1659 attrs.next_state = C4IW_QP_STATE_RTS; 1660 1661 mask = C4IW_QP_ATTR_NEXT_STATE | 1662 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 1663 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 1664 1665 /* bind QP and TID with INIT_WR */ 1666 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); 1667 1668 if (err) { 1669 1670 CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep); 1671 goto err; 1672 } 1673 1674 /* 1675 * If responder's RTR requirement did not match with what initiator 1676 * supports, generate TERM message 1677 */ 1678 if (rtr_mismatch) { 1679 1680 CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep); 1681 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); 1682 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1683 attrs.ecode = MPA_NOMATCH_RTR; 1684 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1685 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1686 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1687 err = -ENOMEM; 1688 goto out; 1689 } 1690 1691 /* 1692 * Generate TERM if initiator IRD is not sufficient for responder 1693 * provided ORD. 
Currently, we do the same behaviour even when 1694 * responder provided IRD is also not sufficient as regards to 1695 * initiator ORD. 1696 */ 1697 if (insuff_ird) { 1698 1699 CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep); 1700 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", 1701 __func__); 1702 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1703 attrs.ecode = MPA_INSUFF_IRD; 1704 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1705 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1706 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1707 err = -ENOMEM; 1708 goto out; 1709 } 1710 goto out; 1711 err: 1712 state_set(&ep->com, ABORTING); 1713 abort_connection(ep); 1714 out: 1715 connect_reply_upcall(ep, err); 1716 CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep); 1717 return; 1718 } 1719 1720 static void 1721 process_mpa_request(struct c4iw_ep *ep) 1722 { 1723 struct mpa_message *mpa; 1724 u16 plen; 1725 int flags = MSG_DONTWAIT; 1726 int rc; 1727 struct iovec iov; 1728 struct uio uio; 1729 enum c4iw_ep_state state = state_read(&ep->com); 1730 1731 CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]); 1732 1733 if (state != MPA_REQ_WAIT) 1734 return; 1735 1736 iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len]; 1737 iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; 1738 uio.uio_iov = &iov; 1739 uio.uio_iovcnt = 1; 1740 uio.uio_offset = 0; 1741 uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; 1742 uio.uio_segflg = UIO_SYSSPACE; 1743 uio.uio_rw = UIO_READ; 1744 uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */ 1745 1746 rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags); 1747 if (rc == EAGAIN) 1748 return; 1749 else if (rc) { 1750 abort: 1751 STOP_EP_TIMER(ep); 1752 abort_connection(ep); 1753 return; 1754 } 1755 KASSERT(uio.uio_offset > 0, ("%s: sorecieve on so %p read no data", 1756 __func__, ep->com.so)); 1757 ep->mpa_pkt_len += uio.uio_offset; 1758 1759 /* 1760 * If we get more than the supported amount of private data then we must 1761 * fail this connection. XXX: check so_rcv->sb_cc, or peek with another 1762 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last 1763 * byte is filled by the soreceive above. 1764 */ 1765 1766 /* Don't even have the MPA message. Wait for more data to arrive. */ 1767 if (ep->mpa_pkt_len < sizeof(*mpa)) 1768 return; 1769 mpa = (struct mpa_message *) ep->mpa_pkt; 1770 1771 /* 1772 * Validate MPA Header. 1773 */ 1774 if (mpa->revision > mpa_rev) { 1775 log(LOG_ERR, "%s: MPA version mismatch. Local = %d," 1776 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1777 goto abort; 1778 } 1779 1780 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) 1781 goto abort; 1782 1783 /* 1784 * Fail if there's too much private data. 1785 */ 1786 plen = ntohs(mpa->private_data_size); 1787 if (plen > MPA_MAX_PRIVATE_DATA) 1788 goto abort; 1789 1790 /* 1791 * If plen does not account for pkt size 1792 */ 1793 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) 1794 goto abort; 1795 1796 ep->plen = (u8) plen; 1797 1798 /* 1799 * If we don't have all the pdata yet, then bail. 1800 */ 1801 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1802 return; 1803 1804 /* 1805 * If we get here we have accumulated the entire mpa 1806 * start reply message including private data. 1807 */ 1808 ep->mpa_attr.initiator = 0; 1809 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1810 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1811 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 
1 : 0; 1812 ep->mpa_attr.version = mpa->revision; 1813 if (mpa->revision == 1) 1814 ep->tried_with_mpa_v1 = 1; 1815 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1816 1817 if (mpa->revision == 2) { 1818 ep->mpa_attr.enhanced_rdma_conn = 1819 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1820 if (ep->mpa_attr.enhanced_rdma_conn) { 1821 struct mpa_v2_conn_params *mpa_v2_params; 1822 u16 ird, ord; 1823 1824 mpa_v2_params = (void *)&ep->mpa_pkt[sizeof(*mpa)]; 1825 ird = ntohs(mpa_v2_params->ird); 1826 ord = ntohs(mpa_v2_params->ord); 1827 1828 ep->ird = ird & MPA_V2_IRD_ORD_MASK; 1829 ep->ord = ord & MPA_V2_IRD_ORD_MASK; 1830 if (ird & MPA_V2_PEER2PEER_MODEL && peer2peer) { 1831 if (ord & MPA_V2_RDMA_WRITE_RTR) { 1832 ep->mpa_attr.p2p_type = 1833 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1834 } else if (ord & MPA_V2_RDMA_READ_RTR) { 1835 ep->mpa_attr.p2p_type = 1836 FW_RI_INIT_P2PTYPE_READ_REQ; 1837 } 1838 } 1839 } 1840 } else if (mpa->revision == 1 && peer2peer) 1841 ep->mpa_attr.p2p_type = p2p_type; 1842 1843 if (set_tcpinfo(ep)) 1844 goto abort; 1845 1846 CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, " 1847 "xmit_marker_enabled = %d, version = %d", __func__, 1848 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1849 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); 1850 1851 state_set(&ep->com, MPA_REQ_RCVD); 1852 STOP_EP_TIMER(ep); 1853 1854 /* drive upcall */ 1855 mutex_lock(&ep->parent_ep->com.mutex); 1856 if (ep->parent_ep->com.state != DEAD) 1857 connect_request_upcall(ep); 1858 else 1859 abort_connection(ep); 1860 mutex_unlock(&ep->parent_ep->com.mutex); 1861 } 1862 1863 /* 1864 * Upcall from the adapter indicating data has been transmitted. 1865 * For us its just the single MPA request or reply. We can now free 1866 * the skb holding the mpa message. 
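 *
 * c4iw_reject_cr() below is the iwcm reject handler: it answers the
 * peer's MPA request with an MPA reject frame carrying the ULP's
 * private data (or simply aborts the connection when mpa_rev is 0)
 * and then shuts the socket down.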
1867 */ 1868 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 1869 { 1870 int err; 1871 struct c4iw_ep *ep = to_ep(cm_id); 1872 CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep); 1873 1874 if (state_read(&ep->com) == DEAD) { 1875 1876 CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep); 1877 c4iw_put_ep(&ep->com); 1878 return -ECONNRESET; 1879 } 1880 set_bit(ULP_REJECT, &ep->com.history); 1881 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1882 1883 if (mpa_rev == 0) { 1884 1885 CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep); 1886 abort_connection(ep); 1887 } 1888 else { 1889 1890 CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep); 1891 err = send_mpa_reject(ep, pdata, pdata_len); 1892 err = soshutdown(ep->com.so, 3); 1893 } 1894 c4iw_put_ep(&ep->com); 1895 CTR2(KTR_IW_CXGBE, "%s:crc4 %p", __func__, ep); 1896 return 0; 1897 } 1898 1899 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 1900 { 1901 int err; 1902 struct c4iw_qp_attributes attrs; 1903 enum c4iw_qp_attr_mask mask; 1904 struct c4iw_ep *ep = to_ep(cm_id); 1905 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 1906 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 1907 1908 CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep); 1909 1910 if (state_read(&ep->com) == DEAD) { 1911 1912 CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep); 1913 err = -ECONNRESET; 1914 goto err; 1915 } 1916 1917 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1918 BUG_ON(!qp); 1919 1920 set_bit(ULP_ACCEPT, &ep->com.history); 1921 1922 if ((conn_param->ord > c4iw_max_read_depth) || 1923 (conn_param->ird > c4iw_max_read_depth)) { 1924 1925 CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep); 1926 abort_connection(ep); 1927 err = -EINVAL; 1928 goto err; 1929 } 1930 1931 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1932 1933 CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep); 1934 1935 if (conn_param->ord > ep->ird) { 1936 1937 CTR2(KTR_IW_CXGBE, "%s:cac4 %p", __func__, ep); 1938 ep->ird = conn_param->ird; 1939 ep->ord = conn_param->ord; 1940 send_mpa_reject(ep, conn_param->private_data, 1941 conn_param->private_data_len); 1942 abort_connection(ep); 1943 err = -ENOMEM; 1944 goto err; 1945 } 1946 1947 if (conn_param->ird > ep->ord) { 1948 1949 CTR2(KTR_IW_CXGBE, "%s:cac5 %p", __func__, ep); 1950 1951 if (!ep->ord) { 1952 1953 CTR2(KTR_IW_CXGBE, "%s:cac6 %p", __func__, ep); 1954 conn_param->ird = 1; 1955 } 1956 else { 1957 CTR2(KTR_IW_CXGBE, "%s:cac7 %p", __func__, ep); 1958 abort_connection(ep); 1959 err = -ENOMEM; 1960 goto err; 1961 } 1962 } 1963 1964 } 1965 ep->ird = conn_param->ird; 1966 ep->ord = conn_param->ord; 1967 1968 if (ep->mpa_attr.version != 2) { 1969 1970 CTR2(KTR_IW_CXGBE, "%s:cac8 %p", __func__, ep); 1971 1972 if (peer2peer && ep->ird == 0) { 1973 1974 CTR2(KTR_IW_CXGBE, "%s:cac9 %p", __func__, ep); 1975 ep->ird = 1; 1976 } 1977 } 1978 1979 1980 cm_id->add_ref(cm_id); 1981 ep->com.cm_id = cm_id; 1982 ep->com.qp = qp; 1983 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; 1984 1985 /* bind QP to EP and move to RTS */ 1986 attrs.mpa_attr = ep->mpa_attr; 1987 attrs.max_ird = ep->ird; 1988 attrs.max_ord = ep->ord; 1989 attrs.llp_stream_handle = ep; 1990 attrs.next_state = C4IW_QP_STATE_RTS; 1991 1992 /* bind QP and TID with INIT_WR */ 1993 mask = C4IW_QP_ATTR_NEXT_STATE | 1994 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 1995 C4IW_QP_ATTR_MPA_ATTR | 1996 C4IW_QP_ATTR_MAX_IRD | 1997 C4IW_QP_ATTR_MAX_ORD; 1998 1999 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); 2000 2001 if (err) { 2002 2003 
CTR2(KTR_IW_CXGBE, "%s:caca %p", __func__, ep); 2004 goto err1; 2005 } 2006 err = send_mpa_reply(ep, conn_param->private_data, 2007 conn_param->private_data_len); 2008 2009 if (err) { 2010 2011 CTR2(KTR_IW_CXGBE, "%s:caca %p", __func__, ep); 2012 goto err1; 2013 } 2014 2015 state_set(&ep->com, FPDU_MODE); 2016 established_upcall(ep); 2017 c4iw_put_ep(&ep->com); 2018 CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep); 2019 return 0; 2020 err1: 2021 ep->com.cm_id = NULL; 2022 ep->com.qp = NULL; 2023 cm_id->rem_ref(cm_id); 2024 err: 2025 c4iw_put_ep(&ep->com); 2026 CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep); 2027 return err; 2028 } 2029 2030 2031 2032 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2033 { 2034 int err = 0; 2035 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2036 struct c4iw_ep *ep = NULL; 2037 struct rtentry *rt; 2038 struct toedev *tdev; 2039 2040 CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id); 2041 2042 if ((conn_param->ord > c4iw_max_read_depth) || 2043 (conn_param->ird > c4iw_max_read_depth)) { 2044 2045 CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id); 2046 err = -EINVAL; 2047 goto out; 2048 } 2049 ep = alloc_ep(sizeof(*ep), M_NOWAIT); 2050 2051 if (!ep) { 2052 2053 CTR2(KTR_IW_CXGBE, "%s:cc2 %p", __func__, cm_id); 2054 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 2055 err = -ENOMEM; 2056 goto out; 2057 } 2058 init_timer(&ep->timer); 2059 ep->plen = conn_param->private_data_len; 2060 2061 if (ep->plen) { 2062 2063 CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep); 2064 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 2065 conn_param->private_data, ep->plen); 2066 } 2067 ep->ird = conn_param->ird; 2068 ep->ord = conn_param->ord; 2069 2070 if (peer2peer && ep->ord == 0) { 2071 2072 CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep); 2073 ep->ord = 1; 2074 } 2075 2076 cm_id->add_ref(cm_id); 2077 ep->com.dev = dev; 2078 ep->com.cm_id = cm_id; 2079 ep->com.qp = get_qhp(dev, conn_param->qpn); 2080 2081 if (!ep->com.qp) { 2082 2083 CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep); 2084 err = -EINVAL; 2085 goto fail2; 2086 } 2087 ep->com.thread = curthread; 2088 ep->com.so = cm_id->so; 2089 2090 init_sock(&ep->com); 2091 2092 /* find a route */ 2093 rt = find_route( 2094 cm_id->local_addr.sin_addr.s_addr, 2095 cm_id->remote_addr.sin_addr.s_addr, 2096 cm_id->local_addr.sin_port, 2097 cm_id->remote_addr.sin_port, 0); 2098 2099 if (!rt) { 2100 2101 CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep); 2102 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 2103 err = -EHOSTUNREACH; 2104 goto fail3; 2105 } 2106 2107 2108 if (!(rt->rt_ifp->if_flags & IFCAP_TOE)) { 2109 2110 CTR2(KTR_IW_CXGBE, "%s:cc8 %p", __func__, ep); 2111 printf("%s - interface not TOE capable.\n", __func__); 2112 goto fail3; 2113 } 2114 tdev = TOEDEV(rt->rt_ifp); 2115 2116 if (tdev == NULL) { 2117 2118 CTR2(KTR_IW_CXGBE, "%s:cc9 %p", __func__, ep); 2119 printf("%s - No toedev for interface.\n", __func__); 2120 goto fail3; 2121 } 2122 RTFREE(rt); 2123 2124 state_set(&ep->com, CONNECTING); 2125 ep->tos = 0; 2126 ep->com.local_addr = cm_id->local_addr; 2127 ep->com.remote_addr = cm_id->remote_addr; 2128 err = soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr, 2129 ep->com.thread); 2130 2131 if (!err) { 2132 2133 CTR2(KTR_IW_CXGBE, "%s:cca %p", __func__, ep); 2134 goto out; 2135 } 2136 2137 fail3: 2138 CTR2(KTR_IW_CXGBE, "%s:ccb %p", __func__, ep); 2139 RTFREE(rt); 2140 fail2: 2141 cm_id->rem_ref(cm_id); 2142 c4iw_put_ep(&ep->com); 2143 out: 2144 
CTR2(KTR_IW_CXGBE, "%s:ccE %p", __func__, ep); 2145 return err; 2146 } 2147 2148 /* 2149 * iwcm->create_listen. Returns -errno on failure. 2150 */ 2151 int 2152 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 2153 { 2154 int rc; 2155 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2156 struct c4iw_listen_ep *ep; 2157 struct socket *so = cm_id->so; 2158 2159 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2160 CTR5(KTR_IW_CXGBE, "%s: cm_id %p, lso %p, ep %p, inp %p", __func__, 2161 cm_id, so, ep, so->so_pcb); 2162 if (ep == NULL) { 2163 log(LOG_ERR, "%s: failed to alloc memory for endpoint\n", 2164 __func__); 2165 rc = ENOMEM; 2166 goto failed; 2167 } 2168 2169 cm_id->add_ref(cm_id); 2170 ep->com.cm_id = cm_id; 2171 ep->com.dev = dev; 2172 ep->backlog = backlog; 2173 ep->com.local_addr = cm_id->local_addr; 2174 ep->com.thread = curthread; 2175 state_set(&ep->com, LISTEN); 2176 ep->com.so = so; 2177 init_sock(&ep->com); 2178 2179 rc = solisten(so, ep->backlog, ep->com.thread); 2180 if (rc != 0) { 2181 log(LOG_ERR, "%s: failed to start listener: %d\n", __func__, 2182 rc); 2183 close_socket(&ep->com, 0); 2184 cm_id->rem_ref(cm_id); 2185 c4iw_put_ep(&ep->com); 2186 goto failed; 2187 } 2188 2189 cm_id->provider_data = ep; 2190 return (0); 2191 2192 failed: 2193 CTR3(KTR_IW_CXGBE, "%s: cm_id %p, FAILED (%d)", __func__, cm_id, rc); 2194 return (-rc); 2195 } 2196 2197 int 2198 c4iw_destroy_listen(struct iw_cm_id *cm_id) 2199 { 2200 int rc; 2201 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 2202 2203 CTR4(KTR_IW_CXGBE, "%s: cm_id %p, so %p, inp %p", __func__, cm_id, 2204 cm_id->so, cm_id->so->so_pcb); 2205 2206 state_set(&ep->com, DEAD); 2207 rc = close_socket(&ep->com, 0); 2208 cm_id->rem_ref(cm_id); 2209 c4iw_put_ep(&ep->com); 2210 2211 return (rc); 2212 } 2213 2214 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2215 { 2216 int ret = 0; 2217 int close = 0; 2218 int fatal = 0; 2219 struct c4iw_rdev *rdev; 2220 2221 mutex_lock(&ep->com.mutex); 2222 2223 CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep); 2224 2225 rdev = &ep->com.dev->rdev; 2226 2227 if (c4iw_fatal_error(rdev)) { 2228 2229 CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep); 2230 fatal = 1; 2231 close_complete_upcall(ep); 2232 ep->com.state = DEAD; 2233 } 2234 CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep, 2235 states[ep->com.state]); 2236 2237 switch (ep->com.state) { 2238 2239 case MPA_REQ_WAIT: 2240 case MPA_REQ_SENT: 2241 case MPA_REQ_RCVD: 2242 case MPA_REP_SENT: 2243 case FPDU_MODE: 2244 close = 1; 2245 if (abrupt) 2246 ep->com.state = ABORTING; 2247 else { 2248 ep->com.state = CLOSING; 2249 START_EP_TIMER(ep); 2250 } 2251 set_bit(CLOSE_SENT, &ep->com.flags); 2252 break; 2253 2254 case CLOSING: 2255 2256 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 2257 2258 close = 1; 2259 if (abrupt) { 2260 STOP_EP_TIMER(ep); 2261 ep->com.state = ABORTING; 2262 } else 2263 ep->com.state = MORIBUND; 2264 } 2265 break; 2266 2267 case MORIBUND: 2268 case ABORTING: 2269 case DEAD: 2270 CTR3(KTR_IW_CXGBE, 2271 "%s ignoring disconnect ep %p state %u", __func__, 2272 ep, ep->com.state); 2273 break; 2274 2275 default: 2276 BUG(); 2277 break; 2278 } 2279 2280 mutex_unlock(&ep->com.mutex); 2281 2282 if (close) { 2283 2284 CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep); 2285 2286 if (abrupt) { 2287 2288 CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep); 2289 set_bit(EP_DISC_ABORT, &ep->com.history); 2290 ret = abort_connection(ep); 2291 } else { 2292 2293 CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep); 2294 
			set_bit(EP_DISC_CLOSE, &ep->com.history);

			if (!ep->parent_ep)
				__state_set(&ep->com, MORIBUND);
			ret = shutdown_socket(&ep->com);
		}

		if (ret) {
			fatal = 1;
		}
	}

	if (fatal) {
		release_ep_resources(ep);
		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
	}
	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
	return ret;
}

#ifdef C4IW_EP_REDIRECT
int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
    struct l2t_entry *l2t)
{
	struct c4iw_ep *ep = ctx;

	if (ep->dst != old)
		return 0;

	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
	    l2t);
	dst_hold(new);
	cxgb4_l2t_release(ep->l2t);
	ep->l2t = l2t;
	dst_release(old);
	ep->dst = new;
	return 1;
}
#endif

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	CTR2(KTR_IW_CXGBE, "%s:etB %p", __func__, ep);
	spin_lock(&timeout_lock);

	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		list_add_tail(&ep->entry, &timeout_list);
		kickit = 1;
	}
	spin_unlock(&timeout_lock);

	if (kickit) {
		CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
		queue_work(c4iw_taskq, &c4iw_task);
	}
	CTR2(KTR_IW_CXGBE, "%s:etE %p", __func__, ep);
}

static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
{
	uint64_t val = be64toh(*rpl);
	int ret;
	struct c4iw_wr_wait *wr_waitp;

	ret = (int)((val >> 8) & 0xff);
	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
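	/* Wake up whoever is sleeping on this work request's wr_wait. */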
	if (wr_waitp)
		c4iw_wake_up(wr_waitp, ret ? -ret : 0);

	return (0);
}

static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
{
	struct t4_cqe cqe = *(const struct t4_cqe *)(&rpl[0]);

	CTR2(KTR_IW_CXGBE, "%s rpl %p", __func__, rpl);
	c4iw_ev_dispatch(sc->iwarp_softc, &cqe);

	return (0);
}

static int terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rdma_terminate *rpl = (const void *)(rss + 1);
	unsigned int tid = GET_TID(rpl);
	struct c4iw_qp_attributes attrs;
	struct toepcb *toep = lookup_tid(sc, tid);
	struct socket *so = inp_inpcbtosocket(toep->inp);
	struct c4iw_ep *ep = so->so_rcv.sb_upcallarg;

	CTR3(KTR_IW_CXGBE, "%s:tB %p %d", __func__, ep, tid);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		    ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
		    &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
	CTR3(KTR_IW_CXGBE, "%s:tE %p %d", __func__, ep, tid);

	return 0;
}

void
c4iw_cm_init_cpl(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, terminate);
	t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, fw6_wr_rpl);
	t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, fw6_cqe_handler);
	t4_register_an_handler(sc, c4iw_ev_handler);
}

void
c4iw_cm_term_cpl(struct adapter *sc)
{

	t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, NULL);
	t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, NULL);
	t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, NULL);
}

int __init c4iw_cm_init(void)
{

	TAILQ_INIT(&req_list);
	spin_lock_init(&req_lock);
	INIT_LIST_HEAD(&timeout_list);
	spin_lock_init(&timeout_lock);

	INIT_WORK(&c4iw_task, process_req);

	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
	if (!c4iw_taskq)
		return -ENOMEM;

	return 0;
}

void __exit c4iw_cm_term(void)
{
	WARN_ON(!TAILQ_EMPTY(&req_list));
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(c4iw_taskq);
	destroy_workqueue(c4iw_taskq);
}
#endif