1 /* 2 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 */ 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_inet.h" 36 37 #ifdef TCP_OFFLOAD 38 #include <sys/types.h> 39 #include <sys/malloc.h> 40 #include <sys/socket.h> 41 #include <sys/socketvar.h> 42 #include <sys/sockio.h> 43 #include <sys/taskqueue.h> 44 #include <netinet/in.h> 45 #include <net/route.h> 46 47 #include <netinet/in_systm.h> 48 #include <netinet/in_pcb.h> 49 #include <netinet/ip.h> 50 #include <netinet/in_fib.h> 51 #include <netinet/ip_var.h> 52 #include <netinet/tcp_var.h> 53 #include <netinet/tcp.h> 54 #include <netinet/tcpip.h> 55 56 #include <netinet/toecore.h> 57 58 struct sge_iq; 59 struct rss_header; 60 #include <linux/types.h> 61 #include "offload.h" 62 #include "tom/t4_tom.h" 63 64 #define TOEPCB(so) ((struct toepcb *)(so_sototcpcb((so))->t_toe)) 65 66 #include "iw_cxgbe.h" 67 #include <linux/module.h> 68 #include <linux/workqueue.h> 69 #include <linux/notifier.h> 70 #include <linux/inetdevice.h> 71 #include <linux/if_vlan.h> 72 #include <net/netevent.h> 73 74 static spinlock_t req_lock; 75 static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list; 76 static struct work_struct c4iw_task; 77 static struct workqueue_struct *c4iw_taskq; 78 static LIST_HEAD(timeout_list); 79 static spinlock_t timeout_lock; 80 81 static void process_req(struct work_struct *ctx); 82 static void start_ep_timer(struct c4iw_ep *ep); 83 static int stop_ep_timer(struct c4iw_ep *ep); 84 static int set_tcpinfo(struct c4iw_ep *ep); 85 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc); 86 static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate); 87 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate); 88 static void *alloc_ep(int size, gfp_t flags); 89 void __free_ep(struct c4iw_ep_common *epc); 90 static int find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port, 91 __be16 peer_port, u8 tos, struct nhop4_extended *pnh4); 92 static int close_socket(struct c4iw_ep_common *epc, int close); 93 static int shutdown_socket(struct c4iw_ep_common *epc); 94 static void abort_socket(struct c4iw_ep *ep); 95 static void send_mpa_req(struct c4iw_ep *ep); 96 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen); 97 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen); 98 static void close_complete_upcall(struct c4iw_ep *ep, int status); 99 static int send_abort(struct c4iw_ep *ep); 100 static void peer_close_upcall(struct c4iw_ep *ep); 101 static void peer_abort_upcall(struct c4iw_ep *ep); 102 static void connect_reply_upcall(struct c4iw_ep *ep, int status); 103 static int connect_request_upcall(struct c4iw_ep *ep); 104 static void established_upcall(struct c4iw_ep *ep); 105 static int process_mpa_reply(struct c4iw_ep *ep); 106 static int process_mpa_request(struct c4iw_ep *ep); 107 static void process_peer_close(struct c4iw_ep *ep); 108 static void process_conn_error(struct c4iw_ep *ep); 109 static void process_close_complete(struct c4iw_ep *ep); 110 static void ep_timeout(unsigned long arg); 111 static void init_sock(struct c4iw_ep_common *epc); 112 static void process_data(struct c4iw_ep *ep); 113 static void process_connected(struct c4iw_ep *ep); 114 static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag); 115 static void process_socket_event(struct c4iw_ep *ep); 116 static void release_ep_resources(struct c4iw_ep *ep); 117 118 #define START_EP_TIMER(ep) \ 119 do { \ 120 CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \ 121 __func__, __LINE__, (ep)); \ 122 start_ep_timer(ep); \ 123 } while (0) 124 125 #define STOP_EP_TIMER(ep) \ 126 ({ \ 127 CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \ 128 __func__, __LINE__, (ep)); \ 129 stop_ep_timer(ep); \ 130 }) 131 132 #ifdef KTR 133 static char *states[] = { 134 "idle", 135 "listen", 136 "connecting", 137 "mpa_wait_req", 138 "mpa_req_sent", 139 "mpa_req_rcvd", 140 "mpa_rep_sent", 141 "fpdu_mode", 142 "aborting", 143 "closing", 144 "moribund", 145 "dead", 146 NULL, 147 }; 148 #endif 149 150 151 static void deref_cm_id(struct c4iw_ep_common *epc) 152 { 153 epc->cm_id->rem_ref(epc->cm_id); 154 epc->cm_id = NULL; 155 set_bit(CM_ID_DEREFED, &epc->history); 156 } 157 158 static void ref_cm_id(struct c4iw_ep_common *epc) 159 { 160 set_bit(CM_ID_REFED, &epc->history); 161 epc->cm_id->add_ref(epc->cm_id); 162 } 163 164 static void deref_qp(struct c4iw_ep *ep) 165 { 166 c4iw_qp_rem_ref(&ep->com.qp->ibqp); 167 clear_bit(QP_REFERENCED, &ep->com.flags); 168 set_bit(QP_DEREFED, &ep->com.history); 169 } 170 171 static void ref_qp(struct c4iw_ep *ep) 172 { 173 set_bit(QP_REFERENCED, &ep->com.flags); 174 set_bit(QP_REFED, &ep->com.history); 175 c4iw_qp_add_ref(&ep->com.qp->ibqp); 176 } 177 178 static void 179 process_req(struct work_struct *ctx) 180 { 181 struct c4iw_ep_common *epc; 182 183 spin_lock(&req_lock); 184 while (!TAILQ_EMPTY(&req_list)) { 185 epc = TAILQ_FIRST(&req_list); 186 TAILQ_REMOVE(&req_list, epc, entry); 187 epc->entry.tqe_prev = NULL; 188 spin_unlock(&req_lock); 189 if (epc->so) 190 process_socket_event((struct c4iw_ep *)epc); 191 c4iw_put_ep(epc); 192 spin_lock(&req_lock); 193 } 194 spin_unlock(&req_lock); 195 } 196 197 /* 198 * XXX: doesn't belong here in the iWARP driver. 199 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is 200 * set. Is this a valid assumption for active open? 201 */ 202 static int 203 set_tcpinfo(struct c4iw_ep *ep) 204 { 205 struct socket *so = ep->com.so; 206 struct inpcb *inp = sotoinpcb(so); 207 struct tcpcb *tp; 208 struct toepcb *toep; 209 int rc = 0; 210 211 INP_WLOCK(inp); 212 tp = intotcpcb(inp); 213 if ((tp->t_flags & TF_TOE) == 0) { 214 rc = EINVAL; 215 log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n", 216 __func__, so, ep); 217 goto done; 218 } 219 toep = TOEPCB(so); 220 221 ep->hwtid = toep->tid; 222 ep->snd_seq = tp->snd_nxt; 223 ep->rcv_seq = tp->rcv_nxt; 224 ep->emss = max(tp->t_maxseg, 128); 225 done: 226 INP_WUNLOCK(inp); 227 return (rc); 228 229 } 230 231 static int 232 find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port, 233 __be16 peer_port, u8 tos, struct nhop4_extended *pnh4) 234 { 235 struct in_addr addr; 236 int err; 237 238 CTR5(KTR_IW_CXGBE, "%s:frtB %x, %x, %d, %d", __func__, local_ip, 239 peer_ip, ntohs(local_port), ntohs(peer_port)); 240 241 addr.s_addr = peer_ip; 242 err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, addr, NHR_REF, 0, pnh4); 243 244 CTR2(KTR_IW_CXGBE, "%s:frtE %d", __func__, err); 245 return err; 246 } 247 248 static int 249 close_socket(struct c4iw_ep_common *epc, int close) 250 { 251 struct socket *so = epc->so; 252 int rc; 253 254 CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, epc, so, 255 states[epc->state]); 256 257 SOCK_LOCK(so); 258 soupcall_clear(so, SO_RCV); 259 SOCK_UNLOCK(so); 260 261 if (close) 262 rc = soclose(so); 263 else 264 rc = soshutdown(so, SHUT_WR | SHUT_RD); 265 epc->so = NULL; 266 267 return (rc); 268 } 269 270 static int 271 shutdown_socket(struct c4iw_ep_common *epc) 272 { 273 274 CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, epc->so, epc, 275 states[epc->state]); 276 277 return (soshutdown(epc->so, SHUT_WR)); 278 } 279 280 static void 281 abort_socket(struct c4iw_ep *ep) 282 { 283 struct sockopt sopt; 284 int rc; 285 struct linger l; 286 287 CTR4(KTR_IW_CXGBE, "%s ep %p so %p state %s", __func__, ep, ep->com.so, 288 states[ep->com.state]); 289 290 l.l_onoff = 1; 291 l.l_linger = 0; 292 293 /* linger_time of 0 forces RST to be sent */ 294 sopt.sopt_dir = SOPT_SET; 295 sopt.sopt_level = SOL_SOCKET; 296 sopt.sopt_name = SO_LINGER; 297 sopt.sopt_val = (caddr_t)&l; 298 sopt.sopt_valsize = sizeof l; 299 sopt.sopt_td = NULL; 300 rc = sosetopt(ep->com.so, &sopt); 301 if (rc) { 302 log(LOG_ERR, "%s: can't set linger to 0, no RST! err %d\n", 303 __func__, rc); 304 } 305 } 306 307 static void 308 process_peer_close(struct c4iw_ep *ep) 309 { 310 struct c4iw_qp_attributes attrs; 311 int disconnect = 1; 312 int release = 0; 313 314 CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep, 315 ep->com.so, states[ep->com.state]); 316 317 mutex_lock(&ep->com.mutex); 318 switch (ep->com.state) { 319 320 case MPA_REQ_WAIT: 321 CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT CLOSING", 322 __func__, ep); 323 __state_set(&ep->com, CLOSING); 324 break; 325 326 case MPA_REQ_SENT: 327 CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT CLOSING", 328 __func__, ep); 329 __state_set(&ep->com, DEAD); 330 connect_reply_upcall(ep, -ECONNABORTED); 331 332 disconnect = 0; 333 STOP_EP_TIMER(ep); 334 close_socket(&ep->com, 0); 335 deref_cm_id(&ep->com); 336 release = 1; 337 break; 338 339 case MPA_REQ_RCVD: 340 341 /* 342 * We're gonna mark this puppy DEAD, but keep 343 * the reference on it until the ULP accepts or 344 * rejects the CR. 345 */ 346 CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING", 347 __func__, ep); 348 __state_set(&ep->com, CLOSING); 349 c4iw_get_ep(&ep->com); 350 break; 351 352 case MPA_REP_SENT: 353 CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING", 354 __func__, ep); 355 __state_set(&ep->com, CLOSING); 356 break; 357 358 case FPDU_MODE: 359 CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING", 360 __func__, ep); 361 START_EP_TIMER(ep); 362 __state_set(&ep->com, CLOSING); 363 attrs.next_state = C4IW_QP_STATE_CLOSING; 364 c4iw_modify_qp(ep->com.dev, ep->com.qp, 365 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 366 peer_close_upcall(ep); 367 break; 368 369 case ABORTING: 370 CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)", 371 __func__, ep); 372 disconnect = 0; 373 break; 374 375 case CLOSING: 376 CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND", 377 __func__, ep); 378 __state_set(&ep->com, MORIBUND); 379 disconnect = 0; 380 break; 381 382 case MORIBUND: 383 CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__, 384 ep); 385 STOP_EP_TIMER(ep); 386 if (ep->com.cm_id && ep->com.qp) { 387 attrs.next_state = C4IW_QP_STATE_IDLE; 388 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 389 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 390 } 391 close_socket(&ep->com, 0); 392 close_complete_upcall(ep, 0); 393 __state_set(&ep->com, DEAD); 394 release = 1; 395 disconnect = 0; 396 break; 397 398 case DEAD: 399 CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)", 400 __func__, ep); 401 disconnect = 0; 402 break; 403 404 default: 405 panic("%s: ep %p state %d", __func__, ep, 406 ep->com.state); 407 break; 408 } 409 410 mutex_unlock(&ep->com.mutex); 411 412 if (disconnect) { 413 414 CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep); 415 c4iw_ep_disconnect(ep, 0, M_NOWAIT); 416 } 417 if (release) { 418 419 CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep); 420 c4iw_put_ep(&ep->com); 421 } 422 CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep); 423 return; 424 } 425 426 static void 427 process_conn_error(struct c4iw_ep *ep) 428 { 429 struct c4iw_qp_attributes attrs; 430 int ret; 431 int state; 432 433 state = state_read(&ep->com); 434 CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s", 435 __func__, ep, ep->com.so, ep->com.so->so_error, 436 states[ep->com.state]); 437 438 switch (state) { 439 440 case MPA_REQ_WAIT: 441 STOP_EP_TIMER(ep); 442 break; 443 444 case MPA_REQ_SENT: 445 STOP_EP_TIMER(ep); 446 connect_reply_upcall(ep, -ECONNRESET); 447 break; 448 449 case MPA_REP_SENT: 450 ep->com.rpl_err = ECONNRESET; 451 CTR1(KTR_IW_CXGBE, "waking up ep %p", ep); 452 break; 453 454 case MPA_REQ_RCVD: 455 456 /* 457 * We're gonna mark this puppy DEAD, but keep 458 * the reference on it until the ULP accepts or 459 * rejects the CR. 460 */ 461 c4iw_get_ep(&ep->com); 462 break; 463 464 case MORIBUND: 465 case CLOSING: 466 STOP_EP_TIMER(ep); 467 /*FALLTHROUGH*/ 468 case FPDU_MODE: 469 470 if (ep->com.cm_id && ep->com.qp) { 471 472 attrs.next_state = C4IW_QP_STATE_ERROR; 473 ret = c4iw_modify_qp(ep->com.qp->rhp, 474 ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, 475 &attrs, 1); 476 if (ret) 477 log(LOG_ERR, 478 "%s - qp <- error failed!\n", 479 __func__); 480 } 481 peer_abort_upcall(ep); 482 break; 483 484 case ABORTING: 485 break; 486 487 case DEAD: 488 CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!", 489 __func__, ep->com.so->so_error); 490 return; 491 492 default: 493 panic("%s: ep %p state %d", __func__, ep, state); 494 break; 495 } 496 497 if (state != ABORTING) { 498 499 CTR2(KTR_IW_CXGBE, "%s:pce1 %p", __func__, ep); 500 close_socket(&ep->com, 0); 501 state_set(&ep->com, DEAD); 502 c4iw_put_ep(&ep->com); 503 } 504 CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep); 505 return; 506 } 507 508 static void 509 process_close_complete(struct c4iw_ep *ep) 510 { 511 struct c4iw_qp_attributes attrs; 512 int release = 0; 513 514 CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep, 515 ep->com.so, states[ep->com.state]); 516 517 /* The cm_id may be null if we failed to connect */ 518 mutex_lock(&ep->com.mutex); 519 set_bit(CLOSE_CON_RPL, &ep->com.history); 520 521 switch (ep->com.state) { 522 523 case CLOSING: 524 CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND", 525 __func__, ep); 526 __state_set(&ep->com, MORIBUND); 527 break; 528 529 case MORIBUND: 530 CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__, 531 ep); 532 STOP_EP_TIMER(ep); 533 534 if ((ep->com.cm_id) && (ep->com.qp)) { 535 536 CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE", 537 __func__, ep); 538 attrs.next_state = C4IW_QP_STATE_IDLE; 539 c4iw_modify_qp(ep->com.dev, 540 ep->com.qp, 541 C4IW_QP_ATTR_NEXT_STATE, 542 &attrs, 1); 543 } 544 545 if (ep->parent_ep) { 546 547 CTR2(KTR_IW_CXGBE, "%s:pcc3 %p", __func__, ep); 548 close_socket(&ep->com, 1); 549 } 550 else { 551 552 CTR2(KTR_IW_CXGBE, "%s:pcc4 %p", __func__, ep); 553 close_socket(&ep->com, 0); 554 } 555 close_complete_upcall(ep, 0); 556 __state_set(&ep->com, DEAD); 557 release = 1; 558 break; 559 560 case ABORTING: 561 CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep); 562 break; 563 564 case DEAD: 565 default: 566 CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep); 567 panic("%s:pcc6 %p DEAD", __func__, ep); 568 break; 569 } 570 mutex_unlock(&ep->com.mutex); 571 572 if (release) { 573 574 CTR2(KTR_IW_CXGBE, "%s:pcc7 %p", __func__, ep); 575 c4iw_put_ep(&ep->com); 576 } 577 CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep); 578 return; 579 } 580 581 static void 582 init_sock(struct c4iw_ep_common *epc) 583 { 584 int rc; 585 struct sockopt sopt; 586 struct socket *so = epc->so; 587 int on = 1; 588 589 SOCK_LOCK(so); 590 soupcall_set(so, SO_RCV, c4iw_so_upcall, epc); 591 so->so_state |= SS_NBIO; 592 SOCK_UNLOCK(so); 593 sopt.sopt_dir = SOPT_SET; 594 sopt.sopt_level = IPPROTO_TCP; 595 sopt.sopt_name = TCP_NODELAY; 596 sopt.sopt_val = (caddr_t)&on; 597 sopt.sopt_valsize = sizeof on; 598 sopt.sopt_td = NULL; 599 rc = sosetopt(so, &sopt); 600 if (rc) { 601 log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n", 602 __func__, so, rc); 603 } 604 } 605 606 static void 607 process_data(struct c4iw_ep *ep) 608 { 609 struct sockaddr_in *local, *remote; 610 int disconnect = 0; 611 612 CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__, 613 ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv)); 614 615 switch (state_read(&ep->com)) { 616 case MPA_REQ_SENT: 617 disconnect = process_mpa_reply(ep); 618 break; 619 case MPA_REQ_WAIT: 620 in_getsockaddr(ep->com.so, (struct sockaddr **)&local); 621 in_getpeeraddr(ep->com.so, (struct sockaddr **)&remote); 622 ep->com.local_addr = *local; 623 ep->com.remote_addr = *remote; 624 free(local, M_SONAME); 625 free(remote, M_SONAME); 626 disconnect = process_mpa_request(ep); 627 break; 628 default: 629 if (sbused(&ep->com.so->so_rcv)) 630 log(LOG_ERR, "%s: Unexpected streaming data. ep %p, " 631 "state %d, so %p, so_state 0x%x, sbused %u\n", 632 __func__, ep, state_read(&ep->com), ep->com.so, 633 ep->com.so->so_state, sbused(&ep->com.so->so_rcv)); 634 break; 635 } 636 if (disconnect) 637 c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL); 638 639 } 640 641 static void 642 process_connected(struct c4iw_ep *ep) 643 { 644 645 if ((ep->com.so->so_state & SS_ISCONNECTED) && !ep->com.so->so_error) 646 send_mpa_req(ep); 647 else { 648 connect_reply_upcall(ep, -ep->com.so->so_error); 649 close_socket(&ep->com, 0); 650 state_set(&ep->com, DEAD); 651 c4iw_put_ep(&ep->com); 652 } 653 } 654 655 void 656 process_newconn(struct iw_cm_id *parent_cm_id, struct socket *child_so) 657 { 658 struct c4iw_ep *child_ep; 659 struct sockaddr_in *local; 660 struct sockaddr_in *remote; 661 struct c4iw_ep *parent_ep = parent_cm_id->provider_data; 662 663 if (!child_so) { 664 CTR4(KTR_IW_CXGBE, 665 "%s: parent so %p, parent ep %p, child so %p, invalid so", 666 __func__, parent_ep->com.so, parent_ep, child_so); 667 log(LOG_ERR, "%s: invalid child socket\n", __func__); 668 return; 669 } 670 child_ep = alloc_ep(sizeof(*child_ep), M_NOWAIT); 671 if (!child_ep) { 672 CTR3(KTR_IW_CXGBE, "%s: parent so %p, parent ep %p, ENOMEM", 673 __func__, parent_ep->com.so, parent_ep); 674 log(LOG_ERR, "%s: failed to allocate ep entry\n", __func__); 675 return; 676 } 677 SOCKBUF_LOCK(&child_so->so_rcv); 678 soupcall_set(child_so, SO_RCV, c4iw_so_upcall, child_ep); 679 SOCKBUF_UNLOCK(&child_so->so_rcv); 680 681 CTR5(KTR_IW_CXGBE, 682 "%s: parent so %p, parent ep %p, child so %p, child ep %p", 683 __func__, parent_ep->com.so, parent_ep, child_so, child_ep); 684 685 in_getsockaddr(child_so, (struct sockaddr **)&local); 686 in_getpeeraddr(child_so, (struct sockaddr **)&remote); 687 688 child_ep->com.local_addr = *local; 689 child_ep->com.remote_addr = *remote; 690 child_ep->com.dev = parent_ep->com.dev; 691 child_ep->com.so = child_so; 692 child_ep->com.cm_id = NULL; 693 child_ep->com.thread = parent_ep->com.thread; 694 child_ep->parent_ep = parent_ep; 695 696 free(local, M_SONAME); 697 free(remote, M_SONAME); 698 699 c4iw_get_ep(&parent_ep->com); 700 init_timer(&child_ep->timer); 701 state_set(&child_ep->com, MPA_REQ_WAIT); 702 START_EP_TIMER(child_ep); 703 704 /* maybe the request has already been queued up on the socket... */ 705 process_mpa_request(child_ep); 706 return; 707 } 708 709 static int 710 c4iw_so_upcall(struct socket *so, void *arg, int waitflag) 711 { 712 struct c4iw_ep *ep = arg; 713 714 spin_lock(&req_lock); 715 716 CTR6(KTR_IW_CXGBE, 717 "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p", 718 __func__, so, so->so_state, ep, states[ep->com.state], 719 ep->com.entry.tqe_prev); 720 721 if (ep && ep->com.so && !ep->com.entry.tqe_prev) { 722 KASSERT(ep->com.so == so, ("%s: XXX review.", __func__)); 723 c4iw_get_ep(&ep->com); 724 TAILQ_INSERT_TAIL(&req_list, &ep->com, entry); 725 queue_work(c4iw_taskq, &c4iw_task); 726 } 727 728 spin_unlock(&req_lock); 729 return (SU_OK); 730 } 731 732 static void 733 process_socket_event(struct c4iw_ep *ep) 734 { 735 int state = state_read(&ep->com); 736 struct socket *so = ep->com.so; 737 738 CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, " 739 "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state, 740 so->so_error, so->so_rcv.sb_state, ep, states[state]); 741 742 if (state == CONNECTING) { 743 process_connected(ep); 744 return; 745 } 746 747 if (state == LISTEN) { 748 /* socket listening events are handled at IWCM */ 749 CTR3(KTR_IW_CXGBE, "%s Invalid ep state:%u, ep:%p", __func__, 750 ep->com.state, ep); 751 BUG(); 752 return; 753 } 754 755 /* connection error */ 756 if (so->so_error) { 757 process_conn_error(ep); 758 return; 759 } 760 761 /* peer close */ 762 if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state < CLOSING) { 763 process_peer_close(ep); 764 return; 765 } 766 767 /* close complete */ 768 if (so->so_state & SS_ISDISCONNECTED) { 769 process_close_complete(ep); 770 return; 771 } 772 773 /* rx data */ 774 process_data(ep); 775 } 776 777 SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters"); 778 779 int db_delay_usecs = 1; 780 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_delay_usecs, CTLFLAG_RWTUN, &db_delay_usecs, 0, 781 "Usecs to delay awaiting db fifo to drain"); 782 783 static int dack_mode = 0; 784 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0, 785 "Delayed ack mode (default = 0)"); 786 787 int c4iw_max_read_depth = 8; 788 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0, 789 "Per-connection max ORD/IRD (default = 8)"); 790 791 static int enable_tcp_timestamps; 792 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0, 793 "Enable tcp timestamps (default = 0)"); 794 795 static int enable_tcp_sack; 796 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0, 797 "Enable tcp SACK (default = 0)"); 798 799 static int enable_tcp_window_scaling = 1; 800 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0, 801 "Enable tcp window scaling (default = 1)"); 802 803 int c4iw_debug = 1; 804 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0, 805 "Enable debug logging (default = 0)"); 806 807 static int peer2peer = 1; 808 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0, 809 "Support peer2peer ULPs (default = 1)"); 810 811 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ; 812 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0, 813 "RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)"); 814 815 static int ep_timeout_secs = 60; 816 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0, 817 "CM Endpoint operation timeout in seconds (default = 60)"); 818 819 static int mpa_rev = 1; 820 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0, 821 "MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)"); 822 823 static int markers_enabled; 824 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0, 825 "Enable MPA MARKERS (default(0) = disabled)"); 826 827 static int crc_enabled = 1; 828 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0, 829 "Enable MPA CRC (default(1) = enabled)"); 830 831 static int rcv_win = 256 * 1024; 832 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0, 833 "TCP receive window in bytes (default = 256KB)"); 834 835 static int snd_win = 128 * 1024; 836 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0, 837 "TCP send window in bytes (default = 128KB)"); 838 839 int db_fc_threshold = 2000; 840 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_fc_threshold, CTLFLAG_RWTUN, &db_fc_threshold, 0, 841 "QP count/threshold that triggers automatic"); 842 843 static void 844 start_ep_timer(struct c4iw_ep *ep) 845 { 846 847 if (timer_pending(&ep->timer)) { 848 CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep); 849 printk(KERN_ERR "%s timer already started! ep %p\n", __func__, 850 ep); 851 return; 852 } 853 clear_bit(TIMEOUT, &ep->com.flags); 854 c4iw_get_ep(&ep->com); 855 ep->timer.expires = jiffies + ep_timeout_secs * HZ; 856 ep->timer.data = (unsigned long)ep; 857 ep->timer.function = ep_timeout; 858 add_timer(&ep->timer); 859 } 860 861 static int 862 stop_ep_timer(struct c4iw_ep *ep) 863 { 864 865 del_timer_sync(&ep->timer); 866 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 867 c4iw_put_ep(&ep->com); 868 return 0; 869 } 870 return 1; 871 } 872 873 static enum 874 c4iw_ep_state state_read(struct c4iw_ep_common *epc) 875 { 876 enum c4iw_ep_state state; 877 878 mutex_lock(&epc->mutex); 879 state = epc->state; 880 mutex_unlock(&epc->mutex); 881 882 return (state); 883 } 884 885 static void 886 __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 887 { 888 889 epc->state = new; 890 } 891 892 static void 893 state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new) 894 { 895 896 mutex_lock(&epc->mutex); 897 __state_set(epc, new); 898 mutex_unlock(&epc->mutex); 899 } 900 901 static void * 902 alloc_ep(int size, gfp_t gfp) 903 { 904 struct c4iw_ep_common *epc; 905 906 epc = kzalloc(size, gfp); 907 if (epc == NULL) 908 return (NULL); 909 910 kref_init(&epc->kref); 911 mutex_init(&epc->mutex); 912 c4iw_init_wr_wait(&epc->wr_wait); 913 914 return (epc); 915 } 916 917 void 918 __free_ep(struct c4iw_ep_common *epc) 919 { 920 CTR2(KTR_IW_CXGBE, "%s:feB %p", __func__, epc); 921 KASSERT(!epc->so, ("%s warning ep->so %p \n", __func__, epc->so)); 922 KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list!\n", __func__, epc)); 923 free(epc, M_DEVBUF); 924 CTR2(KTR_IW_CXGBE, "%s:feE %p", __func__, epc); 925 } 926 927 void _c4iw_free_ep(struct kref *kref) 928 { 929 struct c4iw_ep *ep; 930 struct c4iw_ep_common *epc; 931 932 ep = container_of(kref, struct c4iw_ep, com.kref); 933 epc = &ep->com; 934 KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list", 935 __func__, epc)); 936 if (test_bit(QP_REFERENCED, &ep->com.flags)) 937 deref_qp(ep); 938 kfree(ep); 939 } 940 941 static void release_ep_resources(struct c4iw_ep *ep) 942 { 943 CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep); 944 set_bit(RELEASE_RESOURCES, &ep->com.flags); 945 c4iw_put_ep(&ep->com); 946 CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep); 947 } 948 949 static void 950 send_mpa_req(struct c4iw_ep *ep) 951 { 952 int mpalen; 953 struct mpa_message *mpa; 954 struct mpa_v2_conn_params mpa_v2_params; 955 struct mbuf *m; 956 char mpa_rev_to_use = mpa_rev; 957 int err; 958 959 if (ep->retry_with_mpa_v1) 960 mpa_rev_to_use = 1; 961 mpalen = sizeof(*mpa) + ep->plen; 962 if (mpa_rev_to_use == 2) 963 mpalen += sizeof(struct mpa_v2_conn_params); 964 965 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); 966 if (mpa == NULL) { 967 failed: 968 connect_reply_upcall(ep, -ENOMEM); 969 return; 970 } 971 972 memset(mpa, 0, mpalen); 973 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 974 mpa->flags = (crc_enabled ? MPA_CRC : 0) | 975 (markers_enabled ? MPA_MARKERS : 0) | 976 (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0); 977 mpa->private_data_size = htons(ep->plen); 978 mpa->revision = mpa_rev_to_use; 979 980 if (mpa_rev_to_use == 1) { 981 ep->tried_with_mpa_v1 = 1; 982 ep->retry_with_mpa_v1 = 0; 983 } 984 985 if (mpa_rev_to_use == 2) { 986 mpa->private_data_size += 987 htons(sizeof(struct mpa_v2_conn_params)); 988 mpa_v2_params.ird = htons((u16)ep->ird); 989 mpa_v2_params.ord = htons((u16)ep->ord); 990 991 if (peer2peer) { 992 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 993 994 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) { 995 mpa_v2_params.ord |= 996 htons(MPA_V2_RDMA_WRITE_RTR); 997 } else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) { 998 mpa_v2_params.ord |= 999 htons(MPA_V2_RDMA_READ_RTR); 1000 } 1001 } 1002 memcpy(mpa->private_data, &mpa_v2_params, 1003 sizeof(struct mpa_v2_conn_params)); 1004 1005 if (ep->plen) { 1006 1007 memcpy(mpa->private_data + 1008 sizeof(struct mpa_v2_conn_params), 1009 ep->mpa_pkt + sizeof(*mpa), ep->plen); 1010 } 1011 } else { 1012 1013 if (ep->plen) 1014 memcpy(mpa->private_data, 1015 ep->mpa_pkt + sizeof(*mpa), ep->plen); 1016 CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep); 1017 } 1018 1019 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); 1020 if (m == NULL) { 1021 free(mpa, M_CXGBE); 1022 goto failed; 1023 } 1024 m_copyback(m, 0, mpalen, (void *)mpa); 1025 free(mpa, M_CXGBE); 1026 1027 err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, 1028 ep->com.thread); 1029 if (err) 1030 goto failed; 1031 1032 START_EP_TIMER(ep); 1033 state_set(&ep->com, MPA_REQ_SENT); 1034 ep->mpa_attr.initiator = 1; 1035 } 1036 1037 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 1038 { 1039 int mpalen ; 1040 struct mpa_message *mpa; 1041 struct mpa_v2_conn_params mpa_v2_params; 1042 struct mbuf *m; 1043 int err; 1044 1045 CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid, 1046 ep->plen); 1047 1048 mpalen = sizeof(*mpa) + plen; 1049 1050 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1051 1052 mpalen += sizeof(struct mpa_v2_conn_params); 1053 CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep, 1054 ep->mpa_attr.version, mpalen); 1055 } 1056 1057 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); 1058 if (mpa == NULL) 1059 return (-ENOMEM); 1060 1061 memset(mpa, 0, mpalen); 1062 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1063 mpa->flags = MPA_REJECT; 1064 mpa->revision = mpa_rev; 1065 mpa->private_data_size = htons(plen); 1066 1067 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1068 1069 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1070 mpa->private_data_size += 1071 htons(sizeof(struct mpa_v2_conn_params)); 1072 mpa_v2_params.ird = htons(((u16)ep->ird) | 1073 (peer2peer ? MPA_V2_PEER2PEER_MODEL : 1074 0)); 1075 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? 1076 (p2p_type == 1077 FW_RI_INIT_P2PTYPE_RDMA_WRITE ? 1078 MPA_V2_RDMA_WRITE_RTR : p2p_type == 1079 FW_RI_INIT_P2PTYPE_READ_REQ ? 1080 MPA_V2_RDMA_READ_RTR : 0) : 0)); 1081 memcpy(mpa->private_data, &mpa_v2_params, 1082 sizeof(struct mpa_v2_conn_params)); 1083 1084 if (ep->plen) 1085 memcpy(mpa->private_data + 1086 sizeof(struct mpa_v2_conn_params), pdata, plen); 1087 CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep, 1088 mpa_v2_params.ird, mpa_v2_params.ord, ep->plen); 1089 } else 1090 if (plen) 1091 memcpy(mpa->private_data, pdata, plen); 1092 1093 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); 1094 if (m == NULL) { 1095 free(mpa, M_CXGBE); 1096 return (-ENOMEM); 1097 } 1098 m_copyback(m, 0, mpalen, (void *)mpa); 1099 free(mpa, M_CXGBE); 1100 1101 err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread); 1102 if (!err) 1103 ep->snd_seq += mpalen; 1104 CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err); 1105 return err; 1106 } 1107 1108 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 1109 { 1110 int mpalen; 1111 struct mpa_message *mpa; 1112 struct mbuf *m; 1113 struct mpa_v2_conn_params mpa_v2_params; 1114 int err; 1115 1116 CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep); 1117 1118 mpalen = sizeof(*mpa) + plen; 1119 1120 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1121 1122 CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep, 1123 ep->mpa_attr.version); 1124 mpalen += sizeof(struct mpa_v2_conn_params); 1125 } 1126 1127 mpa = malloc(mpalen, M_CXGBE, M_NOWAIT); 1128 if (mpa == NULL) 1129 return (-ENOMEM); 1130 1131 memset(mpa, 0, sizeof(*mpa)); 1132 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1133 mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) | 1134 (markers_enabled ? MPA_MARKERS : 0); 1135 mpa->revision = ep->mpa_attr.version; 1136 mpa->private_data_size = htons(plen); 1137 1138 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1139 1140 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1141 mpa->private_data_size += 1142 htons(sizeof(struct mpa_v2_conn_params)); 1143 mpa_v2_params.ird = htons((u16)ep->ird); 1144 mpa_v2_params.ord = htons((u16)ep->ord); 1145 CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep, 1146 ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord); 1147 1148 if (peer2peer && (ep->mpa_attr.p2p_type != 1149 FW_RI_INIT_P2PTYPE_DISABLED)) { 1150 1151 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1152 1153 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) { 1154 1155 mpa_v2_params.ord |= 1156 htons(MPA_V2_RDMA_WRITE_RTR); 1157 CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d", 1158 __func__, ep, p2p_type, mpa_v2_params.ird, 1159 mpa_v2_params.ord); 1160 } 1161 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) { 1162 1163 mpa_v2_params.ord |= 1164 htons(MPA_V2_RDMA_READ_RTR); 1165 CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d", 1166 __func__, ep, p2p_type, mpa_v2_params.ird, 1167 mpa_v2_params.ord); 1168 } 1169 } 1170 1171 memcpy(mpa->private_data, &mpa_v2_params, 1172 sizeof(struct mpa_v2_conn_params)); 1173 1174 if (ep->plen) 1175 memcpy(mpa->private_data + 1176 sizeof(struct mpa_v2_conn_params), pdata, plen); 1177 } else 1178 if (plen) 1179 memcpy(mpa->private_data, pdata, plen); 1180 1181 m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA); 1182 if (m == NULL) { 1183 free(mpa, M_CXGBE); 1184 return (-ENOMEM); 1185 } 1186 m_copyback(m, 0, mpalen, (void *)mpa); 1187 free(mpa, M_CXGBE); 1188 1189 1190 state_set(&ep->com, MPA_REP_SENT); 1191 ep->snd_seq += mpalen; 1192 err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, 1193 ep->com.thread); 1194 CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err); 1195 return err; 1196 } 1197 1198 1199 1200 static void close_complete_upcall(struct c4iw_ep *ep, int status) 1201 { 1202 struct iw_cm_event event; 1203 1204 CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep); 1205 memset(&event, 0, sizeof(event)); 1206 event.event = IW_CM_EVENT_CLOSE; 1207 event.status = status; 1208 1209 if (ep->com.cm_id) { 1210 1211 CTR2(KTR_IW_CXGBE, "%s:ccu1 %1", __func__, ep); 1212 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1213 deref_cm_id(&ep->com); 1214 set_bit(CLOSE_UPCALL, &ep->com.history); 1215 } 1216 CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep); 1217 } 1218 1219 static int send_abort(struct c4iw_ep *ep) 1220 { 1221 int err; 1222 1223 CTR2(KTR_IW_CXGBE, "%s:abB %p", __func__, ep); 1224 abort_socket(ep); 1225 err = close_socket(&ep->com, 0); 1226 set_bit(ABORT_CONN, &ep->com.history); 1227 CTR2(KTR_IW_CXGBE, "%s:abE %p", __func__, ep); 1228 return err; 1229 } 1230 1231 static void peer_close_upcall(struct c4iw_ep *ep) 1232 { 1233 struct iw_cm_event event; 1234 1235 CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep); 1236 memset(&event, 0, sizeof(event)); 1237 event.event = IW_CM_EVENT_DISCONNECT; 1238 1239 if (ep->com.cm_id) { 1240 1241 CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep); 1242 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1243 set_bit(DISCONN_UPCALL, &ep->com.history); 1244 } 1245 CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep); 1246 } 1247 1248 static void peer_abort_upcall(struct c4iw_ep *ep) 1249 { 1250 struct iw_cm_event event; 1251 1252 CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep); 1253 memset(&event, 0, sizeof(event)); 1254 event.event = IW_CM_EVENT_CLOSE; 1255 event.status = -ECONNRESET; 1256 1257 if (ep->com.cm_id) { 1258 1259 CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep); 1260 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1261 deref_cm_id(&ep->com); 1262 set_bit(ABORT_UPCALL, &ep->com.history); 1263 } 1264 CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep); 1265 } 1266 1267 static void connect_reply_upcall(struct c4iw_ep *ep, int status) 1268 { 1269 struct iw_cm_event event; 1270 1271 CTR3(KTR_IW_CXGBE, "%s:cruB %p", __func__, ep, status); 1272 memset(&event, 0, sizeof(event)); 1273 event.event = IW_CM_EVENT_CONNECT_REPLY; 1274 event.status = (status ==-ECONNABORTED)?-ECONNRESET: status; 1275 event.local_addr = ep->com.local_addr; 1276 event.remote_addr = ep->com.remote_addr; 1277 1278 if ((status == 0) || (status == -ECONNREFUSED)) { 1279 1280 if (!ep->tried_with_mpa_v1) { 1281 1282 CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep); 1283 /* this means MPA_v2 is used */ 1284 event.private_data_len = ep->plen - 1285 sizeof(struct mpa_v2_conn_params); 1286 event.private_data = ep->mpa_pkt + 1287 sizeof(struct mpa_message) + 1288 sizeof(struct mpa_v2_conn_params); 1289 } else { 1290 1291 CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep); 1292 /* this means MPA_v1 is used */ 1293 event.private_data_len = ep->plen; 1294 event.private_data = ep->mpa_pkt + 1295 sizeof(struct mpa_message); 1296 } 1297 } 1298 1299 if (ep->com.cm_id) { 1300 1301 CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep); 1302 set_bit(CONN_RPL_UPCALL, &ep->com.history); 1303 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1304 } 1305 1306 if(status == -ECONNABORTED) { 1307 1308 CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status); 1309 return; 1310 } 1311 1312 if (status < 0) { 1313 1314 CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status); 1315 deref_cm_id(&ep->com); 1316 } 1317 1318 CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep); 1319 } 1320 1321 static int connect_request_upcall(struct c4iw_ep *ep) 1322 { 1323 struct iw_cm_event event; 1324 int ret; 1325 1326 CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep, 1327 ep->tried_with_mpa_v1); 1328 1329 memset(&event, 0, sizeof(event)); 1330 event.event = IW_CM_EVENT_CONNECT_REQUEST; 1331 event.local_addr = ep->com.local_addr; 1332 event.remote_addr = ep->com.remote_addr; 1333 event.provider_data = ep; 1334 event.so = ep->com.so; 1335 1336 if (!ep->tried_with_mpa_v1) { 1337 /* this means MPA_v2 is used */ 1338 event.ord = ep->ord; 1339 event.ird = ep->ird; 1340 event.private_data_len = ep->plen - 1341 sizeof(struct mpa_v2_conn_params); 1342 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + 1343 sizeof(struct mpa_v2_conn_params); 1344 } else { 1345 1346 /* this means MPA_v1 is used. Send max supported */ 1347 event.ord = c4iw_max_read_depth; 1348 event.ird = c4iw_max_read_depth; 1349 event.private_data_len = ep->plen; 1350 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 1351 } 1352 1353 c4iw_get_ep(&ep->com); 1354 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, 1355 &event); 1356 if(ret) 1357 c4iw_put_ep(&ep->com); 1358 1359 set_bit(CONNREQ_UPCALL, &ep->com.history); 1360 c4iw_put_ep(&ep->parent_ep->com); 1361 return ret; 1362 } 1363 1364 static void established_upcall(struct c4iw_ep *ep) 1365 { 1366 struct iw_cm_event event; 1367 1368 CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep); 1369 memset(&event, 0, sizeof(event)); 1370 event.event = IW_CM_EVENT_ESTABLISHED; 1371 event.ird = ep->ird; 1372 event.ord = ep->ord; 1373 1374 if (ep->com.cm_id) { 1375 1376 CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep); 1377 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1378 set_bit(ESTAB_UPCALL, &ep->com.history); 1379 } 1380 CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep); 1381 } 1382 1383 1384 /* 1385 * process_mpa_reply - process streaming mode MPA reply 1386 * 1387 * Returns: 1388 * 1389 * 0 upon success indicating a connect request was delivered to the ULP 1390 * or the mpa request is incomplete but valid so far. 1391 * 1392 * 1 if a failure requires the caller to close the connection. 1393 * 1394 * 2 if a failure requires the caller to abort the connection. 1395 */ 1396 static int process_mpa_reply(struct c4iw_ep *ep) 1397 { 1398 struct mpa_message *mpa; 1399 struct mpa_v2_conn_params *mpa_v2_params; 1400 u16 plen; 1401 u16 resp_ird, resp_ord; 1402 u8 rtr_mismatch = 0, insuff_ird = 0; 1403 struct c4iw_qp_attributes attrs; 1404 enum c4iw_qp_attr_mask mask; 1405 int err; 1406 struct mbuf *top, *m; 1407 int flags = MSG_DONTWAIT; 1408 struct uio uio; 1409 int disconnect = 0; 1410 1411 CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep); 1412 1413 /* 1414 * Stop mpa timer. If it expired, then 1415 * we ignore the MPA reply. process_timeout() 1416 * will abort the connection. 1417 */ 1418 if (STOP_EP_TIMER(ep)) 1419 return 0; 1420 1421 uio.uio_resid = 1000000; 1422 uio.uio_td = ep->com.thread; 1423 err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags); 1424 1425 if (err) { 1426 1427 if (err == EWOULDBLOCK) { 1428 1429 CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep); 1430 START_EP_TIMER(ep); 1431 return 0; 1432 } 1433 err = -err; 1434 CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep); 1435 goto err; 1436 } 1437 1438 if (ep->com.so->so_rcv.sb_mb) { 1439 1440 CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep); 1441 printf("%s data after soreceive called! so %p sb_mb %p top %p\n", 1442 __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top); 1443 } 1444 1445 m = top; 1446 1447 do { 1448 1449 CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep); 1450 /* 1451 * If we get more than the supported amount of private data 1452 * then we must fail this connection. 1453 */ 1454 if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) { 1455 1456 CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep, 1457 ep->mpa_pkt_len + m->m_len); 1458 err = (-EINVAL); 1459 goto err_stop_timer; 1460 } 1461 1462 /* 1463 * copy the new data into our accumulation buffer. 1464 */ 1465 m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len])); 1466 ep->mpa_pkt_len += m->m_len; 1467 if (!m->m_next) 1468 m = m->m_nextpkt; 1469 else 1470 m = m->m_next; 1471 } while (m); 1472 1473 m_freem(top); 1474 /* 1475 * if we don't even have the mpa message, then bail. 1476 */ 1477 if (ep->mpa_pkt_len < sizeof(*mpa)) { 1478 return 0; 1479 } 1480 mpa = (struct mpa_message *) ep->mpa_pkt; 1481 1482 /* Validate MPA header. */ 1483 if (mpa->revision > mpa_rev) { 1484 1485 CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep, 1486 mpa->revision, mpa_rev); 1487 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, " 1488 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1489 err = -EPROTO; 1490 goto err_stop_timer; 1491 } 1492 1493 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 1494 1495 CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep); 1496 err = -EPROTO; 1497 goto err_stop_timer; 1498 } 1499 1500 plen = ntohs(mpa->private_data_size); 1501 1502 /* 1503 * Fail if there's too much private data. 1504 */ 1505 if (plen > MPA_MAX_PRIVATE_DATA) { 1506 1507 CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep); 1508 err = -EPROTO; 1509 goto err_stop_timer; 1510 } 1511 1512 /* 1513 * If plen does not account for pkt size 1514 */ 1515 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1516 1517 CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep); 1518 STOP_EP_TIMER(ep); 1519 err = -EPROTO; 1520 goto err_stop_timer; 1521 } 1522 1523 ep->plen = (u8) plen; 1524 1525 /* 1526 * If we don't have all the pdata yet, then bail. 1527 * We'll continue process when more data arrives. 1528 */ 1529 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) { 1530 1531 CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep); 1532 return 0; 1533 } 1534 1535 if (mpa->flags & MPA_REJECT) { 1536 1537 CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep); 1538 err = -ECONNREFUSED; 1539 goto err_stop_timer; 1540 } 1541 1542 /* 1543 * If we get here we have accumulated the entire mpa 1544 * start reply message including private data. And 1545 * the MPA header is valid. 1546 */ 1547 state_set(&ep->com, FPDU_MODE); 1548 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1549 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1550 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1551 ep->mpa_attr.version = mpa->revision; 1552 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1553 1554 if (mpa->revision == 2) { 1555 1556 CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep); 1557 ep->mpa_attr.enhanced_rdma_conn = 1558 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1559 1560 if (ep->mpa_attr.enhanced_rdma_conn) { 1561 1562 CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep); 1563 mpa_v2_params = (struct mpa_v2_conn_params *) 1564 (ep->mpa_pkt + sizeof(*mpa)); 1565 resp_ird = ntohs(mpa_v2_params->ird) & 1566 MPA_V2_IRD_ORD_MASK; 1567 resp_ord = ntohs(mpa_v2_params->ord) & 1568 MPA_V2_IRD_ORD_MASK; 1569 1570 /* 1571 * This is a double-check. Ideally, below checks are 1572 * not required since ird/ord stuff has been taken 1573 * care of in c4iw_accept_cr 1574 */ 1575 if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) { 1576 1577 CTR2(KTR_IW_CXGBE, "%s:pmre %p", __func__, ep); 1578 err = -ENOMEM; 1579 ep->ird = resp_ord; 1580 ep->ord = resp_ird; 1581 insuff_ird = 1; 1582 } 1583 1584 if (ntohs(mpa_v2_params->ird) & 1585 MPA_V2_PEER2PEER_MODEL) { 1586 1587 CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep); 1588 if (ntohs(mpa_v2_params->ord) & 1589 MPA_V2_RDMA_WRITE_RTR) { 1590 1591 CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep); 1592 ep->mpa_attr.p2p_type = 1593 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1594 } 1595 else if (ntohs(mpa_v2_params->ord) & 1596 MPA_V2_RDMA_READ_RTR) { 1597 1598 CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep); 1599 ep->mpa_attr.p2p_type = 1600 FW_RI_INIT_P2PTYPE_READ_REQ; 1601 } 1602 } 1603 } 1604 } else { 1605 1606 CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep); 1607 1608 if (mpa->revision == 1) { 1609 1610 CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep); 1611 1612 if (peer2peer) { 1613 1614 CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep); 1615 ep->mpa_attr.p2p_type = p2p_type; 1616 } 1617 } 1618 } 1619 1620 if (set_tcpinfo(ep)) { 1621 1622 CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep); 1623 printf("%s set_tcpinfo error\n", __func__); 1624 goto err; 1625 } 1626 1627 CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, " 1628 "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__, 1629 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1630 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1631 ep->mpa_attr.p2p_type); 1632 1633 /* 1634 * If responder's RTR does not match with that of initiator, assign 1635 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not 1636 * generated when moving QP to RTS state. 1637 * A TERM message will be sent after QP has moved to RTS state 1638 */ 1639 if ((ep->mpa_attr.version == 2) && peer2peer && 1640 (ep->mpa_attr.p2p_type != p2p_type)) { 1641 1642 CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep); 1643 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1644 rtr_mismatch = 1; 1645 } 1646 1647 1648 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; 1649 attrs.mpa_attr = ep->mpa_attr; 1650 attrs.max_ird = ep->ird; 1651 attrs.max_ord = ep->ord; 1652 attrs.llp_stream_handle = ep; 1653 attrs.next_state = C4IW_QP_STATE_RTS; 1654 1655 mask = C4IW_QP_ATTR_NEXT_STATE | 1656 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 1657 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 1658 1659 /* bind QP and TID with INIT_WR */ 1660 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); 1661 1662 if (err) { 1663 1664 CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep); 1665 goto err; 1666 } 1667 1668 /* 1669 * If responder's RTR requirement did not match with what initiator 1670 * supports, generate TERM message 1671 */ 1672 if (rtr_mismatch) { 1673 1674 CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep); 1675 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); 1676 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1677 attrs.ecode = MPA_NOMATCH_RTR; 1678 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1679 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1680 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1681 err = -ENOMEM; 1682 disconnect = 1; 1683 goto out; 1684 } 1685 1686 /* 1687 * Generate TERM if initiator IRD is not sufficient for responder 1688 * provided ORD. Currently, we do the same behaviour even when 1689 * responder provided IRD is also not sufficient as regards to 1690 * initiator ORD. 1691 */ 1692 if (insuff_ird) { 1693 1694 CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep); 1695 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", 1696 __func__); 1697 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1698 attrs.ecode = MPA_INSUFF_IRD; 1699 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1700 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1701 C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); 1702 err = -ENOMEM; 1703 disconnect = 1; 1704 goto out; 1705 } 1706 goto out; 1707 err_stop_timer: 1708 STOP_EP_TIMER(ep); 1709 err: 1710 disconnect = 2; 1711 out: 1712 connect_reply_upcall(ep, err); 1713 CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep); 1714 return disconnect; 1715 } 1716 1717 /* 1718 * process_mpa_request - process streaming mode MPA request 1719 * 1720 * Returns: 1721 * 1722 * 0 upon success indicating a connect request was delivered to the ULP 1723 * or the mpa request is incomplete but valid so far. 1724 * 1725 * 1 if a failure requires the caller to close the connection. 1726 * 1727 * 2 if a failure requires the caller to abort the connection. 1728 */ 1729 static int 1730 process_mpa_request(struct c4iw_ep *ep) 1731 { 1732 struct mpa_message *mpa; 1733 u16 plen; 1734 int flags = MSG_DONTWAIT; 1735 int rc; 1736 struct iovec iov; 1737 struct uio uio; 1738 enum c4iw_ep_state state = state_read(&ep->com); 1739 1740 CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]); 1741 1742 if (state != MPA_REQ_WAIT) 1743 return 0; 1744 1745 iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len]; 1746 iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; 1747 uio.uio_iov = &iov; 1748 uio.uio_iovcnt = 1; 1749 uio.uio_offset = 0; 1750 uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len; 1751 uio.uio_segflg = UIO_SYSSPACE; 1752 uio.uio_rw = UIO_READ; 1753 uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */ 1754 1755 rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags); 1756 if (rc == EAGAIN) 1757 return 0; 1758 else if (rc) 1759 goto err_stop_timer; 1760 1761 KASSERT(uio.uio_offset > 0, ("%s: sorecieve on so %p read no data", 1762 __func__, ep->com.so)); 1763 ep->mpa_pkt_len += uio.uio_offset; 1764 1765 /* 1766 * If we get more than the supported amount of private data then we must 1767 * fail this connection. XXX: check so_rcv->sb_cc, or peek with another 1768 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last 1769 * byte is filled by the soreceive above. 1770 */ 1771 1772 /* Don't even have the MPA message. Wait for more data to arrive. */ 1773 if (ep->mpa_pkt_len < sizeof(*mpa)) 1774 return 0; 1775 mpa = (struct mpa_message *) ep->mpa_pkt; 1776 1777 /* 1778 * Validate MPA Header. 1779 */ 1780 if (mpa->revision > mpa_rev) { 1781 log(LOG_ERR, "%s: MPA version mismatch. Local = %d," 1782 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1783 goto err_stop_timer; 1784 } 1785 1786 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) 1787 goto err_stop_timer; 1788 1789 /* 1790 * Fail if there's too much private data. 1791 */ 1792 plen = ntohs(mpa->private_data_size); 1793 if (plen > MPA_MAX_PRIVATE_DATA) 1794 goto err_stop_timer; 1795 1796 /* 1797 * If plen does not account for pkt size 1798 */ 1799 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) 1800 goto err_stop_timer; 1801 1802 ep->plen = (u8) plen; 1803 1804 /* 1805 * If we don't have all the pdata yet, then bail. 1806 */ 1807 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1808 return 0; 1809 1810 /* 1811 * If we get here we have accumulated the entire mpa 1812 * start reply message including private data. 1813 */ 1814 ep->mpa_attr.initiator = 0; 1815 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1816 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1817 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1818 ep->mpa_attr.version = mpa->revision; 1819 if (mpa->revision == 1) 1820 ep->tried_with_mpa_v1 = 1; 1821 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1822 1823 if (mpa->revision == 2) { 1824 ep->mpa_attr.enhanced_rdma_conn = 1825 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1826 if (ep->mpa_attr.enhanced_rdma_conn) { 1827 struct mpa_v2_conn_params *mpa_v2_params; 1828 u16 ird, ord; 1829 1830 mpa_v2_params = (void *)&ep->mpa_pkt[sizeof(*mpa)]; 1831 ird = ntohs(mpa_v2_params->ird); 1832 ord = ntohs(mpa_v2_params->ord); 1833 1834 ep->ird = ird & MPA_V2_IRD_ORD_MASK; 1835 ep->ord = ord & MPA_V2_IRD_ORD_MASK; 1836 if (ird & MPA_V2_PEER2PEER_MODEL && peer2peer) { 1837 if (ord & MPA_V2_RDMA_WRITE_RTR) { 1838 ep->mpa_attr.p2p_type = 1839 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1840 } else if (ord & MPA_V2_RDMA_READ_RTR) { 1841 ep->mpa_attr.p2p_type = 1842 FW_RI_INIT_P2PTYPE_READ_REQ; 1843 } 1844 } 1845 } 1846 } else if (mpa->revision == 1 && peer2peer) 1847 ep->mpa_attr.p2p_type = p2p_type; 1848 1849 if (set_tcpinfo(ep)) 1850 goto err_stop_timer; 1851 1852 CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, " 1853 "xmit_marker_enabled = %d, version = %d", __func__, 1854 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1855 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version); 1856 1857 state_set(&ep->com, MPA_REQ_RCVD); 1858 STOP_EP_TIMER(ep); 1859 1860 /* drive upcall */ 1861 mutex_lock(&ep->parent_ep->com.mutex); 1862 if (ep->parent_ep->com.state != DEAD) { 1863 if(connect_request_upcall(ep)) 1864 goto err_out; 1865 }else { 1866 goto err_out; 1867 } 1868 mutex_unlock(&ep->parent_ep->com.mutex); 1869 return 0; 1870 1871 err_stop_timer: 1872 STOP_EP_TIMER(ep); 1873 err_out: 1874 return 2; 1875 } 1876 1877 /* 1878 * Upcall from the adapter indicating data has been transmitted. 1879 * For us its just the single MPA request or reply. We can now free 1880 * the skb holding the mpa message. 1881 */ 1882 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 1883 { 1884 int err; 1885 struct c4iw_ep *ep = to_ep(cm_id); 1886 CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep); 1887 int disconnect = 0; 1888 1889 if (state_read(&ep->com) == DEAD) { 1890 1891 CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep); 1892 c4iw_put_ep(&ep->com); 1893 return -ECONNRESET; 1894 } 1895 set_bit(ULP_REJECT, &ep->com.history); 1896 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1897 1898 if (mpa_rev == 0) { 1899 1900 CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep); 1901 disconnect = 2; 1902 } 1903 else { 1904 1905 CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep); 1906 err = send_mpa_reject(ep, pdata, pdata_len); 1907 err = soshutdown(ep->com.so, 3); 1908 } 1909 c4iw_put_ep(&ep->com); 1910 if (disconnect) 1911 err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL); 1912 CTR2(KTR_IW_CXGBE, "%s:crc4 %p", __func__, ep); 1913 return 0; 1914 } 1915 1916 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 1917 { 1918 int err; 1919 struct c4iw_qp_attributes attrs; 1920 enum c4iw_qp_attr_mask mask; 1921 struct c4iw_ep *ep = to_ep(cm_id); 1922 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 1923 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 1924 int abort = 0; 1925 1926 CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep); 1927 1928 if (state_read(&ep->com) == DEAD) { 1929 1930 CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep); 1931 err = -ECONNRESET; 1932 goto err_out; 1933 } 1934 1935 BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD); 1936 BUG_ON(!qp); 1937 1938 set_bit(ULP_ACCEPT, &ep->com.history); 1939 1940 if ((conn_param->ord > c4iw_max_read_depth) || 1941 (conn_param->ird > c4iw_max_read_depth)) { 1942 1943 CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep); 1944 err = -EINVAL; 1945 goto err_abort; 1946 } 1947 1948 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1949 1950 CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep); 1951 1952 if (conn_param->ord > ep->ird) { 1953 1954 CTR2(KTR_IW_CXGBE, "%s:cac4 %p", __func__, ep); 1955 ep->ird = conn_param->ird; 1956 ep->ord = conn_param->ord; 1957 send_mpa_reject(ep, conn_param->private_data, 1958 conn_param->private_data_len); 1959 err = -ENOMEM; 1960 goto err_abort; 1961 } 1962 1963 if (conn_param->ird > ep->ord) { 1964 1965 CTR2(KTR_IW_CXGBE, "%s:cac5 %p", __func__, ep); 1966 1967 if (!ep->ord) { 1968 1969 CTR2(KTR_IW_CXGBE, "%s:cac6 %p", __func__, ep); 1970 conn_param->ird = 1; 1971 } 1972 else { 1973 CTR2(KTR_IW_CXGBE, "%s:cac7 %p", __func__, ep); 1974 err = -ENOMEM; 1975 goto err_abort; 1976 } 1977 } 1978 1979 } 1980 ep->ird = conn_param->ird; 1981 ep->ord = conn_param->ord; 1982 1983 if (ep->mpa_attr.version != 2) { 1984 1985 CTR2(KTR_IW_CXGBE, "%s:cac8 %p", __func__, ep); 1986 1987 if (peer2peer && ep->ird == 0) { 1988 1989 CTR2(KTR_IW_CXGBE, "%s:cac9 %p", __func__, ep); 1990 ep->ird = 1; 1991 } 1992 } 1993 1994 1995 ep->com.cm_id = cm_id; 1996 ref_cm_id(&ep->com); 1997 ep->com.qp = qp; 1998 ref_qp(ep); 1999 //ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; 2000 2001 /* bind QP to EP and move to RTS */ 2002 attrs.mpa_attr = ep->mpa_attr; 2003 attrs.max_ird = ep->ird; 2004 attrs.max_ord = ep->ord; 2005 attrs.llp_stream_handle = ep; 2006 attrs.next_state = C4IW_QP_STATE_RTS; 2007 2008 /* bind QP and TID with INIT_WR */ 2009 mask = C4IW_QP_ATTR_NEXT_STATE | 2010 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 2011 C4IW_QP_ATTR_MPA_ATTR | 2012 C4IW_QP_ATTR_MAX_IRD | 2013 C4IW_QP_ATTR_MAX_ORD; 2014 2015 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1); 2016 2017 if (err) { 2018 2019 CTR2(KTR_IW_CXGBE, "%s:caca %p", __func__, ep); 2020 goto err_defef_cm_id; 2021 } 2022 err = send_mpa_reply(ep, conn_param->private_data, 2023 conn_param->private_data_len); 2024 2025 if (err) { 2026 2027 CTR2(KTR_IW_CXGBE, "%s:caca %p", __func__, ep); 2028 goto err_defef_cm_id; 2029 } 2030 2031 state_set(&ep->com, FPDU_MODE); 2032 established_upcall(ep); 2033 c4iw_put_ep(&ep->com); 2034 CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep); 2035 return 0; 2036 err_defef_cm_id: 2037 deref_cm_id(&ep->com); 2038 err_abort: 2039 abort = 1; 2040 err_out: 2041 if (abort) 2042 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 2043 c4iw_put_ep(&ep->com); 2044 CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep); 2045 return err; 2046 } 2047 2048 2049 2050 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 2051 { 2052 int err = 0; 2053 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2054 struct c4iw_ep *ep = NULL; 2055 struct nhop4_extended nh4; 2056 struct toedev *tdev; 2057 2058 CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id); 2059 2060 if ((conn_param->ord > c4iw_max_read_depth) || 2061 (conn_param->ird > c4iw_max_read_depth)) { 2062 2063 CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id); 2064 err = -EINVAL; 2065 goto out; 2066 } 2067 ep = alloc_ep(sizeof(*ep), M_NOWAIT); 2068 2069 if (!ep) { 2070 2071 CTR2(KTR_IW_CXGBE, "%s:cc2 %p", __func__, cm_id); 2072 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 2073 err = -ENOMEM; 2074 goto out; 2075 } 2076 init_timer(&ep->timer); 2077 ep->plen = conn_param->private_data_len; 2078 2079 if (ep->plen) { 2080 2081 CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep); 2082 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 2083 conn_param->private_data, ep->plen); 2084 } 2085 ep->ird = conn_param->ird; 2086 ep->ord = conn_param->ord; 2087 2088 if (peer2peer && ep->ord == 0) { 2089 2090 CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep); 2091 ep->ord = 1; 2092 } 2093 2094 ep->com.dev = dev; 2095 ep->com.cm_id = cm_id; 2096 ref_cm_id(&ep->com); 2097 ep->com.qp = get_qhp(dev, conn_param->qpn); 2098 2099 if (!ep->com.qp) { 2100 2101 CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep); 2102 err = -EINVAL; 2103 goto fail2; 2104 } 2105 ref_qp(ep); 2106 ep->com.thread = curthread; 2107 ep->com.so = cm_id->so; 2108 2109 init_sock(&ep->com); 2110 2111 /* find a route */ 2112 err = find_route( 2113 cm_id->local_addr.sin_addr.s_addr, 2114 cm_id->remote_addr.sin_addr.s_addr, 2115 cm_id->local_addr.sin_port, 2116 cm_id->remote_addr.sin_port, 0, &nh4); 2117 2118 if (err) { 2119 2120 CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep); 2121 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 2122 err = -EHOSTUNREACH; 2123 goto fail2; 2124 } 2125 2126 if (!(nh4.nh_ifp->if_capenable & IFCAP_TOE)) { 2127 2128 CTR2(KTR_IW_CXGBE, "%s:cc8 %p", __func__, ep); 2129 printf("%s - interface not TOE capable.\n", __func__); 2130 close_socket(&ep->com, 0); 2131 err = -ENOPROTOOPT; 2132 goto fail3; 2133 } 2134 tdev = TOEDEV(nh4.nh_ifp); 2135 2136 if (tdev == NULL) { 2137 2138 CTR2(KTR_IW_CXGBE, "%s:cc9 %p", __func__, ep); 2139 printf("%s - No toedev for interface.\n", __func__); 2140 goto fail3; 2141 } 2142 fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4); 2143 2144 state_set(&ep->com, CONNECTING); 2145 ep->tos = 0; 2146 ep->com.local_addr = cm_id->local_addr; 2147 ep->com.remote_addr = cm_id->remote_addr; 2148 err = soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr, 2149 ep->com.thread); 2150 2151 if (!err) { 2152 CTR2(KTR_IW_CXGBE, "%s:cca %p", __func__, ep); 2153 goto out; 2154 } else { 2155 close_socket(&ep->com, 0); 2156 goto fail2; 2157 } 2158 2159 fail3: 2160 CTR2(KTR_IW_CXGBE, "%s:ccb %p", __func__, ep); 2161 fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4); 2162 fail2: 2163 deref_cm_id(&ep->com); 2164 c4iw_put_ep(&ep->com); 2165 out: 2166 CTR2(KTR_IW_CXGBE, "%s:ccE %p", __func__, ep); 2167 return err; 2168 } 2169 2170 /* 2171 * iwcm->create_listen_ep. Returns -errno on failure. 2172 */ 2173 int 2174 c4iw_create_listen_ep(struct iw_cm_id *cm_id, int backlog) 2175 { 2176 int rc; 2177 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 2178 struct c4iw_listen_ep *ep; 2179 struct socket *so = cm_id->so; 2180 2181 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 2182 CTR5(KTR_IW_CXGBE, "%s: cm_id %p, lso %p, ep %p, inp %p", __func__, 2183 cm_id, so, ep, so->so_pcb); 2184 if (ep == NULL) { 2185 log(LOG_ERR, "%s: failed to alloc memory for endpoint\n", 2186 __func__); 2187 rc = ENOMEM; 2188 goto failed; 2189 } 2190 2191 ep->com.cm_id = cm_id; 2192 ref_cm_id(&ep->com); 2193 ep->com.dev = dev; 2194 ep->backlog = backlog; 2195 ep->com.local_addr = cm_id->local_addr; 2196 ep->com.thread = curthread; 2197 state_set(&ep->com, LISTEN); 2198 ep->com.so = so; 2199 2200 cm_id->provider_data = ep; 2201 return (0); 2202 2203 failed: 2204 CTR3(KTR_IW_CXGBE, "%s: cm_id %p, FAILED (%d)", __func__, cm_id, rc); 2205 return (-rc); 2206 } 2207 2208 void 2209 c4iw_destroy_listen_ep(struct iw_cm_id *cm_id) 2210 { 2211 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 2212 2213 CTR4(KTR_IW_CXGBE, "%s: cm_id %p, so %p, state %s", __func__, cm_id, 2214 cm_id->so, states[ep->com.state]); 2215 2216 state_set(&ep->com, DEAD); 2217 deref_cm_id(&ep->com); 2218 c4iw_put_ep(&ep->com); 2219 2220 return; 2221 } 2222 2223 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 2224 { 2225 int ret = 0; 2226 int close = 0; 2227 int fatal = 0; 2228 struct c4iw_rdev *rdev; 2229 2230 mutex_lock(&ep->com.mutex); 2231 2232 CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep); 2233 2234 rdev = &ep->com.dev->rdev; 2235 2236 if (c4iw_fatal_error(rdev)) { 2237 2238 CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep); 2239 fatal = 1; 2240 close_complete_upcall(ep, -ECONNRESET); 2241 ep->com.state = DEAD; 2242 } 2243 CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep, 2244 states[ep->com.state]); 2245 2246 switch (ep->com.state) { 2247 2248 case MPA_REQ_WAIT: 2249 case MPA_REQ_SENT: 2250 case MPA_REQ_RCVD: 2251 case MPA_REP_SENT: 2252 case FPDU_MODE: 2253 close = 1; 2254 if (abrupt) 2255 ep->com.state = ABORTING; 2256 else { 2257 ep->com.state = CLOSING; 2258 START_EP_TIMER(ep); 2259 } 2260 set_bit(CLOSE_SENT, &ep->com.flags); 2261 break; 2262 2263 case CLOSING: 2264 2265 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 2266 2267 close = 1; 2268 if (abrupt) { 2269 STOP_EP_TIMER(ep); 2270 ep->com.state = ABORTING; 2271 } else 2272 ep->com.state = MORIBUND; 2273 } 2274 break; 2275 2276 case MORIBUND: 2277 case ABORTING: 2278 case DEAD: 2279 CTR3(KTR_IW_CXGBE, 2280 "%s ignoring disconnect ep %p state %u", __func__, 2281 ep, ep->com.state); 2282 break; 2283 2284 default: 2285 BUG(); 2286 break; 2287 } 2288 2289 mutex_unlock(&ep->com.mutex); 2290 2291 if (close) { 2292 2293 CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep); 2294 2295 if (abrupt) { 2296 2297 CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep); 2298 set_bit(EP_DISC_ABORT, &ep->com.history); 2299 close_complete_upcall(ep, -ECONNRESET); 2300 ret = send_abort(ep); 2301 } else { 2302 2303 CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep); 2304 set_bit(EP_DISC_CLOSE, &ep->com.history); 2305 2306 if (!ep->parent_ep) 2307 __state_set(&ep->com, MORIBUND); 2308 ret = shutdown_socket(&ep->com); 2309 } 2310 2311 if (ret) { 2312 2313 fatal = 1; 2314 } 2315 } 2316 2317 if (fatal) { 2318 set_bit(EP_DISC_FAIL, &ep->com.history); 2319 if (!abrupt) { 2320 STOP_EP_TIMER(ep); 2321 close_complete_upcall(ep, -EIO); 2322 } 2323 if (ep->com.qp) { 2324 struct c4iw_qp_attributes attrs; 2325 2326 attrs.next_state = C4IW_QP_STATE_ERROR; 2327 ret = c4iw_modify_qp(ep->com.dev, ep->com.qp, 2328 C4IW_QP_ATTR_NEXT_STATE, 2329 &attrs, 1); 2330 if (ret) { 2331 CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep); 2332 printf("%s - qp <- error failed!\n", __func__); 2333 } 2334 } 2335 release_ep_resources(ep); 2336 ep->com.state = DEAD; 2337 CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep); 2338 } 2339 CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep); 2340 return ret; 2341 } 2342 2343 #ifdef C4IW_EP_REDIRECT 2344 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new, 2345 struct l2t_entry *l2t) 2346 { 2347 struct c4iw_ep *ep = ctx; 2348 2349 if (ep->dst != old) 2350 return 0; 2351 2352 PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new, 2353 l2t); 2354 dst_hold(new); 2355 cxgb4_l2t_release(ep->l2t); 2356 ep->l2t = l2t; 2357 dst_release(old); 2358 ep->dst = new; 2359 return 1; 2360 } 2361 #endif 2362 2363 2364 2365 static void ep_timeout(unsigned long arg) 2366 { 2367 struct c4iw_ep *ep = (struct c4iw_ep *)arg; 2368 int kickit = 0; 2369 2370 CTR2(KTR_IW_CXGBE, "%s:etB %p", __func__, ep); 2371 spin_lock(&timeout_lock); 2372 2373 if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) { 2374 2375 /* 2376 * Only insert if it is not already on the list. 2377 */ 2378 if (!ep->entry.next) { 2379 list_add_tail(&ep->entry, &timeout_list); 2380 kickit = 1; 2381 } 2382 } 2383 spin_unlock(&timeout_lock); 2384 2385 if (kickit) { 2386 2387 CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep); 2388 queue_work(c4iw_taskq, &c4iw_task); 2389 } 2390 CTR2(KTR_IW_CXGBE, "%s:etE %p", __func__, ep); 2391 } 2392 2393 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl) 2394 { 2395 uint64_t val = be64toh(*rpl); 2396 int ret; 2397 struct c4iw_wr_wait *wr_waitp; 2398 2399 ret = (int)((val >> 8) & 0xff); 2400 wr_waitp = (struct c4iw_wr_wait *)rpl[1]; 2401 CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret); 2402 if (wr_waitp) 2403 c4iw_wake_up(wr_waitp, ret ? -ret : 0); 2404 2405 return (0); 2406 } 2407 2408 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl) 2409 { 2410 struct t4_cqe cqe =*(const struct t4_cqe *)(&rpl[0]); 2411 2412 CTR2(KTR_IW_CXGBE, "%s rpl %p", __func__, rpl); 2413 c4iw_ev_dispatch(sc->iwarp_softc, &cqe); 2414 2415 return (0); 2416 } 2417 2418 static int terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m) 2419 { 2420 struct adapter *sc = iq->adapter; 2421 const struct cpl_rdma_terminate *cpl = mtod(m, const void *); 2422 unsigned int tid = GET_TID(cpl); 2423 struct c4iw_qp_attributes attrs; 2424 struct toepcb *toep = lookup_tid(sc, tid); 2425 struct socket *so; 2426 struct c4iw_ep *ep; 2427 2428 INP_WLOCK(toep->inp); 2429 so = inp_inpcbtosocket(toep->inp); 2430 ep = so->so_rcv.sb_upcallarg; 2431 INP_WUNLOCK(toep->inp); 2432 2433 CTR2(KTR_IW_CXGBE, "%s:tB %p %d", __func__, ep); 2434 2435 if (ep && ep->com.qp) { 2436 2437 printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid, 2438 ep->com.qp->wq.sq.qid); 2439 attrs.next_state = C4IW_QP_STATE_TERMINATE; 2440 c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 2441 1); 2442 } else 2443 printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid); 2444 CTR2(KTR_IW_CXGBE, "%s:tE %p %d", __func__, ep); 2445 2446 return 0; 2447 } 2448 2449 void 2450 c4iw_cm_init_cpl(struct adapter *sc) 2451 { 2452 2453 t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, terminate); 2454 t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, fw6_wr_rpl); 2455 t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, fw6_cqe_handler); 2456 t4_register_an_handler(sc, c4iw_ev_handler); 2457 } 2458 2459 void 2460 c4iw_cm_term_cpl(struct adapter *sc) 2461 { 2462 2463 t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, NULL); 2464 t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, NULL); 2465 t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, NULL); 2466 } 2467 2468 int __init c4iw_cm_init(void) 2469 { 2470 2471 TAILQ_INIT(&req_list); 2472 spin_lock_init(&req_lock); 2473 INIT_LIST_HEAD(&timeout_list); 2474 spin_lock_init(&timeout_lock); 2475 2476 INIT_WORK(&c4iw_task, process_req); 2477 2478 c4iw_taskq = create_singlethread_workqueue("iw_cxgbe"); 2479 if (!c4iw_taskq) 2480 return -ENOMEM; 2481 2482 2483 return 0; 2484 } 2485 2486 void __exit c4iw_cm_term(void) 2487 { 2488 WARN_ON(!TAILQ_EMPTY(&req_list)); 2489 WARN_ON(!list_empty(&timeout_list)); 2490 flush_workqueue(c4iw_taskq); 2491 destroy_workqueue(c4iw_taskq); 2492 } 2493 #endif 2494