/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include "iw_cxgb4.h"
#include "clip_tbl.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	PDBG("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
			 struct l2t_entry *l2e)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		PDBG("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	struct cpl_tid_release *req;

	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
	if (!skb)
		return;
	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
	INIT_TP_WR(req, hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
	c4iw_ofld_send(rdev, skb);
	return;
}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		PDBG("Warning: misaligned mtu idx %u mss %u emss=%u\n",
		     TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
	     ep->mss, ep->emss);
}

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
	return;
}

static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
{
	struct sk_buff *skb;
	unsigned int i;
	size_t len;

	len = roundup(sizeof(union cpl_wr_size), 16);
	for (i = 0; i < size; i++) {
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			goto fail;
		skb_queue_tail(ep_skb_list, skb);
	}
	return 0;
fail:
	skb_queue_purge(ep_skb_list);
	return -ENOMEM;
}

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	PDBG("%s alloc ep %p\n", __func__, epc);
	return epc;
}

static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

static void insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

/*
 * Atomically lookup the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->hwtid_idr, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

/*
 * Atomically lookup the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{
	struct c4iw_listen_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->stid_idr, stid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	PDBG("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		if (ep->mpa_skb)
			kfree_skb(ep->mpa_skb);
	}
	if (!skb_queue_empty(&ep->com.ep_skb_list))
		skb_queue_purge(&ep->com.ep_skb_list);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the idr table
	 * so lookups will no longer find this endpoint.  Otherwise
	 * we have a race where one thread finds the ep ptr just
	 * before the other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (!skb)	/* alloc_skb() can fail; don't touch skb->cb below */
			return NULL;
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ?
: egress_dev; 465 } 466 467 static int our_interface(struct c4iw_dev *dev, struct net_device *egress_dev) 468 { 469 int i; 470 471 egress_dev = get_real_dev(egress_dev); 472 for (i = 0; i < dev->rdev.lldi.nports; i++) 473 if (dev->rdev.lldi.ports[i] == egress_dev) 474 return 1; 475 return 0; 476 } 477 478 static struct dst_entry *find_route6(struct c4iw_dev *dev, __u8 *local_ip, 479 __u8 *peer_ip, __be16 local_port, 480 __be16 peer_port, u8 tos, 481 __u32 sin6_scope_id) 482 { 483 struct dst_entry *dst = NULL; 484 485 if (IS_ENABLED(CONFIG_IPV6)) { 486 struct flowi6 fl6; 487 488 memset(&fl6, 0, sizeof(fl6)); 489 memcpy(&fl6.daddr, peer_ip, 16); 490 memcpy(&fl6.saddr, local_ip, 16); 491 if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) 492 fl6.flowi6_oif = sin6_scope_id; 493 dst = ip6_route_output(&init_net, NULL, &fl6); 494 if (!dst) 495 goto out; 496 if (!our_interface(dev, ip6_dst_idev(dst)->dev) && 497 !(ip6_dst_idev(dst)->dev->flags & IFF_LOOPBACK)) { 498 dst_release(dst); 499 dst = NULL; 500 } 501 } 502 503 out: 504 return dst; 505 } 506 507 static struct dst_entry *find_route(struct c4iw_dev *dev, __be32 local_ip, 508 __be32 peer_ip, __be16 local_port, 509 __be16 peer_port, u8 tos) 510 { 511 struct rtable *rt; 512 struct flowi4 fl4; 513 struct neighbour *n; 514 515 rt = ip_route_output_ports(&init_net, &fl4, NULL, peer_ip, local_ip, 516 peer_port, local_port, IPPROTO_TCP, 517 tos, 0); 518 if (IS_ERR(rt)) 519 return NULL; 520 n = dst_neigh_lookup(&rt->dst, &peer_ip); 521 if (!n) 522 return NULL; 523 if (!our_interface(dev, n->dev) && 524 !(n->dev->flags & IFF_LOOPBACK)) { 525 neigh_release(n); 526 dst_release(&rt->dst); 527 return NULL; 528 } 529 neigh_release(n); 530 return &rt->dst; 531 } 532 533 static void arp_failure_discard(void *handle, struct sk_buff *skb) 534 { 535 pr_err(MOD "ARP failure\n"); 536 kfree_skb(skb); 537 } 538 539 static void mpa_start_arp_failure(void *handle, struct sk_buff *skb) 540 { 541 pr_err("ARP failure during MPA Negotiation - Closing Connection\n"); 542 } 543 544 enum { 545 NUM_FAKE_CPLS = 2, 546 FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0, 547 FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1, 548 }; 549 550 static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) 551 { 552 struct c4iw_ep *ep; 553 554 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); 555 release_ep_resources(ep); 556 return 0; 557 } 558 559 static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb) 560 { 561 struct c4iw_ep *ep; 562 563 ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))); 564 c4iw_put_ep(&ep->parent_ep->com); 565 release_ep_resources(ep); 566 return 0; 567 } 568 569 /* 570 * Fake up a special CPL opcode and call sched() so process_work() will call 571 * _put_ep_safe() in a safe context to free the ep resources. This is needed 572 * because ARP error handlers are called in an ATOMIC context, and 573 * _c4iw_free_ep() needs to block. 574 */ 575 static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb, 576 int cpl) 577 { 578 struct cpl_act_establish *rpl = cplhdr(skb); 579 580 /* Set our special ARP_FAILURE opcode */ 581 rpl->ot.opcode = cpl; 582 583 /* 584 * Save ep in the skb->cb area, after where sched() will save the dev 585 * ptr. 
586 */ 587 *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep; 588 sched(ep->com.dev, skb); 589 } 590 591 /* Handle an ARP failure for an accept */ 592 static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb) 593 { 594 struct c4iw_ep *ep = handle; 595 596 pr_err(MOD "ARP failure during accept - tid %u -dropping connection\n", 597 ep->hwtid); 598 599 __state_set(&ep->com, DEAD); 600 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE); 601 } 602 603 /* 604 * Handle an ARP failure for an active open. 605 */ 606 static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 607 { 608 struct c4iw_ep *ep = handle; 609 610 printk(KERN_ERR MOD "ARP failure during connect\n"); 611 connect_reply_upcall(ep, -EHOSTUNREACH); 612 __state_set(&ep->com, DEAD); 613 if (ep->com.remote_addr.ss_family == AF_INET6) { 614 struct sockaddr_in6 *sin6 = 615 (struct sockaddr_in6 *)&ep->com.local_addr; 616 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 617 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 618 } 619 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 620 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 621 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); 622 } 623 624 /* 625 * Handle an ARP failure for a CPL_ABORT_REQ. Change it into a no RST variant 626 * and send it along. 627 */ 628 static void abort_arp_failure(void *handle, struct sk_buff *skb) 629 { 630 int ret; 631 struct c4iw_ep *ep = handle; 632 struct c4iw_rdev *rdev = &ep->com.dev->rdev; 633 struct cpl_abort_req *req = cplhdr(skb); 634 635 PDBG("%s rdev %p\n", __func__, rdev); 636 req->cmd = CPL_ABORT_NO_RST; 637 ret = c4iw_ofld_send(rdev, skb); 638 if (ret) { 639 __state_set(&ep->com, DEAD); 640 queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE); 641 } 642 } 643 644 static int send_flowc(struct c4iw_ep *ep) 645 { 646 struct fw_flowc_wr *flowc; 647 struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list); 648 int i; 649 u16 vlan = ep->l2t->vlan; 650 int nparams; 651 652 if (WARN_ON(!skb)) 653 return -ENOMEM; 654 655 if (vlan == CPL_L2T_VLAN_NONE) 656 nparams = 8; 657 else 658 nparams = 9; 659 660 flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN); 661 662 flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | 663 FW_FLOWC_WR_NPARAMS_V(nparams)); 664 flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN, 665 16)) | FW_WR_FLOWID_V(ep->hwtid)); 666 667 flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN; 668 flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V 669 (ep->com.dev->rdev.lldi.pf)); 670 flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH; 671 flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan); 672 flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT; 673 flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan); 674 flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID; 675 flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid); 676 flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT; 677 flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq); 678 flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT; 679 flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq); 680 flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF; 681 flowc->mnemval[6].val = cpu_to_be32(ep->snd_win); 682 flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS; 683 flowc->mnemval[7].val = cpu_to_be32(ep->emss); 684 if (nparams == 9) { 685 u16 pri; 686 687 pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; 688 flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS; 689 flowc->mnemval[8].val = cpu_to_be32(pri); 690 } else { 691 /* Pad WR to 16 
						byte boundary */
		flowc->mnemval[8].mnemonic = 0;
		flowc->mnemval[8].val = 0;
	}
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}

static int send_halfclose(struct c4iw_ep *ep)
{
	struct cpl_close_con_req *req;
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	int wrlen = roundup(sizeof *req, 16);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (WARN_ON(!skb))
		return -ENOMEM;

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
						    ep->hwtid));
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep)
{
	struct cpl_abort_req *req;
	int wrlen = roundup(sizeof *req, 16);
	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (WARN_ON(!req_skb))
		return -ENOMEM;

	set_wr_txq(req_skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(req_skb, ep, abort_arp_failure);
	req = (struct cpl_abort_req *)skb_put(req_skb, wrlen);
	memset(req, 0, wrlen);
	INIT_TP_WR(req, ep->hwtid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
	req->cmd = CPL_ABORT_SEND_RST;
	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}

static void best_mtu(const unsigned short *mtus, unsigned short mtu,
		     unsigned int *idx, int use_ts, int ipv6)
{
	unsigned short hdr_size = (ipv6 ?
				   sizeof(struct ipv6hdr) :
				   sizeof(struct iphdr)) +
				  sizeof(struct tcphdr) +
				  (use_ts ?
				   round_up(TCPOLEN_TIMESTAMP, 4) : 0);
	unsigned short data_size = mtu - hdr_size;

	cxgb4_best_aligned_mtu(mtus, hdr_size, data_size, 8, idx);
}

static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	int wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
		       __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		 enable_tcp_timestamps,
		 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1);
	wscale = compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;

	opt0 = (nocong ?
NO_CONG_F : 0) | 832 KEEP_ALIVE_F | 833 DELACK_F | 834 WND_SCALE_V(wscale) | 835 MSS_IDX_V(mtu_idx) | 836 L2T_IDX_V(ep->l2t->idx) | 837 TX_CHAN_V(ep->tx_chan) | 838 SMAC_SEL_V(ep->smac_idx) | 839 DSCP_V(ep->tos >> 2) | 840 ULP_MODE_V(ULP_MODE_TCPDDP) | 841 RCV_BUFSIZ_V(win); 842 opt2 = RX_CHANNEL_V(0) | 843 CCTRL_ECN_V(enable_ecn) | 844 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); 845 if (enable_tcp_timestamps) 846 opt2 |= TSTAMPS_EN_F; 847 if (enable_tcp_sack) 848 opt2 |= SACK_EN_F; 849 if (wscale && enable_tcp_window_scaling) 850 opt2 |= WND_SCALE_EN_F; 851 if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) { 852 if (peer2peer) 853 isn += 4; 854 855 opt2 |= T5_OPT_2_VALID_F; 856 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); 857 opt2 |= T5_ISS_F; 858 } 859 860 if (ep->com.remote_addr.ss_family == AF_INET6) 861 cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], 862 (const u32 *)&la6->sin6_addr.s6_addr, 1); 863 864 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure); 865 866 if (ep->com.remote_addr.ss_family == AF_INET) { 867 switch (CHELSIO_CHIP_VERSION(adapter_type)) { 868 case CHELSIO_T4: 869 req = (struct cpl_act_open_req *)skb_put(skb, wrlen); 870 INIT_TP_WR(req, 0); 871 break; 872 case CHELSIO_T5: 873 t5req = (struct cpl_t5_act_open_req *)skb_put(skb, 874 wrlen); 875 INIT_TP_WR(t5req, 0); 876 req = (struct cpl_act_open_req *)t5req; 877 break; 878 case CHELSIO_T6: 879 t6req = (struct cpl_t6_act_open_req *)skb_put(skb, 880 wrlen); 881 INIT_TP_WR(t6req, 0); 882 req = (struct cpl_act_open_req *)t6req; 883 t5req = (struct cpl_t5_act_open_req *)t6req; 884 break; 885 default: 886 pr_err("T%d Chip is not supported\n", 887 CHELSIO_CHIP_VERSION(adapter_type)); 888 ret = -EINVAL; 889 goto clip_release; 890 } 891 892 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, 893 ((ep->rss_qid<<14) | ep->atid))); 894 req->local_port = la->sin_port; 895 req->peer_port = ra->sin_port; 896 req->local_ip = la->sin_addr.s_addr; 897 req->peer_ip = ra->sin_addr.s_addr; 898 req->opt0 = cpu_to_be64(opt0); 899 900 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 901 req->params = cpu_to_be32(cxgb4_select_ntuple( 902 ep->com.dev->rdev.lldi.ports[0], 903 ep->l2t)); 904 req->opt2 = cpu_to_be32(opt2); 905 } else { 906 t5req->params = cpu_to_be64(FILTER_TUPLE_V( 907 cxgb4_select_ntuple( 908 ep->com.dev->rdev.lldi.ports[0], 909 ep->l2t))); 910 t5req->rsvd = cpu_to_be32(isn); 911 PDBG("%s snd_isn %u\n", __func__, t5req->rsvd); 912 t5req->opt2 = cpu_to_be32(opt2); 913 } 914 } else { 915 switch (CHELSIO_CHIP_VERSION(adapter_type)) { 916 case CHELSIO_T4: 917 req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen); 918 INIT_TP_WR(req6, 0); 919 break; 920 case CHELSIO_T5: 921 t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb, 922 wrlen); 923 INIT_TP_WR(t5req6, 0); 924 req6 = (struct cpl_act_open_req6 *)t5req6; 925 break; 926 case CHELSIO_T6: 927 t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb, 928 wrlen); 929 INIT_TP_WR(t6req6, 0); 930 req6 = (struct cpl_act_open_req6 *)t6req6; 931 t5req6 = (struct cpl_t5_act_open_req6 *)t6req6; 932 break; 933 default: 934 pr_err("T%d Chip is not supported\n", 935 CHELSIO_CHIP_VERSION(adapter_type)); 936 ret = -EINVAL; 937 goto clip_release; 938 } 939 940 OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, 941 ((ep->rss_qid<<14)|ep->atid))); 942 req6->local_port = la6->sin6_port; 943 req6->peer_port = ra6->sin6_port; 944 req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr)); 945 req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8)); 946 req6->peer_ip_hi = 
*((__be64 *)(ra6->sin6_addr.s6_addr)); 947 req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8)); 948 req6->opt0 = cpu_to_be64(opt0); 949 950 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 951 req6->params = cpu_to_be32(cxgb4_select_ntuple( 952 ep->com.dev->rdev.lldi.ports[0], 953 ep->l2t)); 954 req6->opt2 = cpu_to_be32(opt2); 955 } else { 956 t5req6->params = cpu_to_be64(FILTER_TUPLE_V( 957 cxgb4_select_ntuple( 958 ep->com.dev->rdev.lldi.ports[0], 959 ep->l2t))); 960 t5req6->rsvd = cpu_to_be32(isn); 961 PDBG("%s snd_isn %u\n", __func__, t5req6->rsvd); 962 t5req6->opt2 = cpu_to_be32(opt2); 963 } 964 } 965 966 set_bit(ACT_OPEN_REQ, &ep->com.history); 967 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 968 clip_release: 969 if (ret && ep->com.remote_addr.ss_family == AF_INET6) 970 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 971 (const u32 *)&la6->sin6_addr.s6_addr, 1); 972 return ret; 973 } 974 975 static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb, 976 u8 mpa_rev_to_use) 977 { 978 int mpalen, wrlen, ret; 979 struct fw_ofld_tx_data_wr *req; 980 struct mpa_message *mpa; 981 struct mpa_v2_conn_params mpa_v2_params; 982 983 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 984 985 BUG_ON(skb_cloned(skb)); 986 987 mpalen = sizeof(*mpa) + ep->plen; 988 if (mpa_rev_to_use == 2) 989 mpalen += sizeof(struct mpa_v2_conn_params); 990 wrlen = roundup(mpalen + sizeof *req, 16); 991 skb = get_skb(skb, wrlen, GFP_KERNEL); 992 if (!skb) { 993 connect_reply_upcall(ep, -ENOMEM); 994 return -ENOMEM; 995 } 996 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 997 998 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 999 memset(req, 0, wrlen); 1000 req->op_to_immdlen = cpu_to_be32( 1001 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 1002 FW_WR_COMPL_F | 1003 FW_WR_IMMDLEN_V(mpalen)); 1004 req->flowid_len16 = cpu_to_be32( 1005 FW_WR_FLOWID_V(ep->hwtid) | 1006 FW_WR_LEN16_V(wrlen >> 4)); 1007 req->plen = cpu_to_be32(mpalen); 1008 req->tunnel_to_proxy = cpu_to_be32( 1009 FW_OFLD_TX_DATA_WR_FLUSH_F | 1010 FW_OFLD_TX_DATA_WR_SHOVE_F); 1011 1012 mpa = (struct mpa_message *)(req + 1); 1013 memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)); 1014 1015 mpa->flags = 0; 1016 if (crc_enabled) 1017 mpa->flags |= MPA_CRC; 1018 if (markers_enabled) { 1019 mpa->flags |= MPA_MARKERS; 1020 ep->mpa_attr.recv_marker_enabled = 1; 1021 } else { 1022 ep->mpa_attr.recv_marker_enabled = 0; 1023 } 1024 if (mpa_rev_to_use == 2) 1025 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1026 1027 mpa->private_data_size = htons(ep->plen); 1028 mpa->revision = mpa_rev_to_use; 1029 if (mpa_rev_to_use == 1) { 1030 ep->tried_with_mpa_v1 = 1; 1031 ep->retry_with_mpa_v1 = 0; 1032 } 1033 1034 if (mpa_rev_to_use == 2) { 1035 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1036 sizeof (struct mpa_v2_conn_params)); 1037 PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, 1038 ep->ord); 1039 mpa_v2_params.ird = htons((u16)ep->ird); 1040 mpa_v2_params.ord = htons((u16)ep->ord); 1041 1042 if (peer2peer) { 1043 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1044 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 1045 mpa_v2_params.ord |= 1046 htons(MPA_V2_RDMA_WRITE_RTR); 1047 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 1048 mpa_v2_params.ord |= 1049 htons(MPA_V2_RDMA_READ_RTR); 1050 } 1051 memcpy(mpa->private_data, &mpa_v2_params, 1052 sizeof(struct mpa_v2_conn_params)); 1053 1054 if (ep->plen) 1055 memcpy(mpa->private_data + 1056 sizeof(struct mpa_v2_conn_params), 1057 ep->mpa_pkt + 
sizeof(*mpa), ep->plen); 1058 } else 1059 if (ep->plen) 1060 memcpy(mpa->private_data, 1061 ep->mpa_pkt + sizeof(*mpa), ep->plen); 1062 1063 /* 1064 * Reference the mpa skb. This ensures the data area 1065 * will remain in memory until the hw acks the tx. 1066 * Function fw4_ack() will deref it. 1067 */ 1068 skb_get(skb); 1069 t4_set_arp_err_handler(skb, NULL, arp_failure_discard); 1070 BUG_ON(ep->mpa_skb); 1071 ep->mpa_skb = skb; 1072 ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1073 if (ret) 1074 return ret; 1075 start_ep_timer(ep); 1076 __state_set(&ep->com, MPA_REQ_SENT); 1077 ep->mpa_attr.initiator = 1; 1078 ep->snd_seq += mpalen; 1079 return ret; 1080 } 1081 1082 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen) 1083 { 1084 int mpalen, wrlen; 1085 struct fw_ofld_tx_data_wr *req; 1086 struct mpa_message *mpa; 1087 struct sk_buff *skb; 1088 struct mpa_v2_conn_params mpa_v2_params; 1089 1090 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 1091 1092 mpalen = sizeof(*mpa) + plen; 1093 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 1094 mpalen += sizeof(struct mpa_v2_conn_params); 1095 wrlen = roundup(mpalen + sizeof *req, 16); 1096 1097 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1098 if (!skb) { 1099 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 1100 return -ENOMEM; 1101 } 1102 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1103 1104 req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen); 1105 memset(req, 0, wrlen); 1106 req->op_to_immdlen = cpu_to_be32( 1107 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 1108 FW_WR_COMPL_F | 1109 FW_WR_IMMDLEN_V(mpalen)); 1110 req->flowid_len16 = cpu_to_be32( 1111 FW_WR_FLOWID_V(ep->hwtid) | 1112 FW_WR_LEN16_V(wrlen >> 4)); 1113 req->plen = cpu_to_be32(mpalen); 1114 req->tunnel_to_proxy = cpu_to_be32( 1115 FW_OFLD_TX_DATA_WR_FLUSH_F | 1116 FW_OFLD_TX_DATA_WR_SHOVE_F); 1117 1118 mpa = (struct mpa_message *)(req + 1); 1119 memset(mpa, 0, sizeof(*mpa)); 1120 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1121 mpa->flags = MPA_REJECT; 1122 mpa->revision = ep->mpa_attr.version; 1123 mpa->private_data_size = htons(plen); 1124 1125 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1126 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1127 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1128 sizeof (struct mpa_v2_conn_params)); 1129 mpa_v2_params.ird = htons(((u16)ep->ird) | 1130 (peer2peer ? MPA_V2_PEER2PEER_MODEL : 1131 0)); 1132 mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ? 1133 (p2p_type == 1134 FW_RI_INIT_P2PTYPE_RDMA_WRITE ? 1135 MPA_V2_RDMA_WRITE_RTR : p2p_type == 1136 FW_RI_INIT_P2PTYPE_READ_REQ ? 1137 MPA_V2_RDMA_READ_RTR : 0) : 0)); 1138 memcpy(mpa->private_data, &mpa_v2_params, 1139 sizeof(struct mpa_v2_conn_params)); 1140 1141 if (ep->plen) 1142 memcpy(mpa->private_data + 1143 sizeof(struct mpa_v2_conn_params), pdata, plen); 1144 } else 1145 if (plen) 1146 memcpy(mpa->private_data, pdata, plen); 1147 1148 /* 1149 * Reference the mpa skb again. This ensures the data area 1150 * will remain in memory until the hw acks the tx. 1151 * Function fw4_ack() will deref it. 
1152 */ 1153 skb_get(skb); 1154 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1155 t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure); 1156 BUG_ON(ep->mpa_skb); 1157 ep->mpa_skb = skb; 1158 ep->snd_seq += mpalen; 1159 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1160 } 1161 1162 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen) 1163 { 1164 int mpalen, wrlen; 1165 struct fw_ofld_tx_data_wr *req; 1166 struct mpa_message *mpa; 1167 struct sk_buff *skb; 1168 struct mpa_v2_conn_params mpa_v2_params; 1169 1170 PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen); 1171 1172 mpalen = sizeof(*mpa) + plen; 1173 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) 1174 mpalen += sizeof(struct mpa_v2_conn_params); 1175 wrlen = roundup(mpalen + sizeof *req, 16); 1176 1177 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1178 if (!skb) { 1179 printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__); 1180 return -ENOMEM; 1181 } 1182 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx); 1183 1184 req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen); 1185 memset(req, 0, wrlen); 1186 req->op_to_immdlen = cpu_to_be32( 1187 FW_WR_OP_V(FW_OFLD_TX_DATA_WR) | 1188 FW_WR_COMPL_F | 1189 FW_WR_IMMDLEN_V(mpalen)); 1190 req->flowid_len16 = cpu_to_be32( 1191 FW_WR_FLOWID_V(ep->hwtid) | 1192 FW_WR_LEN16_V(wrlen >> 4)); 1193 req->plen = cpu_to_be32(mpalen); 1194 req->tunnel_to_proxy = cpu_to_be32( 1195 FW_OFLD_TX_DATA_WR_FLUSH_F | 1196 FW_OFLD_TX_DATA_WR_SHOVE_F); 1197 1198 mpa = (struct mpa_message *)(req + 1); 1199 memset(mpa, 0, sizeof(*mpa)); 1200 memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key)); 1201 mpa->flags = 0; 1202 if (ep->mpa_attr.crc_enabled) 1203 mpa->flags |= MPA_CRC; 1204 if (ep->mpa_attr.recv_marker_enabled) 1205 mpa->flags |= MPA_MARKERS; 1206 mpa->revision = ep->mpa_attr.version; 1207 mpa->private_data_size = htons(plen); 1208 1209 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 1210 mpa->flags |= MPA_ENHANCED_RDMA_CONN; 1211 mpa->private_data_size = htons(ntohs(mpa->private_data_size) + 1212 sizeof (struct mpa_v2_conn_params)); 1213 mpa_v2_params.ird = htons((u16)ep->ird); 1214 mpa_v2_params.ord = htons((u16)ep->ord); 1215 if (peer2peer && (ep->mpa_attr.p2p_type != 1216 FW_RI_INIT_P2PTYPE_DISABLED)) { 1217 mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL); 1218 1219 if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) 1220 mpa_v2_params.ord |= 1221 htons(MPA_V2_RDMA_WRITE_RTR); 1222 else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) 1223 mpa_v2_params.ord |= 1224 htons(MPA_V2_RDMA_READ_RTR); 1225 } 1226 1227 memcpy(mpa->private_data, &mpa_v2_params, 1228 sizeof(struct mpa_v2_conn_params)); 1229 1230 if (ep->plen) 1231 memcpy(mpa->private_data + 1232 sizeof(struct mpa_v2_conn_params), pdata, plen); 1233 } else 1234 if (plen) 1235 memcpy(mpa->private_data, pdata, plen); 1236 1237 /* 1238 * Reference the mpa skb. This ensures the data area 1239 * will remain in memory until the hw acks the tx. 1240 * Function fw4_ack() will deref it. 
1241 */ 1242 skb_get(skb); 1243 t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure); 1244 ep->mpa_skb = skb; 1245 __state_set(&ep->com, MPA_REP_SENT); 1246 ep->snd_seq += mpalen; 1247 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 1248 } 1249 1250 static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb) 1251 { 1252 struct c4iw_ep *ep; 1253 struct cpl_act_establish *req = cplhdr(skb); 1254 unsigned int tid = GET_TID(req); 1255 unsigned int atid = TID_TID_G(ntohl(req->tos_atid)); 1256 struct tid_info *t = dev->rdev.lldi.tids; 1257 int ret; 1258 1259 ep = lookup_atid(t, atid); 1260 1261 PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid, 1262 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn)); 1263 1264 mutex_lock(&ep->com.mutex); 1265 dst_confirm(ep->dst); 1266 1267 /* setup the hwtid for this connection */ 1268 ep->hwtid = tid; 1269 cxgb4_insert_tid(t, ep, tid); 1270 insert_ep_tid(ep); 1271 1272 ep->snd_seq = be32_to_cpu(req->snd_isn); 1273 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 1274 1275 set_emss(ep, ntohs(req->tcp_opt)); 1276 1277 /* dealloc the atid */ 1278 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 1279 cxgb4_free_atid(t, atid); 1280 set_bit(ACT_ESTAB, &ep->com.history); 1281 1282 /* start MPA negotiation */ 1283 ret = send_flowc(ep); 1284 if (ret) 1285 goto err; 1286 if (ep->retry_with_mpa_v1) 1287 ret = send_mpa_req(ep, skb, 1); 1288 else 1289 ret = send_mpa_req(ep, skb, mpa_rev); 1290 if (ret) 1291 goto err; 1292 mutex_unlock(&ep->com.mutex); 1293 return 0; 1294 err: 1295 mutex_unlock(&ep->com.mutex); 1296 connect_reply_upcall(ep, -ENOMEM); 1297 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 1298 return 0; 1299 } 1300 1301 static void close_complete_upcall(struct c4iw_ep *ep, int status) 1302 { 1303 struct iw_cm_event event; 1304 1305 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1306 memset(&event, 0, sizeof(event)); 1307 event.event = IW_CM_EVENT_CLOSE; 1308 event.status = status; 1309 if (ep->com.cm_id) { 1310 PDBG("close complete delivered ep %p cm_id %p tid %u\n", 1311 ep, ep->com.cm_id, ep->hwtid); 1312 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1313 deref_cm_id(&ep->com); 1314 set_bit(CLOSE_UPCALL, &ep->com.history); 1315 } 1316 } 1317 1318 static void peer_close_upcall(struct c4iw_ep *ep) 1319 { 1320 struct iw_cm_event event; 1321 1322 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1323 memset(&event, 0, sizeof(event)); 1324 event.event = IW_CM_EVENT_DISCONNECT; 1325 if (ep->com.cm_id) { 1326 PDBG("peer close delivered ep %p cm_id %p tid %u\n", 1327 ep, ep->com.cm_id, ep->hwtid); 1328 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1329 set_bit(DISCONN_UPCALL, &ep->com.history); 1330 } 1331 } 1332 1333 static void peer_abort_upcall(struct c4iw_ep *ep) 1334 { 1335 struct iw_cm_event event; 1336 1337 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1338 memset(&event, 0, sizeof(event)); 1339 event.event = IW_CM_EVENT_CLOSE; 1340 event.status = -ECONNRESET; 1341 if (ep->com.cm_id) { 1342 PDBG("abort delivered ep %p cm_id %p tid %u\n", ep, 1343 ep->com.cm_id, ep->hwtid); 1344 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1345 deref_cm_id(&ep->com); 1346 set_bit(ABORT_UPCALL, &ep->com.history); 1347 } 1348 } 1349 1350 static void connect_reply_upcall(struct c4iw_ep *ep, int status) 1351 { 1352 struct iw_cm_event event; 1353 1354 PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status); 1355 memset(&event, 0, sizeof(event)); 1356 event.event = IW_CM_EVENT_CONNECT_REPLY; 1357 
event.status = status; 1358 memcpy(&event.local_addr, &ep->com.local_addr, 1359 sizeof(ep->com.local_addr)); 1360 memcpy(&event.remote_addr, &ep->com.remote_addr, 1361 sizeof(ep->com.remote_addr)); 1362 1363 if ((status == 0) || (status == -ECONNREFUSED)) { 1364 if (!ep->tried_with_mpa_v1) { 1365 /* this means MPA_v2 is used */ 1366 event.ord = ep->ird; 1367 event.ird = ep->ord; 1368 event.private_data_len = ep->plen - 1369 sizeof(struct mpa_v2_conn_params); 1370 event.private_data = ep->mpa_pkt + 1371 sizeof(struct mpa_message) + 1372 sizeof(struct mpa_v2_conn_params); 1373 } else { 1374 /* this means MPA_v1 is used */ 1375 event.ord = cur_max_read_depth(ep->com.dev); 1376 event.ird = cur_max_read_depth(ep->com.dev); 1377 event.private_data_len = ep->plen; 1378 event.private_data = ep->mpa_pkt + 1379 sizeof(struct mpa_message); 1380 } 1381 } 1382 1383 PDBG("%s ep %p tid %u status %d\n", __func__, ep, 1384 ep->hwtid, status); 1385 set_bit(CONN_RPL_UPCALL, &ep->com.history); 1386 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1387 1388 if (status < 0) 1389 deref_cm_id(&ep->com); 1390 } 1391 1392 static int connect_request_upcall(struct c4iw_ep *ep) 1393 { 1394 struct iw_cm_event event; 1395 int ret; 1396 1397 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1398 memset(&event, 0, sizeof(event)); 1399 event.event = IW_CM_EVENT_CONNECT_REQUEST; 1400 memcpy(&event.local_addr, &ep->com.local_addr, 1401 sizeof(ep->com.local_addr)); 1402 memcpy(&event.remote_addr, &ep->com.remote_addr, 1403 sizeof(ep->com.remote_addr)); 1404 event.provider_data = ep; 1405 if (!ep->tried_with_mpa_v1) { 1406 /* this means MPA_v2 is used */ 1407 event.ord = ep->ord; 1408 event.ird = ep->ird; 1409 event.private_data_len = ep->plen - 1410 sizeof(struct mpa_v2_conn_params); 1411 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) + 1412 sizeof(struct mpa_v2_conn_params); 1413 } else { 1414 /* this means MPA_v1 is used. 
Send max supported */ 1415 event.ord = cur_max_read_depth(ep->com.dev); 1416 event.ird = cur_max_read_depth(ep->com.dev); 1417 event.private_data_len = ep->plen; 1418 event.private_data = ep->mpa_pkt + sizeof(struct mpa_message); 1419 } 1420 c4iw_get_ep(&ep->com); 1421 ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id, 1422 &event); 1423 if (ret) 1424 c4iw_put_ep(&ep->com); 1425 set_bit(CONNREQ_UPCALL, &ep->com.history); 1426 c4iw_put_ep(&ep->parent_ep->com); 1427 return ret; 1428 } 1429 1430 static void established_upcall(struct c4iw_ep *ep) 1431 { 1432 struct iw_cm_event event; 1433 1434 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1435 memset(&event, 0, sizeof(event)); 1436 event.event = IW_CM_EVENT_ESTABLISHED; 1437 event.ird = ep->ord; 1438 event.ord = ep->ird; 1439 if (ep->com.cm_id) { 1440 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1441 ep->com.cm_id->event_handler(ep->com.cm_id, &event); 1442 set_bit(ESTAB_UPCALL, &ep->com.history); 1443 } 1444 } 1445 1446 static int update_rx_credits(struct c4iw_ep *ep, u32 credits) 1447 { 1448 struct cpl_rx_data_ack *req; 1449 struct sk_buff *skb; 1450 int wrlen = roundup(sizeof *req, 16); 1451 1452 PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits); 1453 skb = get_skb(NULL, wrlen, GFP_KERNEL); 1454 if (!skb) { 1455 printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n"); 1456 return 0; 1457 } 1458 1459 /* 1460 * If we couldn't specify the entire rcv window at connection setup 1461 * due to the limit in the number of bits in the RCV_BUFSIZ field, 1462 * then add the overage in to the credits returned. 1463 */ 1464 if (ep->rcv_win > RCV_BUFSIZ_M * 1024) 1465 credits += ep->rcv_win - RCV_BUFSIZ_M * 1024; 1466 1467 req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen); 1468 memset(req, 0, wrlen); 1469 INIT_TP_WR(req, ep->hwtid); 1470 OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK, 1471 ep->hwtid)); 1472 req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK_F | 1473 RX_DACK_CHANGE_F | 1474 RX_DACK_MODE_V(dack_mode)); 1475 set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx); 1476 c4iw_ofld_send(&ep->com.dev->rdev, skb); 1477 return credits; 1478 } 1479 1480 #define RELAXED_IRD_NEGOTIATION 1 1481 1482 /* 1483 * process_mpa_reply - process streaming mode MPA reply 1484 * 1485 * Returns: 1486 * 1487 * 0 upon success indicating a connect request was delivered to the ULP 1488 * or the mpa request is incomplete but valid so far. 1489 * 1490 * 1 if a failure requires the caller to close the connection. 1491 * 1492 * 2 if a failure requires the caller to abort the connection. 1493 */ 1494 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb) 1495 { 1496 struct mpa_message *mpa; 1497 struct mpa_v2_conn_params *mpa_v2_params; 1498 u16 plen; 1499 u16 resp_ird, resp_ord; 1500 u8 rtr_mismatch = 0, insuff_ird = 0; 1501 struct c4iw_qp_attributes attrs; 1502 enum c4iw_qp_attr_mask mask; 1503 int err; 1504 int disconnect = 0; 1505 1506 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1507 1508 /* 1509 * If we get more than the supported amount of private data 1510 * then we must fail this connection. 1511 */ 1512 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) { 1513 err = -EINVAL; 1514 goto err_stop_timer; 1515 } 1516 1517 /* 1518 * copy the new data into our accumulation buffer. 
1519 */ 1520 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1521 skb->len); 1522 ep->mpa_pkt_len += skb->len; 1523 1524 /* 1525 * if we don't even have the mpa message, then bail. 1526 */ 1527 if (ep->mpa_pkt_len < sizeof(*mpa)) 1528 return 0; 1529 mpa = (struct mpa_message *) ep->mpa_pkt; 1530 1531 /* Validate MPA header. */ 1532 if (mpa->revision > mpa_rev) { 1533 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," 1534 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1535 err = -EPROTO; 1536 goto err_stop_timer; 1537 } 1538 if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) { 1539 err = -EPROTO; 1540 goto err_stop_timer; 1541 } 1542 1543 plen = ntohs(mpa->private_data_size); 1544 1545 /* 1546 * Fail if there's too much private data. 1547 */ 1548 if (plen > MPA_MAX_PRIVATE_DATA) { 1549 err = -EPROTO; 1550 goto err_stop_timer; 1551 } 1552 1553 /* 1554 * If plen does not account for pkt size 1555 */ 1556 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) { 1557 err = -EPROTO; 1558 goto err_stop_timer; 1559 } 1560 1561 ep->plen = (u8) plen; 1562 1563 /* 1564 * If we don't have all the pdata yet, then bail. 1565 * We'll continue process when more data arrives. 1566 */ 1567 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1568 return 0; 1569 1570 if (mpa->flags & MPA_REJECT) { 1571 err = -ECONNREFUSED; 1572 goto err_stop_timer; 1573 } 1574 1575 /* 1576 * Stop mpa timer. If it expired, then 1577 * we ignore the MPA reply. process_timeout() 1578 * will abort the connection. 1579 */ 1580 if (stop_ep_timer(ep)) 1581 return 0; 1582 1583 /* 1584 * If we get here we have accumulated the entire mpa 1585 * start reply message including private data. And 1586 * the MPA header is valid. 1587 */ 1588 __state_set(&ep->com, FPDU_MODE); 1589 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1590 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1591 ep->mpa_attr.version = mpa->revision; 1592 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1593 1594 if (mpa->revision == 2) { 1595 ep->mpa_attr.enhanced_rdma_conn = 1596 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0; 1597 if (ep->mpa_attr.enhanced_rdma_conn) { 1598 mpa_v2_params = (struct mpa_v2_conn_params *) 1599 (ep->mpa_pkt + sizeof(*mpa)); 1600 resp_ird = ntohs(mpa_v2_params->ird) & 1601 MPA_V2_IRD_ORD_MASK; 1602 resp_ord = ntohs(mpa_v2_params->ord) & 1603 MPA_V2_IRD_ORD_MASK; 1604 PDBG("%s responder ird %u ord %u ep ird %u ord %u\n", 1605 __func__, resp_ird, resp_ord, ep->ird, ep->ord); 1606 1607 /* 1608 * This is a double-check. 
Ideally, below checks are 1609 * not required since ird/ord stuff has been taken 1610 * care of in c4iw_accept_cr 1611 */ 1612 if (ep->ird < resp_ord) { 1613 if (RELAXED_IRD_NEGOTIATION && resp_ord <= 1614 ep->com.dev->rdev.lldi.max_ordird_qp) 1615 ep->ird = resp_ord; 1616 else 1617 insuff_ird = 1; 1618 } else if (ep->ird > resp_ord) { 1619 ep->ird = resp_ord; 1620 } 1621 if (ep->ord > resp_ird) { 1622 if (RELAXED_IRD_NEGOTIATION) 1623 ep->ord = resp_ird; 1624 else 1625 insuff_ird = 1; 1626 } 1627 if (insuff_ird) { 1628 err = -ENOMEM; 1629 ep->ird = resp_ord; 1630 ep->ord = resp_ird; 1631 } 1632 1633 if (ntohs(mpa_v2_params->ird) & 1634 MPA_V2_PEER2PEER_MODEL) { 1635 if (ntohs(mpa_v2_params->ord) & 1636 MPA_V2_RDMA_WRITE_RTR) 1637 ep->mpa_attr.p2p_type = 1638 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1639 else if (ntohs(mpa_v2_params->ord) & 1640 MPA_V2_RDMA_READ_RTR) 1641 ep->mpa_attr.p2p_type = 1642 FW_RI_INIT_P2PTYPE_READ_REQ; 1643 } 1644 } 1645 } else if (mpa->revision == 1) 1646 if (peer2peer) 1647 ep->mpa_attr.p2p_type = p2p_type; 1648 1649 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1650 "xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = " 1651 "%d\n", __func__, ep->mpa_attr.crc_enabled, 1652 ep->mpa_attr.recv_marker_enabled, 1653 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1654 ep->mpa_attr.p2p_type, p2p_type); 1655 1656 /* 1657 * If responder's RTR does not match with that of initiator, assign 1658 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not 1659 * generated when moving QP to RTS state. 1660 * A TERM message will be sent after QP has moved to RTS state 1661 */ 1662 if ((ep->mpa_attr.version == 2) && peer2peer && 1663 (ep->mpa_attr.p2p_type != p2p_type)) { 1664 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1665 rtr_mismatch = 1; 1666 } 1667 1668 attrs.mpa_attr = ep->mpa_attr; 1669 attrs.max_ird = ep->ird; 1670 attrs.max_ord = ep->ord; 1671 attrs.llp_stream_handle = ep; 1672 attrs.next_state = C4IW_QP_STATE_RTS; 1673 1674 mask = C4IW_QP_ATTR_NEXT_STATE | 1675 C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR | 1676 C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD; 1677 1678 /* bind QP and TID with INIT_WR */ 1679 err = c4iw_modify_qp(ep->com.qp->rhp, 1680 ep->com.qp, mask, &attrs, 1); 1681 if (err) 1682 goto err; 1683 1684 /* 1685 * If responder's RTR requirement did not match with what initiator 1686 * supports, generate TERM message 1687 */ 1688 if (rtr_mismatch) { 1689 printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__); 1690 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1691 attrs.ecode = MPA_NOMATCH_RTR; 1692 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1693 attrs.send_term = 1; 1694 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1695 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1696 err = -ENOMEM; 1697 disconnect = 1; 1698 goto out; 1699 } 1700 1701 /* 1702 * Generate TERM if initiator IRD is not sufficient for responder 1703 * provided ORD. Currently, we do the same behaviour even when 1704 * responder provided IRD is also not sufficient as regards to 1705 * initiator ORD. 
1706 */ 1707 if (insuff_ird) { 1708 printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n", 1709 __func__); 1710 attrs.layer_etype = LAYER_MPA | DDP_LLP; 1711 attrs.ecode = MPA_INSUFF_IRD; 1712 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1713 attrs.send_term = 1; 1714 err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1715 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1716 err = -ENOMEM; 1717 disconnect = 1; 1718 goto out; 1719 } 1720 goto out; 1721 err_stop_timer: 1722 stop_ep_timer(ep); 1723 err: 1724 disconnect = 2; 1725 out: 1726 connect_reply_upcall(ep, err); 1727 return disconnect; 1728 } 1729 1730 /* 1731 * process_mpa_request - process streaming mode MPA request 1732 * 1733 * Returns: 1734 * 1735 * 0 upon success indicating a connect request was delivered to the ULP 1736 * or the mpa request is incomplete but valid so far. 1737 * 1738 * 1 if a failure requires the caller to close the connection. 1739 * 1740 * 2 if a failure requires the caller to abort the connection. 1741 */ 1742 static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb) 1743 { 1744 struct mpa_message *mpa; 1745 struct mpa_v2_conn_params *mpa_v2_params; 1746 u16 plen; 1747 1748 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1749 1750 /* 1751 * If we get more than the supported amount of private data 1752 * then we must fail this connection. 1753 */ 1754 if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) 1755 goto err_stop_timer; 1756 1757 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1758 1759 /* 1760 * Copy the new data into our accumulation buffer. 1761 */ 1762 skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]), 1763 skb->len); 1764 ep->mpa_pkt_len += skb->len; 1765 1766 /* 1767 * If we don't even have the mpa message, then bail. 1768 * We'll continue process when more data arrives. 1769 */ 1770 if (ep->mpa_pkt_len < sizeof(*mpa)) 1771 return 0; 1772 1773 PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__); 1774 mpa = (struct mpa_message *) ep->mpa_pkt; 1775 1776 /* 1777 * Validate MPA Header. 1778 */ 1779 if (mpa->revision > mpa_rev) { 1780 printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d," 1781 " Received = %d\n", __func__, mpa_rev, mpa->revision); 1782 goto err_stop_timer; 1783 } 1784 1785 if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) 1786 goto err_stop_timer; 1787 1788 plen = ntohs(mpa->private_data_size); 1789 1790 /* 1791 * Fail if there's too much private data. 1792 */ 1793 if (plen > MPA_MAX_PRIVATE_DATA) 1794 goto err_stop_timer; 1795 1796 /* 1797 * If plen does not account for pkt size 1798 */ 1799 if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) 1800 goto err_stop_timer; 1801 ep->plen = (u8) plen; 1802 1803 /* 1804 * If we don't have all the pdata yet, then bail. 1805 */ 1806 if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) 1807 return 0; 1808 1809 /* 1810 * If we get here we have accumulated the entire mpa 1811 * start reply message including private data. 1812 */ 1813 ep->mpa_attr.initiator = 0; 1814 ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0; 1815 ep->mpa_attr.recv_marker_enabled = markers_enabled; 1816 ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0; 1817 ep->mpa_attr.version = mpa->revision; 1818 if (mpa->revision == 1) 1819 ep->tried_with_mpa_v1 = 1; 1820 ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED; 1821 1822 if (mpa->revision == 2) { 1823 ep->mpa_attr.enhanced_rdma_conn = 1824 mpa->flags & MPA_ENHANCED_RDMA_CONN ? 
1 : 0; 1825 if (ep->mpa_attr.enhanced_rdma_conn) { 1826 mpa_v2_params = (struct mpa_v2_conn_params *) 1827 (ep->mpa_pkt + sizeof(*mpa)); 1828 ep->ird = ntohs(mpa_v2_params->ird) & 1829 MPA_V2_IRD_ORD_MASK; 1830 ep->ord = ntohs(mpa_v2_params->ord) & 1831 MPA_V2_IRD_ORD_MASK; 1832 PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird, 1833 ep->ord); 1834 if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL) 1835 if (peer2peer) { 1836 if (ntohs(mpa_v2_params->ord) & 1837 MPA_V2_RDMA_WRITE_RTR) 1838 ep->mpa_attr.p2p_type = 1839 FW_RI_INIT_P2PTYPE_RDMA_WRITE; 1840 else if (ntohs(mpa_v2_params->ord) & 1841 MPA_V2_RDMA_READ_RTR) 1842 ep->mpa_attr.p2p_type = 1843 FW_RI_INIT_P2PTYPE_READ_REQ; 1844 } 1845 } 1846 } else if (mpa->revision == 1) 1847 if (peer2peer) 1848 ep->mpa_attr.p2p_type = p2p_type; 1849 1850 PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, " 1851 "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__, 1852 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled, 1853 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version, 1854 ep->mpa_attr.p2p_type); 1855 1856 __state_set(&ep->com, MPA_REQ_RCVD); 1857 1858 /* drive upcall */ 1859 mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING); 1860 if (ep->parent_ep->com.state != DEAD) { 1861 if (connect_request_upcall(ep)) 1862 goto err_unlock_parent; 1863 } else { 1864 goto err_unlock_parent; 1865 } 1866 mutex_unlock(&ep->parent_ep->com.mutex); 1867 return 0; 1868 1869 err_unlock_parent: 1870 mutex_unlock(&ep->parent_ep->com.mutex); 1871 goto err_out; 1872 err_stop_timer: 1873 (void)stop_ep_timer(ep); 1874 err_out: 1875 return 2; 1876 } 1877 1878 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb) 1879 { 1880 struct c4iw_ep *ep; 1881 struct cpl_rx_data *hdr = cplhdr(skb); 1882 unsigned int dlen = ntohs(hdr->len); 1883 unsigned int tid = GET_TID(hdr); 1884 __u8 status = hdr->status; 1885 int disconnect = 0; 1886 1887 ep = get_ep_from_tid(dev, tid); 1888 if (!ep) 1889 return 0; 1890 PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen); 1891 skb_pull(skb, sizeof(*hdr)); 1892 skb_trim(skb, dlen); 1893 mutex_lock(&ep->com.mutex); 1894 1895 /* update RX credits */ 1896 update_rx_credits(ep, dlen); 1897 1898 switch (ep->com.state) { 1899 case MPA_REQ_SENT: 1900 ep->rcv_seq += dlen; 1901 disconnect = process_mpa_reply(ep, skb); 1902 break; 1903 case MPA_REQ_WAIT: 1904 ep->rcv_seq += dlen; 1905 disconnect = process_mpa_request(ep, skb); 1906 break; 1907 case FPDU_MODE: { 1908 struct c4iw_qp_attributes attrs; 1909 BUG_ON(!ep->com.qp); 1910 if (status) 1911 pr_err("%s Unexpected streaming data." 
\ 1912 " qpid %u ep %p state %d tid %u status %d\n", 1913 __func__, ep->com.qp->wq.sq.qid, ep, 1914 ep->com.state, ep->hwtid, status); 1915 attrs.next_state = C4IW_QP_STATE_TERMINATE; 1916 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 1917 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 1918 disconnect = 1; 1919 break; 1920 } 1921 default: 1922 break; 1923 } 1924 mutex_unlock(&ep->com.mutex); 1925 if (disconnect) 1926 c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL); 1927 c4iw_put_ep(&ep->com); 1928 return 0; 1929 } 1930 1931 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 1932 { 1933 struct c4iw_ep *ep; 1934 struct cpl_abort_rpl_rss *rpl = cplhdr(skb); 1935 int release = 0; 1936 unsigned int tid = GET_TID(rpl); 1937 1938 ep = get_ep_from_tid(dev, tid); 1939 if (!ep) { 1940 printk(KERN_WARNING MOD "Abort rpl to freed endpoint\n"); 1941 return 0; 1942 } 1943 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 1944 mutex_lock(&ep->com.mutex); 1945 switch (ep->com.state) { 1946 case ABORTING: 1947 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 1948 __state_set(&ep->com, DEAD); 1949 release = 1; 1950 break; 1951 default: 1952 printk(KERN_ERR "%s ep %p state %d\n", 1953 __func__, ep, ep->com.state); 1954 break; 1955 } 1956 mutex_unlock(&ep->com.mutex); 1957 1958 if (release) 1959 release_ep_resources(ep); 1960 c4iw_put_ep(&ep->com); 1961 return 0; 1962 } 1963 1964 static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid) 1965 { 1966 struct sk_buff *skb; 1967 struct fw_ofld_connection_wr *req; 1968 unsigned int mtu_idx; 1969 int wscale; 1970 struct sockaddr_in *sin; 1971 int win; 1972 1973 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); 1974 req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req)); 1975 memset(req, 0, sizeof(*req)); 1976 req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR)); 1977 req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16))); 1978 req->le.filter = cpu_to_be32(cxgb4_select_ntuple( 1979 ep->com.dev->rdev.lldi.ports[0], 1980 ep->l2t)); 1981 sin = (struct sockaddr_in *)&ep->com.local_addr; 1982 req->le.lport = sin->sin_port; 1983 req->le.u.ipv4.lip = sin->sin_addr.s_addr; 1984 sin = (struct sockaddr_in *)&ep->com.remote_addr; 1985 req->le.pport = sin->sin_port; 1986 req->le.u.ipv4.pip = sin->sin_addr.s_addr; 1987 req->tcb.t_state_to_astid = 1988 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) | 1989 FW_OFLD_CONNECTION_WR_ASTID_V(atid)); 1990 req->tcb.cplrxdataack_cplpassacceptrpl = 1991 htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F); 1992 req->tcb.tx_max = (__force __be32) jiffies; 1993 req->tcb.rcv_adv = htons(1); 1994 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 1995 enable_tcp_timestamps, 1996 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); 1997 wscale = compute_wscale(rcv_win); 1998 1999 /* 2000 * Specify the largest window that will fit in opt0. The 2001 * remainder will be specified in the rx_data_ack. 2002 */ 2003 win = ep->rcv_win >> 10; 2004 if (win > RCV_BUFSIZ_M) 2005 win = RCV_BUFSIZ_M; 2006 2007 req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F | 2008 (nocong ? 
NO_CONG_F : 0) | 2009 KEEP_ALIVE_F | 2010 DELACK_F | 2011 WND_SCALE_V(wscale) | 2012 MSS_IDX_V(mtu_idx) | 2013 L2T_IDX_V(ep->l2t->idx) | 2014 TX_CHAN_V(ep->tx_chan) | 2015 SMAC_SEL_V(ep->smac_idx) | 2016 DSCP_V(ep->tos >> 2) | 2017 ULP_MODE_V(ULP_MODE_TCPDDP) | 2018 RCV_BUFSIZ_V(win)); 2019 req->tcb.opt2 = (__force __be32) (PACE_V(1) | 2020 TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) | 2021 RX_CHANNEL_V(0) | 2022 CCTRL_ECN_V(enable_ecn) | 2023 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid)); 2024 if (enable_tcp_timestamps) 2025 req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F; 2026 if (enable_tcp_sack) 2027 req->tcb.opt2 |= (__force __be32)SACK_EN_F; 2028 if (wscale && enable_tcp_window_scaling) 2029 req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F; 2030 req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0); 2031 req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2); 2032 set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx); 2033 set_bit(ACT_OFLD_CONN, &ep->com.history); 2034 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 2035 } 2036 2037 /* 2038 * Some of the error codes above implicitly indicate that there is no TID 2039 * allocated with the result of an ACT_OPEN. We use this predicate to make 2040 * that explicit. 2041 */ 2042 static inline int act_open_has_tid(int status) 2043 { 2044 return (status != CPL_ERR_TCAM_PARITY && 2045 status != CPL_ERR_TCAM_MISS && 2046 status != CPL_ERR_TCAM_FULL && 2047 status != CPL_ERR_CONN_EXIST_SYNRECV && 2048 status != CPL_ERR_CONN_EXIST); 2049 } 2050 2051 /* Returns whether a CPL status conveys negative advice. 2052 */ 2053 static int is_neg_adv(unsigned int status) 2054 { 2055 return status == CPL_ERR_RTX_NEG_ADVICE || 2056 status == CPL_ERR_PERSIST_NEG_ADVICE || 2057 status == CPL_ERR_KEEPALV_NEG_ADVICE; 2058 } 2059 2060 static char *neg_adv_str(unsigned int status) 2061 { 2062 switch (status) { 2063 case CPL_ERR_RTX_NEG_ADVICE: 2064 return "Retransmit timeout"; 2065 case CPL_ERR_PERSIST_NEG_ADVICE: 2066 return "Persist timeout"; 2067 case CPL_ERR_KEEPALV_NEG_ADVICE: 2068 return "Keepalive timeout"; 2069 default: 2070 return "Unknown"; 2071 } 2072 } 2073 2074 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi) 2075 { 2076 ep->snd_win = snd_win; 2077 ep->rcv_win = rcv_win; 2078 PDBG("%s snd_win %d rcv_win %d\n", __func__, ep->snd_win, ep->rcv_win); 2079 } 2080 2081 #define ACT_OPEN_RETRY_COUNT 2 2082 2083 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip, 2084 struct dst_entry *dst, struct c4iw_dev *cdev, 2085 bool clear_mpa_v1, enum chip_type adapter_type, u8 tos) 2086 { 2087 struct neighbour *n; 2088 int err, step; 2089 struct net_device *pdev; 2090 2091 n = dst_neigh_lookup(dst, peer_ip); 2092 if (!n) 2093 return -ENODEV; 2094 2095 rcu_read_lock(); 2096 err = -ENOMEM; 2097 if (n->dev->flags & IFF_LOOPBACK) { 2098 if (iptype == 4) 2099 pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip); 2100 else if (IS_ENABLED(CONFIG_IPV6)) 2101 for_each_netdev(&init_net, pdev) { 2102 if (ipv6_chk_addr(&init_net, 2103 (struct in6_addr *)peer_ip, 2104 pdev, 1)) 2105 break; 2106 } 2107 else 2108 pdev = NULL; 2109 2110 if (!pdev) { 2111 err = -ENODEV; 2112 goto out; 2113 } 2114 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 2115 n, pdev, rt_tos2priority(tos)); 2116 if (!ep->l2t) 2117 goto out; 2118 ep->mtu = pdev->mtu; 2119 ep->tx_chan = cxgb4_port_chan(pdev); 2120 ep->smac_idx = cxgb4_tp_smt_idx(adapter_type, 2121 cxgb4_port_viid(pdev)); 2122 step = cdev->rdev.lldi.ntxq / 2123 cdev->rdev.lldi.nchan; 2124 ep->txq_idx 
= cxgb4_port_idx(pdev) * step;
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
		dev_put(pdev);
	} else {
		pdev = get_real_dev(n->dev);
		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
					n, pdev, 0);
		if (!ep->l2t)
			goto out;
		ep->mtu = dst_mtu(dst);
		ep->tx_chan = cxgb4_port_chan(pdev);
		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
						cxgb4_port_viid(pdev));
		step = cdev->rdev.lldi.ntxq /
			cdev->rdev.lldi.nchan;
		ep->txq_idx = cxgb4_port_idx(pdev) * step;
		ep->ctrlq_idx = cxgb4_port_idx(pdev);
		step = cdev->rdev.lldi.nrxq /
			cdev->rdev.lldi.nchan;
		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
			cxgb4_port_idx(pdev) * step];
		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));

		if (clear_mpa_v1) {
			ep->retry_with_mpa_v1 = 0;
			ep->tried_with_mpa_v1 = 0;
		}
	}
	err = 0;
out:
	rcu_read_unlock();

	neigh_release(n);

	return err;
}

static int c4iw_reconnect(struct c4iw_ep *ep)
{
	int err = 0;
	int size = 0;
	struct sockaddr_in *laddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_local_addr;
	struct sockaddr_in *raddr = (struct sockaddr_in *)
				    &ep->com.cm_id->m_remote_addr;
	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_local_addr;
	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
				      &ep->com.cm_id->m_remote_addr;
	int iptype;
	__u8 *ra;

	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
	init_timer(&ep->timer);
	c4iw_init_wr_wait(&ep->com.wr_wait);

	/* When the MPA revision differs between the two nodes, the node
	 * running MPA_rev=2 retries the connection with MPA_rev 1 through
	 * c4iw_reconnect(), where the same EP is reused and assigned a new
	 * tid for the new connection attempt. Because the EP is reused,
	 * some of the skbs pre-allocated by the earlier c4iw_connect() have
	 * already been consumed, which would leave too few for this
	 * reconnect and trip the BUG_ON() on an empty ep_skb_list in
	 * peer_abort(). Replenish the skbs that have already been used.
	 */
	size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
	if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
		err = -ENOMEM;
		goto fail1;
	}

	/*
	 * Allocate an active TID to initiate a TCP connection.
2202 */ 2203 ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep); 2204 if (ep->atid == -1) { 2205 pr_err("%s - cannot alloc atid.\n", __func__); 2206 err = -ENOMEM; 2207 goto fail2; 2208 } 2209 insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid); 2210 2211 /* find a route */ 2212 if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) { 2213 ep->dst = find_route(ep->com.dev, laddr->sin_addr.s_addr, 2214 raddr->sin_addr.s_addr, laddr->sin_port, 2215 raddr->sin_port, ep->com.cm_id->tos); 2216 iptype = 4; 2217 ra = (__u8 *)&raddr->sin_addr; 2218 } else { 2219 ep->dst = find_route6(ep->com.dev, laddr6->sin6_addr.s6_addr, 2220 raddr6->sin6_addr.s6_addr, 2221 laddr6->sin6_port, raddr6->sin6_port, 0, 2222 raddr6->sin6_scope_id); 2223 iptype = 6; 2224 ra = (__u8 *)&raddr6->sin6_addr; 2225 } 2226 if (!ep->dst) { 2227 pr_err("%s - cannot find route.\n", __func__); 2228 err = -EHOSTUNREACH; 2229 goto fail3; 2230 } 2231 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false, 2232 ep->com.dev->rdev.lldi.adapter_type, 2233 ep->com.cm_id->tos); 2234 if (err) { 2235 pr_err("%s - cannot alloc l2e.\n", __func__); 2236 goto fail4; 2237 } 2238 2239 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 2240 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 2241 ep->l2t->idx); 2242 2243 state_set(&ep->com, CONNECTING); 2244 ep->tos = ep->com.cm_id->tos; 2245 2246 /* send connect request to rnic */ 2247 err = send_connect(ep); 2248 if (!err) 2249 goto out; 2250 2251 cxgb4_l2t_release(ep->l2t); 2252 fail4: 2253 dst_release(ep->dst); 2254 fail3: 2255 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 2256 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 2257 fail2: 2258 /* 2259 * remember to send notification to upper layer. 2260 * We are in here so the upper layer is not aware that this is 2261 * re-connect attempt and so, upper layer is still waiting for 2262 * response of 1st connect request. 2263 */ 2264 connect_reply_upcall(ep, -ECONNRESET); 2265 fail1: 2266 c4iw_put_ep(&ep->com); 2267 out: 2268 return err; 2269 } 2270 2271 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2272 { 2273 struct c4iw_ep *ep; 2274 struct cpl_act_open_rpl *rpl = cplhdr(skb); 2275 unsigned int atid = TID_TID_G(AOPEN_ATID_G( 2276 ntohl(rpl->atid_status))); 2277 struct tid_info *t = dev->rdev.lldi.tids; 2278 int status = AOPEN_STATUS_G(ntohl(rpl->atid_status)); 2279 struct sockaddr_in *la; 2280 struct sockaddr_in *ra; 2281 struct sockaddr_in6 *la6; 2282 struct sockaddr_in6 *ra6; 2283 int ret = 0; 2284 2285 ep = lookup_atid(t, atid); 2286 la = (struct sockaddr_in *)&ep->com.local_addr; 2287 ra = (struct sockaddr_in *)&ep->com.remote_addr; 2288 la6 = (struct sockaddr_in6 *)&ep->com.local_addr; 2289 ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr; 2290 2291 PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid, 2292 status, status2errno(status)); 2293 2294 if (is_neg_adv(status)) { 2295 PDBG("%s Connection problems for atid %u status %u (%s)\n", 2296 __func__, atid, status, neg_adv_str(status)); 2297 ep->stats.connect_neg_adv++; 2298 mutex_lock(&dev->rdev.stats.lock); 2299 dev->rdev.stats.neg_adv++; 2300 mutex_unlock(&dev->rdev.stats.lock); 2301 return 0; 2302 } 2303 2304 set_bit(ACT_OPEN_RPL, &ep->com.history); 2305 2306 /* 2307 * Log interesting failures. 
2308 */ 2309 switch (status) { 2310 case CPL_ERR_CONN_RESET: 2311 case CPL_ERR_CONN_TIMEDOUT: 2312 break; 2313 case CPL_ERR_TCAM_FULL: 2314 mutex_lock(&dev->rdev.stats.lock); 2315 dev->rdev.stats.tcam_full++; 2316 mutex_unlock(&dev->rdev.stats.lock); 2317 if (ep->com.local_addr.ss_family == AF_INET && 2318 dev->rdev.lldi.enable_fw_ofld_conn) { 2319 ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G( 2320 ntohl(rpl->atid_status)))); 2321 if (ret) 2322 goto fail; 2323 return 0; 2324 } 2325 break; 2326 case CPL_ERR_CONN_EXIST: 2327 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 2328 set_bit(ACT_RETRY_INUSE, &ep->com.history); 2329 if (ep->com.remote_addr.ss_family == AF_INET6) { 2330 struct sockaddr_in6 *sin6 = 2331 (struct sockaddr_in6 *) 2332 &ep->com.local_addr; 2333 cxgb4_clip_release( 2334 ep->com.dev->rdev.lldi.ports[0], 2335 (const u32 *) 2336 &sin6->sin6_addr.s6_addr, 1); 2337 } 2338 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, 2339 atid); 2340 cxgb4_free_atid(t, atid); 2341 dst_release(ep->dst); 2342 cxgb4_l2t_release(ep->l2t); 2343 c4iw_reconnect(ep); 2344 return 0; 2345 } 2346 break; 2347 default: 2348 if (ep->com.local_addr.ss_family == AF_INET) { 2349 pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n", 2350 atid, status, status2errno(status), 2351 &la->sin_addr.s_addr, ntohs(la->sin_port), 2352 &ra->sin_addr.s_addr, ntohs(ra->sin_port)); 2353 } else { 2354 pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n", 2355 atid, status, status2errno(status), 2356 la6->sin6_addr.s6_addr, ntohs(la6->sin6_port), 2357 ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port)); 2358 } 2359 break; 2360 } 2361 2362 fail: 2363 connect_reply_upcall(ep, status2errno(status)); 2364 state_set(&ep->com, DEAD); 2365 2366 if (ep->com.remote_addr.ss_family == AF_INET6) { 2367 struct sockaddr_in6 *sin6 = 2368 (struct sockaddr_in6 *)&ep->com.local_addr; 2369 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 2370 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 2371 } 2372 if (status && act_open_has_tid(status)) 2373 cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl)); 2374 2375 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid); 2376 cxgb4_free_atid(t, atid); 2377 dst_release(ep->dst); 2378 cxgb4_l2t_release(ep->l2t); 2379 c4iw_put_ep(&ep->com); 2380 2381 return 0; 2382 } 2383 2384 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2385 { 2386 struct cpl_pass_open_rpl *rpl = cplhdr(skb); 2387 unsigned int stid = GET_TID(rpl); 2388 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); 2389 2390 if (!ep) { 2391 PDBG("%s stid %d lookup failure!\n", __func__, stid); 2392 goto out; 2393 } 2394 PDBG("%s ep %p status %d error %d\n", __func__, ep, 2395 rpl->status, status2errno(rpl->status)); 2396 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2397 c4iw_put_ep(&ep->com); 2398 out: 2399 return 0; 2400 } 2401 2402 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb) 2403 { 2404 struct cpl_close_listsvr_rpl *rpl = cplhdr(skb); 2405 unsigned int stid = GET_TID(rpl); 2406 struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid); 2407 2408 PDBG("%s ep %p\n", __func__, ep); 2409 c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status)); 2410 c4iw_put_ep(&ep->com); 2411 return 0; 2412 } 2413 2414 static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, 2415 struct cpl_pass_accept_req *req) 2416 { 2417 struct cpl_pass_accept_rpl *rpl; 2418 unsigned int mtu_idx; 2419 u64 opt0; 2420 u32 opt2; 2421 int wscale; 
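	/*
	 * On T5 and later adapters the accept reply is the larger
	 * cpl_t5_pass_accept_rpl, which also carries the initial send
	 * sequence number (iss); rpl5 points at that layout when used.
	 */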
2422 struct cpl_t5_pass_accept_rpl *rpl5 = NULL; 2423 int win; 2424 enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type; 2425 2426 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2427 BUG_ON(skb_cloned(skb)); 2428 2429 skb_get(skb); 2430 rpl = cplhdr(skb); 2431 if (!is_t4(adapter_type)) { 2432 skb_trim(skb, roundup(sizeof(*rpl5), 16)); 2433 rpl5 = (void *)rpl; 2434 INIT_TP_WR(rpl5, ep->hwtid); 2435 } else { 2436 skb_trim(skb, sizeof(*rpl)); 2437 INIT_TP_WR(rpl, ep->hwtid); 2438 } 2439 OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL, 2440 ep->hwtid)); 2441 2442 best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx, 2443 enable_tcp_timestamps && req->tcpopt.tstamp, 2444 (AF_INET == ep->com.remote_addr.ss_family) ? 0 : 1); 2445 wscale = compute_wscale(rcv_win); 2446 2447 /* 2448 * Specify the largest window that will fit in opt0. The 2449 * remainder will be specified in the rx_data_ack. 2450 */ 2451 win = ep->rcv_win >> 10; 2452 if (win > RCV_BUFSIZ_M) 2453 win = RCV_BUFSIZ_M; 2454 opt0 = (nocong ? NO_CONG_F : 0) | 2455 KEEP_ALIVE_F | 2456 DELACK_F | 2457 WND_SCALE_V(wscale) | 2458 MSS_IDX_V(mtu_idx) | 2459 L2T_IDX_V(ep->l2t->idx) | 2460 TX_CHAN_V(ep->tx_chan) | 2461 SMAC_SEL_V(ep->smac_idx) | 2462 DSCP_V(ep->tos >> 2) | 2463 ULP_MODE_V(ULP_MODE_TCPDDP) | 2464 RCV_BUFSIZ_V(win); 2465 opt2 = RX_CHANNEL_V(0) | 2466 RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid); 2467 2468 if (enable_tcp_timestamps && req->tcpopt.tstamp) 2469 opt2 |= TSTAMPS_EN_F; 2470 if (enable_tcp_sack && req->tcpopt.sack) 2471 opt2 |= SACK_EN_F; 2472 if (wscale && enable_tcp_window_scaling) 2473 opt2 |= WND_SCALE_EN_F; 2474 if (enable_ecn) { 2475 const struct tcphdr *tcph; 2476 u32 hlen = ntohl(req->hdr_len); 2477 2478 if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5) 2479 tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) + 2480 IP_HDR_LEN_G(hlen); 2481 else 2482 tcph = (const void *)(req + 1) + 2483 T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen); 2484 if (tcph->ece && tcph->cwr) 2485 opt2 |= CCTRL_ECN_V(1); 2486 } 2487 if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) { 2488 u32 isn = (prandom_u32() & ~7UL) - 1; 2489 opt2 |= T5_OPT_2_VALID_F; 2490 opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); 2491 opt2 |= T5_ISS_F; 2492 rpl5 = (void *)rpl; 2493 memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); 2494 if (peer2peer) 2495 isn += 4; 2496 rpl5->iss = cpu_to_be32(isn); 2497 PDBG("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss)); 2498 } 2499 2500 rpl->opt0 = cpu_to_be64(opt0); 2501 rpl->opt2 = cpu_to_be32(opt2); 2502 set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx); 2503 t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure); 2504 2505 return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t); 2506 } 2507 2508 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb) 2509 { 2510 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); 2511 BUG_ON(skb_cloned(skb)); 2512 skb_trim(skb, sizeof(struct cpl_tid_release)); 2513 release_tid(&dev->rdev, hwtid, skb); 2514 return; 2515 } 2516 2517 static void get_4tuple(struct cpl_pass_accept_req *req, enum chip_type type, 2518 int *iptype, __u8 *local_ip, __u8 *peer_ip, 2519 __be16 *local_port, __be16 *peer_port) 2520 { 2521 int eth_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ? 2522 ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)) : 2523 T6_ETH_HDR_LEN_G(be32_to_cpu(req->hdr_len)); 2524 int ip_len = (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) ? 
2525 IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)) : 2526 T6_IP_HDR_LEN_G(be32_to_cpu(req->hdr_len)); 2527 struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len); 2528 struct ipv6hdr *ip6 = (struct ipv6hdr *)((u8 *)(req + 1) + eth_len); 2529 struct tcphdr *tcp = (struct tcphdr *) 2530 ((u8 *)(req + 1) + eth_len + ip_len); 2531 2532 if (ip->version == 4) { 2533 PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__, 2534 ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source), 2535 ntohs(tcp->dest)); 2536 *iptype = 4; 2537 memcpy(peer_ip, &ip->saddr, 4); 2538 memcpy(local_ip, &ip->daddr, 4); 2539 } else { 2540 PDBG("%s saddr %pI6 daddr %pI6 sport %u dport %u\n", __func__, 2541 ip6->saddr.s6_addr, ip6->daddr.s6_addr, ntohs(tcp->source), 2542 ntohs(tcp->dest)); 2543 *iptype = 6; 2544 memcpy(peer_ip, ip6->saddr.s6_addr, 16); 2545 memcpy(local_ip, ip6->daddr.s6_addr, 16); 2546 } 2547 *peer_port = tcp->source; 2548 *local_port = tcp->dest; 2549 2550 return; 2551 } 2552 2553 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb) 2554 { 2555 struct c4iw_ep *child_ep = NULL, *parent_ep; 2556 struct cpl_pass_accept_req *req = cplhdr(skb); 2557 unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid)); 2558 struct tid_info *t = dev->rdev.lldi.tids; 2559 unsigned int hwtid = GET_TID(req); 2560 struct dst_entry *dst; 2561 __u8 local_ip[16], peer_ip[16]; 2562 __be16 local_port, peer_port; 2563 struct sockaddr_in6 *sin6; 2564 int err; 2565 u16 peer_mss = ntohs(req->tcpopt.mss); 2566 int iptype; 2567 unsigned short hdrs; 2568 u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid)); 2569 2570 parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid); 2571 if (!parent_ep) { 2572 PDBG("%s connect request on invalid stid %d\n", __func__, stid); 2573 goto reject; 2574 } 2575 2576 if (state_read(&parent_ep->com) != LISTEN) { 2577 PDBG("%s - listening ep not in LISTEN\n", __func__); 2578 goto reject; 2579 } 2580 2581 get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type, &iptype, 2582 local_ip, peer_ip, &local_port, &peer_port); 2583 2584 /* Find output route */ 2585 if (iptype == 4) { 2586 PDBG("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n" 2587 , __func__, parent_ep, hwtid, 2588 local_ip, peer_ip, ntohs(local_port), 2589 ntohs(peer_port), peer_mss); 2590 dst = find_route(dev, *(__be32 *)local_ip, *(__be32 *)peer_ip, 2591 local_port, peer_port, 2592 tos); 2593 } else { 2594 PDBG("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n" 2595 , __func__, parent_ep, hwtid, 2596 local_ip, peer_ip, ntohs(local_port), 2597 ntohs(peer_port), peer_mss); 2598 dst = find_route6(dev, local_ip, peer_ip, local_port, peer_port, 2599 PASS_OPEN_TOS_G(ntohl(req->tos_stid)), 2600 ((struct sockaddr_in6 *) 2601 &parent_ep->com.local_addr)->sin6_scope_id); 2602 } 2603 if (!dst) { 2604 printk(KERN_ERR MOD "%s - failed to find dst entry!\n", 2605 __func__); 2606 goto reject; 2607 } 2608 2609 child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL); 2610 if (!child_ep) { 2611 printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n", 2612 __func__); 2613 dst_release(dst); 2614 goto reject; 2615 } 2616 2617 err = import_ep(child_ep, iptype, peer_ip, dst, dev, false, 2618 parent_ep->com.dev->rdev.lldi.adapter_type, tos); 2619 if (err) { 2620 printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n", 2621 __func__); 2622 dst_release(dst); 2623 kfree(child_ep); 2624 goto reject; 2625 } 2626 2627 hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) + 2628 
((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0); 2629 if (peer_mss && child_ep->mtu > (peer_mss + hdrs)) 2630 child_ep->mtu = peer_mss + hdrs; 2631 2632 skb_queue_head_init(&child_ep->com.ep_skb_list); 2633 if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF)) 2634 goto fail; 2635 2636 state_set(&child_ep->com, CONNECTING); 2637 child_ep->com.dev = dev; 2638 child_ep->com.cm_id = NULL; 2639 2640 if (iptype == 4) { 2641 struct sockaddr_in *sin = (struct sockaddr_in *) 2642 &child_ep->com.local_addr; 2643 2644 sin->sin_family = PF_INET; 2645 sin->sin_port = local_port; 2646 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2647 2648 sin = (struct sockaddr_in *)&child_ep->com.local_addr; 2649 sin->sin_family = PF_INET; 2650 sin->sin_port = ((struct sockaddr_in *) 2651 &parent_ep->com.local_addr)->sin_port; 2652 sin->sin_addr.s_addr = *(__be32 *)local_ip; 2653 2654 sin = (struct sockaddr_in *)&child_ep->com.remote_addr; 2655 sin->sin_family = PF_INET; 2656 sin->sin_port = peer_port; 2657 sin->sin_addr.s_addr = *(__be32 *)peer_ip; 2658 } else { 2659 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2660 sin6->sin6_family = PF_INET6; 2661 sin6->sin6_port = local_port; 2662 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2663 2664 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2665 sin6->sin6_family = PF_INET6; 2666 sin6->sin6_port = ((struct sockaddr_in6 *) 2667 &parent_ep->com.local_addr)->sin6_port; 2668 memcpy(sin6->sin6_addr.s6_addr, local_ip, 16); 2669 2670 sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr; 2671 sin6->sin6_family = PF_INET6; 2672 sin6->sin6_port = peer_port; 2673 memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16); 2674 } 2675 2676 c4iw_get_ep(&parent_ep->com); 2677 child_ep->parent_ep = parent_ep; 2678 child_ep->tos = tos; 2679 child_ep->dst = dst; 2680 child_ep->hwtid = hwtid; 2681 2682 PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__, 2683 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid); 2684 2685 init_timer(&child_ep->timer); 2686 cxgb4_insert_tid(t, child_ep, hwtid); 2687 insert_ep_tid(child_ep); 2688 if (accept_cr(child_ep, skb, req)) { 2689 c4iw_put_ep(&parent_ep->com); 2690 release_ep_resources(child_ep); 2691 } else { 2692 set_bit(PASS_ACCEPT_REQ, &child_ep->com.history); 2693 } 2694 if (iptype == 6) { 2695 sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr; 2696 cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0], 2697 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 2698 } 2699 goto out; 2700 fail: 2701 c4iw_put_ep(&child_ep->com); 2702 reject: 2703 reject_cr(dev, hwtid, skb); 2704 if (parent_ep) 2705 c4iw_put_ep(&parent_ep->com); 2706 out: 2707 return 0; 2708 } 2709 2710 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb) 2711 { 2712 struct c4iw_ep *ep; 2713 struct cpl_pass_establish *req = cplhdr(skb); 2714 unsigned int tid = GET_TID(req); 2715 int ret; 2716 2717 ep = get_ep_from_tid(dev, tid); 2718 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2719 ep->snd_seq = be32_to_cpu(req->snd_isn); 2720 ep->rcv_seq = be32_to_cpu(req->rcv_isn); 2721 2722 PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid, 2723 ntohs(req->tcp_opt)); 2724 2725 set_emss(ep, ntohs(req->tcp_opt)); 2726 2727 dst_confirm(ep->dst); 2728 mutex_lock(&ep->com.mutex); 2729 ep->com.state = MPA_REQ_WAIT; 2730 start_ep_timer(ep); 2731 set_bit(PASS_ESTAB, &ep->com.history); 2732 ret = send_flowc(ep); 2733 mutex_unlock(&ep->com.mutex); 2734 if (ret) 2735 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 2736 c4iw_put_ep(&ep->com); 
2737 2738 return 0; 2739 } 2740 2741 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb) 2742 { 2743 struct cpl_peer_close *hdr = cplhdr(skb); 2744 struct c4iw_ep *ep; 2745 struct c4iw_qp_attributes attrs; 2746 int disconnect = 1; 2747 int release = 0; 2748 unsigned int tid = GET_TID(hdr); 2749 int ret; 2750 2751 ep = get_ep_from_tid(dev, tid); 2752 if (!ep) 2753 return 0; 2754 2755 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 2756 dst_confirm(ep->dst); 2757 2758 set_bit(PEER_CLOSE, &ep->com.history); 2759 mutex_lock(&ep->com.mutex); 2760 switch (ep->com.state) { 2761 case MPA_REQ_WAIT: 2762 __state_set(&ep->com, CLOSING); 2763 break; 2764 case MPA_REQ_SENT: 2765 __state_set(&ep->com, CLOSING); 2766 connect_reply_upcall(ep, -ECONNRESET); 2767 break; 2768 case MPA_REQ_RCVD: 2769 2770 /* 2771 * We're gonna mark this puppy DEAD, but keep 2772 * the reference on it until the ULP accepts or 2773 * rejects the CR. Also wake up anyone waiting 2774 * in rdma connection migration (see c4iw_accept_cr()). 2775 */ 2776 __state_set(&ep->com, CLOSING); 2777 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2778 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2779 break; 2780 case MPA_REP_SENT: 2781 __state_set(&ep->com, CLOSING); 2782 PDBG("waking up ep %p tid %u\n", ep, ep->hwtid); 2783 c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET); 2784 break; 2785 case FPDU_MODE: 2786 start_ep_timer(ep); 2787 __state_set(&ep->com, CLOSING); 2788 attrs.next_state = C4IW_QP_STATE_CLOSING; 2789 ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2790 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2791 if (ret != -ECONNRESET) { 2792 peer_close_upcall(ep); 2793 disconnect = 1; 2794 } 2795 break; 2796 case ABORTING: 2797 disconnect = 0; 2798 break; 2799 case CLOSING: 2800 __state_set(&ep->com, MORIBUND); 2801 disconnect = 0; 2802 break; 2803 case MORIBUND: 2804 (void)stop_ep_timer(ep); 2805 if (ep->com.cm_id && ep->com.qp) { 2806 attrs.next_state = C4IW_QP_STATE_IDLE; 2807 c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, 2808 C4IW_QP_ATTR_NEXT_STATE, &attrs, 1); 2809 } 2810 close_complete_upcall(ep, 0); 2811 __state_set(&ep->com, DEAD); 2812 release = 1; 2813 disconnect = 0; 2814 break; 2815 case DEAD: 2816 disconnect = 0; 2817 break; 2818 default: 2819 BUG_ON(1); 2820 } 2821 mutex_unlock(&ep->com.mutex); 2822 if (disconnect) 2823 c4iw_ep_disconnect(ep, 0, GFP_KERNEL); 2824 if (release) 2825 release_ep_resources(ep); 2826 c4iw_put_ep(&ep->com); 2827 return 0; 2828 } 2829 2830 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb) 2831 { 2832 struct cpl_abort_req_rss *req = cplhdr(skb); 2833 struct c4iw_ep *ep; 2834 struct cpl_abort_rpl *rpl; 2835 struct sk_buff *rpl_skb; 2836 struct c4iw_qp_attributes attrs; 2837 int ret; 2838 int release = 0; 2839 unsigned int tid = GET_TID(req); 2840 2841 ep = get_ep_from_tid(dev, tid); 2842 if (!ep) 2843 return 0; 2844 2845 if (is_neg_adv(req->status)) { 2846 PDBG("%s Negative advice on abort- tid %u status %d (%s)\n", 2847 __func__, ep->hwtid, req->status, 2848 neg_adv_str(req->status)); 2849 ep->stats.abort_neg_adv++; 2850 mutex_lock(&dev->rdev.stats.lock); 2851 dev->rdev.stats.neg_adv++; 2852 mutex_unlock(&dev->rdev.stats.lock); 2853 goto deref_ep; 2854 } 2855 PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid, 2856 ep->com.state); 2857 set_bit(PEER_ABORT, &ep->com.history); 2858 2859 /* 2860 * Wake up any threads in rdma_init() or rdma_fini(). 
	 * However, this is not needed if the com state is still
	 * MPA_REQ_SENT.
	 */
	if (ep->com.state != MPA_REQ_SENT)
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {
	case CONNECTING:
		c4iw_put_ep(&ep->parent_ep->com);
		break;
	case MPA_REQ_WAIT:
		(void)stop_ep_timer(ep);
		break;
	case MPA_REQ_SENT:
		(void)stop_ep_timer(ep);
		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
			connect_reply_upcall(ep, -ECONNRESET);
		else {
			/*
			 * We deliberately don't notify the upper layers,
			 * so that we can retry with MPA v1 without them
			 * ever knowing it.
			 *
			 * Do some housekeeping so the connection can be
			 * re-initiated.
			 */
			PDBG("%s: mpa_rev=%d. Retrying with mpav1\n", __func__,
			     mpa_rev);
			ep->retry_with_mpa_v1 = 1;
		}
		break;
	case MPA_REP_SENT:
		break;
	case MPA_REQ_RCVD:
		break;
	case MORIBUND:
	case CLOSING:
		stop_ep_timer(ep);
		/*FALLTHROUGH*/
	case FPDU_MODE:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			ret = c4iw_modify_qp(ep->com.qp->rhp,
				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				     &attrs, 1);
			if (ret)
				printk(KERN_ERR MOD
				       "%s - qp <- error failed!\n",
				       __func__);
		}
		peer_abort_upcall(ep);
		break;
	case ABORTING:
		break;
	case DEAD:
		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
		mutex_unlock(&ep->com.mutex);
		goto deref_ep;
	default:
		BUG_ON(1);
		break;
	}
	dst_confirm(ep->dst);
	if (ep->com.state != ABORTING) {
		__state_set(&ep->com, DEAD);
		/* we don't release if we want to retry with mpa_v1 */
		if (!ep->retry_with_mpa_v1)
			release = 1;
	}
	mutex_unlock(&ep->com.mutex);

	rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
	if (WARN_ON(!rpl_skb)) {
		release = 1;
		goto out;
	}
	/* Queue the abort reply on the ep's TX queue, not the incoming skb. */
	set_wr_txq(rpl_skb, CPL_PRIORITY_DATA, ep->txq_idx);
	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
	INIT_TP_WR(rpl, ep->hwtid);
	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
	rpl->cmd = CPL_ABORT_NO_RST;
	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
out:
	if (release)
		release_ep_resources(ep);
	else if (ep->retry_with_mpa_v1) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;
			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		c4iw_reconnect(ep);
	}

deref_ep:
	c4iw_put_ep(&ep->com);
	/* Dereferencing ep, referenced in peer_abort_intr() */
	c4iw_put_ep(&ep->com);
	return 0;
}

static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;
	struct cpl_close_con_rpl *rpl = cplhdr(skb);
	int release = 0;
	unsigned int tid = GET_TID(rpl);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;

	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/* The cm_id may be null if we failed to connect */
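	/*
	 * Take the ep mutex across the state transition below so it cannot
	 * race with the disconnect/abort paths.
	 */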
	mutex_lock(&ep->com.mutex);
	set_bit(CLOSE_CON_RPL, &ep->com.history);
	switch (ep->com.state) {
	case CLOSING:
		__state_set(&ep->com, MORIBUND);
		break;
	case MORIBUND:
		(void)stop_ep_timer(ep);
		if ((ep->com.cm_id) && (ep->com.qp)) {
			attrs.next_state = C4IW_QP_STATE_IDLE;
			c4iw_modify_qp(ep->com.qp->rhp,
					     ep->com.qp,
					     C4IW_QP_ATTR_NEXT_STATE,
					     &attrs, 1);
		}
		close_complete_upcall(ep, 0);
		__state_set(&ep->com, DEAD);
		release = 1;
		break;
	case ABORTING:
	case DEAD:
		break;
	default:
		BUG_ON(1);
		break;
	}
	mutex_unlock(&ep->com.mutex);
	if (release)
		release_ep_resources(ep);
	c4iw_put_ep(&ep->com);
	return 0;
}

static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_rdma_terminate *rpl = cplhdr(skb);
	unsigned int tid = GET_TID(rpl);
	struct c4iw_ep *ep;
	struct c4iw_qp_attributes attrs;

	ep = get_ep_from_tid(dev, tid);

	if (ep && ep->com.qp) {
		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
		       ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
	if (ep)
		c4iw_put_ep(&ep->com);

	return 0;
}

/*
 * Upcall from the adapter indicating data has been transmitted.
 * For us it's just the single MPA request or reply. We can now free
 * the skb holding the mpa message.
 */
static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_fw4_ack *hdr = cplhdr(skb);
	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);

	ep = get_ep_from_tid(dev, tid);
	if (!ep)
		return 0;
	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
	if (credits == 0) {
		PDBG("%s 0 credit ack ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, state_read(&ep->com));
		goto out;
	}

	dst_confirm(ep->dst);
	if (ep->mpa_skb) {
		PDBG("%s last streaming msg ack ep %p tid %u state %u "
		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
		     state_read(&ep->com), ep->mpa_attr.initiator ?
1 : 0); 3071 mutex_lock(&ep->com.mutex); 3072 kfree_skb(ep->mpa_skb); 3073 ep->mpa_skb = NULL; 3074 if (test_bit(STOP_MPA_TIMER, &ep->com.flags)) 3075 stop_ep_timer(ep); 3076 mutex_unlock(&ep->com.mutex); 3077 } 3078 out: 3079 c4iw_put_ep(&ep->com); 3080 return 0; 3081 } 3082 3083 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len) 3084 { 3085 int abort; 3086 struct c4iw_ep *ep = to_ep(cm_id); 3087 3088 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 3089 3090 mutex_lock(&ep->com.mutex); 3091 if (ep->com.state != MPA_REQ_RCVD) { 3092 mutex_unlock(&ep->com.mutex); 3093 c4iw_put_ep(&ep->com); 3094 return -ECONNRESET; 3095 } 3096 set_bit(ULP_REJECT, &ep->com.history); 3097 if (mpa_rev == 0) 3098 abort = 1; 3099 else 3100 abort = send_mpa_reject(ep, pdata, pdata_len); 3101 mutex_unlock(&ep->com.mutex); 3102 3103 stop_ep_timer(ep); 3104 c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL); 3105 c4iw_put_ep(&ep->com); 3106 return 0; 3107 } 3108 3109 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 3110 { 3111 int err; 3112 struct c4iw_qp_attributes attrs; 3113 enum c4iw_qp_attr_mask mask; 3114 struct c4iw_ep *ep = to_ep(cm_id); 3115 struct c4iw_dev *h = to_c4iw_dev(cm_id->device); 3116 struct c4iw_qp *qp = get_qhp(h, conn_param->qpn); 3117 int abort = 0; 3118 3119 PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid); 3120 3121 mutex_lock(&ep->com.mutex); 3122 if (ep->com.state != MPA_REQ_RCVD) { 3123 err = -ECONNRESET; 3124 goto err_out; 3125 } 3126 3127 BUG_ON(!qp); 3128 3129 set_bit(ULP_ACCEPT, &ep->com.history); 3130 if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) || 3131 (conn_param->ird > cur_max_read_depth(ep->com.dev))) { 3132 err = -EINVAL; 3133 goto err_abort; 3134 } 3135 3136 if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) { 3137 if (conn_param->ord > ep->ird) { 3138 if (RELAXED_IRD_NEGOTIATION) { 3139 ep->ord = ep->ird; 3140 } else { 3141 ep->ird = conn_param->ird; 3142 ep->ord = conn_param->ord; 3143 send_mpa_reject(ep, conn_param->private_data, 3144 conn_param->private_data_len); 3145 err = -ENOMEM; 3146 goto err_abort; 3147 } 3148 } 3149 if (conn_param->ird < ep->ord) { 3150 if (RELAXED_IRD_NEGOTIATION && 3151 ep->ord <= h->rdev.lldi.max_ordird_qp) { 3152 conn_param->ird = ep->ord; 3153 } else { 3154 err = -ENOMEM; 3155 goto err_abort; 3156 } 3157 } 3158 } 3159 ep->ird = conn_param->ird; 3160 ep->ord = conn_param->ord; 3161 3162 if (ep->mpa_attr.version == 1) { 3163 if (peer2peer && ep->ird == 0) 3164 ep->ird = 1; 3165 } else { 3166 if (peer2peer && 3167 (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) && 3168 (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0) 3169 ep->ird = 1; 3170 } 3171 3172 PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord); 3173 3174 ep->com.cm_id = cm_id; 3175 ref_cm_id(&ep->com); 3176 ep->com.qp = qp; 3177 ref_qp(ep); 3178 3179 /* bind QP to EP and move to RTS */ 3180 attrs.mpa_attr = ep->mpa_attr; 3181 attrs.max_ird = ep->ird; 3182 attrs.max_ord = ep->ord; 3183 attrs.llp_stream_handle = ep; 3184 attrs.next_state = C4IW_QP_STATE_RTS; 3185 3186 /* bind QP and TID with INIT_WR */ 3187 mask = C4IW_QP_ATTR_NEXT_STATE | 3188 C4IW_QP_ATTR_LLP_STREAM_HANDLE | 3189 C4IW_QP_ATTR_MPA_ATTR | 3190 C4IW_QP_ATTR_MAX_IRD | 3191 C4IW_QP_ATTR_MAX_ORD; 3192 3193 err = c4iw_modify_qp(ep->com.qp->rhp, 3194 ep->com.qp, mask, &attrs, 1); 3195 if (err) 3196 goto err_deref_cm_id; 3197 3198 set_bit(STOP_MPA_TIMER, &ep->com.flags); 3199 err = send_mpa_reply(ep, 
conn_param->private_data, 3200 conn_param->private_data_len); 3201 if (err) 3202 goto err_deref_cm_id; 3203 3204 __state_set(&ep->com, FPDU_MODE); 3205 established_upcall(ep); 3206 mutex_unlock(&ep->com.mutex); 3207 c4iw_put_ep(&ep->com); 3208 return 0; 3209 err_deref_cm_id: 3210 deref_cm_id(&ep->com); 3211 err_abort: 3212 abort = 1; 3213 err_out: 3214 mutex_unlock(&ep->com.mutex); 3215 if (abort) 3216 c4iw_ep_disconnect(ep, 1, GFP_KERNEL); 3217 c4iw_put_ep(&ep->com); 3218 return err; 3219 } 3220 3221 static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 3222 { 3223 struct in_device *ind; 3224 int found = 0; 3225 struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr; 3226 struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr; 3227 3228 ind = in_dev_get(dev->rdev.lldi.ports[0]); 3229 if (!ind) 3230 return -EADDRNOTAVAIL; 3231 for_primary_ifa(ind) { 3232 laddr->sin_addr.s_addr = ifa->ifa_address; 3233 raddr->sin_addr.s_addr = ifa->ifa_address; 3234 found = 1; 3235 break; 3236 } 3237 endfor_ifa(ind); 3238 in_dev_put(ind); 3239 return found ? 0 : -EADDRNOTAVAIL; 3240 } 3241 3242 static int get_lladdr(struct net_device *dev, struct in6_addr *addr, 3243 unsigned char banned_flags) 3244 { 3245 struct inet6_dev *idev; 3246 int err = -EADDRNOTAVAIL; 3247 3248 rcu_read_lock(); 3249 idev = __in6_dev_get(dev); 3250 if (idev != NULL) { 3251 struct inet6_ifaddr *ifp; 3252 3253 read_lock_bh(&idev->lock); 3254 list_for_each_entry(ifp, &idev->addr_list, if_list) { 3255 if (ifp->scope == IFA_LINK && 3256 !(ifp->flags & banned_flags)) { 3257 memcpy(addr, &ifp->addr, 16); 3258 err = 0; 3259 break; 3260 } 3261 } 3262 read_unlock_bh(&idev->lock); 3263 } 3264 rcu_read_unlock(); 3265 return err; 3266 } 3267 3268 static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id) 3269 { 3270 struct in6_addr uninitialized_var(addr); 3271 struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr; 3272 struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr; 3273 3274 if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) { 3275 memcpy(la6->sin6_addr.s6_addr, &addr, 16); 3276 memcpy(ra6->sin6_addr.s6_addr, &addr, 16); 3277 return 0; 3278 } 3279 return -EADDRNOTAVAIL; 3280 } 3281 3282 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param) 3283 { 3284 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 3285 struct c4iw_ep *ep; 3286 int err = 0; 3287 struct sockaddr_in *laddr; 3288 struct sockaddr_in *raddr; 3289 struct sockaddr_in6 *laddr6; 3290 struct sockaddr_in6 *raddr6; 3291 __u8 *ra; 3292 int iptype; 3293 3294 if ((conn_param->ord > cur_max_read_depth(dev)) || 3295 (conn_param->ird > cur_max_read_depth(dev))) { 3296 err = -EINVAL; 3297 goto out; 3298 } 3299 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 3300 if (!ep) { 3301 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 3302 err = -ENOMEM; 3303 goto out; 3304 } 3305 3306 skb_queue_head_init(&ep->com.ep_skb_list); 3307 if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) { 3308 err = -ENOMEM; 3309 goto fail1; 3310 } 3311 3312 init_timer(&ep->timer); 3313 ep->plen = conn_param->private_data_len; 3314 if (ep->plen) 3315 memcpy(ep->mpa_pkt + sizeof(struct mpa_message), 3316 conn_param->private_data, ep->plen); 3317 ep->ird = conn_param->ird; 3318 ep->ord = conn_param->ord; 3319 3320 if (peer2peer && ep->ord == 0) 3321 ep->ord = 1; 3322 3323 ep->com.cm_id = cm_id; 3324 ref_cm_id(&ep->com); 3325 ep->com.dev = dev; 3326 ep->com.qp = 
get_qhp(dev, conn_param->qpn); 3327 if (!ep->com.qp) { 3328 PDBG("%s qpn 0x%x not found!\n", __func__, conn_param->qpn); 3329 err = -EINVAL; 3330 goto fail2; 3331 } 3332 ref_qp(ep); 3333 PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn, 3334 ep->com.qp, cm_id); 3335 3336 /* 3337 * Allocate an active TID to initiate a TCP connection. 3338 */ 3339 ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep); 3340 if (ep->atid == -1) { 3341 printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__); 3342 err = -ENOMEM; 3343 goto fail2; 3344 } 3345 insert_handle(dev, &dev->atid_idr, ep, ep->atid); 3346 3347 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, 3348 sizeof(ep->com.local_addr)); 3349 memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr, 3350 sizeof(ep->com.remote_addr)); 3351 3352 laddr = (struct sockaddr_in *)&ep->com.local_addr; 3353 raddr = (struct sockaddr_in *)&ep->com.remote_addr; 3354 laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr; 3355 raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr; 3356 3357 if (cm_id->m_remote_addr.ss_family == AF_INET) { 3358 iptype = 4; 3359 ra = (__u8 *)&raddr->sin_addr; 3360 3361 /* 3362 * Handle loopback requests to INADDR_ANY. 3363 */ 3364 if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) { 3365 err = pick_local_ipaddrs(dev, cm_id); 3366 if (err) 3367 goto fail2; 3368 } 3369 3370 /* find a route */ 3371 PDBG("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n", 3372 __func__, &laddr->sin_addr, ntohs(laddr->sin_port), 3373 ra, ntohs(raddr->sin_port)); 3374 ep->dst = find_route(dev, laddr->sin_addr.s_addr, 3375 raddr->sin_addr.s_addr, laddr->sin_port, 3376 raddr->sin_port, cm_id->tos); 3377 } else { 3378 iptype = 6; 3379 ra = (__u8 *)&raddr6->sin6_addr; 3380 3381 /* 3382 * Handle loopback requests to INADDR_ANY. 
3383 */ 3384 if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) { 3385 err = pick_local_ip6addrs(dev, cm_id); 3386 if (err) 3387 goto fail2; 3388 } 3389 3390 /* find a route */ 3391 PDBG("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n", 3392 __func__, laddr6->sin6_addr.s6_addr, 3393 ntohs(laddr6->sin6_port), 3394 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port)); 3395 ep->dst = find_route6(dev, laddr6->sin6_addr.s6_addr, 3396 raddr6->sin6_addr.s6_addr, 3397 laddr6->sin6_port, raddr6->sin6_port, 0, 3398 raddr6->sin6_scope_id); 3399 } 3400 if (!ep->dst) { 3401 printk(KERN_ERR MOD "%s - cannot find route.\n", __func__); 3402 err = -EHOSTUNREACH; 3403 goto fail3; 3404 } 3405 3406 err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true, 3407 ep->com.dev->rdev.lldi.adapter_type, cm_id->tos); 3408 if (err) { 3409 printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__); 3410 goto fail4; 3411 } 3412 3413 PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n", 3414 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid, 3415 ep->l2t->idx); 3416 3417 state_set(&ep->com, CONNECTING); 3418 ep->tos = cm_id->tos; 3419 3420 /* send connect request to rnic */ 3421 err = send_connect(ep); 3422 if (!err) 3423 goto out; 3424 3425 cxgb4_l2t_release(ep->l2t); 3426 fail4: 3427 dst_release(ep->dst); 3428 fail3: 3429 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid); 3430 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid); 3431 fail2: 3432 skb_queue_purge(&ep->com.ep_skb_list); 3433 deref_cm_id(&ep->com); 3434 fail1: 3435 c4iw_put_ep(&ep->com); 3436 out: 3437 return err; 3438 } 3439 3440 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3441 { 3442 int err; 3443 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) 3444 &ep->com.local_addr; 3445 3446 if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) { 3447 err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0], 3448 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3449 if (err) 3450 return err; 3451 } 3452 c4iw_init_wr_wait(&ep->com.wr_wait); 3453 err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0], 3454 ep->stid, &sin6->sin6_addr, 3455 sin6->sin6_port, 3456 ep->com.dev->rdev.lldi.rxq_ids[0]); 3457 if (!err) 3458 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3459 &ep->com.wr_wait, 3460 0, 0, __func__); 3461 else if (err > 0) 3462 err = net_xmit_errno(err); 3463 if (err) { 3464 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3465 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3466 pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n", 3467 err, ep->stid, 3468 sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port)); 3469 } 3470 return err; 3471 } 3472 3473 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep) 3474 { 3475 int err; 3476 struct sockaddr_in *sin = (struct sockaddr_in *) 3477 &ep->com.local_addr; 3478 3479 if (dev->rdev.lldi.enable_fw_ofld_conn) { 3480 do { 3481 err = cxgb4_create_server_filter( 3482 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3483 sin->sin_addr.s_addr, sin->sin_port, 0, 3484 ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0); 3485 if (err == -EBUSY) { 3486 if (c4iw_fatal_error(&ep->com.dev->rdev)) { 3487 err = -EIO; 3488 break; 3489 } 3490 set_current_state(TASK_UNINTERRUPTIBLE); 3491 schedule_timeout(usecs_to_jiffies(100)); 3492 } 3493 } while (err == -EBUSY); 3494 } else { 3495 c4iw_init_wr_wait(&ep->com.wr_wait); 3496 err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], 3497 ep->stid, sin->sin_addr.s_addr, sin->sin_port, 
3498 0, ep->com.dev->rdev.lldi.rxq_ids[0]); 3499 if (!err) 3500 err = c4iw_wait_for_reply(&ep->com.dev->rdev, 3501 &ep->com.wr_wait, 3502 0, 0, __func__); 3503 else if (err > 0) 3504 err = net_xmit_errno(err); 3505 } 3506 if (err) 3507 pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n" 3508 , err, ep->stid, 3509 &sin->sin_addr, ntohs(sin->sin_port)); 3510 return err; 3511 } 3512 3513 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog) 3514 { 3515 int err = 0; 3516 struct c4iw_dev *dev = to_c4iw_dev(cm_id->device); 3517 struct c4iw_listen_ep *ep; 3518 3519 might_sleep(); 3520 3521 ep = alloc_ep(sizeof(*ep), GFP_KERNEL); 3522 if (!ep) { 3523 printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__); 3524 err = -ENOMEM; 3525 goto fail1; 3526 } 3527 skb_queue_head_init(&ep->com.ep_skb_list); 3528 PDBG("%s ep %p\n", __func__, ep); 3529 ep->com.cm_id = cm_id; 3530 ref_cm_id(&ep->com); 3531 ep->com.dev = dev; 3532 ep->backlog = backlog; 3533 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, 3534 sizeof(ep->com.local_addr)); 3535 3536 /* 3537 * Allocate a server TID. 3538 */ 3539 if (dev->rdev.lldi.enable_fw_ofld_conn && 3540 ep->com.local_addr.ss_family == AF_INET) 3541 ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, 3542 cm_id->m_local_addr.ss_family, ep); 3543 else 3544 ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, 3545 cm_id->m_local_addr.ss_family, ep); 3546 3547 if (ep->stid == -1) { 3548 printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__); 3549 err = -ENOMEM; 3550 goto fail2; 3551 } 3552 insert_handle(dev, &dev->stid_idr, ep, ep->stid); 3553 3554 memcpy(&ep->com.local_addr, &cm_id->m_local_addr, 3555 sizeof(ep->com.local_addr)); 3556 3557 state_set(&ep->com, LISTEN); 3558 if (ep->com.local_addr.ss_family == AF_INET) 3559 err = create_server4(dev, ep); 3560 else 3561 err = create_server6(dev, ep); 3562 if (!err) { 3563 cm_id->provider_data = ep; 3564 goto out; 3565 } 3566 3567 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3568 ep->com.local_addr.ss_family); 3569 fail2: 3570 deref_cm_id(&ep->com); 3571 c4iw_put_ep(&ep->com); 3572 fail1: 3573 out: 3574 return err; 3575 } 3576 3577 int c4iw_destroy_listen(struct iw_cm_id *cm_id) 3578 { 3579 int err; 3580 struct c4iw_listen_ep *ep = to_listen_ep(cm_id); 3581 3582 PDBG("%s ep %p\n", __func__, ep); 3583 3584 might_sleep(); 3585 state_set(&ep->com, DEAD); 3586 if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn && 3587 ep->com.local_addr.ss_family == AF_INET) { 3588 err = cxgb4_remove_server_filter( 3589 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3590 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3591 } else { 3592 struct sockaddr_in6 *sin6; 3593 c4iw_init_wr_wait(&ep->com.wr_wait); 3594 err = cxgb4_remove_server( 3595 ep->com.dev->rdev.lldi.ports[0], ep->stid, 3596 ep->com.dev->rdev.lldi.rxq_ids[0], 0); 3597 if (err) 3598 goto done; 3599 err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 3600 0, 0, __func__); 3601 sin6 = (struct sockaddr_in6 *)&ep->com.local_addr; 3602 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3603 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3604 } 3605 remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid); 3606 cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, 3607 ep->com.local_addr.ss_family); 3608 done: 3609 deref_cm_id(&ep->com); 3610 c4iw_put_ep(&ep->com); 3611 return err; 3612 } 3613 3614 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp) 3615 { 3616 int ret = 0; 3617 int close = 0; 3618 int fatal = 0; 3619 struct c4iw_rdev *rdev; 
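	/*
	 * abrupt != 0 requests an abortive close (ABORT/RST); otherwise a
	 * graceful half-close (FIN) is attempted and the ep timer is armed.
	 */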
3620 3621 mutex_lock(&ep->com.mutex); 3622 3623 PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep, 3624 states[ep->com.state], abrupt); 3625 3626 /* 3627 * Ref the ep here in case we have fatal errors causing the 3628 * ep to be released and freed. 3629 */ 3630 c4iw_get_ep(&ep->com); 3631 3632 rdev = &ep->com.dev->rdev; 3633 if (c4iw_fatal_error(rdev)) { 3634 fatal = 1; 3635 close_complete_upcall(ep, -EIO); 3636 ep->com.state = DEAD; 3637 } 3638 switch (ep->com.state) { 3639 case MPA_REQ_WAIT: 3640 case MPA_REQ_SENT: 3641 case MPA_REQ_RCVD: 3642 case MPA_REP_SENT: 3643 case FPDU_MODE: 3644 case CONNECTING: 3645 close = 1; 3646 if (abrupt) 3647 ep->com.state = ABORTING; 3648 else { 3649 ep->com.state = CLOSING; 3650 3651 /* 3652 * if we close before we see the fw4_ack() then we fix 3653 * up the timer state since we're reusing it. 3654 */ 3655 if (ep->mpa_skb && 3656 test_bit(STOP_MPA_TIMER, &ep->com.flags)) { 3657 clear_bit(STOP_MPA_TIMER, &ep->com.flags); 3658 stop_ep_timer(ep); 3659 } 3660 start_ep_timer(ep); 3661 } 3662 set_bit(CLOSE_SENT, &ep->com.flags); 3663 break; 3664 case CLOSING: 3665 if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) { 3666 close = 1; 3667 if (abrupt) { 3668 (void)stop_ep_timer(ep); 3669 ep->com.state = ABORTING; 3670 } else 3671 ep->com.state = MORIBUND; 3672 } 3673 break; 3674 case MORIBUND: 3675 case ABORTING: 3676 case DEAD: 3677 PDBG("%s ignoring disconnect ep %p state %u\n", 3678 __func__, ep, ep->com.state); 3679 break; 3680 default: 3681 BUG(); 3682 break; 3683 } 3684 3685 if (close) { 3686 if (abrupt) { 3687 set_bit(EP_DISC_ABORT, &ep->com.history); 3688 close_complete_upcall(ep, -ECONNRESET); 3689 ret = send_abort(ep); 3690 } else { 3691 set_bit(EP_DISC_CLOSE, &ep->com.history); 3692 ret = send_halfclose(ep); 3693 } 3694 if (ret) { 3695 set_bit(EP_DISC_FAIL, &ep->com.history); 3696 if (!abrupt) { 3697 stop_ep_timer(ep); 3698 close_complete_upcall(ep, -EIO); 3699 } 3700 if (ep->com.qp) { 3701 struct c4iw_qp_attributes attrs; 3702 3703 attrs.next_state = C4IW_QP_STATE_ERROR; 3704 ret = c4iw_modify_qp(ep->com.qp->rhp, 3705 ep->com.qp, 3706 C4IW_QP_ATTR_NEXT_STATE, 3707 &attrs, 1); 3708 if (ret) 3709 pr_err(MOD 3710 "%s - qp <- error failed!\n", 3711 __func__); 3712 } 3713 fatal = 1; 3714 } 3715 } 3716 mutex_unlock(&ep->com.mutex); 3717 c4iw_put_ep(&ep->com); 3718 if (fatal) 3719 release_ep_resources(ep); 3720 return ret; 3721 } 3722 3723 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3724 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3725 { 3726 struct c4iw_ep *ep; 3727 int atid = be32_to_cpu(req->tid); 3728 3729 ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids, 3730 (__force u32) req->tid); 3731 if (!ep) 3732 return; 3733 3734 switch (req->retval) { 3735 case FW_ENOMEM: 3736 set_bit(ACT_RETRY_NOMEM, &ep->com.history); 3737 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3738 send_fw_act_open_req(ep, atid); 3739 return; 3740 } 3741 case FW_EADDRINUSE: 3742 set_bit(ACT_RETRY_INUSE, &ep->com.history); 3743 if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) { 3744 send_fw_act_open_req(ep, atid); 3745 return; 3746 } 3747 break; 3748 default: 3749 pr_info("%s unexpected ofld conn wr retval %d\n", 3750 __func__, req->retval); 3751 break; 3752 } 3753 pr_err("active ofld_connect_wr failure %d atid %d\n", 3754 req->retval, atid); 3755 mutex_lock(&dev->rdev.stats.lock); 3756 dev->rdev.stats.act_ofld_conn_fails++; 3757 mutex_unlock(&dev->rdev.stats.lock); 3758 connect_reply_upcall(ep, status2errno(req->retval)); 3759 
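	/*
	 * The active open has failed for good: mark the ep DEAD and release
	 * the CLIP entry (IPv6), the atid, the route and the L2T entry.
	 */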
state_set(&ep->com, DEAD); 3760 if (ep->com.remote_addr.ss_family == AF_INET6) { 3761 struct sockaddr_in6 *sin6 = 3762 (struct sockaddr_in6 *)&ep->com.local_addr; 3763 cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0], 3764 (const u32 *)&sin6->sin6_addr.s6_addr, 1); 3765 } 3766 remove_handle(dev, &dev->atid_idr, atid); 3767 cxgb4_free_atid(dev->rdev.lldi.tids, atid); 3768 dst_release(ep->dst); 3769 cxgb4_l2t_release(ep->l2t); 3770 c4iw_put_ep(&ep->com); 3771 } 3772 3773 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb, 3774 struct cpl_fw6_msg_ofld_connection_wr_rpl *req) 3775 { 3776 struct sk_buff *rpl_skb; 3777 struct cpl_pass_accept_req *cpl; 3778 int ret; 3779 3780 rpl_skb = (struct sk_buff *)(unsigned long)req->cookie; 3781 BUG_ON(!rpl_skb); 3782 if (req->retval) { 3783 PDBG("%s passive open failure %d\n", __func__, req->retval); 3784 mutex_lock(&dev->rdev.stats.lock); 3785 dev->rdev.stats.pas_ofld_conn_fails++; 3786 mutex_unlock(&dev->rdev.stats.lock); 3787 kfree_skb(rpl_skb); 3788 } else { 3789 cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb); 3790 OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 3791 (__force u32) htonl( 3792 (__force u32) req->tid))); 3793 ret = pass_accept_req(dev, rpl_skb); 3794 if (!ret) 3795 kfree_skb(rpl_skb); 3796 } 3797 return; 3798 } 3799 3800 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb) 3801 { 3802 struct cpl_fw6_msg *rpl = cplhdr(skb); 3803 struct cpl_fw6_msg_ofld_connection_wr_rpl *req; 3804 3805 switch (rpl->type) { 3806 case FW6_TYPE_CQE: 3807 c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]); 3808 break; 3809 case FW6_TYPE_OFLD_CONNECTION_WR_RPL: 3810 req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data; 3811 switch (req->t_state) { 3812 case TCP_SYN_SENT: 3813 active_ofld_conn_reply(dev, skb, req); 3814 break; 3815 case TCP_SYN_RECV: 3816 passive_ofld_conn_reply(dev, skb, req); 3817 break; 3818 default: 3819 pr_err("%s unexpected ofld conn wr state %d\n", 3820 __func__, req->t_state); 3821 break; 3822 } 3823 break; 3824 } 3825 return 0; 3826 } 3827 3828 static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid , u8 tos) 3829 { 3830 __be32 l2info; 3831 __be16 hdr_len, vlantag, len; 3832 u16 eth_hdr_len; 3833 int tcp_hdr_len, ip_hdr_len; 3834 u8 intf; 3835 struct cpl_rx_pkt *cpl = cplhdr(skb); 3836 struct cpl_pass_accept_req *req; 3837 struct tcp_options_received tmp_opt; 3838 struct c4iw_dev *dev; 3839 enum chip_type type; 3840 3841 dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *))); 3842 /* Store values from cpl_rx_pkt in temporary location. */ 3843 vlantag = cpl->vlan; 3844 len = cpl->len; 3845 l2info = cpl->l2info; 3846 hdr_len = cpl->hdr_len; 3847 intf = cpl->iff; 3848 3849 __skb_pull(skb, sizeof(*req) + sizeof(struct rss_header)); 3850 3851 /* 3852 * We need to parse the TCP options from SYN packet. 3853 * to generate cpl_pass_accept_req. 

static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
{
	__be32 l2info;
	__be16 hdr_len, vlantag, len;
	u16 eth_hdr_len;
	int tcp_hdr_len, ip_hdr_len;
	u8 intf;
	struct cpl_rx_pkt *cpl = cplhdr(skb);
	struct cpl_pass_accept_req *req;
	struct tcp_options_received tmp_opt;
	struct c4iw_dev *dev;
	enum chip_type type;

	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
	/* Store values from cpl_rx_pkt in temporary location. */
	vlantag = cpl->vlan;
	len = cpl->len;
	l2info = cpl->l2info;
	hdr_len = cpl->hdr_len;
	intf = cpl->iff;

	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));

	/*
	 * We need to parse the TCP options from the SYN packet
	 * to generate cpl_pass_accept_req.
	 */
	memset(&tmp_opt, 0, sizeof(tmp_opt));
	tcp_clear_options(&tmp_opt);
	tcp_parse_options(skb, &tmp_opt, 0, NULL);

	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
			 SYN_MAC_IDX_V(RX_MACIDX_G(
			 be32_to_cpu(l2info))) |
			 SYN_XACT_MATCH_F);
	type = dev->rdev.lldi.adapter_type;
	tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
	ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
	req->hdr_len =
		cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
	if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
		eth_hdr_len = is_t4(type) ?
				RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
				RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
					    IP_HDR_LEN_V(ip_hdr_len) |
					    ETH_HDR_LEN_V(eth_hdr_len));
	} else { /* T6 and later */
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
		req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
					    T6_IP_HDR_LEN_V(ip_hdr_len) |
					    T6_ETH_HDR_LEN_V(eth_hdr_len));
	}
	req->vlan = vlantag;
	req->len = len;
	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
				    PASS_OPEN_TOS_V(tos));
	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
	if (tmp_opt.wscale_ok)
		req->tcpopt.wsf = tmp_opt.snd_wscale;
	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
	if (tmp_opt.sack_ok)
		req->tcpopt.sack = 1;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
	return;
}

static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
				  __be32 laddr, __be16 lport,
				  __be32 raddr, __be16 rport,
				  u32 rcv_isn, u32 filter, u16 window,
				  u32 rss_qid, u8 port_id)
{
	struct sk_buff *req_skb;
	struct fw_ofld_connection_wr *req;
	struct cpl_pass_accept_req *cpl = cplhdr(skb);
	int ret;

	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
	if (!req_skb)
		return;
	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
	memset(req, 0, sizeof(*req));
	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
	req->le.filter = (__force __be32) filter;
	req->le.lport = lport;
	req->le.pport = rport;
	req->le.u.ipv4.lip = laddr;
	req->le.u.ipv4.pip = raddr;
	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
	req->tcb.rcv_adv = htons(window);
	req->tcb.t_state_to_astid =
		htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
			FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
			FW_OFLD_CONNECTION_WR_ASTID_V(
			PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));

	/*
	 * We store the qid in opt2, which the firmware will use when
	 * sending us the wr response.
	 */
	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));

	/*
	 * We initialize the MSS index in the TCB to 0xF so that, when the
	 * driver sends cpl_pass_accept_rpl, the TCB picks up the correct
	 * value. If this were left at 0, TP would ignore any MSS index > 0.
	 */
	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
	req->cookie = (uintptr_t)skb;

	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
	if (ret < 0) {
		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
		       ret);
		kfree_skb(skb);
		kfree_skb(req_skb);
	}
}
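
/*
 * Illustrative sketch (not built): the cookie set in send_fw_pass_open_req()
 * is how the synthesized SYN skb survives the firmware round trip. The
 * firmware echoes the 64-bit cookie back verbatim in
 * cpl_fw6_msg_ofld_connection_wr_rpl, and passive_ofld_conn_reply() casts it
 * straight back to the skb pointer; both sides treat it as opaque, so no
 * byte-swapping is involved. The helper names below are hypothetical.
 */
#if 0
static inline u64 example_cookie_from_skb(struct sk_buff *skb)
{
	return (uintptr_t)skb;				/* request side */
}

static inline struct sk_buff *example_skb_from_cookie(u64 cookie)
{
	return (struct sk_buff *)(unsigned long)cookie;	/* reply side */
}
#endif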

/*
 * Handler for CPL_RX_PKT messages. These need to be handled when a filter,
 * rather than a server TID, is used to redirect a SYN packet: packets that
 * hit the filter are steered to the offload queue, and the driver then tries
 * to establish the connection using a firmware work request.
 */
static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
{
	int stid;
	unsigned int filter;
	struct ethhdr *eh = NULL;
	struct vlan_ethhdr *vlan_eh = NULL;
	struct iphdr *iph;
	struct tcphdr *tcph;
	struct rss_header *rss = (void *)skb->data;
	struct cpl_rx_pkt *cpl = (void *)skb->data;
	struct cpl_pass_accept_req *req = (void *)(rss + 1);
	struct l2t_entry *e;
	struct dst_entry *dst;
	struct c4iw_ep *lep = NULL;
	u16 window;
	struct port_info *pi;
	struct net_device *pdev;
	u16 rss_qid, eth_hdr_len;
	int step;
	u32 tx_chan;
	struct neighbour *neigh;

	/* Drop all non-SYN packets */
	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
		goto reject;

	/*
	 * Drop all packets which did not hit the filter.
	 * Unlikely to happen.
	 */
	if (!(rss->filter_hit && rss->filter_tid))
		goto reject;

	/*
	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
	 */
	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);

	lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
	if (!lep) {
		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
		goto reject;
	}

	switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
	case CHELSIO_T4:
		eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T5:
		eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	case CHELSIO_T6:
		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
		goto reject;
	}

	if (eth_hdr_len == ETH_HLEN) {
		eh = (struct ethhdr *)(req + 1);
		iph = (struct iphdr *)(eh + 1);
	} else {
		vlan_eh = (struct vlan_ethhdr *)(req + 1);
		iph = (struct iphdr *)(vlan_eh + 1);
		skb->vlan_tci = ntohs(cpl->vlan);
	}

	if (iph->version != 0x4)
		goto reject;

	tcph = (struct tcphdr *)(iph + 1);
	skb_set_network_header(skb, (void *)iph - (void *)rss);
	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
	skb_get(skb);

	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
	     ntohs(tcph->source), iph->tos);

	dst = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
			 iph->tos);
	if (!dst) {
		pr_err("%s - failed to find dst entry!\n",
		       __func__);
		goto reject;
	}
	neigh = dst_neigh_lookup_skb(dst, skb);

	if (!neigh) {
		pr_err("%s - failed to allocate neigh!\n",
		       __func__);
		goto free_dst;
	}

	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		if (!pdev) {
			pr_err("%s - failed to find device!\n", __func__);
			neigh_release(neigh);
			goto free_dst;
		}
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
	} else {
		pdev = get_real_dev(neigh->dev);
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
	}
	neigh_release(neigh);
	if (!e) {
		pr_err("%s - failed to allocate l2t entry!\n",
		       __func__);
		goto free_dst;
	}

	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
	window = (__force u16) htons((__force u16)tcph->window);

	/* Calculate filter portion for LE region. */
	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
						    dev->rdev.lldi.ports[0],
						    e));

	/*
	 * Synthesize the cpl_pass_accept_req. We have everything except the
	 * TID. Once firmware sends a reply with TID we update the TID field
	 * in cpl and pass it through the regular cpl_pass_accept_req path.
	 */
	build_cpl_pass_accept_req(skb, stid, iph->tos);
	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
			      tcph->source, ntohl(tcph->seq), filter, window,
			      rss_qid, pi->port_id);
	cxgb4_l2t_release(e);
free_dst:
	dst_release(dst);
reject:
	if (lep)
		c4iw_put_ep(&lep->com);
	return 0;
}

/*
 * These are the real handlers that are called from a
 * work queue.
 */
static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
	[CPL_ACT_ESTABLISH] = act_establish,
	[CPL_ACT_OPEN_RPL] = act_open_rpl,
	[CPL_RX_DATA] = rx_data,
	[CPL_ABORT_RPL_RSS] = abort_rpl,
	[CPL_ABORT_RPL] = abort_rpl,
	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
	[CPL_PASS_ESTABLISH] = pass_establish,
	[CPL_PEER_CLOSE] = peer_close,
	[CPL_ABORT_REQ_RSS] = peer_abort,
	[CPL_CLOSE_CON_RPL] = close_con_rpl,
	[CPL_RDMA_TERMINATE] = terminate,
	[CPL_FW4_ACK] = fw4_ack,
	[CPL_FW6_MSG] = deferred_fw6_msg,
	[CPL_RX_PKT] = rx_pkt,
	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
	[FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
};
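
/*
 * Illustrative sketch (not built): dispatch through work_handlers[] is a
 * single indexed call; process_work() below does exactly this for every skb
 * pulled off the rx queue, freeing the skb whenever the handler returns 0
 * (i.e. the handler did not keep the skb). The function name below is
 * hypothetical.
 */
#if 0
static int example_dispatch(struct c4iw_dev *dev, struct sk_buff *skb)
{
	u8 opcode = ((struct cpl_act_establish *)cplhdr(skb))->ot.opcode;
	int ret = work_handlers[opcode](dev, skb);

	if (!ret)
		kfree_skb(skb);		/* handler did not consume the skb */
	return ret;
}
#endif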

static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
	     ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.qp->rhp,
				       ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				       &attrs, 1);
		}
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:

		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
		     __func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
}

static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock_irq(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;

		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = NULL;
		tmp->prev = NULL;
		spin_unlock_irq(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock_irq(&timeout_lock);
	}
	spin_unlock_irq(&timeout_lock);
}

static void process_work(struct work_struct *work)
{
	struct sk_buff *skb = NULL;
	struct c4iw_dev *dev;
	struct cpl_act_establish *rpl;
	unsigned int opcode;
	int ret;

	process_timedout_eps();
	while ((skb = skb_dequeue(&rxq))) {
		rpl = cplhdr(skb);
		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
		opcode = rpl->ot.opcode;

		BUG_ON(!work_handlers[opcode]);
		ret = work_handlers[opcode](dev, skb);
		if (!ret)
			kfree_skb(skb);
		process_timedout_eps();
	}
}

static DECLARE_WORK(skb_work, process_work);

static void ep_timeout(unsigned long arg)
{
	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
	int kickit = 0;

	spin_lock(&timeout_lock);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		/*
		 * Only insert if it is not already on the list.
		 */
		if (!ep->entry.next) {
			list_add_tail(&ep->entry, &timeout_list);
			kickit = 1;
		}
	}
	spin_unlock(&timeout_lock);
	if (kickit)
		queue_work(workq, &skb_work);
}

/*
 * All the CM events are handled on a work queue to have a safe context.
 */
static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
{

	/*
	 * Save dev in the skb->cb area.
	 */
	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;

	/*
	 * Queue the skb and schedule the worker thread.
	 */
	skb_queue_tail(&rxq, skb);
	queue_work(workq, &skb_work);
	return 0;
}
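
/*
 * Illustrative sketch (not built): sched() smuggles the c4iw_dev pointer
 * through the skb control buffer so process_work() can recover it without a
 * lookup. The second pointer-sized slot of skb->cb is used, presumably
 * leaving the first slot free for other per-skb state. The helper names
 * below are hypothetical.
 */
#if 0
static inline void example_save_dev(struct sk_buff *skb, struct c4iw_dev *dev)
{
	*((struct c4iw_dev **)(skb->cb + sizeof(void *))) = dev;
}

static inline struct c4iw_dev *example_restore_dev(struct sk_buff *skb)
{
	return *((struct c4iw_dev **)(skb->cb + sizeof(void *)));
}
#endif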

static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE) {
		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
		       "for tid %u\n", rpl->status, GET_TID(rpl));
	}
	kfree_skb(skb);
	return 0;
}

static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_fw6_msg *rpl = cplhdr(skb);
	struct c4iw_wr_wait *wr_waitp;
	int ret;

	PDBG("%s type %u\n", __func__, rpl->type);

	switch (rpl->type) {
	case FW6_TYPE_WR_RPL:
		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
		if (wr_waitp)
			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
		kfree_skb(skb);
		break;
	case FW6_TYPE_CQE:
	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
		sched(dev, skb);
		break;
	default:
		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
		       rpl->type);
		kfree_skb(skb);
		break;
	}
	return 0;
}

static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct cpl_abort_req_rss *req = cplhdr(skb);
	struct c4iw_ep *ep;
	unsigned int tid = GET_TID(req);

	ep = get_ep_from_tid(dev, tid);
	/* This EP will be dereferenced in peer_abort() */
	if (!ep) {
		printk(KERN_WARNING MOD
		       "Abort on non-existent endpoint, tid %d\n", tid);
		kfree_skb(skb);
		return 0;
	}
	if (is_neg_adv(req->status)) {
		PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
		     __func__, ep->hwtid, req->status,
		     neg_adv_str(req->status));
		goto out;
	}
	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
	     ep->com.state);

	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
out:
	sched(dev, skb);
	return 0;
}

/*
 * Most upcalls from the T4 Core go to sched() to
 * schedule the processing on a work queue.
 */
c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
	[CPL_ACT_ESTABLISH] = sched,
	[CPL_ACT_OPEN_RPL] = sched,
	[CPL_RX_DATA] = sched,
	[CPL_ABORT_RPL_RSS] = sched,
	[CPL_ABORT_RPL] = sched,
	[CPL_PASS_OPEN_RPL] = sched,
	[CPL_CLOSE_LISTSRV_RPL] = sched,
	[CPL_PASS_ACCEPT_REQ] = sched,
	[CPL_PASS_ESTABLISH] = sched,
	[CPL_PEER_CLOSE] = sched,
	[CPL_CLOSE_CON_RPL] = sched,
	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
	[CPL_RDMA_TERMINATE] = sched,
	[CPL_FW4_ACK] = sched,
	[CPL_SET_TCB_RPL] = set_tcb_rpl,
	[CPL_FW6_MSG] = fw6_msg,
	[CPL_RX_PKT] = sched
};

int __init c4iw_cm_init(void)
{
	spin_lock_init(&timeout_lock);
	skb_queue_head_init(&rxq);

	workq = create_singlethread_workqueue("iw_cxgb4");
	if (!workq)
		return -ENOMEM;

	return 0;
}

void c4iw_cm_term(void)
{
	WARN_ON(!list_empty(&timeout_list));
	flush_workqueue(workq);
	destroy_workqueue(workq);
}
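
/*
 * Illustrative sketch (not built), assuming the usual wiring in device.c:
 * module init calls c4iw_cm_init() before registering with the cxgb4 ULD
 * framework (so the work queue exists before any CPL can be scheduled), and
 * teardown unregisters first and only then calls c4iw_cm_term(). The
 * function names below are hypothetical stand-ins for that wiring.
 */
#if 0
static int __init example_module_init(void)
{
	int err;

	err = c4iw_cm_init();	/* create the CM work queue first */
	if (err)
		return err;
	/* ...then register the ULD / ib_device (see device.c)... */
	return 0;
}

static void __exit example_module_exit(void)
{
	/* ...unregister from cxgb4 first, then... */
	c4iw_cm_term();		/* flush and destroy the work queue */
}
#endif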