xref: /linux/drivers/infiniband/hw/cxgb4/cm.c (revision e58e871becec2d3b04ed91c0c16fe8deac9c9dfa)
/*
 * Copyright (c) 2009-2014 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/if_vlan.h>

#include <net/neighbour.h>
#include <net/netevent.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <rdma/ib_addr.h>

#include <libcxgb_cm.h>
#include "iw_cxgb4.h"
#include "clip_tbl.h"

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
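
/*
 * These strings are indexed by enum c4iw_ep_state for the debug prints
 * below (see state_set() and _c4iw_free_ep()), so they must stay in
 * step with that enum's ordering.
 */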

static int nocong;
module_param(nocong, int, 0644);
MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");

static int enable_ecn;
module_param(enable_ecn, int, 0644);
MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");

static int dack_mode = 1;
module_param(dack_mode, int, 0644);
MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");

uint c4iw_max_read_depth = 32;
module_param(c4iw_max_read_depth, int, 0644);
MODULE_PARM_DESC(c4iw_max_read_depth,
		 "Per-connection max ORD/IRD (default=32)");

static int enable_tcp_timestamps;
module_param(enable_tcp_timestamps, int, 0644);
MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");

static int enable_tcp_sack;
module_param(enable_tcp_sack, int, 0644);
MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");

static int enable_tcp_window_scaling = 1;
module_param(enable_tcp_window_scaling, int, 0644);
MODULE_PARM_DESC(enable_tcp_window_scaling,
		 "Enable tcp window scaling (default=1)");

int c4iw_debug;
module_param(c4iw_debug, int, 0644);
MODULE_PARM_DESC(c4iw_debug, "obsolete");

static int peer2peer = 1;
module_param(peer2peer, int, 0644);
MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
module_param(p2p_type, int, 0644);
MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
module_param(ep_timeout_secs, int, 0644);
MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
				   "in seconds (default=60)");

static int mpa_rev = 2;
module_param(mpa_rev, int, 0644);
MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
		"1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft"
		" compliant (default=2)");

static int markers_enabled;
module_param(markers_enabled, int, 0644);
MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");

static int crc_enabled = 1;
module_param(crc_enabled, int, 0644);
MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");

static int rcv_win = 256 * 1024;
module_param(rcv_win, int, 0644);
MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");

static int snd_win = 128 * 1024;
module_param(snd_win, int, 0644);
MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=128KB)");

static struct workqueue_struct *workq;

static struct sk_buff_head rxq;

static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
static void ep_timeout(unsigned long arg);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int sched(struct c4iw_dev *dev, struct sk_buff *skb);

static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void start_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("%s ep %p\n", __func__, ep);
	if (timer_pending(&ep->timer)) {
		pr_err("%s timer already started! ep %p\n",
		       __func__, ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int stop_ep_timer(struct c4iw_ep *ep)
{
	pr_debug("%s ep %p stopping\n", __func__, ep);
	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}
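
/*
 * Reference contract for the pair above: start_ep_timer() takes an ep
 * reference that stop_ep_timer() normally drops.  If the TIMEOUT bit was
 * already set when we get here, the timer fired first and the timeout
 * handler owns that reference, so we return 1 to tell the caller that
 * the timeout path has already run.
 */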

static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
		  struct l2t_entry *l2e)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_debug("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
	if (error < 0)
		kfree_skb(skb);
	else if (error == NET_XMIT_DROP)
		return -ENOMEM;
	return error < 0 ? error : 0;
}
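
/*
 * Error convention in the send helper above: the skb is freed locally
 * when the lower layer returns a negative error, and NET_XMIT_DROP
 * (where the skb has presumably been consumed below us) is mapped to
 * -ENOMEM, so callers only ever see 0 or a negative errno.
 */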

int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
{
	int	error = 0;

	if (c4iw_fatal_error(rdev)) {
		kfree_skb(skb);
		pr_debug("%s - device in error state - dropping\n", __func__);
		return -EIO;
	}
	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
	if (error < 0)
		kfree_skb(skb);
	return error < 0 ? error : 0;
}

static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
{
	u32 len = roundup(sizeof(struct cpl_tid_release), 16);

	skb = get_skb(skb, len, GFP_KERNEL);
	if (!skb)
		return;

	cxgb_mk_tid_release(skb, len, hwtid, 0);
	c4iw_ofld_send(rdev, skb);
}

static void set_emss(struct c4iw_ep *ep, u16 opt)
{
	ep->emss = ep->com.dev->rdev.lldi.mtus[TCPOPT_MSS_G(opt)] -
		   ((AF_INET == ep->com.remote_addr.ss_family) ?
		    sizeof(struct iphdr) : sizeof(struct ipv6hdr)) -
		   sizeof(struct tcphdr);
	ep->mss = ep->emss;
	if (TCPOPT_TSTAMP_G(opt))
		ep->emss -= round_up(TCPOLEN_TIMESTAMP, 4);
	if (ep->emss < 128)
		ep->emss = 128;
	if (ep->emss & 7)
		pr_debug("Warning: misaligned mtu idx %u mss %u emss=%u\n",
			 TCPOPT_MSS_G(opt), ep->mss, ep->emss);
	pr_debug("%s mss_idx %u mss %u emss=%u\n", __func__, TCPOPT_MSS_G(opt),
		 ep->mss, ep->emss);
}
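
/*
 * Example of the arithmetic above, assuming the negotiated MTU entry is
 * 1500 bytes for an IPv4 peer: emss = 1500 - 20 (iphdr) - 20 (tcphdr)
 * = 1460, less a further 12 bytes (TCPOLEN_TIMESTAMP rounded up to a
 * multiple of 4) if timestamps were negotiated, and never below the
 * 128-byte floor enforced above.
 */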

static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);
	return state;
}

static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	epc->state = new;
}

static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{
	mutex_lock(&epc->mutex);
	pr_debug("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}

static int alloc_ep_skb_list(struct sk_buff_head *ep_skb_list, int size)
{
	struct sk_buff *skb;
	unsigned int i;
	size_t len;

	len = roundup(sizeof(union cpl_wr_size), 16);
	for (i = 0; i < size; i++) {
		skb = alloc_skb(len, GFP_KERNEL);
		if (!skb)
			goto fail;
		skb_queue_tail(ep_skb_list, skb);
	}
	return 0;
fail:
	skb_queue_purge(ep_skb_list);
	return -ENOMEM;
}
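
/*
 * The skbs pre-allocated here back ep->com.ep_skb_list; send_flowc(),
 * send_halfclose() and send_abort() dequeue from that list, so the
 * setup/teardown control messages can be built without allocating
 * again, e.g. when the system is under memory pressure.
 */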

static void *alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc) {
		kref_init(&epc->kref);
		mutex_init(&epc->mutex);
		c4iw_init_wr_wait(&epc->wr_wait);
	}
	pr_debug("%s alloc ep %p\n", __func__, epc);
	return epc;
}

static void remove_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid, 0);
	if (idr_is_empty(&ep->com.dev->hwtid_idr))
		wake_up(&ep->com.dev->wait);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

static void insert_ep_tid(struct c4iw_ep *ep)
{
	unsigned long flags;

	spin_lock_irqsave(&ep->com.dev->lock, flags);
	_insert_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep, ep->hwtid, 0);
	spin_unlock_irqrestore(&ep->com.dev->lock, flags);
}

/*
 * Atomically look up the ep ptr given the tid and grab a reference on the ep.
 */
static struct c4iw_ep *get_ep_from_tid(struct c4iw_dev *dev, unsigned int tid)
{
	struct c4iw_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->hwtid_idr, tid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

/*
 * Atomically look up the ep ptr given the stid and grab a reference on the ep.
 */
static struct c4iw_listen_ep *get_ep_from_stid(struct c4iw_dev *dev,
					       unsigned int stid)
{
	struct c4iw_listen_ep *ep;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	ep = idr_find(&dev->stid_idr, stid);
	if (ep)
		c4iw_get_ep(&ep->com);
	spin_unlock_irqrestore(&dev->lock, flags);
	return ep;
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	pr_debug("%s ep %p state %s\n", __func__, ep, states[ep->com.state]);
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
		if (ep->com.remote_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sin6 =
					(struct sockaddr_in6 *)
					&ep->com.local_addr;

			cxgb4_clip_release(
					ep->com.dev->rdev.lldi.ports[0],
					(const u32 *)&sin6->sin6_addr.s6_addr,
					1);
		}
		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
		dst_release(ep->dst);
		cxgb4_l2t_release(ep->l2t);
		if (ep->mpa_skb)
			kfree_skb(ep->mpa_skb);
	}
	if (!skb_queue_empty(&ep->com.ep_skb_list))
		skb_queue_purge(&ep->com.ep_skb_list);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	set_bit(RELEASE_RESOURCES, &ep->com.flags);

	/*
	 * If we have a hwtid, then remove it from the idr table
	 * so lookups will no longer find this endpoint.  Otherwise
	 * we have a race where one thread finds the ep ptr just
	 * before the other thread is freeing the ep memory.
	 */
	if (ep->hwtid != -1)
		remove_ep_tid(ep);
	c4iw_put_ep(&ep->com);
}

static int status2errno(int status)
{
	switch (status) {
	case CPL_ERR_NONE:
		return 0;
	case CPL_ERR_CONN_RESET:
		return -ECONNRESET;
	case CPL_ERR_ARP_MISS:
		return -EHOSTUNREACH;
	case CPL_ERR_CONN_TIMEDOUT:
		return -ETIMEDOUT;
	case CPL_ERR_TCAM_FULL:
		return -ENOMEM;
	case CPL_ERR_CONN_EXIST:
		return -EADDRINUSE;
	default:
		return -EIO;
	}
}

/*
 * Try and reuse skbs already allocated...
 */
static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
{
	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
		skb_trim(skb, 0);
		skb_get(skb);
		skb_reset_transport_header(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (!skb)
			return NULL;
	}
	t4_set_arp_err_handler(skb, NULL, NULL);
	return skb;
}
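
/*
 * Reuse semantics: a caller-supplied skb is recycled only when it is
 * linear and not cloned; skb_get() is taken so the returned skb holds
 * its own reference.  Otherwise a fresh skb is allocated.
 */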

static struct net_device *get_real_dev(struct net_device *egress_dev)
{
	return rdma_vlan_dev_real_dev(egress_dev) ? : egress_dev;
}

static void arp_failure_discard(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure\n");
	kfree_skb(skb);
}

static void mpa_start_arp_failure(void *handle, struct sk_buff *skb)
{
	pr_err("ARP failure during MPA Negotiation - Closing Connection\n");
}

enum {
	NUM_FAKE_CPLS = 2,
	FAKE_CPL_PUT_EP_SAFE = NUM_CPL_CMDS + 0,
	FAKE_CPL_PASS_PUT_EP_SAFE = NUM_CPL_CMDS + 1,
};
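
/*
 * These fake opcodes sit just past the real CPL command space so that
 * queue_arp_failure_cpl() below can stamp them into a CPL header and
 * have process_work() dispatch them like hardware CPLs, routing them
 * to the _put_ep_safe()/_put_pass_ep_safe() handlers.
 */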

static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	release_ep_resources(ep);
	kfree_skb(skb);
	return 0;
}

static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;

	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
	c4iw_put_ep(&ep->parent_ep->com);
	release_ep_resources(ep);
	kfree_skb(skb);
	return 0;
}

/*
 * Fake up a special CPL opcode and call sched() so process_work() will call
 * _put_ep_safe() in a safe context to free the ep resources.  This is needed
 * because ARP error handlers are called in an ATOMIC context, and
 * _c4iw_free_ep() needs to block.
 */
static void queue_arp_failure_cpl(struct c4iw_ep *ep, struct sk_buff *skb,
				  int cpl)
{
	struct cpl_act_establish *rpl = cplhdr(skb);

	/* Set our special ARP_FAILURE opcode */
	rpl->ot.opcode = cpl;

	/*
	 * Save ep in the skb->cb area, after where sched() will save the dev
	 * ptr.
	 */
	*((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *))) = ep;
	sched(ep->com.dev, skb);
}

/* Handle an ARP failure for an accept */
static void pass_accept_rpl_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during accept - tid %u - dropping connection\n",
	       ep->hwtid);

	__state_set(&ep->com, DEAD);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PASS_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for an active open.
 */
static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
{
	struct c4iw_ep *ep = handle;

	pr_err("ARP failure during connect\n");
	connect_reply_upcall(ep, -EHOSTUNREACH);
	__state_set(&ep->com, DEAD);
	if (ep->com.remote_addr.ss_family == AF_INET6) {
		struct sockaddr_in6 *sin6 =
			(struct sockaddr_in6 *)&ep->com.local_addr;
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
	}
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
	queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
}

/*
 * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
 * and send it along.
 */
static void abort_arp_failure(void *handle, struct sk_buff *skb)
{
	int ret;
	struct c4iw_ep *ep = handle;
	struct c4iw_rdev *rdev = &ep->com.dev->rdev;
	struct cpl_abort_req *req = cplhdr(skb);

	pr_debug("%s rdev %p\n", __func__, rdev);
	req->cmd = CPL_ABORT_NO_RST;
	skb_get(skb);
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	} else
		kfree_skb(skb);
}

static int send_flowc(struct c4iw_ep *ep)
{
	struct fw_flowc_wr *flowc;
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	int i;
	u16 vlan = ep->l2t->vlan;
	int nparams;

	if (WARN_ON(!skb))
		return -ENOMEM;

	if (vlan == CPL_L2T_VLAN_NONE)
		nparams = 8;
	else
		nparams = 9;

	flowc = (struct fw_flowc_wr *)__skb_put(skb, FLOWC_LEN);

	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) |
					   FW_FLOWC_WR_NPARAMS_V(nparams));
	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(DIV_ROUND_UP(FLOWC_LEN,
					  16)) | FW_WR_FLOWID_V(ep->hwtid));

	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
	flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN_V
					    (ep->com.dev->rdev.lldi.pf));
	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
	flowc->mnemval[6].val = cpu_to_be32(ep->snd_win);
	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
	if (nparams == 9) {
		u16 pri;

		pri = (vlan & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
		flowc->mnemval[8].mnemonic = FW_FLOWC_MNEM_SCHEDCLASS;
		flowc->mnemval[8].val = cpu_to_be32(pri);
	} else {
		/* Pad WR to 16 byte boundary */
		flowc->mnemval[8].mnemonic = 0;
		flowc->mnemval[8].val = 0;
	}
	for (i = 0; i < 9; i++) {
		flowc->mnemval[i].r4[0] = 0;
		flowc->mnemval[i].r4[1] = 0;
		flowc->mnemval[i].r4[2] = 0;
	}

	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
}
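
/*
 * The FLOWC work request above primes the firmware with this flow's
 * function, channel, ingress queue, initial send/receive sequence
 * numbers, send buffer and MSS before any streaming-mode data is sent;
 * the optional ninth parameter carries the VLAN priority as a
 * scheduling class when the L2T entry is VLAN-tagged.
 */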

static int send_halfclose(struct c4iw_ep *ep)
{
	struct sk_buff *skb = skb_dequeue(&ep->com.ep_skb_list);
	u32 wrlen = roundup(sizeof(struct cpl_close_con_req), 16);

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (WARN_ON(!skb))
		return -ENOMEM;

	cxgb_mk_close_con_req(skb, wrlen, ep->hwtid, ep->txq_idx,
			      NULL, arp_failure_discard);

	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_abort(struct c4iw_ep *ep)
{
	u32 wrlen = roundup(sizeof(struct cpl_abort_req), 16);
	struct sk_buff *req_skb = skb_dequeue(&ep->com.ep_skb_list);

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	if (WARN_ON(!req_skb))
		return -ENOMEM;

	cxgb_mk_abort_req(req_skb, wrlen, ep->hwtid, ep->txq_idx,
			  ep, abort_arp_failure);

	return c4iw_l2t_send(&ep->com.dev->rdev, req_skb, ep->l2t);
}

static int send_connect(struct c4iw_ep *ep)
{
	struct cpl_act_open_req *req = NULL;
	struct cpl_t5_act_open_req *t5req = NULL;
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req6 *req6 = NULL;
	struct cpl_t5_act_open_req6 *t5req6 = NULL;
	struct cpl_t6_act_open_req6 *t6req6 = NULL;
	struct sk_buff *skb;
	u64 opt0;
	u32 opt2;
	unsigned int mtu_idx;
	u32 wscale;
	int win, sizev4, sizev6, wrlen;
	struct sockaddr_in *la = (struct sockaddr_in *)
				 &ep->com.local_addr;
	struct sockaddr_in *ra = (struct sockaddr_in *)
				 &ep->com.remote_addr;
	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)
				   &ep->com.local_addr;
	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)
				   &ep->com.remote_addr;
	int ret;
	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
	u32 isn = (prandom_u32() & ~7UL) - 1;
	struct net_device *netdev;
	u64 params;

	netdev = ep->com.dev->rdev.lldi.ports[0];

	switch (CHELSIO_CHIP_VERSION(adapter_type)) {
	case CHELSIO_T4:
		sizev4 = sizeof(struct cpl_act_open_req);
		sizev6 = sizeof(struct cpl_act_open_req6);
		break;
	case CHELSIO_T5:
		sizev4 = sizeof(struct cpl_t5_act_open_req);
		sizev6 = sizeof(struct cpl_t5_act_open_req6);
		break;
	case CHELSIO_T6:
		sizev4 = sizeof(struct cpl_t6_act_open_req);
		sizev6 = sizeof(struct cpl_t6_act_open_req6);
		break;
	default:
		pr_err("T%d Chip is not supported\n",
		       CHELSIO_CHIP_VERSION(adapter_type));
		return -EINVAL;
	}

	wrlen = (ep->com.remote_addr.ss_family == AF_INET) ?
			roundup(sizev4, 16) :
			roundup(sizev6, 16);

	pr_debug("%s ep %p atid %u\n", __func__, ep, ep->atid);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - failed to alloc skb\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);

	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
		      enable_tcp_timestamps,
		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
	wscale = cxgb_compute_wscale(rcv_win);

	/*
	 * Specify the largest window that will fit in opt0. The
	 * remainder will be specified in the rx_data_ack.
	 */
	win = ep->rcv_win >> 10;
	if (win > RCV_BUFSIZ_M)
		win = RCV_BUFSIZ_M;
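
	/*
	 * e.g. with the default 256KB rcv_win this asks for 256 1KB units,
	 * clamped to what the RCV_BUFSIZ field can encode; anything beyond
	 * the field limit is handed back later as extra credits in
	 * update_rx_credits().
	 */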

	opt0 = (nocong ? NO_CONG_F : 0) |
	       KEEP_ALIVE_F |
	       DELACK_F |
	       WND_SCALE_V(wscale) |
	       MSS_IDX_V(mtu_idx) |
	       L2T_IDX_V(ep->l2t->idx) |
	       TX_CHAN_V(ep->tx_chan) |
	       SMAC_SEL_V(ep->smac_idx) |
	       DSCP_V(ep->tos >> 2) |
	       ULP_MODE_V(ULP_MODE_TCPDDP) |
	       RCV_BUFSIZ_V(win);
	opt2 = RX_CHANNEL_V(0) |
	       CCTRL_ECN_V(enable_ecn) |
	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
	if (enable_tcp_timestamps)
		opt2 |= TSTAMPS_EN_F;
	if (enable_tcp_sack)
		opt2 |= SACK_EN_F;
	if (wscale && enable_tcp_window_scaling)
		opt2 |= WND_SCALE_EN_F;
	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
		if (peer2peer)
			isn += 4;

		opt2 |= T5_OPT_2_VALID_F;
		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
		opt2 |= T5_ISS_F;
	}

	params = cxgb4_select_ntuple(netdev, ep->l2t);

	if (ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
			       (const u32 *)&la6->sin6_addr.s6_addr, 1);

	t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);

	if (ep->com.remote_addr.ss_family == AF_INET) {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req = (struct cpl_act_open_req *)skb_put(skb, wrlen);
			INIT_TP_WR(req, 0);
			break;
		case CHELSIO_T5:
			t5req = (struct cpl_t5_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req, 0);
			req = (struct cpl_act_open_req *)t5req;
			break;
		case CHELSIO_T6:
			t6req = (struct cpl_t6_act_open_req *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req, 0);
			req = (struct cpl_act_open_req *)t6req;
			t5req = (struct cpl_t5_act_open_req *)t6req;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ,
					((ep->rss_qid<<14) | ep->atid)));
		req->local_port = la->sin_port;
		req->peer_port = ra->sin_port;
		req->local_ip = la->sin_addr.s_addr;
		req->peer_ip = ra->sin_addr.s_addr;
		req->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req->params = cpu_to_be32(params);
			req->opt2 = cpu_to_be32(opt2);
		} else {
			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
				t5req->params =
					  cpu_to_be64(FILTER_TUPLE_V(params));
				t5req->rsvd = cpu_to_be32(isn);
				pr_debug("%s snd_isn %u\n", __func__,
					 t5req->rsvd);
				t5req->opt2 = cpu_to_be32(opt2);
			} else {
				t6req->params =
					  cpu_to_be64(FILTER_TUPLE_V(params));
				t6req->rsvd = cpu_to_be32(isn);
				pr_debug("%s snd_isn %u\n", __func__,
					 t6req->rsvd);
				t6req->opt2 = cpu_to_be32(opt2);
			}
		}
	} else {
		switch (CHELSIO_CHIP_VERSION(adapter_type)) {
		case CHELSIO_T4:
			req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
			INIT_TP_WR(req6, 0);
			break;
		case CHELSIO_T5:
			t5req6 = (struct cpl_t5_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t5req6, 0);
			req6 = (struct cpl_act_open_req6 *)t5req6;
			break;
		case CHELSIO_T6:
			t6req6 = (struct cpl_t6_act_open_req6 *)skb_put(skb,
					wrlen);
			INIT_TP_WR(t6req6, 0);
			req6 = (struct cpl_act_open_req6 *)t6req6;
			t5req6 = (struct cpl_t5_act_open_req6 *)t6req6;
			break;
		default:
			pr_err("T%d Chip is not supported\n",
			       CHELSIO_CHIP_VERSION(adapter_type));
			ret = -EINVAL;
			goto clip_release;
		}

		OPCODE_TID(req6) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
					((ep->rss_qid<<14)|ep->atid)));
		req6->local_port = la6->sin6_port;
		req6->peer_port = ra6->sin6_port;
		req6->local_ip_hi = *((__be64 *)(la6->sin6_addr.s6_addr));
		req6->local_ip_lo = *((__be64 *)(la6->sin6_addr.s6_addr + 8));
		req6->peer_ip_hi = *((__be64 *)(ra6->sin6_addr.s6_addr));
		req6->peer_ip_lo = *((__be64 *)(ra6->sin6_addr.s6_addr + 8));
		req6->opt0 = cpu_to_be64(opt0);

		if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
			req6->params = cpu_to_be32(cxgb4_select_ntuple(netdev,
								      ep->l2t));
			req6->opt2 = cpu_to_be32(opt2);
		} else {
			if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
				t5req6->params =
					    cpu_to_be64(FILTER_TUPLE_V(params));
				t5req6->rsvd = cpu_to_be32(isn);
				pr_debug("%s snd_isn %u\n", __func__,
					 t5req6->rsvd);
				t5req6->opt2 = cpu_to_be32(opt2);
			} else {
				t6req6->params =
					    cpu_to_be64(FILTER_TUPLE_V(params));
				t6req6->rsvd = cpu_to_be32(isn);
				pr_debug("%s snd_isn %u\n", __func__,
					 t6req6->rsvd);
				t6req6->opt2 = cpu_to_be32(opt2);
			}
		}
	}

	set_bit(ACT_OPEN_REQ, &ep->com.history);
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
clip_release:
	if (ret && ep->com.remote_addr.ss_family == AF_INET6)
		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
				   (const u32 *)&la6->sin6_addr.s6_addr, 1);
	return ret;
}

static int send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
			u8 mpa_rev_to_use)
{
	int mpalen, wrlen, ret;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("%s ep %p tid %u pd_len %d\n",
		 __func__, ep, ep->hwtid, ep->plen);

	BUG_ON(skb_cloned(skb));

	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof(*req), 16);
	skb = get_skb(skb, wrlen, GFP_KERNEL);
	if (!skb) {
		connect_reply_upcall(ep, -ENOMEM);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));

	mpa->flags = 0;
	if (crc_enabled)
		mpa->flags |= MPA_CRC;
	if (markers_enabled) {
		mpa->flags |= MPA_MARKERS;
		ep->mpa_attr.recv_marker_enabled = 1;
	} else {
		ep->mpa_attr.recv_marker_enabled = 0;
	}
	if (mpa_rev_to_use == 2)
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;

	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;
	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof(struct mpa_v2_conn_params));
		pr_debug("%s initiator ird %u ord %u\n", __func__, ep->ird,
			 ep->ord);
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params),
			       ep->mpa_pkt + sizeof(*mpa), ep->plen);
	} else
		if (ep->plen)
			memcpy(mpa->private_data,
					ep->mpa_pkt + sizeof(*mpa), ep->plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ret = c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
	if (ret)
		return ret;
	start_ep_timer(ep);
	__state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	ep->snd_seq += mpalen;
	return ret;
}
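
/*
 * On a successful transmit the endpoint keeps an extra reference on
 * mpa_skb (dropped by fw4_ack() once the hardware acks the data, per
 * the comment above) and snd_seq is advanced past the streaming-mode
 * bytes just sent.
 */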

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("%s ep %p tid %u pd_len %d\n",
		 __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof(*req), 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
					  (peer2peer ? MPA_V2_PEER2PEER_MODEL :
					   0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					  (p2p_type ==
					   FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					   MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					   FW_RI_INIT_P2PTYPE_READ_REQ ?
					   MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb again.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	BUG_ON(ep->mpa_skb);
	ep->mpa_skb = skb;
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen, wrlen;
	struct fw_ofld_tx_data_wr *req;
	struct mpa_message *mpa;
	struct sk_buff *skb;
	struct mpa_v2_conn_params mpa_v2_params;

	pr_debug("%s ep %p tid %u pd_len %d\n",
		 __func__, ep, ep->hwtid, ep->plen);

	mpalen = sizeof(*mpa) + plen;
	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn)
		mpalen += sizeof(struct mpa_v2_conn_params);
	wrlen = roundup(mpalen + sizeof(*req), 16);

	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("%s - cannot alloc skb!\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);

	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
	memset(req, 0, wrlen);
	req->op_to_immdlen = cpu_to_be32(
		FW_WR_OP_V(FW_OFLD_TX_DATA_WR) |
		FW_WR_COMPL_F |
		FW_WR_IMMDLEN_V(mpalen));
	req->flowid_len16 = cpu_to_be32(
		FW_WR_FLOWID_V(ep->hwtid) |
		FW_WR_LEN16_V(wrlen >> 4));
	req->plen = cpu_to_be32(mpalen);
	req->tunnel_to_proxy = cpu_to_be32(
		FW_OFLD_TX_DATA_WR_FLUSH_F |
		FW_OFLD_TX_DATA_WR_SHOVE_F);

	mpa = (struct mpa_message *)(req + 1);
	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = 0;
	if (ep->mpa_attr.crc_enabled)
		mpa->flags |= MPA_CRC;
	if (ep->mpa_attr.recv_marker_enabled)
		mpa->flags |= MPA_MARKERS;
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					       sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		if (peer2peer && (ep->mpa_attr.p2p_type !=
					FW_RI_INIT_P2PTYPE_DISABLED)) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ)
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		       sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			       sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	/*
	 * Reference the mpa skb.  This ensures the data area
	 * will remain in memory until the hw acks the tx.
	 * Function fw4_ack() will deref it.
	 */
	skb_get(skb);
	t4_set_arp_err_handler(skb, NULL, mpa_start_arp_failure);
	ep->mpa_skb = skb;
	__state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}

static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
{
	struct c4iw_ep *ep;
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int tid = GET_TID(req);
	unsigned int atid = TID_TID_G(ntohl(req->tos_atid));
	struct tid_info *t = dev->rdev.lldi.tids;
	int ret;

	ep = lookup_atid(t, atid);

	pr_debug("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
		 be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));

	mutex_lock(&ep->com.mutex);
	dst_confirm(ep->dst);

	/* setup the hwtid for this connection */
	ep->hwtid = tid;
	cxgb4_insert_tid(t, ep, tid);
	insert_ep_tid(ep);

	ep->snd_seq = be32_to_cpu(req->snd_isn);
	ep->rcv_seq = be32_to_cpu(req->rcv_isn);

	set_emss(ep, ntohs(req->tcp_opt));

	/* dealloc the atid */
	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
	cxgb4_free_atid(t, atid);
	set_bit(ACT_ESTAB, &ep->com.history);

	/* start MPA negotiation */
	ret = send_flowc(ep);
	if (ret)
		goto err;
	if (ep->retry_with_mpa_v1)
		ret = send_mpa_req(ep, skb, 1);
	else
		ret = send_mpa_req(ep, skb, mpa_rev);
	if (ret)
		goto err;
	mutex_unlock(&ep->com.mutex);
	return 0;
err:
	mutex_unlock(&ep->com.mutex);
	connect_reply_upcall(ep, -ENOMEM);
	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
	return 0;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;
	if (ep->com.cm_id) {
		pr_debug("close complete delivered ep %p cm_id %p tid %u\n",
			 ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;
	if (ep->com.cm_id) {
		pr_debug("peer close delivered ep %p cm_id %p tid %u\n",
			 ep, ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;
	if (ep->com.cm_id) {
		pr_debug("abort delivered ep %p cm_id %p tid %u\n", ep,
			 ep->com.cm_id, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	pr_debug("%s ep %p tid %u status %d\n",
		 __func__, ep, ep->hwtid, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = status;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));

	if ((status == 0) || (status == -ECONNREFUSED)) {
		if (!ep->tried_with_mpa_v1) {
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {
			/* this means MPA_v1 is used */
			event.ord = cur_max_read_depth(ep->com.dev);
			event.ird = cur_max_read_depth(ep->com.dev);
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	pr_debug("%s ep %p tid %u status %d\n", __func__, ep,
		 ep->hwtid, status);
	set_bit(CONN_RPL_UPCALL, &ep->com.history);
	ep->com.cm_id->event_handler(ep->com.cm_id, &event);

	if (status < 0)
		deref_cm_id(&ep->com);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	memcpy(&event.local_addr, &ep->com.local_addr,
	       sizeof(ep->com.local_addr));
	memcpy(&event.remote_addr, &ep->com.remote_addr,
	       sizeof(ep->com.remote_addr));
	event.provider_data = ep;
	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {
		/* this means MPA_v1 is used. Send max supported */
		event.ord = cur_max_read_depth(ep->com.dev);
		event.ird = cur_max_read_depth(ep->com.dev);
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}
	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
						      &event);
	if (ret)
		c4iw_put_ep(&ep->com);
	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ord;
	event.ord = ep->ird;
	if (ep->com.cm_id) {
		pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
}

static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
{
	struct sk_buff *skb;
	u32 wrlen = roundup(sizeof(struct cpl_rx_data_ack), 16);
	u32 credit_dack;

	pr_debug("%s ep %p tid %u credits %u\n",
		 __func__, ep, ep->hwtid, credits);
	skb = get_skb(NULL, wrlen, GFP_KERNEL);
	if (!skb) {
		pr_err("update_rx_credits - cannot alloc skb!\n");
		return 0;
	}

	/*
	 * If we couldn't specify the entire rcv window at connection setup
	 * due to the limit in the number of bits in the RCV_BUFSIZ field,
	 * then add the overage in to the credits returned.
	 */
	if (ep->rcv_win > RCV_BUFSIZ_M * 1024)
		credits += ep->rcv_win - RCV_BUFSIZ_M * 1024;

	credit_dack = credits | RX_FORCE_ACK_F | RX_DACK_CHANGE_F |
		      RX_DACK_MODE_V(dack_mode);

	cxgb_mk_rx_data_ack(skb, wrlen, ep->hwtid, ep->ctrlq_idx,
			    credit_dack);

	c4iw_ofld_send(&ep->com.dev->rdev, skb);
	return credits;
}
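
/*
 * Worked example of the overage logic above: with the default 256KB
 * rcv_win and a RCV_BUFSIZ field that could only advertise part of it
 * in opt0 (see send_connect()), the shortfall is folded into the first
 * credit return so the peer eventually sees the full window.
 */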

#define RELAXED_IRD_NEGOTIATION 1

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating the connect reply was delivered to the ULP
 * or the MPA reply is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	int disconnect = 0;

	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);

	/*
	 * If we get more than the supported amount of private data
	 * then we must fail this connection.
	 */
	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
		err = -EINVAL;
		goto err_stop_timer;
	}

	/*
	 * Copy the new data into our accumulation buffer.
	 */
	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
				  skb->len);
	ep->mpa_pkt_len += skb->len;

	/*
	 * If we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return 0;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {
		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
		       __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}
	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * Fail if plen does not account for the accumulated packet size.
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return 0;

	if (mpa->flags & MPA_REJECT) {
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (stop_ep_timer(ep))
		return 0;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	__state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;
			pr_debug("%s responder ird %u ord %u ep ird %u ord %u\n",
				 __func__,
				 resp_ird, resp_ord, ep->ird, ep->ord);

			/*
			 * This is a double-check. Ideally, the checks below
			 * are not required since ird/ord negotiation has
			 * already been handled in c4iw_accept_cr().
			 */
			if (ep->ird < resp_ord) {
				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
				    ep->com.dev->rdev.lldi.max_ordird_qp)
					ep->ird = resp_ord;
				else
					insuff_ird = 1;
			} else if (ep->ird > resp_ord) {
				ep->ird = resp_ord;
			}
			if (ep->ord > resp_ird) {
				if (RELAXED_IRD_NEGOTIATION)
					ep->ord = resp_ird;
				else
					insuff_ird = 1;
			}
			if (insuff_ird) {
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
			}

			if (ntohs(mpa_v2_params->ird) &
					MPA_V2_PEER2PEER_MODEL) {
				if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_WRITE_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				else if (ntohs(mpa_v2_params->ord) &
						MPA_V2_RDMA_READ_RTR)
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
			}
		}
	} else if (mpa->revision == 1)
		if (peer2peer)
			ep->mpa_attr.p2p_type = p2p_type;

	pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d local-p2p_type = %d\n",
		 __func__, ep->mpa_attr.crc_enabled,
		 ep->mpa_attr.recv_marker_enabled,
		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
		 ep->mpa_attr.p2p_type, p2p_type);

	/*
	 * If the responder's RTR does not match that of the initiator,
	 * assign FW_RI_INIT_P2PTYPE_DISABLED in the mpa attributes so that
	 * no RTR is generated when moving the QP to RTS state.
	 * A TERM message will be sent after the QP has moved to RTS state.
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
			(ep->mpa_attr.p2p_type != p2p_type)) {
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp,
			     ep->com.qp, mask, &attrs, 1);
	if (err)
		goto err;

	/*
	 * If the responder's RTR requirement did not match what the
	 * initiator supports, generate a TERM message.
	 */
	if (rtr_mismatch) {
		pr_err("%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate a TERM if the initiator's IRD is not sufficient for the
	 * responder-provided ORD. Currently we behave the same way even
	 * when the responder-provided IRD is insufficient for the
	 * initiator's ORD.
	 */
	if (insuff_ird) {
		pr_err("%s: Insufficient IRD, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		attrs.send_term = 1;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	stop_ep_timer(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	return disconnect;
}
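
/*
 * rx_data() below turns these return values into teardown actions: a
 * return of 1 leads to a graceful close and 2 to an abort, via
 * c4iw_ep_disconnect(ep, disconnect == 2, ...).
 */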
1660 
1661 /*
1662  * process_mpa_request - process streaming mode MPA request
1663  *
1664  * Returns:
1665  *
1666  * 0 upon success indicating a connect request was delivered to the ULP
1667  * or the mpa request is incomplete but valid so far.
1668  *
1669  * 1 if a failure requires the caller to close the connection.
1670  *
1671  * 2 if a failure requires the caller to abort the connection.
1672  */
1673 static int process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
1674 {
1675 	struct mpa_message *mpa;
1676 	struct mpa_v2_conn_params *mpa_v2_params;
1677 	u16 plen;
1678 
1679 	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1680 
1681 	/*
1682 	 * If we get more than the supported amount of private data
1683 	 * then we must fail this connection.
1684 	 */
1685 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt))
1686 		goto err_stop_timer;
1687 
1688 	pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1689 
1690 	/*
1691 	 * Copy the new data into our accumulation buffer.
1692 	 */
1693 	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1694 				  skb->len);
1695 	ep->mpa_pkt_len += skb->len;
1696 
1697 	/*
1698 	 * If we don't even have the mpa message, then bail.
1699 	 * We'll continue process when more data arrives.
1700 	 */
1701 	if (ep->mpa_pkt_len < sizeof(*mpa))
1702 		return 0;
1703 
1704 	pr_debug("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1705 	mpa = (struct mpa_message *) ep->mpa_pkt;
1706 
1707 	/*
1708 	 * Validate MPA Header.
1709 	 */
1710 	if (mpa->revision > mpa_rev) {
1711 		pr_err("%s MPA version mismatch. Local = %d, Received = %d\n",
1712 		       __func__, mpa_rev, mpa->revision);
1713 		goto err_stop_timer;
1714 	}
1715 
1716 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
1717 		goto err_stop_timer;
1718 
1719 	plen = ntohs(mpa->private_data_size);
1720 
1721 	/*
1722 	 * Fail if there's too much private data.
1723 	 */
1724 	if (plen > MPA_MAX_PRIVATE_DATA)
1725 		goto err_stop_timer;
1726 
1727 	/*
1728 	 * If plen does not account for pkt size
1729 	 */
1730 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
1731 		goto err_stop_timer;
1732 	ep->plen = (u8) plen;
1733 
1734 	/*
1735 	 * If we don't have all the pdata yet, then bail.
1736 	 */
1737 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1738 		return 0;
1739 
1740 	/*
1741 	 * If we get here we have accumulated the entire mpa
1742 	 * start reply message including private data.
1743 	 */
1744 	ep->mpa_attr.initiator = 0;
1745 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1746 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
1747 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1748 	ep->mpa_attr.version = mpa->revision;
1749 	if (mpa->revision == 1)
1750 		ep->tried_with_mpa_v1 = 1;
1751 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1752 
1753 	if (mpa->revision == 2) {
1754 		ep->mpa_attr.enhanced_rdma_conn =
1755 			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1756 		if (ep->mpa_attr.enhanced_rdma_conn) {
1757 			mpa_v2_params = (struct mpa_v2_conn_params *)
1758 				(ep->mpa_pkt + sizeof(*mpa));
1759 			ep->ird = ntohs(mpa_v2_params->ird) &
1760 				MPA_V2_IRD_ORD_MASK;
1761 			ep->ird = min_t(u32, ep->ird,
1762 					cur_max_read_depth(ep->com.dev));
1763 			ep->ord = ntohs(mpa_v2_params->ord) &
1764 				MPA_V2_IRD_ORD_MASK;
1765 			ep->ord = min_t(u32, ep->ord,
1766 					cur_max_read_depth(ep->com.dev));
1767 			pr_debug("%s initiator ird %u ord %u\n",
1768 				 __func__, ep->ird, ep->ord);
1769 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
1770 				if (peer2peer) {
1771 					if (ntohs(mpa_v2_params->ord) &
1772 							MPA_V2_RDMA_WRITE_RTR)
1773 						ep->mpa_attr.p2p_type =
1774 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1775 					else if (ntohs(mpa_v2_params->ord) &
1776 							MPA_V2_RDMA_READ_RTR)
1777 						ep->mpa_attr.p2p_type =
1778 						FW_RI_INIT_P2PTYPE_READ_REQ;
1779 				}
1780 		}
1781 	} else if (mpa->revision == 1)
1782 		if (peer2peer)
1783 			ep->mpa_attr.p2p_type = p2p_type;
1784 
1785 	pr_debug("%s - crc_enabled=%d, recv_marker_enabled=%d, xmit_marker_enabled=%d, version=%d p2p_type=%d\n",
1786 		 __func__,
1787 		 ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1788 		 ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1789 		 ep->mpa_attr.p2p_type);
1790 
1791 	__state_set(&ep->com, MPA_REQ_RCVD);
1792 
1793 	/* drive upcall */
1794 	mutex_lock_nested(&ep->parent_ep->com.mutex, SINGLE_DEPTH_NESTING);
1795 	if (ep->parent_ep->com.state != DEAD) {
1796 		if (connect_request_upcall(ep))
1797 			goto err_unlock_parent;
1798 	} else {
1799 		goto err_unlock_parent;
1800 	}
1801 	mutex_unlock(&ep->parent_ep->com.mutex);
1802 	return 0;
1803 
1804 err_unlock_parent:
1805 	mutex_unlock(&ep->parent_ep->com.mutex);
1806 	goto err_out;
1807 err_stop_timer:
1808 	(void)stop_ep_timer(ep);
1809 err_out:
1810 	return 2;
1811 }
1812 
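/*
 * rx_data - process ingress TCP payload (CPL_RX_DATA) for an endpoint.
 *
 * During connection setup the payload carries the streaming-mode MPA
 * exchange and is handed to process_mpa_reply()/process_mpa_request().
 * Streaming data arriving in FPDU_MODE is unexpected and moves the QP
 * to TERMINATE.
 */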
1813 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1814 {
1815 	struct c4iw_ep *ep;
1816 	struct cpl_rx_data *hdr = cplhdr(skb);
1817 	unsigned int dlen = ntohs(hdr->len);
1818 	unsigned int tid = GET_TID(hdr);
1819 	__u8 status = hdr->status;
1820 	int disconnect = 0;
1821 
1822 	ep = get_ep_from_tid(dev, tid);
1823 	if (!ep)
1824 		return 0;
1825 	pr_debug("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1826 	skb_pull(skb, sizeof(*hdr));
1827 	skb_trim(skb, dlen);
1828 	mutex_lock(&ep->com.mutex);
1829 
1830 	switch (ep->com.state) {
1831 	case MPA_REQ_SENT:
1832 		update_rx_credits(ep, dlen);
1833 		ep->rcv_seq += dlen;
1834 		disconnect = process_mpa_reply(ep, skb);
1835 		break;
1836 	case MPA_REQ_WAIT:
1837 		update_rx_credits(ep, dlen);
1838 		ep->rcv_seq += dlen;
1839 		disconnect = process_mpa_request(ep, skb);
1840 		break;
1841 	case FPDU_MODE: {
1842 		struct c4iw_qp_attributes attrs;
1843 
1844 		update_rx_credits(ep, dlen);
1845 		BUG_ON(!ep->com.qp);
1846 		if (status)
1847 			pr_err("%s Unexpected streaming data." \
			pr_err("%s Unexpected streaming data. qpid %u ep %p state %d tid %u status %d\n",
1850 			       ep->com.state, ep->hwtid, status);
1851 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
1852 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1853 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1854 		disconnect = 1;
1855 		break;
1856 	}
1857 	default:
1858 		break;
1859 	}
1860 	mutex_unlock(&ep->com.mutex);
1861 	if (disconnect)
1862 		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
1863 	c4iw_put_ep(&ep->com);
1864 	return 0;
1865 }
1866 
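/*
 * abort_rpl - process CPL_ABORT_RPL_RSS, the completion of an abort we
 * initiated.  In the ABORTING state this finishes the teardown: wake any
 * waiter with -ECONNRESET, mark the endpoint DEAD and release its
 * resources.
 */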
1867 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1868 {
1869 	struct c4iw_ep *ep;
1870 	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1871 	int release = 0;
1872 	unsigned int tid = GET_TID(rpl);
1873 
1874 	ep = get_ep_from_tid(dev, tid);
1875 	if (!ep) {
1876 		pr_warn("Abort rpl to freed endpoint\n");
1877 		return 0;
1878 	}
1879 	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1880 	mutex_lock(&ep->com.mutex);
1881 	switch (ep->com.state) {
1882 	case ABORTING:
1883 		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1884 		__state_set(&ep->com, DEAD);
1885 		release = 1;
1886 		break;
1887 	default:
1888 		pr_err("%s ep %p state %d\n", __func__, ep, ep->com.state);
1889 		break;
1890 	}
1891 	mutex_unlock(&ep->com.mutex);
1892 
1893 	if (release)
1894 		release_ep_resources(ep);
1895 	c4iw_put_ep(&ep->com);
1896 	return 0;
1897 }
1898 
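/*
 * send_fw_act_open_req - retry an active open via a FW_OFLD_CONNECTION_WR
 * work request.  Used from act_open_rpl() when the open failed with
 * CPL_ERR_TCAM_FULL and the firmware supports offloaded connections.
 */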
1899 static int send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
1900 {
1901 	struct sk_buff *skb;
1902 	struct fw_ofld_connection_wr *req;
1903 	unsigned int mtu_idx;
1904 	u32 wscale;
1905 	struct sockaddr_in *sin;
1906 	int win;
1907 
	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;
	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
1910 	memset(req, 0, sizeof(*req));
1911 	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR));
1912 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
1913 	req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
1914 				     ep->com.dev->rdev.lldi.ports[0],
1915 				     ep->l2t));
1916 	sin = (struct sockaddr_in *)&ep->com.local_addr;
1917 	req->le.lport = sin->sin_port;
1918 	req->le.u.ipv4.lip = sin->sin_addr.s_addr;
1919 	sin = (struct sockaddr_in *)&ep->com.remote_addr;
1920 	req->le.pport = sin->sin_port;
1921 	req->le.u.ipv4.pip = sin->sin_addr.s_addr;
1922 	req->tcb.t_state_to_astid =
1923 			htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_SENT) |
1924 			FW_OFLD_CONNECTION_WR_ASTID_V(atid));
1925 	req->tcb.cplrxdataack_cplpassacceptrpl =
1926 			htons(FW_OFLD_CONNECTION_WR_CPLRXDATAACK_F);
1927 	req->tcb.tx_max = (__force __be32) jiffies;
1928 	req->tcb.rcv_adv = htons(1);
1929 	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
1930 		      enable_tcp_timestamps,
1931 		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
1932 	wscale = cxgb_compute_wscale(rcv_win);
1933 
1934 	/*
1935 	 * Specify the largest window that will fit in opt0. The
1936 	 * remainder will be specified in the rx_data_ack.
1937 	 */
1938 	win = ep->rcv_win >> 10;
1939 	if (win > RCV_BUFSIZ_M)
1940 		win = RCV_BUFSIZ_M;
1941 
1942 	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS_F |
1943 		(nocong ? NO_CONG_F : 0) |
1944 		KEEP_ALIVE_F |
1945 		DELACK_F |
1946 		WND_SCALE_V(wscale) |
1947 		MSS_IDX_V(mtu_idx) |
1948 		L2T_IDX_V(ep->l2t->idx) |
1949 		TX_CHAN_V(ep->tx_chan) |
1950 		SMAC_SEL_V(ep->smac_idx) |
1951 		DSCP_V(ep->tos >> 2) |
1952 		ULP_MODE_V(ULP_MODE_TCPDDP) |
1953 		RCV_BUFSIZ_V(win));
1954 	req->tcb.opt2 = (__force __be32) (PACE_V(1) |
1955 		TX_QUEUE_V(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
1956 		RX_CHANNEL_V(0) |
1957 		CCTRL_ECN_V(enable_ecn) |
1958 		RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid));
1959 	if (enable_tcp_timestamps)
1960 		req->tcb.opt2 |= (__force __be32)TSTAMPS_EN_F;
1961 	if (enable_tcp_sack)
1962 		req->tcb.opt2 |= (__force __be32)SACK_EN_F;
1963 	if (wscale && enable_tcp_window_scaling)
1964 		req->tcb.opt2 |= (__force __be32)WND_SCALE_EN_F;
1965 	req->tcb.opt0 = cpu_to_be64((__force u64)req->tcb.opt0);
1966 	req->tcb.opt2 = cpu_to_be32((__force u32)req->tcb.opt2);
1967 	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
1968 	set_bit(ACT_OFLD_CONN, &ep->com.history);
1969 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1970 }
1971 
1972 /*
1973  * Some of the error codes above implicitly indicate that there is no TID
1974  * allocated with the result of an ACT_OPEN.  We use this predicate to make
1975  * that explicit.
1976  */
1977 static inline int act_open_has_tid(int status)
1978 {
1979 	return (status != CPL_ERR_TCAM_PARITY &&
1980 		status != CPL_ERR_TCAM_MISS &&
1981 		status != CPL_ERR_TCAM_FULL &&
1982 		status != CPL_ERR_CONN_EXIST_SYNRECV &&
1983 		status != CPL_ERR_CONN_EXIST);
1984 }
1985 
1986 static char *neg_adv_str(unsigned int status)
1987 {
1988 	switch (status) {
1989 	case CPL_ERR_RTX_NEG_ADVICE:
1990 		return "Retransmit timeout";
1991 	case CPL_ERR_PERSIST_NEG_ADVICE:
1992 		return "Persist timeout";
1993 	case CPL_ERR_KEEPALV_NEG_ADVICE:
1994 		return "Keepalive timeout";
1995 	default:
1996 		return "Unknown";
1997 	}
1998 }
1999 
2000 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
2001 {
2002 	ep->snd_win = snd_win;
2003 	ep->rcv_win = rcv_win;
2004 	pr_debug("%s snd_win %d rcv_win %d\n",
2005 		 __func__, ep->snd_win, ep->rcv_win);
2006 }
2007 
2008 #define ACT_OPEN_RETRY_COUNT 2
2009 
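/*
 * import_ep - resolve the neighbour for the destination and fill in the
 * endpoint's hardware resources: L2T entry, MTU, tx channel, SMAC index,
 * tx/rx queue assignments and TCP window sizes.  Loopback destinations
 * are resolved against the local netdev that owns the peer address.
 */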
2010 static int import_ep(struct c4iw_ep *ep, int iptype, __u8 *peer_ip,
2011 		     struct dst_entry *dst, struct c4iw_dev *cdev,
2012 		     bool clear_mpa_v1, enum chip_type adapter_type, u8 tos)
2013 {
2014 	struct neighbour *n;
2015 	int err, step;
2016 	struct net_device *pdev;
2017 
2018 	n = dst_neigh_lookup(dst, peer_ip);
2019 	if (!n)
2020 		return -ENODEV;
2021 
2022 	rcu_read_lock();
2023 	err = -ENOMEM;
2024 	if (n->dev->flags & IFF_LOOPBACK) {
2025 		if (iptype == 4)
2026 			pdev = ip_dev_find(&init_net, *(__be32 *)peer_ip);
		else if (IS_ENABLED(CONFIG_IPV6)) {
			struct net_device *nd;

			/* The loop cursor is non-NULL even on a miss. */
			pdev = NULL;
			for_each_netdev(&init_net, nd) {
				if (ipv6_chk_addr(&init_net,
						  (struct in6_addr *)peer_ip,
						  nd, 1)) {
					/* match ip_dev_find()'s reference */
					dev_hold(nd);
					pdev = nd;
					break;
				}
			}
		} else {
			pdev = NULL;
		}
2036 
2037 		if (!pdev) {
2038 			err = -ENODEV;
2039 			goto out;
2040 		}
2041 		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
2042 					n, pdev, rt_tos2priority(tos));
2043 		if (!ep->l2t) {
2044 			dev_put(pdev);
2045 			goto out;
2046 		}
2047 		ep->mtu = pdev->mtu;
2048 		ep->tx_chan = cxgb4_port_chan(pdev);
2049 		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
2050 						cxgb4_port_viid(pdev));
2051 		step = cdev->rdev.lldi.ntxq /
2052 			cdev->rdev.lldi.nchan;
2053 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
2054 		step = cdev->rdev.lldi.nrxq /
2055 			cdev->rdev.lldi.nchan;
2056 		ep->ctrlq_idx = cxgb4_port_idx(pdev);
2057 		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
2058 			cxgb4_port_idx(pdev) * step];
2059 		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
2060 		dev_put(pdev);
2061 	} else {
2062 		pdev = get_real_dev(n->dev);
2063 		ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
2064 					n, pdev, 0);
2065 		if (!ep->l2t)
2066 			goto out;
2067 		ep->mtu = dst_mtu(dst);
2068 		ep->tx_chan = cxgb4_port_chan(pdev);
2069 		ep->smac_idx = cxgb4_tp_smt_idx(adapter_type,
2070 						cxgb4_port_viid(pdev));
2071 		step = cdev->rdev.lldi.ntxq /
2072 			cdev->rdev.lldi.nchan;
2073 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
2074 		ep->ctrlq_idx = cxgb4_port_idx(pdev);
2075 		step = cdev->rdev.lldi.nrxq /
2076 			cdev->rdev.lldi.nchan;
2077 		ep->rss_qid = cdev->rdev.lldi.rxq_ids[
2078 			cxgb4_port_idx(pdev) * step];
2079 		set_tcp_window(ep, (struct port_info *)netdev_priv(pdev));
2080 
2081 		if (clear_mpa_v1) {
2082 			ep->retry_with_mpa_v1 = 0;
2083 			ep->tried_with_mpa_v1 = 0;
2084 		}
2085 	}
2086 	err = 0;
2087 out:
2088 	rcu_read_unlock();
2089 
2090 	neigh_release(n);
2091 
2092 	return err;
2093 }
2094 
2095 static int c4iw_reconnect(struct c4iw_ep *ep)
2096 {
2097 	int err = 0;
2098 	int size = 0;
2099 	struct sockaddr_in *laddr = (struct sockaddr_in *)
2100 				    &ep->com.cm_id->m_local_addr;
2101 	struct sockaddr_in *raddr = (struct sockaddr_in *)
2102 				    &ep->com.cm_id->m_remote_addr;
2103 	struct sockaddr_in6 *laddr6 = (struct sockaddr_in6 *)
2104 				      &ep->com.cm_id->m_local_addr;
2105 	struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)
2106 				      &ep->com.cm_id->m_remote_addr;
2107 	int iptype;
2108 	__u8 *ra;
2109 
2110 	pr_debug("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
2111 	init_timer(&ep->timer);
2112 	c4iw_init_wr_wait(&ep->com.wr_wait);
2113 
	/* When the nodes run different MPA revisions, the node with
	 * MPA_rev=2 retries the connection with MPA_rev 1 through
	 * c4iw_reconnect(), reusing the same EP with a new tid. Because
	 * the EP pointer is reused, some of the skbs pre-allocated by the
	 * earlier c4iw_connect() have already been consumed, which can
	 * leave the EP short of skbs and trigger the BUG_ON() on an empty
	 * ep_skb_list in peer_abort(). Replenish the skbs that have
	 * already been consumed.
	 */
2123 	size = (CN_MAX_CON_BUF - skb_queue_len(&ep->com.ep_skb_list));
2124 	if (alloc_ep_skb_list(&ep->com.ep_skb_list, size)) {
2125 		err = -ENOMEM;
2126 		goto fail1;
2127 	}
2128 
2129 	/*
2130 	 * Allocate an active TID to initiate a TCP connection.
2131 	 */
2132 	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
2133 	if (ep->atid == -1) {
2134 		pr_err("%s - cannot alloc atid\n", __func__);
2135 		err = -ENOMEM;
2136 		goto fail2;
2137 	}
2138 	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
2139 
2140 	/* find a route */
2141 	if (ep->com.cm_id->m_local_addr.ss_family == AF_INET) {
2142 		ep->dst = cxgb_find_route(&ep->com.dev->rdev.lldi, get_real_dev,
2143 					  laddr->sin_addr.s_addr,
2144 					  raddr->sin_addr.s_addr,
2145 					  laddr->sin_port,
2146 					  raddr->sin_port, ep->com.cm_id->tos);
2147 		iptype = 4;
2148 		ra = (__u8 *)&raddr->sin_addr;
2149 	} else {
2150 		ep->dst = cxgb_find_route6(&ep->com.dev->rdev.lldi,
2151 					   get_real_dev,
2152 					   laddr6->sin6_addr.s6_addr,
2153 					   raddr6->sin6_addr.s6_addr,
2154 					   laddr6->sin6_port,
2155 					   raddr6->sin6_port, 0,
2156 					   raddr6->sin6_scope_id);
2157 		iptype = 6;
2158 		ra = (__u8 *)&raddr6->sin6_addr;
2159 	}
2160 	if (!ep->dst) {
2161 		pr_err("%s - cannot find route\n", __func__);
2162 		err = -EHOSTUNREACH;
2163 		goto fail3;
2164 	}
2165 	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, false,
2166 			ep->com.dev->rdev.lldi.adapter_type,
2167 			ep->com.cm_id->tos);
2168 	if (err) {
2169 		pr_err("%s - cannot alloc l2e\n", __func__);
2170 		goto fail4;
2171 	}
2172 
2173 	pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
2174 		 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
2175 		 ep->l2t->idx);
2176 
2177 	state_set(&ep->com, CONNECTING);
2178 	ep->tos = ep->com.cm_id->tos;
2179 
2180 	/* send connect request to rnic */
2181 	err = send_connect(ep);
2182 	if (!err)
2183 		goto out;
2184 
2185 	cxgb4_l2t_release(ep->l2t);
2186 fail4:
2187 	dst_release(ep->dst);
2188 fail3:
2189 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
2190 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2191 fail2:
	/*
	 * Remember to notify the upper layer. We got here via a reconnect
	 * attempt that the upper layer knows nothing about, so it is still
	 * waiting for the response to its original connect request.
	 */
2198 	connect_reply_upcall(ep, -ECONNRESET);
2199 fail1:
2200 	c4iw_put_ep(&ep->com);
2201 out:
2202 	return err;
2203 }
2204 
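/*
 * act_open_rpl - process CPL_ACT_OPEN_RPL, the result of an active open.
 * Negative advice is merely counted.  CPL_ERR_TCAM_FULL may be retried
 * through send_fw_act_open_req(), and CPL_ERR_CONN_EXIST is retried up
 * to ACT_OPEN_RETRY_COUNT times via c4iw_reconnect().  Any other failure
 * is reported to the ULP and the endpoint is torn down.
 */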
2205 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2206 {
2207 	struct c4iw_ep *ep;
2208 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
2209 	unsigned int atid = TID_TID_G(AOPEN_ATID_G(
2210 				      ntohl(rpl->atid_status)));
2211 	struct tid_info *t = dev->rdev.lldi.tids;
2212 	int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
2213 	struct sockaddr_in *la;
2214 	struct sockaddr_in *ra;
2215 	struct sockaddr_in6 *la6;
2216 	struct sockaddr_in6 *ra6;
2217 	int ret = 0;
2218 
2219 	ep = lookup_atid(t, atid);
2220 	la = (struct sockaddr_in *)&ep->com.local_addr;
2221 	ra = (struct sockaddr_in *)&ep->com.remote_addr;
2222 	la6 = (struct sockaddr_in6 *)&ep->com.local_addr;
2223 	ra6 = (struct sockaddr_in6 *)&ep->com.remote_addr;
2224 
2225 	pr_debug("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
2226 		 status, status2errno(status));
2227 
2228 	if (cxgb_is_neg_adv(status)) {
2229 		pr_debug("%s Connection problems for atid %u status %u (%s)\n",
2230 			 __func__, atid, status, neg_adv_str(status));
2231 		ep->stats.connect_neg_adv++;
2232 		mutex_lock(&dev->rdev.stats.lock);
2233 		dev->rdev.stats.neg_adv++;
2234 		mutex_unlock(&dev->rdev.stats.lock);
2235 		return 0;
2236 	}
2237 
2238 	set_bit(ACT_OPEN_RPL, &ep->com.history);
2239 
2240 	/*
2241 	 * Log interesting failures.
2242 	 */
2243 	switch (status) {
2244 	case CPL_ERR_CONN_RESET:
2245 	case CPL_ERR_CONN_TIMEDOUT:
2246 		break;
2247 	case CPL_ERR_TCAM_FULL:
2248 		mutex_lock(&dev->rdev.stats.lock);
2249 		dev->rdev.stats.tcam_full++;
2250 		mutex_unlock(&dev->rdev.stats.lock);
2251 		if (ep->com.local_addr.ss_family == AF_INET &&
2252 		    dev->rdev.lldi.enable_fw_ofld_conn) {
2253 			ret = send_fw_act_open_req(ep, TID_TID_G(AOPEN_ATID_G(
2254 						   ntohl(rpl->atid_status))));
2255 			if (ret)
2256 				goto fail;
2257 			return 0;
2258 		}
2259 		break;
2260 	case CPL_ERR_CONN_EXIST:
2261 		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
2262 			set_bit(ACT_RETRY_INUSE, &ep->com.history);
2263 			if (ep->com.remote_addr.ss_family == AF_INET6) {
2264 				struct sockaddr_in6 *sin6 =
2265 						(struct sockaddr_in6 *)
2266 						&ep->com.local_addr;
2267 				cxgb4_clip_release(
2268 						ep->com.dev->rdev.lldi.ports[0],
2269 						(const u32 *)
2270 						&sin6->sin6_addr.s6_addr, 1);
2271 			}
2272 			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
2273 					atid);
2274 			cxgb4_free_atid(t, atid);
2275 			dst_release(ep->dst);
2276 			cxgb4_l2t_release(ep->l2t);
2277 			c4iw_reconnect(ep);
2278 			return 0;
2279 		}
2280 		break;
2281 	default:
2282 		if (ep->com.local_addr.ss_family == AF_INET) {
2283 			pr_info("Active open failure - atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
2284 				atid, status, status2errno(status),
2285 				&la->sin_addr.s_addr, ntohs(la->sin_port),
2286 				&ra->sin_addr.s_addr, ntohs(ra->sin_port));
2287 		} else {
2288 			pr_info("Active open failure - atid %u status %u errno %d %pI6:%u->%pI6:%u\n",
2289 				atid, status, status2errno(status),
2290 				la6->sin6_addr.s6_addr, ntohs(la6->sin6_port),
2291 				ra6->sin6_addr.s6_addr, ntohs(ra6->sin6_port));
2292 		}
2293 		break;
2294 	}
2295 
2296 fail:
2297 	connect_reply_upcall(ep, status2errno(status));
2298 	state_set(&ep->com, DEAD);
2299 
2300 	if (ep->com.remote_addr.ss_family == AF_INET6) {
2301 		struct sockaddr_in6 *sin6 =
2302 			(struct sockaddr_in6 *)&ep->com.local_addr;
2303 		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
2304 				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
2305 	}
2306 	if (status && act_open_has_tid(status))
2307 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
2308 
2309 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
2310 	cxgb4_free_atid(t, atid);
2311 	dst_release(ep->dst);
2312 	cxgb4_l2t_release(ep->l2t);
2313 	c4iw_put_ep(&ep->com);
2314 
2315 	return 0;
2316 }
2317 
2318 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2319 {
2320 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
2321 	unsigned int stid = GET_TID(rpl);
2322 	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
2323 
2324 	if (!ep) {
2325 		pr_debug("%s stid %d lookup failure!\n", __func__, stid);
2326 		goto out;
2327 	}
2328 	pr_debug("%s ep %p status %d error %d\n", __func__, ep,
2329 		 rpl->status, status2errno(rpl->status));
2330 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
2331 	c4iw_put_ep(&ep->com);
2332 out:
2333 	return 0;
2334 }
2335 
2336 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2337 {
2338 	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
2339 	unsigned int stid = GET_TID(rpl);
2340 	struct c4iw_listen_ep *ep = get_ep_from_stid(dev, stid);
2341 
	if (!ep) {
		pr_debug("%s stid %d lookup failure!\n", __func__, stid);
		return 0;
	}
	pr_debug("%s ep %p\n", __func__, ep);
2343 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
2344 	c4iw_put_ep(&ep->com);
2345 	return 0;
2346 }
2347 
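/*
 * accept_cr - build and send the CPL_PASS_ACCEPT_RPL that accepts an
 * incoming connection request, encoding the MTU index, window scale,
 * receive window and negotiated TCP options (timestamps, SACK, ECN) into
 * opt0/opt2.  T5 and later chips additionally get an initial send
 * sequence number.
 */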
2348 static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
2349 		     struct cpl_pass_accept_req *req)
2350 {
2351 	struct cpl_pass_accept_rpl *rpl;
2352 	unsigned int mtu_idx;
2353 	u64 opt0;
2354 	u32 opt2;
2355 	u32 wscale;
2356 	struct cpl_t5_pass_accept_rpl *rpl5 = NULL;
2357 	int win;
2358 	enum chip_type adapter_type = ep->com.dev->rdev.lldi.adapter_type;
2359 
2360 	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2361 	BUG_ON(skb_cloned(skb));
2362 
2363 	skb_get(skb);
2364 	rpl = cplhdr(skb);
2365 	if (!is_t4(adapter_type)) {
2366 		skb_trim(skb, roundup(sizeof(*rpl5), 16));
2367 		rpl5 = (void *)rpl;
2368 		INIT_TP_WR(rpl5, ep->hwtid);
2369 	} else {
2370 		skb_trim(skb, sizeof(*rpl));
2371 		INIT_TP_WR(rpl, ep->hwtid);
2372 	}
2373 	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
2374 						    ep->hwtid));
2375 
2376 	cxgb_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx,
2377 		      enable_tcp_timestamps && req->tcpopt.tstamp,
2378 		      (ep->com.remote_addr.ss_family == AF_INET) ? 0 : 1);
2379 	wscale = cxgb_compute_wscale(rcv_win);
2380 
2381 	/*
2382 	 * Specify the largest window that will fit in opt0. The
2383 	 * remainder will be specified in the rx_data_ack.
2384 	 */
2385 	win = ep->rcv_win >> 10;
2386 	if (win > RCV_BUFSIZ_M)
2387 		win = RCV_BUFSIZ_M;
2388 	opt0 = (nocong ? NO_CONG_F : 0) |
2389 	       KEEP_ALIVE_F |
2390 	       DELACK_F |
2391 	       WND_SCALE_V(wscale) |
2392 	       MSS_IDX_V(mtu_idx) |
2393 	       L2T_IDX_V(ep->l2t->idx) |
2394 	       TX_CHAN_V(ep->tx_chan) |
2395 	       SMAC_SEL_V(ep->smac_idx) |
2396 	       DSCP_V(ep->tos >> 2) |
2397 	       ULP_MODE_V(ULP_MODE_TCPDDP) |
2398 	       RCV_BUFSIZ_V(win);
2399 	opt2 = RX_CHANNEL_V(0) |
2400 	       RSS_QUEUE_VALID_F | RSS_QUEUE_V(ep->rss_qid);
2401 
2402 	if (enable_tcp_timestamps && req->tcpopt.tstamp)
2403 		opt2 |= TSTAMPS_EN_F;
2404 	if (enable_tcp_sack && req->tcpopt.sack)
2405 		opt2 |= SACK_EN_F;
2406 	if (wscale && enable_tcp_window_scaling)
2407 		opt2 |= WND_SCALE_EN_F;
2408 	if (enable_ecn) {
2409 		const struct tcphdr *tcph;
2410 		u32 hlen = ntohl(req->hdr_len);
2411 
2412 		if (CHELSIO_CHIP_VERSION(adapter_type) <= CHELSIO_T5)
2413 			tcph = (const void *)(req + 1) + ETH_HDR_LEN_G(hlen) +
2414 				IP_HDR_LEN_G(hlen);
2415 		else
2416 			tcph = (const void *)(req + 1) +
2417 				T6_ETH_HDR_LEN_G(hlen) + T6_IP_HDR_LEN_G(hlen);
2418 		if (tcph->ece && tcph->cwr)
2419 			opt2 |= CCTRL_ECN_V(1);
2420 	}
2421 	if (CHELSIO_CHIP_VERSION(adapter_type) > CHELSIO_T4) {
2422 		u32 isn = (prandom_u32() & ~7UL) - 1;
2423 		opt2 |= T5_OPT_2_VALID_F;
2424 		opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
2425 		opt2 |= T5_ISS_F;
2426 		rpl5 = (void *)rpl;
2427 		memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
2428 		if (peer2peer)
2429 			isn += 4;
2430 		rpl5->iss = cpu_to_be32(isn);
2431 		pr_debug("%s iss %u\n", __func__, be32_to_cpu(rpl5->iss));
2432 	}
2433 
2434 	rpl->opt0 = cpu_to_be64(opt0);
2435 	rpl->opt2 = cpu_to_be32(opt2);
2436 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
2437 	t4_set_arp_err_handler(skb, ep, pass_accept_rpl_arp_failure);
2438 
2439 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
2440 }
2441 
2442 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
2443 {
2444 	pr_debug("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
2445 	BUG_ON(skb_cloned(skb));
2446 	skb_trim(skb, sizeof(struct cpl_tid_release));
2447 	release_tid(&dev->rdev, hwtid, skb);
2449 }
2450 
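/*
 * pass_accept_req - process CPL_PASS_ACCEPT_REQ, a SYN that matched one
 * of our listening servers.  Find the parent listening endpoint, resolve
 * a route back to the peer, allocate and initialize a child endpoint and
 * answer with accept_cr().  Failures reject the request by releasing the
 * hardware tid.
 */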
2451 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
2452 {
2453 	struct c4iw_ep *child_ep = NULL, *parent_ep;
2454 	struct cpl_pass_accept_req *req = cplhdr(skb);
2455 	unsigned int stid = PASS_OPEN_TID_G(ntohl(req->tos_stid));
2456 	struct tid_info *t = dev->rdev.lldi.tids;
2457 	unsigned int hwtid = GET_TID(req);
2458 	struct dst_entry *dst;
2459 	__u8 local_ip[16], peer_ip[16];
2460 	__be16 local_port, peer_port;
2461 	struct sockaddr_in6 *sin6;
2462 	int err;
2463 	u16 peer_mss = ntohs(req->tcpopt.mss);
2464 	int iptype;
2465 	unsigned short hdrs;
2466 	u8 tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
2467 
2468 	parent_ep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
2469 	if (!parent_ep) {
2470 		pr_debug("%s connect request on invalid stid %d\n",
2471 			 __func__, stid);
2472 		goto reject;
2473 	}
2474 
2475 	if (state_read(&parent_ep->com) != LISTEN) {
2476 		pr_debug("%s - listening ep not in LISTEN\n", __func__);
2477 		goto reject;
2478 	}
2479 
2480 	cxgb_get_4tuple(req, parent_ep->com.dev->rdev.lldi.adapter_type,
2481 			&iptype, local_ip, peer_ip, &local_port, &peer_port);
2482 
2483 	/* Find output route */
2484 	if (iptype == 4)  {
2485 		pr_debug("%s parent ep %p hwtid %u laddr %pI4 raddr %pI4 lport %d rport %d peer_mss %d\n"
2486 			 , __func__, parent_ep, hwtid,
2487 			 local_ip, peer_ip, ntohs(local_port),
2488 			 ntohs(peer_port), peer_mss);
2489 		dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
2490 				      *(__be32 *)local_ip, *(__be32 *)peer_ip,
2491 				      local_port, peer_port, tos);
2492 	} else {
2493 		pr_debug("%s parent ep %p hwtid %u laddr %pI6 raddr %pI6 lport %d rport %d peer_mss %d\n"
2494 			 , __func__, parent_ep, hwtid,
2495 			 local_ip, peer_ip, ntohs(local_port),
2496 			 ntohs(peer_port), peer_mss);
2497 		dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
2498 				local_ip, peer_ip, local_port, peer_port,
2499 				PASS_OPEN_TOS_G(ntohl(req->tos_stid)),
2500 				((struct sockaddr_in6 *)
2501 				 &parent_ep->com.local_addr)->sin6_scope_id);
2502 	}
2503 	if (!dst) {
2504 		pr_err("%s - failed to find dst entry!\n", __func__);
2505 		goto reject;
2506 	}
2507 
2508 	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
2509 	if (!child_ep) {
2510 		pr_err("%s - failed to allocate ep entry!\n", __func__);
2511 		dst_release(dst);
2512 		goto reject;
2513 	}
2514 
2515 	err = import_ep(child_ep, iptype, peer_ip, dst, dev, false,
2516 			parent_ep->com.dev->rdev.lldi.adapter_type, tos);
2517 	if (err) {
2518 		pr_err("%s - failed to allocate l2t entry!\n", __func__);
2519 		dst_release(dst);
2520 		kfree(child_ep);
2521 		goto reject;
2522 	}
2523 
2524 	hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
2525 	       sizeof(struct tcphdr) +
2526 	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
2527 	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
2528 		child_ep->mtu = peer_mss + hdrs;
2529 
2530 	skb_queue_head_init(&child_ep->com.ep_skb_list);
2531 	if (alloc_ep_skb_list(&child_ep->com.ep_skb_list, CN_MAX_CON_BUF))
2532 		goto fail;
2533 
2534 	state_set(&child_ep->com, CONNECTING);
2535 	child_ep->com.dev = dev;
2536 	child_ep->com.cm_id = NULL;
2537 
	if (iptype == 4) {
		struct sockaddr_in *sin = (struct sockaddr_in *)
			&child_ep->com.local_addr;

		/* The child inherits the listener's local port. */
		sin->sin_family = AF_INET;
		sin->sin_port = ((struct sockaddr_in *)
				 &parent_ep->com.local_addr)->sin_port;
		sin->sin_addr.s_addr = *(__be32 *)local_ip;

		sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
		sin->sin_family = AF_INET;
		sin->sin_port = peer_port;
		sin->sin_addr.s_addr = *(__be32 *)peer_ip;
	} else {
		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = ((struct sockaddr_in6 *)
				   &parent_ep->com.local_addr)->sin6_port;
		memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);

		sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
		sin6->sin6_family = PF_INET6;
		sin6->sin6_port = peer_port;
		memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
2572 	}
2573 
2574 	c4iw_get_ep(&parent_ep->com);
2575 	child_ep->parent_ep = parent_ep;
2576 	child_ep->tos = tos;
2577 	child_ep->dst = dst;
2578 	child_ep->hwtid = hwtid;
2579 
2580 	pr_debug("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
2581 		 child_ep->tx_chan, child_ep->smac_idx, child_ep->rss_qid);
2582 
2583 	init_timer(&child_ep->timer);
2584 	cxgb4_insert_tid(t, child_ep, hwtid);
2585 	insert_ep_tid(child_ep);
2586 	if (accept_cr(child_ep, skb, req)) {
2587 		c4iw_put_ep(&parent_ep->com);
2588 		release_ep_resources(child_ep);
2589 	} else {
2590 		set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
2591 	}
2592 	if (iptype == 6) {
2593 		sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
2594 		cxgb4_clip_get(child_ep->com.dev->rdev.lldi.ports[0],
2595 			       (const u32 *)&sin6->sin6_addr.s6_addr, 1);
2596 	}
2597 	goto out;
2598 fail:
2599 	c4iw_put_ep(&child_ep->com);
2600 reject:
2601 	reject_cr(dev, hwtid, skb);
2602 	if (parent_ep)
2603 		c4iw_put_ep(&parent_ep->com);
2604 out:
2605 	return 0;
2606 }
2607 
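/*
 * pass_establish - process CPL_PASS_ESTABLISH: the passively opened
 * connection is now established.  Record the initial sequence numbers,
 * set the effective MSS, move to MPA_REQ_WAIT and send the flowc work
 * request.
 */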
2608 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
2609 {
2610 	struct c4iw_ep *ep;
2611 	struct cpl_pass_establish *req = cplhdr(skb);
2612 	unsigned int tid = GET_TID(req);
2613 	int ret;
2614 
2615 	ep = get_ep_from_tid(dev, tid);
2616 	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2617 	ep->snd_seq = be32_to_cpu(req->snd_isn);
2618 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
2619 
2620 	pr_debug("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
2621 		 ntohs(req->tcp_opt));
2622 
2623 	set_emss(ep, ntohs(req->tcp_opt));
2624 
2625 	dst_confirm(ep->dst);
2626 	mutex_lock(&ep->com.mutex);
2627 	ep->com.state = MPA_REQ_WAIT;
2628 	start_ep_timer(ep);
2629 	set_bit(PASS_ESTAB, &ep->com.history);
2630 	ret = send_flowc(ep);
2631 	mutex_unlock(&ep->com.mutex);
2632 	if (ret)
2633 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2634 	c4iw_put_ep(&ep->com);
2635 
2636 	return 0;
2637 }
2638 
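/*
 * peer_close - process CPL_PEER_CLOSE, the peer's FIN.  The action
 * depends on the connection state: during MPA negotiation it fails the
 * connect, in FPDU_MODE it starts an orderly close (QP to CLOSING plus a
 * peer_close upcall), and in MORIBUND it completes the close and marks
 * the endpoint DEAD.
 */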
2639 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
2640 {
2641 	struct cpl_peer_close *hdr = cplhdr(skb);
2642 	struct c4iw_ep *ep;
2643 	struct c4iw_qp_attributes attrs;
2644 	int disconnect = 1;
2645 	int release = 0;
2646 	unsigned int tid = GET_TID(hdr);
2647 	int ret;
2648 
2649 	ep = get_ep_from_tid(dev, tid);
2650 	if (!ep)
2651 		return 0;
2652 
2653 	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2654 	dst_confirm(ep->dst);
2655 
2656 	set_bit(PEER_CLOSE, &ep->com.history);
2657 	mutex_lock(&ep->com.mutex);
2658 	switch (ep->com.state) {
2659 	case MPA_REQ_WAIT:
2660 		__state_set(&ep->com, CLOSING);
2661 		break;
2662 	case MPA_REQ_SENT:
2663 		__state_set(&ep->com, CLOSING);
2664 		connect_reply_upcall(ep, -ECONNRESET);
2665 		break;
2666 	case MPA_REQ_RCVD:
2667 
2668 		/*
2669 		 * We're gonna mark this puppy DEAD, but keep
2670 		 * the reference on it until the ULP accepts or
2671 		 * rejects the CR. Also wake up anyone waiting
2672 		 * in rdma connection migration (see c4iw_accept_cr()).
2673 		 */
2674 		__state_set(&ep->com, CLOSING);
2675 		pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
2676 		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2677 		break;
2678 	case MPA_REP_SENT:
2679 		__state_set(&ep->com, CLOSING);
2680 		pr_debug("waking up ep %p tid %u\n", ep, ep->hwtid);
2681 		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2682 		break;
2683 	case FPDU_MODE:
2684 		start_ep_timer(ep);
2685 		__state_set(&ep->com, CLOSING);
2686 		attrs.next_state = C4IW_QP_STATE_CLOSING;
2687 		ret = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2688 				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2689 		if (ret != -ECONNRESET) {
2690 			peer_close_upcall(ep);
2691 			disconnect = 1;
2692 		}
2693 		break;
2694 	case ABORTING:
2695 		disconnect = 0;
2696 		break;
2697 	case CLOSING:
2698 		__state_set(&ep->com, MORIBUND);
2699 		disconnect = 0;
2700 		break;
2701 	case MORIBUND:
2702 		(void)stop_ep_timer(ep);
2703 		if (ep->com.cm_id && ep->com.qp) {
2704 			attrs.next_state = C4IW_QP_STATE_IDLE;
2705 			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2706 				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2707 		}
2708 		close_complete_upcall(ep, 0);
2709 		__state_set(&ep->com, DEAD);
2710 		release = 1;
2711 		disconnect = 0;
2712 		break;
2713 	case DEAD:
2714 		disconnect = 0;
2715 		break;
2716 	default:
2717 		BUG_ON(1);
2718 	}
2719 	mutex_unlock(&ep->com.mutex);
2720 	if (disconnect)
2721 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
2722 	if (release)
2723 		release_ep_resources(ep);
2724 	c4iw_put_ep(&ep->com);
2725 	return 0;
2726 }
2727 
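/*
 * peer_abort - process CPL_ABORT_REQ_RSS, the peer's RST.  Negative
 * advice is only counted.  Otherwise the QP (if any) is moved to ERROR,
 * the ULP is notified, an abort reply is returned to the hardware, and
 * the endpoint is either released or, for an MPA v2 to v1 fallback,
 * reused via c4iw_reconnect().
 */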
2728 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
2729 {
2730 	struct cpl_abort_req_rss *req = cplhdr(skb);
2731 	struct c4iw_ep *ep;
2732 	struct sk_buff *rpl_skb;
2733 	struct c4iw_qp_attributes attrs;
2734 	int ret;
2735 	int release = 0;
2736 	unsigned int tid = GET_TID(req);
2737 	u32 len = roundup(sizeof(struct cpl_abort_rpl), 16);
2738 
2739 	ep = get_ep_from_tid(dev, tid);
2740 	if (!ep)
2741 		return 0;
2742 
2743 	if (cxgb_is_neg_adv(req->status)) {
2744 		pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
2745 			 __func__, ep->hwtid, req->status,
2746 			 neg_adv_str(req->status));
2747 		ep->stats.abort_neg_adv++;
2748 		mutex_lock(&dev->rdev.stats.lock);
2749 		dev->rdev.stats.neg_adv++;
2750 		mutex_unlock(&dev->rdev.stats.lock);
2751 		goto deref_ep;
2752 	}
2753 	pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
2754 		 ep->com.state);
2755 	set_bit(PEER_ABORT, &ep->com.history);
2756 
2757 	/*
2758 	 * Wake up any threads in rdma_init() or rdma_fini().
2759 	 * However, this is not needed if com state is just
2760 	 * MPA_REQ_SENT
2761 	 */
2762 	if (ep->com.state != MPA_REQ_SENT)
2763 		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
2764 
2765 	mutex_lock(&ep->com.mutex);
2766 	switch (ep->com.state) {
2767 	case CONNECTING:
2768 		c4iw_put_ep(&ep->parent_ep->com);
2769 		break;
2770 	case MPA_REQ_WAIT:
2771 		(void)stop_ep_timer(ep);
2772 		break;
2773 	case MPA_REQ_SENT:
2774 		(void)stop_ep_timer(ep);
2775 		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
2776 			connect_reply_upcall(ep, -ECONNRESET);
2777 		else {
2778 			/*
2779 			 * we just don't send notification upwards because we
2780 			 * want to retry with mpa_v1 without upper layers even
2781 			 * knowing it.
2782 			 *
2783 			 * do some housekeeping so as to re-initiate the
2784 			 * connection
2785 			 */
2786 			pr_debug("%s: mpa_rev=%d. Retrying with mpav1\n",
2787 				 __func__, mpa_rev);
2788 			ep->retry_with_mpa_v1 = 1;
2789 		}
2790 		break;
2791 	case MPA_REP_SENT:
2792 		break;
2793 	case MPA_REQ_RCVD:
2794 		break;
2795 	case MORIBUND:
2796 	case CLOSING:
2797 		stop_ep_timer(ep);
2798 		/*FALLTHROUGH*/
2799 	case FPDU_MODE:
2800 		if (ep->com.cm_id && ep->com.qp) {
2801 			attrs.next_state = C4IW_QP_STATE_ERROR;
2802 			ret = c4iw_modify_qp(ep->com.qp->rhp,
2803 				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2804 				     &attrs, 1);
2805 			if (ret)
2806 				pr_err("%s - qp <- error failed!\n", __func__);
2807 		}
2808 		peer_abort_upcall(ep);
2809 		break;
2810 	case ABORTING:
2811 		break;
2812 	case DEAD:
2813 		pr_debug("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
2814 		mutex_unlock(&ep->com.mutex);
2815 		goto deref_ep;
2816 	default:
2817 		BUG_ON(1);
2818 		break;
2819 	}
2820 	dst_confirm(ep->dst);
2821 	if (ep->com.state != ABORTING) {
2822 		__state_set(&ep->com, DEAD);
2823 		/* we don't release if we want to retry with mpa_v1 */
2824 		if (!ep->retry_with_mpa_v1)
2825 			release = 1;
2826 	}
2827 	mutex_unlock(&ep->com.mutex);
2828 
2829 	rpl_skb = skb_dequeue(&ep->com.ep_skb_list);
2830 	if (WARN_ON(!rpl_skb)) {
2831 		release = 1;
2832 		goto out;
2833 	}
2834 
2835 	cxgb_mk_abort_rpl(rpl_skb, len, ep->hwtid, ep->txq_idx);
2836 
2837 	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
2838 out:
2839 	if (release)
2840 		release_ep_resources(ep);
2841 	else if (ep->retry_with_mpa_v1) {
2842 		if (ep->com.remote_addr.ss_family == AF_INET6) {
2843 			struct sockaddr_in6 *sin6 =
2844 					(struct sockaddr_in6 *)
2845 					&ep->com.local_addr;
2846 			cxgb4_clip_release(
2847 					ep->com.dev->rdev.lldi.ports[0],
2848 					(const u32 *)&sin6->sin6_addr.s6_addr,
2849 					1);
2850 		}
2851 		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
2852 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
2853 		dst_release(ep->dst);
2854 		cxgb4_l2t_release(ep->l2t);
2855 		c4iw_reconnect(ep);
2856 	}
2857 
2858 deref_ep:
2859 	c4iw_put_ep(&ep->com);
2860 	/* Dereferencing ep, referenced in peer_abort_intr() */
2861 	c4iw_put_ep(&ep->com);
2862 	return 0;
2863 }
2864 
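/*
 * close_con_rpl - process CPL_CLOSE_CON_RPL, the completion of our
 * half-close.  In CLOSING we keep waiting for the peer (MORIBUND); in
 * MORIBUND the close is complete: idle the QP, signal the ULP and
 * release the endpoint.
 */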
2865 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2866 {
2867 	struct c4iw_ep *ep;
2868 	struct c4iw_qp_attributes attrs;
2869 	struct cpl_close_con_rpl *rpl = cplhdr(skb);
2870 	int release = 0;
2871 	unsigned int tid = GET_TID(rpl);
2872 
2873 	ep = get_ep_from_tid(dev, tid);
2874 	if (!ep)
2875 		return 0;
2876 
2877 	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2879 
2880 	/* The cm_id may be null if we failed to connect */
2881 	mutex_lock(&ep->com.mutex);
2882 	set_bit(CLOSE_CON_RPL, &ep->com.history);
2883 	switch (ep->com.state) {
2884 	case CLOSING:
2885 		__state_set(&ep->com, MORIBUND);
2886 		break;
2887 	case MORIBUND:
2888 		(void)stop_ep_timer(ep);
2889 		if ((ep->com.cm_id) && (ep->com.qp)) {
2890 			attrs.next_state = C4IW_QP_STATE_IDLE;
2891 			c4iw_modify_qp(ep->com.qp->rhp,
2892 					     ep->com.qp,
2893 					     C4IW_QP_ATTR_NEXT_STATE,
2894 					     &attrs, 1);
2895 		}
2896 		close_complete_upcall(ep, 0);
2897 		__state_set(&ep->com, DEAD);
2898 		release = 1;
2899 		break;
2900 	case ABORTING:
2901 	case DEAD:
2902 		break;
2903 	default:
2904 		BUG_ON(1);
2905 		break;
2906 	}
2907 	mutex_unlock(&ep->com.mutex);
2908 	if (release)
2909 		release_ep_resources(ep);
2910 	c4iw_put_ep(&ep->com);
2911 	return 0;
2912 }
2913 
2914 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
2915 {
2916 	struct cpl_rdma_terminate *rpl = cplhdr(skb);
2917 	unsigned int tid = GET_TID(rpl);
2918 	struct c4iw_ep *ep;
2919 	struct c4iw_qp_attributes attrs;
2920 
	ep = get_ep_from_tid(dev, tid);
	if (!ep) {
		pr_warn("TERM received tid %u no ep/qp\n", tid);
		return 0;
	}

	if (ep->com.qp) {
		pr_warn("TERM received tid %u qpid %u\n",
			tid, ep->com.qp->wq.sq.qid);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
	} else
		pr_warn("TERM received tid %u no ep/qp\n", tid);
	c4iw_put_ep(&ep->com);
2933 
2934 	return 0;
2935 }
2936 
2937 /*
2938  * Upcall from the adapter indicating data has been transmitted.
2939  * For us its just the single MPA request or reply.  We can now free
2940  * the skb holding the mpa message.
2941  */
2942 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
2943 {
2944 	struct c4iw_ep *ep;
2945 	struct cpl_fw4_ack *hdr = cplhdr(skb);
2946 	u8 credits = hdr->credits;
	unsigned int tid = GET_TID(hdr);

	ep = get_ep_from_tid(dev, tid);
2951 	if (!ep)
2952 		return 0;
2953 	pr_debug("%s ep %p tid %u credits %u\n",
2954 		 __func__, ep, ep->hwtid, credits);
2955 	if (credits == 0) {
2956 		pr_debug("%s 0 credit ack ep %p tid %u state %u\n",
2957 			 __func__, ep, ep->hwtid, state_read(&ep->com));
2958 		goto out;
2959 	}
2960 
2961 	dst_confirm(ep->dst);
2962 	if (ep->mpa_skb) {
2963 		pr_debug("%s last streaming msg ack ep %p tid %u state %u initiator %u freeing skb\n",
2964 			 __func__, ep, ep->hwtid,
2965 			 state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
2966 		mutex_lock(&ep->com.mutex);
2967 		kfree_skb(ep->mpa_skb);
2968 		ep->mpa_skb = NULL;
2969 		if (test_bit(STOP_MPA_TIMER, &ep->com.flags))
2970 			stop_ep_timer(ep);
2971 		mutex_unlock(&ep->com.mutex);
2972 	}
2973 out:
2974 	c4iw_put_ep(&ep->com);
2975 	return 0;
2976 }
2977 
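/*
 * c4iw_reject_cr - the ULP rejects a connection request.  With MPA
 * revision 0 the connection is simply aborted; otherwise an MPA reject
 * carrying the ULP's private data is sent before disconnecting.
 */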
2978 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2979 {
2980 	int abort;
2981 	struct c4iw_ep *ep = to_ep(cm_id);
2982 
2983 	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
2984 
2985 	mutex_lock(&ep->com.mutex);
2986 	if (ep->com.state != MPA_REQ_RCVD) {
2987 		mutex_unlock(&ep->com.mutex);
2988 		c4iw_put_ep(&ep->com);
2989 		return -ECONNRESET;
2990 	}
2991 	set_bit(ULP_REJECT, &ep->com.history);
2992 	if (mpa_rev == 0)
2993 		abort = 1;
2994 	else
2995 		abort = send_mpa_reject(ep, pdata, pdata_len);
2996 	mutex_unlock(&ep->com.mutex);
2997 
2998 	stop_ep_timer(ep);
2999 	c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
3000 	c4iw_put_ep(&ep->com);
3001 	return 0;
3002 }
3003 
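/*
 * c4iw_accept_cr - the ULP accepts a connection request.  Validate the
 * requested IRD/ORD against the adapter limits and the values negotiated
 * during the MPA exchange (optionally relaxed by RELAXED_IRD_NEGOTIATION),
 * bind the QP to the endpoint, move it to RTS and send the MPA reply.
 */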
3004 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3005 {
3006 	int err;
3007 	struct c4iw_qp_attributes attrs;
3008 	enum c4iw_qp_attr_mask mask;
3009 	struct c4iw_ep *ep = to_ep(cm_id);
3010 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
3011 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
3012 	int abort = 0;
3013 
3014 	pr_debug("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
3015 
3016 	mutex_lock(&ep->com.mutex);
3017 	if (ep->com.state != MPA_REQ_RCVD) {
3018 		err = -ECONNRESET;
3019 		goto err_out;
3020 	}
3021 
3022 	BUG_ON(!qp);
3023 
3024 	set_bit(ULP_ACCEPT, &ep->com.history);
3025 	if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
3026 	    (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
3027 		err = -EINVAL;
3028 		goto err_abort;
3029 	}
3030 
3031 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
3032 		if (conn_param->ord > ep->ird) {
3033 			if (RELAXED_IRD_NEGOTIATION) {
3034 				conn_param->ord = ep->ird;
3035 			} else {
3036 				ep->ird = conn_param->ird;
3037 				ep->ord = conn_param->ord;
3038 				send_mpa_reject(ep, conn_param->private_data,
3039 						conn_param->private_data_len);
3040 				err = -ENOMEM;
3041 				goto err_abort;
3042 			}
3043 		}
3044 		if (conn_param->ird < ep->ord) {
3045 			if (RELAXED_IRD_NEGOTIATION &&
3046 			    ep->ord <= h->rdev.lldi.max_ordird_qp) {
3047 				conn_param->ird = ep->ord;
3048 			} else {
3049 				err = -ENOMEM;
3050 				goto err_abort;
3051 			}
3052 		}
3053 	}
3054 	ep->ird = conn_param->ird;
3055 	ep->ord = conn_param->ord;
3056 
3057 	if (ep->mpa_attr.version == 1) {
3058 		if (peer2peer && ep->ird == 0)
3059 			ep->ird = 1;
3060 	} else {
3061 		if (peer2peer &&
3062 		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
3063 		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
3064 			ep->ird = 1;
3065 	}
3066 
3067 	pr_debug("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
3068 
3069 	ep->com.cm_id = cm_id;
3070 	ref_cm_id(&ep->com);
3071 	ep->com.qp = qp;
3072 	ref_qp(ep);
3073 
3074 	/* bind QP to EP and move to RTS */
3075 	attrs.mpa_attr = ep->mpa_attr;
3076 	attrs.max_ird = ep->ird;
3077 	attrs.max_ord = ep->ord;
3078 	attrs.llp_stream_handle = ep;
3079 	attrs.next_state = C4IW_QP_STATE_RTS;
3080 
3081 	/* bind QP and TID with INIT_WR */
3082 	mask = C4IW_QP_ATTR_NEXT_STATE |
3083 			     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
3084 			     C4IW_QP_ATTR_MPA_ATTR |
3085 			     C4IW_QP_ATTR_MAX_IRD |
3086 			     C4IW_QP_ATTR_MAX_ORD;
3087 
3088 	err = c4iw_modify_qp(ep->com.qp->rhp,
3089 			     ep->com.qp, mask, &attrs, 1);
3090 	if (err)
3091 		goto err_deref_cm_id;
3092 
3093 	set_bit(STOP_MPA_TIMER, &ep->com.flags);
3094 	err = send_mpa_reply(ep, conn_param->private_data,
3095 			     conn_param->private_data_len);
3096 	if (err)
3097 		goto err_deref_cm_id;
3098 
3099 	__state_set(&ep->com, FPDU_MODE);
3100 	established_upcall(ep);
3101 	mutex_unlock(&ep->com.mutex);
3102 	c4iw_put_ep(&ep->com);
3103 	return 0;
3104 err_deref_cm_id:
3105 	deref_cm_id(&ep->com);
3106 err_abort:
3107 	abort = 1;
3108 err_out:
3109 	mutex_unlock(&ep->com.mutex);
3110 	if (abort)
3111 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
3112 	c4iw_put_ep(&ep->com);
3113 	return err;
3114 }
3115 
3116 static int pick_local_ipaddrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
3117 {
3118 	struct in_device *ind;
3119 	int found = 0;
3120 	struct sockaddr_in *laddr = (struct sockaddr_in *)&cm_id->m_local_addr;
3121 	struct sockaddr_in *raddr = (struct sockaddr_in *)&cm_id->m_remote_addr;
3122 
3123 	ind = in_dev_get(dev->rdev.lldi.ports[0]);
3124 	if (!ind)
3125 		return -EADDRNOTAVAIL;
3126 	for_primary_ifa(ind) {
3127 		laddr->sin_addr.s_addr = ifa->ifa_address;
3128 		raddr->sin_addr.s_addr = ifa->ifa_address;
3129 		found = 1;
3130 		break;
3131 	}
3132 	endfor_ifa(ind);
3133 	in_dev_put(ind);
3134 	return found ? 0 : -EADDRNOTAVAIL;
3135 }
3136 
3137 static int get_lladdr(struct net_device *dev, struct in6_addr *addr,
3138 		      unsigned char banned_flags)
3139 {
3140 	struct inet6_dev *idev;
3141 	int err = -EADDRNOTAVAIL;
3142 
3143 	rcu_read_lock();
3144 	idev = __in6_dev_get(dev);
3145 	if (idev != NULL) {
3146 		struct inet6_ifaddr *ifp;
3147 
3148 		read_lock_bh(&idev->lock);
3149 		list_for_each_entry(ifp, &idev->addr_list, if_list) {
3150 			if (ifp->scope == IFA_LINK &&
3151 			    !(ifp->flags & banned_flags)) {
3152 				memcpy(addr, &ifp->addr, 16);
3153 				err = 0;
3154 				break;
3155 			}
3156 		}
3157 		read_unlock_bh(&idev->lock);
3158 	}
3159 	rcu_read_unlock();
3160 	return err;
3161 }
3162 
3163 static int pick_local_ip6addrs(struct c4iw_dev *dev, struct iw_cm_id *cm_id)
3164 {
3165 	struct in6_addr uninitialized_var(addr);
3166 	struct sockaddr_in6 *la6 = (struct sockaddr_in6 *)&cm_id->m_local_addr;
3167 	struct sockaddr_in6 *ra6 = (struct sockaddr_in6 *)&cm_id->m_remote_addr;
3168 
3169 	if (!get_lladdr(dev->rdev.lldi.ports[0], &addr, IFA_F_TENTATIVE)) {
3170 		memcpy(la6->sin6_addr.s6_addr, &addr, 16);
3171 		memcpy(ra6->sin6_addr.s6_addr, &addr, 16);
3172 		return 0;
3173 	}
3174 	return -EADDRNOTAVAIL;
3175 }
3176 
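/*
 * c4iw_connect - the ULP initiates an active open.  Allocate an endpoint
 * and an active TID, resolve a route to the peer (picking local
 * addresses for loopback connects to the wildcard address), import the
 * endpoint's hardware resources and send the TCP connect request.
 */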
3177 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
3178 {
3179 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3180 	struct c4iw_ep *ep;
3181 	int err = 0;
3182 	struct sockaddr_in *laddr;
3183 	struct sockaddr_in *raddr;
3184 	struct sockaddr_in6 *laddr6;
3185 	struct sockaddr_in6 *raddr6;
3186 	__u8 *ra;
3187 	int iptype;
3188 
3189 	if ((conn_param->ord > cur_max_read_depth(dev)) ||
3190 	    (conn_param->ird > cur_max_read_depth(dev))) {
3191 		err = -EINVAL;
3192 		goto out;
3193 	}
3194 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3195 	if (!ep) {
3196 		pr_err("%s - cannot alloc ep\n", __func__);
3197 		err = -ENOMEM;
3198 		goto out;
3199 	}
3200 
3201 	skb_queue_head_init(&ep->com.ep_skb_list);
3202 	if (alloc_ep_skb_list(&ep->com.ep_skb_list, CN_MAX_CON_BUF)) {
3203 		err = -ENOMEM;
3204 		goto fail1;
3205 	}
3206 
3207 	init_timer(&ep->timer);
3208 	ep->plen = conn_param->private_data_len;
3209 	if (ep->plen)
3210 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
3211 		       conn_param->private_data, ep->plen);
3212 	ep->ird = conn_param->ird;
3213 	ep->ord = conn_param->ord;
3214 
3215 	if (peer2peer && ep->ord == 0)
3216 		ep->ord = 1;
3217 
3218 	ep->com.cm_id = cm_id;
3219 	ref_cm_id(&ep->com);
3220 	ep->com.dev = dev;
3221 	ep->com.qp = get_qhp(dev, conn_param->qpn);
3222 	if (!ep->com.qp) {
3223 		pr_debug("%s qpn 0x%x not found!\n", __func__, conn_param->qpn);
3224 		err = -EINVAL;
3225 		goto fail2;
3226 	}
3227 	ref_qp(ep);
3228 	pr_debug("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
3229 		 ep->com.qp, cm_id);
3230 
3231 	/*
3232 	 * Allocate an active TID to initiate a TCP connection.
3233 	 */
3234 	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
3235 	if (ep->atid == -1) {
3236 		pr_err("%s - cannot alloc atid\n", __func__);
3237 		err = -ENOMEM;
3238 		goto fail2;
3239 	}
3240 	insert_handle(dev, &dev->atid_idr, ep, ep->atid);
3241 
3242 	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3243 	       sizeof(ep->com.local_addr));
3244 	memcpy(&ep->com.remote_addr, &cm_id->m_remote_addr,
3245 	       sizeof(ep->com.remote_addr));
3246 
3247 	laddr = (struct sockaddr_in *)&ep->com.local_addr;
3248 	raddr = (struct sockaddr_in *)&ep->com.remote_addr;
3249 	laddr6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3250 	raddr6 = (struct sockaddr_in6 *) &ep->com.remote_addr;
3251 
3252 	if (cm_id->m_remote_addr.ss_family == AF_INET) {
3253 		iptype = 4;
3254 		ra = (__u8 *)&raddr->sin_addr;
3255 
3256 		/*
3257 		 * Handle loopback requests to INADDR_ANY.
3258 		 */
3259 		if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
3260 			err = pick_local_ipaddrs(dev, cm_id);
3261 			if (err)
3262 				goto fail2;
3263 		}
3264 
3265 		/* find a route */
3266 		pr_debug("%s saddr %pI4 sport 0x%x raddr %pI4 rport 0x%x\n",
3267 			 __func__, &laddr->sin_addr, ntohs(laddr->sin_port),
3268 			 ra, ntohs(raddr->sin_port));
3269 		ep->dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
3270 					  laddr->sin_addr.s_addr,
3271 					  raddr->sin_addr.s_addr,
3272 					  laddr->sin_port,
3273 					  raddr->sin_port, cm_id->tos);
3274 	} else {
3275 		iptype = 6;
3276 		ra = (__u8 *)&raddr6->sin6_addr;
3277 
3278 		/*
3279 		 * Handle loopback requests to INADDR_ANY.
3280 		 */
3281 		if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
3282 			err = pick_local_ip6addrs(dev, cm_id);
3283 			if (err)
3284 				goto fail2;
3285 		}
3286 
3287 		/* find a route */
3288 		pr_debug("%s saddr %pI6 sport 0x%x raddr %pI6 rport 0x%x\n",
3289 			 __func__, laddr6->sin6_addr.s6_addr,
3290 			 ntohs(laddr6->sin6_port),
3291 			 raddr6->sin6_addr.s6_addr, ntohs(raddr6->sin6_port));
3292 		ep->dst = cxgb_find_route6(&dev->rdev.lldi, get_real_dev,
3293 					   laddr6->sin6_addr.s6_addr,
3294 					   raddr6->sin6_addr.s6_addr,
3295 					   laddr6->sin6_port,
3296 					   raddr6->sin6_port, 0,
3297 					   raddr6->sin6_scope_id);
3298 	}
3299 	if (!ep->dst) {
3300 		pr_err("%s - cannot find route\n", __func__);
3301 		err = -EHOSTUNREACH;
3302 		goto fail3;
3303 	}
3304 
3305 	err = import_ep(ep, iptype, ra, ep->dst, ep->com.dev, true,
3306 			ep->com.dev->rdev.lldi.adapter_type, cm_id->tos);
3307 	if (err) {
3308 		pr_err("%s - cannot alloc l2e\n", __func__);
3309 		goto fail4;
3310 	}
3311 
3312 	pr_debug("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
3313 		 __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
3314 		 ep->l2t->idx);
3315 
3316 	state_set(&ep->com, CONNECTING);
3317 	ep->tos = cm_id->tos;
3318 
3319 	/* send connect request to rnic */
3320 	err = send_connect(ep);
3321 	if (!err)
3322 		goto out;
3323 
3324 	cxgb4_l2t_release(ep->l2t);
3325 fail4:
3326 	dst_release(ep->dst);
3327 fail3:
3328 	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
3329 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
3330 fail2:
3331 	skb_queue_purge(&ep->com.ep_skb_list);
3332 	deref_cm_id(&ep->com);
3333 fail1:
3334 	c4iw_put_ep(&ep->com);
3335 out:
3336 	return err;
3337 }
3338 
3339 static int create_server6(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3340 {
3341 	int err;
3342 	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
3343 				    &ep->com.local_addr;
3344 
3345 	if (ipv6_addr_type(&sin6->sin6_addr) != IPV6_ADDR_ANY) {
3346 		err = cxgb4_clip_get(ep->com.dev->rdev.lldi.ports[0],
3347 				     (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3348 		if (err)
3349 			return err;
3350 	}
3351 	c4iw_init_wr_wait(&ep->com.wr_wait);
3352 	err = cxgb4_create_server6(ep->com.dev->rdev.lldi.ports[0],
3353 				   ep->stid, &sin6->sin6_addr,
3354 				   sin6->sin6_port,
3355 				   ep->com.dev->rdev.lldi.rxq_ids[0]);
3356 	if (!err)
3357 		err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3358 					  &ep->com.wr_wait,
3359 					  0, 0, __func__);
3360 	else if (err > 0)
3361 		err = net_xmit_errno(err);
3362 	if (err) {
3363 		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3364 				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3365 		pr_err("cxgb4_create_server6/filter failed err %d stid %d laddr %pI6 lport %d\n",
3366 		       err, ep->stid,
3367 		       sin6->sin6_addr.s6_addr, ntohs(sin6->sin6_port));
3368 	}
3369 	return err;
3370 }
3371 
3372 static int create_server4(struct c4iw_dev *dev, struct c4iw_listen_ep *ep)
3373 {
3374 	int err;
3375 	struct sockaddr_in *sin = (struct sockaddr_in *)
3376 				  &ep->com.local_addr;
3377 
3378 	if (dev->rdev.lldi.enable_fw_ofld_conn) {
3379 		do {
3380 			err = cxgb4_create_server_filter(
3381 				ep->com.dev->rdev.lldi.ports[0], ep->stid,
3382 				sin->sin_addr.s_addr, sin->sin_port, 0,
3383 				ep->com.dev->rdev.lldi.rxq_ids[0], 0, 0);
3384 			if (err == -EBUSY) {
3385 				if (c4iw_fatal_error(&ep->com.dev->rdev)) {
3386 					err = -EIO;
3387 					break;
3388 				}
3389 				set_current_state(TASK_UNINTERRUPTIBLE);
3390 				schedule_timeout(usecs_to_jiffies(100));
3391 			}
3392 		} while (err == -EBUSY);
3393 	} else {
3394 		c4iw_init_wr_wait(&ep->com.wr_wait);
3395 		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
3396 				ep->stid, sin->sin_addr.s_addr, sin->sin_port,
3397 				0, ep->com.dev->rdev.lldi.rxq_ids[0]);
3398 		if (!err)
3399 			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
3400 						  &ep->com.wr_wait,
3401 						  0, 0, __func__);
3402 		else if (err > 0)
3403 			err = net_xmit_errno(err);
3404 	}
3405 	if (err)
3406 		pr_err("cxgb4_create_server/filter failed err %d stid %d laddr %pI4 lport %d\n"
3407 		       , err, ep->stid,
3408 		       &sin->sin_addr, ntohs(sin->sin_port));
3409 	return err;
3410 }
3411 
3412 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
3413 {
3414 	int err = 0;
3415 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
3416 	struct c4iw_listen_ep *ep;
3417 
3418 	might_sleep();
3419 
3420 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
3421 	if (!ep) {
3422 		pr_err("%s - cannot alloc ep\n", __func__);
3423 		err = -ENOMEM;
3424 		goto fail1;
3425 	}
3426 	skb_queue_head_init(&ep->com.ep_skb_list);
3427 	pr_debug("%s ep %p\n", __func__, ep);
3428 	ep->com.cm_id = cm_id;
3429 	ref_cm_id(&ep->com);
3430 	ep->com.dev = dev;
3431 	ep->backlog = backlog;
3432 	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3433 	       sizeof(ep->com.local_addr));
3434 
3435 	/*
3436 	 * Allocate a server TID.
3437 	 */
3438 	if (dev->rdev.lldi.enable_fw_ofld_conn &&
3439 	    ep->com.local_addr.ss_family == AF_INET)
3440 		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
3441 					     cm_id->m_local_addr.ss_family, ep);
3442 	else
3443 		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids,
3444 					    cm_id->m_local_addr.ss_family, ep);
3445 
3446 	if (ep->stid == -1) {
3447 		pr_err("%s - cannot alloc stid\n", __func__);
3448 		err = -ENOMEM;
3449 		goto fail2;
3450 	}
3451 	insert_handle(dev, &dev->stid_idr, ep, ep->stid);
3452 
3453 	memcpy(&ep->com.local_addr, &cm_id->m_local_addr,
3454 	       sizeof(ep->com.local_addr));
3455 
3456 	state_set(&ep->com, LISTEN);
3457 	if (ep->com.local_addr.ss_family == AF_INET)
3458 		err = create_server4(dev, ep);
3459 	else
3460 		err = create_server6(dev, ep);
3461 	if (!err) {
3462 		cm_id->provider_data = ep;
3463 		goto out;
3464 	}
3465 
3466 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3467 			ep->com.local_addr.ss_family);
3468 fail2:
3469 	deref_cm_id(&ep->com);
3470 	c4iw_put_ep(&ep->com);
3471 fail1:
3472 out:
3473 	return err;
3474 }
3475 
3476 int c4iw_destroy_listen(struct iw_cm_id *cm_id)
3477 {
3478 	int err;
3479 	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
3480 
3481 	pr_debug("%s ep %p\n", __func__, ep);
3482 
3483 	might_sleep();
3484 	state_set(&ep->com, DEAD);
3485 	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn &&
3486 	    ep->com.local_addr.ss_family == AF_INET) {
3487 		err = cxgb4_remove_server_filter(
3488 			ep->com.dev->rdev.lldi.ports[0], ep->stid,
3489 			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
3490 	} else {
3491 		struct sockaddr_in6 *sin6;
3492 		c4iw_init_wr_wait(&ep->com.wr_wait);
3493 		err = cxgb4_remove_server(
3494 				ep->com.dev->rdev.lldi.ports[0], ep->stid,
3495 				ep->com.dev->rdev.lldi.rxq_ids[0], 0);
3496 		if (err)
3497 			goto done;
3498 		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
3499 					  0, 0, __func__);
3500 		sin6 = (struct sockaddr_in6 *)&ep->com.local_addr;
3501 		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3502 				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3503 	}
3504 	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
3505 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid,
3506 			ep->com.local_addr.ss_family);
3507 done:
3508 	deref_cm_id(&ep->com);
3509 	c4iw_put_ep(&ep->com);
3510 	return err;
3511 }
3512 
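/*
 * c4iw_ep_disconnect - drive an endpoint toward closed.  With @abrupt
 * the connection is aborted; otherwise a half-close is sent and the
 * endpoint timer bounds the wait for the peer.  If neither message can
 * be sent, the QP is forced to ERROR and the endpoint is released.
 */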
3513 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
3514 {
3515 	int ret = 0;
3516 	int close = 0;
3517 	int fatal = 0;
3518 	struct c4iw_rdev *rdev;
3519 
3520 	mutex_lock(&ep->com.mutex);
3521 
3522 	pr_debug("%s ep %p state %s, abrupt %d\n", __func__, ep,
3523 		 states[ep->com.state], abrupt);
3524 
3525 	/*
3526 	 * Ref the ep here in case we have fatal errors causing the
3527 	 * ep to be released and freed.
3528 	 */
3529 	c4iw_get_ep(&ep->com);
3530 
3531 	rdev = &ep->com.dev->rdev;
3532 	if (c4iw_fatal_error(rdev)) {
3533 		fatal = 1;
3534 		close_complete_upcall(ep, -EIO);
3535 		ep->com.state = DEAD;
3536 	}
3537 	switch (ep->com.state) {
3538 	case MPA_REQ_WAIT:
3539 	case MPA_REQ_SENT:
3540 	case MPA_REQ_RCVD:
3541 	case MPA_REP_SENT:
3542 	case FPDU_MODE:
3543 	case CONNECTING:
3544 		close = 1;
3545 		if (abrupt)
3546 			ep->com.state = ABORTING;
3547 		else {
3548 			ep->com.state = CLOSING;
3549 
3550 			/*
3551 			 * if we close before we see the fw4_ack() then we fix
3552 			 * up the timer state since we're reusing it.
3553 			 */
3554 			if (ep->mpa_skb &&
3555 			    test_bit(STOP_MPA_TIMER, &ep->com.flags)) {
3556 				clear_bit(STOP_MPA_TIMER, &ep->com.flags);
3557 				stop_ep_timer(ep);
3558 			}
3559 			start_ep_timer(ep);
3560 		}
3561 		set_bit(CLOSE_SENT, &ep->com.flags);
3562 		break;
3563 	case CLOSING:
3564 		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
3565 			close = 1;
3566 			if (abrupt) {
3567 				(void)stop_ep_timer(ep);
3568 				ep->com.state = ABORTING;
3569 			} else
3570 				ep->com.state = MORIBUND;
3571 		}
3572 		break;
3573 	case MORIBUND:
3574 	case ABORTING:
3575 	case DEAD:
3576 		pr_debug("%s ignoring disconnect ep %p state %u\n",
3577 			 __func__, ep, ep->com.state);
3578 		break;
3579 	default:
3580 		BUG();
3581 		break;
3582 	}
3583 
3584 	if (close) {
3585 		if (abrupt) {
3586 			set_bit(EP_DISC_ABORT, &ep->com.history);
3587 			close_complete_upcall(ep, -ECONNRESET);
3588 			ret = send_abort(ep);
3589 		} else {
3590 			set_bit(EP_DISC_CLOSE, &ep->com.history);
3591 			ret = send_halfclose(ep);
3592 		}
3593 		if (ret) {
3594 			set_bit(EP_DISC_FAIL, &ep->com.history);
3595 			if (!abrupt) {
3596 				stop_ep_timer(ep);
3597 				close_complete_upcall(ep, -EIO);
3598 			}
3599 			if (ep->com.qp) {
3600 				struct c4iw_qp_attributes attrs;
3601 
3602 				attrs.next_state = C4IW_QP_STATE_ERROR;
3603 				ret = c4iw_modify_qp(ep->com.qp->rhp,
3604 						     ep->com.qp,
3605 						     C4IW_QP_ATTR_NEXT_STATE,
3606 						     &attrs, 1);
3607 				if (ret)
3608 					pr_err("%s - qp <- error failed!\n",
3609 					       __func__);
3610 			}
3611 			fatal = 1;
3612 		}
3613 	}
3614 	mutex_unlock(&ep->com.mutex);
3615 	c4iw_put_ep(&ep->com);
3616 	if (fatal)
3617 		release_ep_resources(ep);
3618 	return ret;
3619 }
3620 
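/*
 * Handle the firmware reply to an active-open FW_OFLD_CONNECTION_WR.
 * FW_ENOMEM and FW_EADDRINUSE are retried up to ACT_OPEN_RETRY_COUNT
 * times; any other failure, or retry exhaustion, completes the connect
 * with an error and tears the endpoint down.
 */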
3621 static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3622 			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3623 {
3624 	struct c4iw_ep *ep;
3625 	int atid = be32_to_cpu(req->tid);
3626 
3627 	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
3628 					   (__force u32) req->tid);
3629 	if (!ep)
3630 		return;
3631 
3632 	switch (req->retval) {
3633 	case FW_ENOMEM:
3634 		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
3635 		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3636 			send_fw_act_open_req(ep, atid);
3637 			return;
3638 		}
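		/* fall through */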
3639 	case FW_EADDRINUSE:
3640 		set_bit(ACT_RETRY_INUSE, &ep->com.history);
3641 		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
3642 			send_fw_act_open_req(ep, atid);
3643 			return;
3644 		}
3645 		break;
3646 	default:
3647 		pr_info("%s unexpected ofld conn wr retval %d\n",
3648 		       __func__, req->retval);
3649 		break;
3650 	}
3651 	pr_err("active ofld_connect_wr failure %d atid %d\n",
3652 	       req->retval, atid);
3653 	mutex_lock(&dev->rdev.stats.lock);
3654 	dev->rdev.stats.act_ofld_conn_fails++;
3655 	mutex_unlock(&dev->rdev.stats.lock);
3656 	connect_reply_upcall(ep, status2errno(req->retval));
3657 	state_set(&ep->com, DEAD);
3658 	if (ep->com.remote_addr.ss_family == AF_INET6) {
3659 		struct sockaddr_in6 *sin6 =
3660 			(struct sockaddr_in6 *)&ep->com.local_addr;
3661 		cxgb4_clip_release(ep->com.dev->rdev.lldi.ports[0],
3662 				   (const u32 *)&sin6->sin6_addr.s6_addr, 1);
3663 	}
3664 	remove_handle(dev, &dev->atid_idr, atid);
3665 	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
3666 	dst_release(ep->dst);
3667 	cxgb4_l2t_release(ep->l2t);
3668 	c4iw_put_ep(&ep->com);
3669 }
3670 
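/*
 * Handle the firmware reply to a passive-open FW_OFLD_CONNECTION_WR.
 * The skb carrying the synthesized cpl_pass_accept_req was stashed in
 * the WR cookie; on success, stamp it with the firmware-assigned TID
 * and feed it through the regular pass_accept_req() path.
 */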
3671 static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
3672 			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
3673 {
3674 	struct sk_buff *rpl_skb;
3675 	struct cpl_pass_accept_req *cpl;
3676 	int ret;
3677 
3678 	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
3679 	BUG_ON(!rpl_skb);
3680 	if (req->retval) {
3681 		pr_debug("%s passive open failure %d\n", __func__, req->retval);
3682 		mutex_lock(&dev->rdev.stats.lock);
3683 		dev->rdev.stats.pas_ofld_conn_fails++;
3684 		mutex_unlock(&dev->rdev.stats.lock);
3685 		kfree_skb(rpl_skb);
3686 	} else {
3687 		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
3688 		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
3689 					(__force u32) htonl(
3690 					(__force u32) req->tid)));
3691 		ret = pass_accept_req(dev, rpl_skb);
3692 		if (!ret)
3693 			kfree_skb(rpl_skb);
3694 	}
3696 }
3697 
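/*
 * FW6 messages deferred to the workqueue by fw6_msg().  CQEs go to the
 * event handler; offload-connection WR replies are routed by the TCP
 * state recorded in the reply (SYN_SENT for active opens, SYN_RECV for
 * passive opens).
 */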
3698 static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
3699 {
3700 	struct cpl_fw6_msg *rpl = cplhdr(skb);
3701 	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
3702 
3703 	switch (rpl->type) {
3704 	case FW6_TYPE_CQE:
3705 		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
3706 		break;
3707 	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
3708 		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
3709 		switch (req->t_state) {
3710 		case TCP_SYN_SENT:
3711 			active_ofld_conn_reply(dev, skb, req);
3712 			break;
3713 		case TCP_SYN_RECV:
3714 			passive_ofld_conn_reply(dev, skb, req);
3715 			break;
3716 		default:
3717 			pr_err("%s unexpected ofld conn wr state %d\n",
3718 			       __func__, req->t_state);
3719 			break;
3720 		}
3721 		break;
3722 	}
3723 	return 0;
3724 }
3725 
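/*
 * Rewrite a received cpl_rx_pkt skb in place into the cpl_pass_accept_req
 * expected by the passive-open path: the L2 info from the original
 * message is preserved and the TCP options are re-parsed from the SYN
 * itself.  The TID is left 0 and is filled in once the firmware assigns
 * one.
 */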
static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
3727 {
3728 	__be32 l2info;
3729 	__be16 hdr_len, vlantag, len;
3730 	u16 eth_hdr_len;
3731 	int tcp_hdr_len, ip_hdr_len;
3732 	u8 intf;
3733 	struct cpl_rx_pkt *cpl = cplhdr(skb);
3734 	struct cpl_pass_accept_req *req;
3735 	struct tcp_options_received tmp_opt;
3736 	struct c4iw_dev *dev;
3737 	enum chip_type type;
3738 
3739 	dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
3740 	/* Store values from cpl_rx_pkt in temporary location. */
3741 	vlantag = cpl->vlan;
3742 	len = cpl->len;
3743 	l2info  = cpl->l2info;
3744 	hdr_len = cpl->hdr_len;
3745 	intf = cpl->iff;
3746 
3747 	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
3748 
	/*
	 * We need to parse the TCP options from the SYN packet
	 * to generate the cpl_pass_accept_req.
	 */
3753 	memset(&tmp_opt, 0, sizeof(tmp_opt));
3754 	tcp_clear_options(&tmp_opt);
3755 	tcp_parse_options(skb, &tmp_opt, 0, NULL);
3756 
3757 	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
3758 	memset(req, 0, sizeof(*req));
3759 	req->l2info = cpu_to_be16(SYN_INTF_V(intf) |
3760 			 SYN_MAC_IDX_V(RX_MACIDX_G(
3761 			 be32_to_cpu(l2info))) |
3762 			 SYN_XACT_MATCH_F);
3763 	type = dev->rdev.lldi.adapter_type;
3764 	tcp_hdr_len = RX_TCPHDR_LEN_G(be16_to_cpu(hdr_len));
3765 	ip_hdr_len = RX_IPHDR_LEN_G(be16_to_cpu(hdr_len));
3766 	req->hdr_len =
3767 		cpu_to_be32(SYN_RX_CHAN_V(RX_CHAN_G(be32_to_cpu(l2info))));
3768 	if (CHELSIO_CHIP_VERSION(type) <= CHELSIO_T5) {
3769 		eth_hdr_len = is_t4(type) ?
3770 				RX_ETHHDR_LEN_G(be32_to_cpu(l2info)) :
3771 				RX_T5_ETHHDR_LEN_G(be32_to_cpu(l2info));
3772 		req->hdr_len |= cpu_to_be32(TCP_HDR_LEN_V(tcp_hdr_len) |
3773 					    IP_HDR_LEN_V(ip_hdr_len) |
3774 					    ETH_HDR_LEN_V(eth_hdr_len));
3775 	} else { /* T6 and later */
3776 		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(l2info));
3777 		req->hdr_len |= cpu_to_be32(T6_TCP_HDR_LEN_V(tcp_hdr_len) |
3778 					    T6_IP_HDR_LEN_V(ip_hdr_len) |
3779 					    T6_ETH_HDR_LEN_V(eth_hdr_len));
3780 	}
3781 	req->vlan = vlantag;
3782 	req->len = len;
3783 	req->tos_stid = cpu_to_be32(PASS_OPEN_TID_V(stid) |
3784 				    PASS_OPEN_TOS_V(tos));
3785 	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
3786 	if (tmp_opt.wscale_ok)
3787 		req->tcpopt.wsf = tmp_opt.snd_wscale;
3788 	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
3789 	if (tmp_opt.sack_ok)
3790 		req->tcpopt.sack = 1;
3791 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
3793 }
3794 
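/*
 * Build and send a FW_OFLD_CONNECTION_WR asking the firmware to create
 * a hardware TCB for a passive connection that arrived via a filter.
 * The original skb is stashed in the WR cookie so that the reply
 * handler can complete the accept using the TID the firmware hands
 * back.
 */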
3795 static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
3796 				  __be32 laddr, __be16 lport,
3797 				  __be32 raddr, __be16 rport,
3798 				  u32 rcv_isn, u32 filter, u16 window,
3799 				  u32 rss_qid, u8 port_id)
3800 {
3801 	struct sk_buff *req_skb;
3802 	struct fw_ofld_connection_wr *req;
3803 	struct cpl_pass_accept_req *cpl = cplhdr(skb);
3804 	int ret;
3805 
3806 	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
3807 	if (!req_skb)
3808 		return;
3809 	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
3810 	memset(req, 0, sizeof(*req));
3811 	req->op_compl = htonl(WR_OP_V(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL_F);
3812 	req->len16_pkd = htonl(FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*req), 16)));
3813 	req->le.version_cpl = htonl(FW_OFLD_CONNECTION_WR_CPL_F);
3814 	req->le.filter = (__force __be32) filter;
3815 	req->le.lport = lport;
3816 	req->le.pport = rport;
3817 	req->le.u.ipv4.lip = laddr;
3818 	req->le.u.ipv4.pip = raddr;
3819 	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
3820 	req->tcb.rcv_adv = htons(window);
3821 	req->tcb.t_state_to_astid =
3822 		 htonl(FW_OFLD_CONNECTION_WR_T_STATE_V(TCP_SYN_RECV) |
3823 			FW_OFLD_CONNECTION_WR_RCV_SCALE_V(cpl->tcpopt.wsf) |
3824 			FW_OFLD_CONNECTION_WR_ASTID_V(
3825 			PASS_OPEN_TID_G(ntohl(cpl->tos_stid))));
3826 
3827 	/*
3828 	 * We store the qid in opt2 which will be used by the firmware
3829 	 * to send us the wr response.
3830 	 */
3831 	req->tcb.opt2 = htonl(RSS_QUEUE_V(rss_qid));
3832 
	/*
	 * Initialize the MSS index in the TCB to 0xF so that, when the
	 * driver sends the cpl_pass_accept_rpl, the TCB picks up the
	 * correct value.  If it were left 0, TP would ignore any MSS
	 * index > 0.
	 */
3839 	req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
3840 	req->cookie = (uintptr_t)skb;
3841 
3842 	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
3843 	ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
3844 	if (ret < 0) {
3845 		pr_err("%s - cxgb4_ofld_send error %d - dropping\n", __func__,
3846 		       ret);
3847 		kfree_skb(skb);
3848 		kfree_skb(req_skb);
3849 	}
3850 }
3851 
/*
 * Handler for CPL_RX_PKT message.  These messages arrive when a filter,
 * rather than a server, is being used to redirect a SYN packet.  When a
 * packet hits the filter it is redirected to the offload queue, and the
 * driver tries to establish the connection using a firmware work request.
 */
3859 static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
3860 {
3861 	int stid;
3862 	unsigned int filter;
3863 	struct ethhdr *eh = NULL;
3864 	struct vlan_ethhdr *vlan_eh = NULL;
3865 	struct iphdr *iph;
3866 	struct tcphdr *tcph;
3867 	struct rss_header *rss = (void *)skb->data;
3868 	struct cpl_rx_pkt *cpl = (void *)skb->data;
3869 	struct cpl_pass_accept_req *req = (void *)(rss + 1);
3870 	struct l2t_entry *e;
3871 	struct dst_entry *dst;
3872 	struct c4iw_ep *lep = NULL;
3873 	u16 window;
3874 	struct port_info *pi;
3875 	struct net_device *pdev;
3876 	u16 rss_qid, eth_hdr_len;
3877 	int step;
3878 	u32 tx_chan;
3879 	struct neighbour *neigh;
3880 
3881 	/* Drop all non-SYN packets */
3882 	if (!(cpl->l2info & cpu_to_be32(RXF_SYN_F)))
3883 		goto reject;
3884 
3885 	/*
3886 	 * Drop all packets which did not hit the filter.
3887 	 * Unlikely to happen.
3888 	 */
3889 	if (!(rss->filter_hit && rss->filter_tid))
3890 		goto reject;
3891 
	/*
	 * Calculate the server TID from the filter hit index in the
	 * cpl_rx_pkt.
	 */
3895 	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
3896 
3897 	lep = (struct c4iw_ep *)get_ep_from_stid(dev, stid);
3898 	if (!lep) {
3899 		pr_debug("%s connect request on invalid stid %d\n",
3900 			 __func__, stid);
3901 		goto reject;
3902 	}
3903 
3904 	switch (CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type)) {
3905 	case CHELSIO_T4:
3906 		eth_hdr_len = RX_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3907 		break;
3908 	case CHELSIO_T5:
3909 		eth_hdr_len = RX_T5_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3910 		break;
3911 	case CHELSIO_T6:
3912 		eth_hdr_len = RX_T6_ETHHDR_LEN_G(be32_to_cpu(cpl->l2info));
3913 		break;
3914 	default:
3915 		pr_err("T%d Chip is not supported\n",
3916 		       CHELSIO_CHIP_VERSION(dev->rdev.lldi.adapter_type));
3917 		goto reject;
3918 	}
3919 
3920 	if (eth_hdr_len == ETH_HLEN) {
3921 		eh = (struct ethhdr *)(req + 1);
3922 		iph = (struct iphdr *)(eh + 1);
3923 	} else {
3924 		vlan_eh = (struct vlan_ethhdr *)(req + 1);
3925 		iph = (struct iphdr *)(vlan_eh + 1);
3926 		skb->vlan_tci = ntohs(cpl->vlan);
3927 	}
3928 
3929 	if (iph->version != 0x4)
3930 		goto reject;
3931 
3932 	tcph = (struct tcphdr *)(iph + 1);
3933 	skb_set_network_header(skb, (void *)iph - (void *)rss);
3934 	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
3935 	skb_get(skb);
3936 
3937 	pr_debug("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
3938 		 ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
3939 		 ntohs(tcph->source), iph->tos);
3940 
3941 	dst = cxgb_find_route(&dev->rdev.lldi, get_real_dev,
3942 			      iph->daddr, iph->saddr, tcph->dest,
3943 			      tcph->source, iph->tos);
3944 	if (!dst) {
3945 		pr_err("%s - failed to find dst entry!\n",
3946 		       __func__);
3947 		goto reject;
3948 	}
3949 	neigh = dst_neigh_lookup_skb(dst, skb);
3950 
3951 	if (!neigh) {
3952 		pr_err("%s - failed to allocate neigh!\n",
3953 		       __func__);
3954 		goto free_dst;
3955 	}
3956 
	if (neigh->dev->flags & IFF_LOOPBACK) {
		pdev = ip_dev_find(&init_net, iph->daddr);
		if (!pdev) {
			/* ip_dev_find() can fail; don't deref a NULL pdev */
			pr_err("%s - failed to find device!\n", __func__);
			neigh_release(neigh);
			goto free_dst;
		}
		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
				  pdev, 0);
		pi = (struct port_info *)netdev_priv(pdev);
		tx_chan = cxgb4_port_chan(pdev);
		dev_put(pdev);
3964 	} else {
3965 		pdev = get_real_dev(neigh->dev);
3966 		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
3967 					pdev, 0);
3968 		pi = (struct port_info *)netdev_priv(pdev);
3969 		tx_chan = cxgb4_port_chan(pdev);
3970 	}
3971 	neigh_release(neigh);
3972 	if (!e) {
3973 		pr_err("%s - failed to allocate l2t entry!\n",
3974 		       __func__);
3975 		goto free_dst;
3976 	}
3977 
3978 	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
3979 	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
3980 	window = (__force u16) htons((__force u16)tcph->window);
3981 
	/* Calculate filter portion for LE region. */
3983 	filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
3984 						    dev->rdev.lldi.ports[0],
3985 						    e));
3986 
3987 	/*
3988 	 * Synthesize the cpl_pass_accept_req. We have everything except the
3989 	 * TID. Once firmware sends a reply with TID we update the TID field
3990 	 * in cpl and pass it through the regular cpl_pass_accept_req path.
3991 	 */
3992 	build_cpl_pass_accept_req(skb, stid, iph->tos);
3993 	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
3994 			      tcph->source, ntohl(tcph->seq), filter, window,
3995 			      rss_qid, pi->port_id);
3996 	cxgb4_l2t_release(e);
3997 free_dst:
3998 	dst_release(dst);
3999 reject:
4000 	if (lep)
4001 		c4iw_put_ep(&lep->com);
4002 	return 0;
4003 }
4004 
4005 /*
4006  * These are the real handlers that are called from a
4007  * work queue.
4008  */
4009 static c4iw_handler_func work_handlers[NUM_CPL_CMDS + NUM_FAKE_CPLS] = {
4010 	[CPL_ACT_ESTABLISH] = act_establish,
4011 	[CPL_ACT_OPEN_RPL] = act_open_rpl,
4012 	[CPL_RX_DATA] = rx_data,
4013 	[CPL_ABORT_RPL_RSS] = abort_rpl,
4014 	[CPL_ABORT_RPL] = abort_rpl,
4015 	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
4016 	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
4017 	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
4018 	[CPL_PASS_ESTABLISH] = pass_establish,
4019 	[CPL_PEER_CLOSE] = peer_close,
4020 	[CPL_ABORT_REQ_RSS] = peer_abort,
4021 	[CPL_CLOSE_CON_RPL] = close_con_rpl,
4022 	[CPL_RDMA_TERMINATE] = terminate,
4023 	[CPL_FW4_ACK] = fw4_ack,
4024 	[CPL_FW6_MSG] = deferred_fw6_msg,
4025 	[CPL_RX_PKT] = rx_pkt,
4026 	[FAKE_CPL_PUT_EP_SAFE] = _put_ep_safe,
4027 	[FAKE_CPL_PASS_PUT_EP_SAFE] = _put_pass_ep_safe
4028 };
4029 
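/*
 * Handle an endpoint whose timer has fired, issuing the upcall that
 * matches its state.  ABORTING and DEAD are tolerated silently since
 * the timeout can race with another thread calling stop_ep_timer().
 */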
4030 static void process_timeout(struct c4iw_ep *ep)
4031 {
4032 	struct c4iw_qp_attributes attrs;
4033 	int abort = 1;
4034 
4035 	mutex_lock(&ep->com.mutex);
4036 	pr_debug("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
4037 		 ep->com.state);
4038 	set_bit(TIMEDOUT, &ep->com.history);
4039 	switch (ep->com.state) {
4040 	case MPA_REQ_SENT:
4041 		connect_reply_upcall(ep, -ETIMEDOUT);
4042 		break;
4043 	case MPA_REQ_WAIT:
4044 	case MPA_REQ_RCVD:
4045 	case MPA_REP_SENT:
4046 	case FPDU_MODE:
4047 		break;
4048 	case CLOSING:
4049 	case MORIBUND:
4050 		if (ep->com.cm_id && ep->com.qp) {
4051 			attrs.next_state = C4IW_QP_STATE_ERROR;
4052 			c4iw_modify_qp(ep->com.qp->rhp,
4053 				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
4054 				     &attrs, 1);
4055 		}
4056 		close_complete_upcall(ep, -ETIMEDOUT);
4057 		break;
4058 	case ABORTING:
4059 	case DEAD:
4061 		/*
4062 		 * These states are expected if the ep timed out at the same
4063 		 * time as another thread was calling stop_ep_timer().
4064 		 * So we silently do nothing for these states.
4065 		 */
4066 		abort = 0;
4067 		break;
4068 	default:
4069 		WARN(1, "%s unexpected state ep %p tid %u state %u\n",
4070 			__func__, ep, ep->hwtid, ep->com.state);
4071 		abort = 0;
4072 	}
4073 	mutex_unlock(&ep->com.mutex);
4074 	if (abort)
4075 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
4076 	c4iw_put_ep(&ep->com);
4077 }
4078 
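/*
 * Drain the global timeout_list.  The lock is dropped around each
 * process_timeout() call since that handler can block.
 */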
4079 static void process_timedout_eps(void)
4080 {
4081 	struct c4iw_ep *ep;
4082 
4083 	spin_lock_irq(&timeout_lock);
4084 	while (!list_empty(&timeout_list)) {
4085 		struct list_head *tmp;
4086 
4087 		tmp = timeout_list.next;
4088 		list_del(tmp);
4089 		tmp->next = NULL;
4090 		tmp->prev = NULL;
4091 		spin_unlock_irq(&timeout_lock);
4092 		ep = list_entry(tmp, struct c4iw_ep, entry);
4093 		process_timeout(ep);
4094 		spin_lock_irq(&timeout_lock);
4095 	}
4096 	spin_unlock_irq(&timeout_lock);
4097 }
4098 
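/*
 * Workqueue function: process any timed-out endpoints, then run the
 * deferred CPL handler for each queued skb, freeing the skb unless the
 * handler indicates it still owns it by returning nonzero.
 */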
4099 static void process_work(struct work_struct *work)
4100 {
4101 	struct sk_buff *skb = NULL;
4102 	struct c4iw_dev *dev;
4103 	struct cpl_act_establish *rpl;
4104 	unsigned int opcode;
4105 	int ret;
4106 
4107 	process_timedout_eps();
4108 	while ((skb = skb_dequeue(&rxq))) {
4109 		rpl = cplhdr(skb);
4110 		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
4111 		opcode = rpl->ot.opcode;
4112 
4113 		BUG_ON(!work_handlers[opcode]);
4114 		ret = work_handlers[opcode](dev, skb);
4115 		if (!ret)
4116 			kfree_skb(skb);
4117 		process_timedout_eps();
4118 	}
4119 }
4120 
4121 static DECLARE_WORK(skb_work, process_work);
4122 
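/*
 * Timer callback for an endpoint.  Runs in softirq context, so it only
 * queues the ep on the timeout_list and kicks the workqueue; the real
 * processing happens in process_timedout_eps().
 */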
4123 static void ep_timeout(unsigned long arg)
4124 {
4125 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
4126 	int kickit = 0;
4127 
4128 	spin_lock(&timeout_lock);
4129 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
4130 		/*
4131 		 * Only insert if it is not already on the list.
4132 		 */
4133 		if (!ep->entry.next) {
4134 			list_add_tail(&ep->entry, &timeout_list);
4135 			kickit = 1;
4136 		}
4137 	}
4138 	spin_unlock(&timeout_lock);
4139 	if (kickit)
4140 		queue_work(workq, &skb_work);
4141 }
4142 
4143 /*
4144  * All the CM events are handled on a work queue to have a safe context.
4145  */
4146 static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
4147 {
4149 	/*
4150 	 * Save dev in the skb->cb area.
4151 	 */
4152 	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
4153 
4154 	/*
4155 	 * Queue the skb and schedule the worker thread.
4156 	 */
4157 	skb_queue_tail(&rxq, skb);
4158 	queue_work(workq, &skb_work);
4159 	return 0;
4160 }
4161 
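/*
 * Nothing waits on CPL_SET_TCB_RPL, so just log an unexpected status
 * and free the skb.
 */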
4162 static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
4163 {
4164 	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
4165 
4166 	if (rpl->status != CPL_ERR_NONE) {
4167 		pr_err("Unexpected SET_TCB_RPL status %u for tid %u\n",
4168 		       rpl->status, GET_TID(rpl));
4169 	}
4170 	kfree_skb(skb);
4171 	return 0;
4172 }
4173 
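/*
 * Top-half FW6 handler: WR completions wake up the waiter directly,
 * while CQEs and offload-connection replies are deferred to the
 * workqueue via sched().
 */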
4174 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
4175 {
4176 	struct cpl_fw6_msg *rpl = cplhdr(skb);
4177 	struct c4iw_wr_wait *wr_waitp;
4178 	int ret;
4179 
4180 	pr_debug("%s type %u\n", __func__, rpl->type);
4181 
4182 	switch (rpl->type) {
4183 	case FW6_TYPE_WR_RPL:
4184 		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long)
			rpl->data[1];
4186 		pr_debug("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
4187 		if (wr_waitp)
4188 			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
4189 		kfree_skb(skb);
4190 		break;
4191 	case FW6_TYPE_CQE:
4192 	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
4193 		sched(dev, skb);
4194 		break;
4195 	default:
4196 		pr_err("%s unexpected fw6 msg type %u\n",
4197 		       __func__, rpl->type);
4198 		kfree_skb(skb);
4199 		break;
4200 	}
4201 	return 0;
4202 }
4203 
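/*
 * Called in interrupt context so that any thread blocked in
 * c4iw_wait_for_reply() is woken with -ECONNRESET before the abort is
 * handed to the workqueue for full processing in peer_abort().
 * Negative advice aborts are not fatal and skip the wakeup.
 */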
4204 static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
4205 {
4206 	struct cpl_abort_req_rss *req = cplhdr(skb);
4207 	struct c4iw_ep *ep;
4208 	unsigned int tid = GET_TID(req);
4209 
4210 	ep = get_ep_from_tid(dev, tid);
4211 	/* This EP will be dereferenced in peer_abort() */
4212 	if (!ep) {
4213 		pr_warn("Abort on non-existent endpoint, tid %d\n", tid);
4214 		kfree_skb(skb);
4215 		return 0;
4216 	}
4217 	if (cxgb_is_neg_adv(req->status)) {
4218 		pr_debug("%s Negative advice on abort- tid %u status %d (%s)\n",
4219 			 __func__, ep->hwtid, req->status,
4220 			 neg_adv_str(req->status));
4221 		goto out;
4222 	}
4223 	pr_debug("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
4224 		 ep->com.state);
4225 
4226 	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
4227 out:
4228 	sched(dev, skb);
4229 	return 0;
4230 }
4231 
4232 /*
4233  * Most upcalls from the T4 Core go to sched() to
4234  * schedule the processing on a work queue.
4235  */
4236 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
4237 	[CPL_ACT_ESTABLISH] = sched,
4238 	[CPL_ACT_OPEN_RPL] = sched,
4239 	[CPL_RX_DATA] = sched,
4240 	[CPL_ABORT_RPL_RSS] = sched,
4241 	[CPL_ABORT_RPL] = sched,
4242 	[CPL_PASS_OPEN_RPL] = sched,
4243 	[CPL_CLOSE_LISTSRV_RPL] = sched,
4244 	[CPL_PASS_ACCEPT_REQ] = sched,
4245 	[CPL_PASS_ESTABLISH] = sched,
4246 	[CPL_PEER_CLOSE] = sched,
4247 	[CPL_CLOSE_CON_RPL] = sched,
4248 	[CPL_ABORT_REQ_RSS] = peer_abort_intr,
4249 	[CPL_RDMA_TERMINATE] = sched,
4250 	[CPL_FW4_ACK] = sched,
4251 	[CPL_SET_TCB_RPL] = set_tcb_rpl,
4252 	[CPL_FW6_MSG] = fw6_msg,
4253 	[CPL_RX_PKT] = sched
4254 };
4255 
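/*
 * Module init for the CM: a single ordered, memory-reclaim-safe
 * workqueue serializes all deferred CPL and timeout processing.
 */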
4256 int __init c4iw_cm_init(void)
4257 {
4258 	spin_lock_init(&timeout_lock);
4259 	skb_queue_head_init(&rxq);
4260 
4261 	workq = alloc_ordered_workqueue("iw_cxgb4", WQ_MEM_RECLAIM);
4262 	if (!workq)
4263 		return -ENOMEM;
4264 
4265 	return 0;
4266 }
4267 
4268 void c4iw_cm_term(void)
4269 {
4270 	WARN_ON(!list_empty(&timeout_list));
4271 	flush_workqueue(workq);
4272 	destroy_workqueue(workq);
4273 }
4274