/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>
#include <net/route/nhop.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip.h>
#include <netinet/in_fib.h>
#include <netinet6/in6_fib.h>
#include <netinet6/scope6_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
struct cpl_set_tcb_rpl;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#define TOEPCB(so)  ((struct toepcb *)(sototcpcb((so))->t_toe))

#include "iw_cxgbe.h"
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <rdma/rdma_cm.h>

static spinlock_t req_lock;
static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
static struct work_struct c4iw_task;
static struct workqueue_struct *c4iw_taskq;
static LIST_HEAD(err_cqe_list);
static spinlock_t err_cqe_lock;
static LIST_HEAD(listen_port_list);
static DEFINE_MUTEX(listen_port_mutex);

static void process_req(struct work_struct *ctx);
static void start_ep_timer(struct c4iw_ep *ep);
static int stop_ep_timer(struct c4iw_ep *ep);
static int set_tcpinfo(struct c4iw_ep *ep);
static void process_timeout(struct c4iw_ep *ep);
static void process_err_cqes(void);
static void *alloc_ep(int size, gfp_t flags);
static void close_socket(struct socket *so);
static int send_mpa_req(struct c4iw_ep *ep);
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
static void close_complete_upcall(struct c4iw_ep *ep, int status);
static int send_abort(struct c4iw_ep *ep);
static void peer_close_upcall(struct c4iw_ep *ep);
static void peer_abort_upcall(struct c4iw_ep *ep);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int connect_request_upcall(struct c4iw_ep *ep);
static void established_upcall(struct c4iw_ep *ep);
static int process_mpa_reply(struct c4iw_ep *ep);
static int process_mpa_request(struct c4iw_ep *ep);
static void process_peer_close(struct c4iw_ep *ep);
static void process_conn_error(struct c4iw_ep *ep);
static void process_close_complete(struct c4iw_ep *ep);
static void ep_timeout(unsigned long arg);
static void setiwsockopt(struct socket *so);
static void init_iwarp_socket(struct socket *so, void *arg);
static void uninit_iwarp_socket(struct socket *so);
static void process_data(struct c4iw_ep *ep);
static void process_connected(struct c4iw_ep *ep);
static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
static void process_socket_event(struct c4iw_ep *ep);
static void release_ep_resources(struct c4iw_ep *ep);
static int process_terminate(struct c4iw_ep *ep);
static int terminate(struct sge_iq *iq, const struct rss_header *rss,
    struct mbuf *m);
static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep *lep);
static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep);
static struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so);
static int get_ifnet_from_raddr(struct sockaddr_storage *raddr,
		if_t *ifp);
static void process_newconn(struct c4iw_listen_ep *master_lep,
		struct socket *new_so);

#define START_EP_TIMER(ep) \
    do { \
	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    start_ep_timer(ep); \
    } while (0)

#define STOP_EP_TIMER(ep) \
    ({ \
	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    stop_ep_timer(ep); \
    })
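
/*
 * Unlike START_EP_TIMER, STOP_EP_TIMER is a GNU statement expression so
 * that it evaluates to stop_ep_timer()'s return value; callers can test
 * whether they actually stopped a pending timer (see process_mpa_reply(),
 * which bails out if the timer had already fired).
 */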

#define GET_LOCAL_ADDR(pladdr, so) \
	do { \
		struct sockaddr_storage *__a = NULL; \
		struct inpcb *__inp = sotoinpcb(so); \
		KASSERT(__inp != NULL, \
		   ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
		if (__inp->inp_vflag & INP_IPV4) \
			in_getsockaddr(so, (struct sockaddr **)&__a); \
		else \
			in6_getsockaddr(so, (struct sockaddr **)&__a); \
		*(pladdr) = *__a; \
		free(__a, M_SONAME); \
	} while (0)

#define GET_REMOTE_ADDR(praddr, so) \
	do { \
		struct sockaddr_storage *__a = NULL; \
		struct inpcb *__inp = sotoinpcb(so); \
		KASSERT(__inp != NULL, \
		   ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
		if (__inp->inp_vflag & INP_IPV4) \
			in_getpeeraddr(so, (struct sockaddr **)&__a); \
		else \
			in6_getpeeraddr(so, (struct sockaddr **)&__a); \
		*(praddr) = *__a; \
		free(__a, M_SONAME); \
	} while (0)
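
/*
 * GET_LOCAL_ADDR/GET_REMOTE_ADDR above copy the socket's bound/peer address
 * into caller-supplied storage.  in_getsockaddr()/in6_getsockaddr() (and the
 * peer variants) return a freshly allocated sockaddr, which is freed here
 * with M_SONAME once copied.
 */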

static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

/* allocated per TCP port while listening */
struct listen_port_info {
	uint16_t port_num; /* TCP port address */
	struct list_head list; /* belongs to listen_port_list */
	struct list_head lep_list; /* per port lep list */
	uint32_t refcnt; /* number of lep's listening */
};

/*
 * The following two lists are used to manage INADDR_ANY listeners:
 * 1) listen_port_list
 * 2) lep_list
 *
 * Below is an overview of the INADDR_ANY listener lists on a system with a
 * two port adapter:
 *   |------------------|
 *   |listen_port_list  |
 *   |------------------|
 *            |
 *            |              |-----------|       |-----------|
 *            |              | port_num:X|       | port_num:X|
 *            |--------------|-list------|-------|-list------|-------....
 *                           | lep_list----|     | lep_list----|
 *                           | refcnt    | |     | refcnt    | |
 *                           |           | |     |           | |
 *                           |           | |     |           | |
 *                           |-----------| |     |-----------| |
 *                                         |                   |
 *                                         |                   |
 *                                         |                   |
 *                                         |                   |         lep1                  lep2
 *                                         |                   |    |----------------|    |----------------|
 *                                         |                   |----| listen_ep_list |----| listen_ep_list |
 *                                         |                        |----------------|    |----------------|
 *                                         |
 *                                         |
 *                                         |        lep1                  lep2
 *                                         |   |----------------|    |----------------|
 *                                         |---| listen_ep_list |----| listen_ep_list |
 *                                             |----------------|    |----------------|
 *
 * Because the adapter has two ports, there are two leps (lep1 & lep2) for
 * each TCP port number.
 *
 * 'lep1' is always marked as the master lep, because solisten() is always
 * called through the first lep.
 *
 */
static struct listen_port_info *
add_ep_to_listenlist(struct c4iw_listen_ep *lep)
{
	uint16_t port;
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;

	port = (laddr->ss_family == AF_INET) ?
		((struct sockaddr_in *)laddr)->sin_port :
		((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port)
			goto found_port;

	port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
	port_info->port_num = port;
	port_info->refcnt = 0;

	list_add_tail(&port_info->list, &listen_port_list);
	INIT_LIST_HEAD(&port_info->lep_list);

found_port:
	port_info->refcnt++;
	list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
	mutex_unlock(&listen_port_mutex);
	return port_info;
}

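/*
 * Drop 'lep' from its port's lep_list.  Returns the number of listeners
 * still bound to the port; once it reaches zero the port_info entry itself
 * has been removed from listen_port_list and freed.
 */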
static int
rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
{
	uint16_t port;
	struct listen_port_info *port_info = NULL;
	struct sockaddr_storage *laddr = &lep->com.local_addr;
	int refcnt = 0;

	port = (laddr->ss_family == AF_INET) ?
		((struct sockaddr_in *)laddr)->sin_port :
		((struct sockaddr_in6 *)laddr)->sin6_port;

	mutex_lock(&listen_port_mutex);

	/* get the port_info structure based on the lep's port address */
	list_for_each_entry(port_info, &listen_port_list, list) {
		if (port_info->port_num == port) {
			port_info->refcnt--;
			refcnt = port_info->refcnt;
			/* remove the current lep from the listen list */
			list_del(&lep->listen_ep_list);
			if (port_info->refcnt == 0) {
				/* Remove this entry from the list as there
				 * are no more listeners for this port_num.
				 */
				list_del(&port_info->list);
				kfree(port_info);
			}
			break;
		}
	}
	mutex_unlock(&listen_port_mutex);
	return refcnt;
}

/*
 * Find the lep that belongs to the ifnet on which the SYN frame was received.
 */
struct c4iw_listen_ep *
find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
{
	struct adapter *adap = NULL;
	struct c4iw_listen_ep *lep = NULL;
	if_t ifp = NULL, hw_ifp = NULL;
	struct listen_port_info *port_info = NULL;
	int i = 0, found_portinfo = 0, found_lep = 0;
	uint16_t port;

	/*
	 * STEP 1: Figure out the 'ifp' of the physical interface, not pseudo
	 * interfaces like vlan, lagg, etc.
	 * TBD: lagg support, lagg + vlan support.
	 */
	ifp = TOEPCB(so)->l2te->ifp;
	if (if_gettype(ifp) == IFT_L2VLAN) {
		hw_ifp = VLAN_TRUNKDEV(ifp);
		if (hw_ifp == NULL) {
			CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of "
				"vlan ifnet %p, sock %p, master_lep %p",
				__func__, ifp, so, master_lep);
			return (NULL);
		}
	} else
		hw_ifp = ifp;

	/* STEP 2: Find 'port_info' with the listener's local port address. */
	port = (master_lep->com.local_addr.ss_family == AF_INET) ?
		((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
		((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;

	mutex_lock(&listen_port_mutex);
	list_for_each_entry(port_info, &listen_port_list, list)
		if (port_info->port_num == port) {
			found_portinfo = 1;
			break;
		}
	if (!found_portinfo)
		goto out;

	/* STEP 3: Traverse the list of leps that are bound to the current
	 * TCP port address and find the lep that belongs to the ifnet on
	 * which the SYN frame was received.
	 */
	list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
		adap = lep->com.dev->rdev.adap;
		for_each_port(adap, i) {
			if (hw_ifp == adap->port[i]->vi[0].ifp) {
				found_lep = 1;
				goto out;
			}
		}
	}
out:
	mutex_unlock(&listen_port_mutex);
	return found_lep ? lep : (NULL);
}

static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int abort = 1;

	CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
			ep, ep->hwtid, ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u",
				__func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return;
}

struct cqe_list_entry {
	struct list_head entry;
	struct c4iw_dev *rhp;
	struct t4_cqe err_cqe;
};

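/*
 * Drain the deferred error-CQE list.  The err_cqe_lock spinlock is dropped
 * around each c4iw_ev_dispatch() call and reacquired afterwards, so the
 * dispatch never runs with the spinlock held; entries queued in the
 * meantime are picked up on the next iteration.
 */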
static void
process_err_cqes(void)
{
	unsigned long flag;
	struct cqe_list_entry *cle;

	spin_lock_irqsave(&err_cqe_lock, flag);
	while (!list_empty(&err_cqe_list)) {
		struct list_head *tmp;
		tmp = err_cqe_list.next;
		list_del(tmp);
		tmp->next = tmp->prev = NULL;
		spin_unlock_irqrestore(&err_cqe_lock, flag);
		cle = list_entry(tmp, struct cqe_list_entry, entry);
		c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
		free(cle, M_CXGBE);
		spin_lock_irqsave(&err_cqe_lock, flag);
	}
	spin_unlock_irqrestore(&err_cqe_lock, flag);

	return;
}

static void
process_req(struct work_struct *ctx)
{
	struct c4iw_ep_common *epc;
	unsigned long flag;
	int ep_events;

	process_err_cqes();
	spin_lock_irqsave(&req_lock, flag);
	while (!TAILQ_EMPTY(&req_list)) {
		epc = TAILQ_FIRST(&req_list);
		TAILQ_REMOVE(&req_list, epc, entry);
		epc->entry.tqe_prev = NULL;
		ep_events = epc->ep_events;
		epc->ep_events = 0;
		spin_unlock_irqrestore(&req_lock, flag);
		mutex_lock(&epc->mutex);
		CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x",
		    __func__, epc->so, epc, states[epc->state], ep_events);
		if (ep_events & C4IW_EVENT_TERM)
			process_terminate((struct c4iw_ep *)epc);
		if (ep_events & C4IW_EVENT_TIMEOUT)
			process_timeout((struct c4iw_ep *)epc);
		if (ep_events & C4IW_EVENT_SOCKET)
			process_socket_event((struct c4iw_ep *)epc);
		mutex_unlock(&epc->mutex);
		c4iw_put_ep(epc);
		process_err_cqes();
		spin_lock_irqsave(&req_lock, flag);
	}
	spin_unlock_irqrestore(&req_lock, flag);
}

/*
 * XXX: doesn't belong here in the iWARP driver.
 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
 *      set.  Is this a valid assumption for active open?
 */
static int
set_tcpinfo(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct toepcb *toep;
	int rc = 0;

	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if ((tp->t_flags & TF_TOE) == 0) {
		rc = EINVAL;
		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
		    __func__, so, ep);
		goto done;
	}
	toep = TOEPCB(so);

	ep->hwtid = toep->tid;
	ep->snd_seq = tp->snd_nxt;
	ep->rcv_seq = tp->rcv_nxt;
done:
	INP_WUNLOCK(inp);
	return (rc);
}

static int
get_ifnet_from_raddr(struct sockaddr_storage *raddr, if_t *ifp)
{
	int err = 0;
	struct nhop_object *nh;

	if (raddr->ss_family == AF_INET) {
		struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr;

		nh = fib4_lookup(RT_DEFAULT_FIB, raddr4->sin_addr, 0,
				NHR_NONE, 0);
	} else {
		struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr;
		struct in6_addr addr6;
		uint32_t scopeid;

		memset(&addr6, 0, sizeof(addr6));
		in6_splitscope((struct in6_addr *)&raddr6->sin6_addr,
					&addr6, &scopeid);
		nh = fib6_lookup(RT_DEFAULT_FIB, &addr6, scopeid,
				NHR_NONE, 0);
	}

	if (nh == NULL)
		err = EHOSTUNREACH;
	else
		*ifp = nh->nh_ifp;
	CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err);
	return err;
}

static void
close_socket(struct socket *so)
{
	uninit_iwarp_socket(so);
	soclose(so);
}

static void
process_peer_close(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int disconnect = 1;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	switch (ep->com.state) {

		case MPA_REQ_WAIT:
			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD",
			    __func__, ep);
			/* Fallthrough */
		case MPA_REQ_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
			    __func__, ep);
			ep->com.state = DEAD;
			connect_reply_upcall(ep, -ECONNABORTED);

			disconnect = 0;
			STOP_EP_TIMER(ep);
			close_socket(ep->com.so);
			deref_cm_id(&ep->com);
			release = 1;
			break;

		case MPA_REQ_RCVD:

			/*
			 * We're gonna mark this puppy DEAD, but keep
			 * the reference on it until the ULP accepts or
			 * rejects the CR.
			 */
			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
			    __func__, ep);
			ep->com.state = CLOSING;
			break;

		case MPA_REP_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
			    __func__, ep);
			ep->com.state = CLOSING;
			break;

		case FPDU_MODE:
			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
			    __func__, ep);
			START_EP_TIMER(ep);
			ep->com.state = CLOSING;
			attrs.next_state = C4IW_QP_STATE_CLOSING;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			peer_close_upcall(ep);
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
			    __func__, ep);
			ep->com.state = MORIBUND;
			disconnect = 0;
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);
			if (ep->com.cm_id && ep->com.qp) {
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
						C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			}
			close_socket(ep->com.so);
			close_complete_upcall(ep, 0);
			ep->com.state = DEAD;
			release = 1;
			disconnect = 0;
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		default:
			panic("%s: ep %p state %d", __func__, ep,
			    ep->com.state);
			break;
	}

	if (disconnect) {

		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
	}
	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
	return;
}

static void
process_conn_error(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int ret;
	int state;

	state = ep->com.state;
	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
	    __func__, ep, ep->com.so, ep->com.so->so_error,
	    states[ep->com.state]);

	switch (state) {

		case MPA_REQ_WAIT:
			STOP_EP_TIMER(ep);
			c4iw_put_ep(&ep->parent_ep->com);
			break;

		case MPA_REQ_SENT:
			STOP_EP_TIMER(ep);
			connect_reply_upcall(ep, -ECONNRESET);
			break;

		case MPA_REP_SENT:
			ep->com.rpl_err = ECONNRESET;
			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
			break;

		case MPA_REQ_RCVD:
			break;

		case MORIBUND:
		case CLOSING:
			STOP_EP_TIMER(ep);
			/*FALLTHROUGH*/
		case FPDU_MODE:

			if (ep->com.cm_id && ep->com.qp) {

				attrs.next_state = C4IW_QP_STATE_ERROR;
				ret = c4iw_modify_qp(ep->com.qp->rhp,
					ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
					&attrs, 1);
				if (ret)
					log(LOG_ERR,
					    "%s - qp <- error failed!\n",
					    __func__);
			}
			peer_abort_upcall(ep);
			break;

		case ABORTING:
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
			    __func__, ep->com.so->so_error);
			return;

		default:
			panic("%s: ep %p state %d", __func__, ep, state);
			break;
	}

	if (state != ABORTING) {
		close_socket(ep->com.so);
		ep->com.state = DEAD;
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
	return;
}

static void
process_close_complete(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs = {0};
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	/* The cm_id may be null if we failed to connect */
	set_bit(CLOSE_CON_RPL, &ep->com.history);

	switch (ep->com.state) {

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
			    __func__, ep);
			ep->com.state = MORIBUND;
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);

			if ((ep->com.cm_id) && (ep->com.qp)) {

				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
				    __func__, ep);
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.dev,
						ep->com.qp,
						C4IW_QP_ATTR_NEXT_STATE,
						&attrs, 1);
			}

			close_socket(ep->com.so);
			close_complete_upcall(ep, 0);
			ep->com.state = DEAD;
			release = 1;
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
			break;
		default:
			CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
					__func__, ep);
			panic("%s:pcc6 %p unknown ep state", __func__, ep);
			break;
	}

	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
		release_ep_resources(ep);
	}
	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
	return;
}

static void
setiwsockopt(struct socket *so)
{
	int rc;
	struct sockopt sopt;
	int on = 1;

	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;
	sopt.sopt_td = NULL;
	rc = -sosetopt(so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
		    __func__, so, rc);
	}
}

static void
init_iwarp_socket(struct socket *so, void *arg)
{
	if (SOLISTENING(so)) {
		SOLISTEN_LOCK(so);
		solisten_upcall_set(so, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;
		SOLISTEN_UNLOCK(so);
	} else {
		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
		so->so_state |= SS_NBIO;
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
}

static void
uninit_iwarp_socket(struct socket *so)
{
	if (SOLISTENING(so)) {
		SOLISTEN_LOCK(so);
		solisten_upcall_set(so, NULL, NULL);
		SOLISTEN_UNLOCK(so);
	} else {
		SOCKBUF_LOCK(&so->so_rcv);
		soupcall_clear(so, SO_RCV);
		SOCKBUF_UNLOCK(&so->so_rcv);
	}
}

static void
process_data(struct c4iw_ep *ep)
{
	int ret = 0;
	int disconnect = 0;
	struct c4iw_qp_attributes attrs = {0};

	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));

	switch (ep->com.state) {
	case MPA_REQ_SENT:
		disconnect = process_mpa_reply(ep);
		break;
	case MPA_REQ_WAIT:
		disconnect = process_mpa_request(ep);
		if (disconnect)
			/* Referenced in process_newconn() */
			c4iw_put_ep(&ep->parent_ep->com);
		break;
	case FPDU_MODE:
		MPASS(ep->com.qp != NULL);
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		if (ret != -EINPROGRESS)
			disconnect = 1;
		break;
	default:
		log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
			    "state %d, so %p, so_state 0x%x, sbused %u\n",
			    __func__, ep, ep->com.state, ep->com.so,
			    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
		break;
	}
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
}

static void
process_connected(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;

	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
		if (send_mpa_req(ep))
			goto err;
	} else {
		connect_reply_upcall(ep, -so->so_error);
		goto err;
	}
	return;
err:
	close_socket(so);
	ep->com.state = DEAD;
	c4iw_put_ep(&ep->com);
	return;
}

static inline bool c4iw_zero_addr(struct sockaddr *addr)
{
	struct in6_addr *ip6;

	if (addr->sa_family == AF_INET)
		return (((struct sockaddr_in *)addr)->sin_addr.s_addr == 0);
	else {
		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
				ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
	}
}

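/*
 * A private 127/8 test is used when no vnet is supplied, since the real
 * IN_LOOPBACK() macro is VNET-aware and needs a vnet context (hence the
 * CURVNET_SET_QUIET() in the branch below that does have one).
 */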
#define _IN_LOOPBACK(i)	(((in_addr_t)(i) & 0xff000000) == 0x7f000000)
static inline bool c4iw_loopback_addr(struct sockaddr *addr, struct vnet *vnet)
{
	bool ret;

	if (addr->sa_family == AF_INET) {
		if (vnet == NULL)
			ret = _IN_LOOPBACK(ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
		else {
			CURVNET_SET_QUIET(vnet);
			ret = IN_LOOPBACK(ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
			CURVNET_RESTORE();
		}
	} else {
		ret = IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *) addr)->sin6_addr);
	}
	return (ret);
}
#undef _IN_LOOPBACK

static inline bool c4iw_any_addr(struct sockaddr *addr, struct vnet *vnet)
{
	return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr, vnet);
}
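
/*
 * process_newconn() below uses c4iw_any_addr() to decide whether the
 * listener was bound to a wildcard (or loopback) address; only in that
 * case must the real lep be resolved from the ifnet on which the
 * connection arrived.
 */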

static void
process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
{
	struct c4iw_listen_ep *real_lep = NULL;
	struct c4iw_ep *new_ep = NULL;
	struct sockaddr_in *remote = NULL;
	int ret = 0;

	MPASS(new_so != NULL);

	if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr,
	    new_so->so_vnet)) {
		/* Here we need to find the 'real_lep' that belongs to the
		 * incoming socket's network interface, such that the newly
		 * created 'ep' can be attached to the real 'lep'.
		 */
		real_lep = find_real_listen_ep(master_lep, new_so);
		if (real_lep == NULL) {
			CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
					"ep for sock: %p", __func__, new_so);
			log(LOG_ERR, "%s: Could not find the real listen ep for "
					"sock: %p\n", __func__, new_so);
			/* FIXME: properly free the 'new_so' in the failure
			 * case.  Use of soabort() and soclose() is not legal
			 * here (before soaccept()).
			 */
			return;
		}
	} else /* for a non-wildcard address, master_lep is always the real_lep */
		real_lep = master_lep;

	new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);

	CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
	    "listening so %p, new so %p", __func__, master_lep, real_lep,
	    new_ep, master_lep->com.so, new_so);

	new_ep->com.dev = real_lep->com.dev;
	new_ep->com.so = new_so;
	new_ep->com.cm_id = NULL;
	new_ep->com.thread = real_lep->com.thread;
	new_ep->parent_ep = real_lep;

	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
	c4iw_get_ep(&real_lep->com);
	init_timer(&new_ep->timer);
	new_ep->com.state = MPA_REQ_WAIT;

	setiwsockopt(new_so);
	ret = soaccept(new_so, (struct sockaddr **)&remote);
	if (ret != 0) {
		CTR4(KTR_IW_CXGBE,
				"%s:listen sock:%p, new sock:%p, ret:%d",
				__func__, master_lep->com.so, new_so, ret);
		if (remote != NULL)
			free(remote, M_SONAME);
		soclose(new_so);
		c4iw_put_ep(&new_ep->com);
		c4iw_put_ep(&real_lep->com);
		return;
	}
	free(remote, M_SONAME);

	START_EP_TIMER(new_ep);

	/* An MPA request might have been queued up on the socket already, so
	 * initialize the socket/upcall handler under the lock to prevent the
	 * MPA request from being processed on another thread (via
	 * process_req()) simultaneously.
	 */
	c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below; this is
				      to avoid freeing the ep before it is
				      unlocked. */
	mutex_lock(&new_ep->com.mutex);
	init_iwarp_socket(new_so, &new_ep->com);

	ret = process_mpa_request(new_ep);
	if (ret) {
		/* ABORT */
		c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
		c4iw_put_ep(&real_lep->com);
	}
	mutex_unlock(&new_ep->com.mutex);
	c4iw_put_ep(&new_ep->com);
	return;
}

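/*
 * Record the pending event bits on the ep and, unless it is already queued
 * (entry.tqe_prev doubles as the on-list marker; process_req() clears it),
 * take a reference and put the ep on req_list for the taskqueue worker.
 */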
static int
add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
{
	unsigned long flag;

	spin_lock_irqsave(&req_lock, flag);
	if (ep && ep->com.so) {
		ep->com.ep_events |= new_ep_event;
		if (!ep->com.entry.tqe_prev) {
			c4iw_get_ep(&ep->com);
			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
			queue_work(c4iw_taskq, &c4iw_task);
		}
	}
	spin_unlock_irqrestore(&req_lock, flag);

	return (0);
}

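/*
 * Socket upcall.  This runs in the socket layer's context with socket
 * locks held, so apart from waking up sleepers it only records the event
 * and defers all real work to the taskqueue via add_ep_to_req_list().
 */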
static int
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
	struct c4iw_ep *ep = arg;

	CTR6(KTR_IW_CXGBE,
	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
	    __func__, so, so->so_state, ep, states[ep->com.state],
	    ep->com.entry.tqe_prev);

	MPASS(ep->com.so == so);
	/*
	 * Wake up any threads waiting in rdma_init()/rdma_fini(),
	 * with locks held.
	 */
	if (so->so_error || (ep->com.dev->rdev.flags & T4_FATAL_ERROR))
		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);

	return (SU_OK);
}

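/*
 * CPL_RDMA_TERMINATE handler: map the tid back to its toepcb and socket,
 * recover the ep from the so_rcv upcall argument, and defer the actual
 * TERMINATE processing to the taskqueue.
 */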
static int
terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
{
	struct adapter *sc = iq->adapter;
	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
	unsigned int tid = GET_TID(cpl);
	struct toepcb *toep = lookup_tid(sc, tid);
	struct socket *so;
	struct c4iw_ep *ep;

	INP_WLOCK(toep->inp);
	so = inp_inpcbtosocket(toep->inp);
	ep = so->so_rcv.sb_upcallarg;
	INP_WUNLOCK(toep->inp);

	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
	add_ep_to_req_list(ep, C4IW_EVENT_TERM);

	return 0;
}

static void
process_socket_event(struct c4iw_ep *ep)
{
	int state = ep->com.state;
	struct socket *so = ep->com.so;

	if (ep->com.state == DEAD) {
		CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
		    "ep %p ep_state %s", __func__, ep, states[state]);
		return;
	}

	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
	    so->so_error, so->so_rcv.sb_state, ep, states[state]);

	if (state == CONNECTING) {
		process_connected(ep);
		return;
	}

	if (state == LISTEN) {
		struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
		struct socket *listen_so = so, *new_so = NULL;
		int error = 0;

		SOLISTEN_LOCK(listen_so);
		do {
			error = solisten_dequeue(listen_so, &new_so,
						SOCK_NONBLOCK);
			if (error) {
				CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
					"error %d", __func__, lep, listen_so,
					error);
				return;
			}
			process_newconn(lep, new_so);

			/* solisten_dequeue() unlocks on return, so acquire
			 * the lock again for the sol_qlen check and for the
			 * next iteration.
			 */
			SOLISTEN_LOCK(listen_so);
		} while (listen_so->sol_qlen);
		SOLISTEN_UNLOCK(listen_so);

		return;
	}

	/* connection error */
	if (so->so_error) {
		process_conn_error(ep);
		return;
	}

	/* peer close */
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
		process_peer_close(ep);
		/*
		 * check whether a socket disconnect event is pending before
		 * returning.  Fall through if yes.
		 */
		if (!(so->so_state & SS_ISDISCONNECTED))
			return;
	}

	/* close complete */
	if (so->so_state & SS_ISDISCONNECTED) {
		process_close_complete(ep);
		return;
	}

	/* rx data */
	if (sbused(&ep->com.so->so_rcv)) {
		process_data(ep);
		return;
	}

	/* Socket events for 'MPA Request Received' and 'Close Complete'
	 * were already processed earlier by their respective event handlers.
	 * Hence, these socket events are skipped here.
	 * Any other socket event must have been handled above.
	 */
	MPASS((ep->com.state == MPA_REQ_RCVD) || (ep->com.state == MORIBUND));

	if ((ep->com.state != MPA_REQ_RCVD) && (ep->com.state != MORIBUND))
		log(LOG_ERR, "%s: Unprocessed socket event so %p, "
		    "so_state 0x%x, so_err %d, sb_state 0x%x, ep %p, "
		    "ep_state %s\n", __func__, so, so->so_state, so->so_error,
		    so->so_rcv.sb_state, ep, states[state]);
}

SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "iw_cxgbe driver parameters");

static int dack_mode = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
		"Delayed ack mode (default = 0)");

int c4iw_max_read_depth = 8;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
		"Per-connection max ORD/IRD (default = 8)");

static int enable_tcp_timestamps;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
		"Enable tcp timestamps (default = 0)");

static int enable_tcp_sack;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
		"Enable tcp SACK (default = 0)");

static int enable_tcp_window_scaling = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
		"Enable tcp window scaling (default = 1)");

int c4iw_debug = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
		"Enable debug logging (default = 0)");

static int peer2peer = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
		"Support peer2peer ULPs (default = 1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
		"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
		"CM Endpoint operation timeout in seconds (default = 60)");

static int mpa_rev = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
		"MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");

static int markers_enabled;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
		"Enable MPA MARKERS (default(0) = disabled)");

static int crc_enabled = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
		"Enable MPA CRC (default(1) = enabled)");

static int rcv_win = 256 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
		"TCP receive window in bytes (default = 256KB)");

static int snd_win = 128 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
		"TCP send window in bytes (default = 128KB)");

int use_dsgl = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, use_dsgl, CTLFLAG_RWTUN, &use_dsgl, 0,
		"Use DSGL for PBL/FastReg (default = 1)");

int inline_threshold = 128;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, inline_threshold, CTLFLAG_RWTUN, &inline_threshold, 0,
		"Inline vs DSGL threshold (default = 128)");

static int reuseaddr = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, reuseaddr, CTLFLAG_RWTUN, &reuseaddr, 0,
		"Enable SO_REUSEADDR & SO_REUSEPORT socket options on all iWARP client connections (default = 0)");

static void
start_ep_timer(struct c4iw_ep *ep)
{

	if (timer_pending(&ep->timer)) {
		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
		    ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

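/*
 * Returns 0 if this call stopped a pending timer (and dropped the
 * reference the timer held), or 1 if the TIMEOUT bit was already set,
 * i.e. the timer had already fired or been stopped elsewhere.
 */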
static int
stop_ep_timer(struct c4iw_ep *ep)
{

	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}

static void *
alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc == NULL)
		return (NULL);

	kref_init(&epc->kref);
	mutex_init(&epc->mutex);
	c4iw_init_wr_wait(&epc->wr_wait);

	return (epc);
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;
#if defined(KTR) || defined(INVARIANTS)
	struct c4iw_ep_common *epc;
#endif

	ep = container_of(kref, struct c4iw_ep, com.kref);
#if defined(KTR) || defined(INVARIANTS)
	epc = &ep->com;
#endif
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
	    __func__, epc));
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx",
	    __func__, ep, epc->history, epc->flags);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
}

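/*
 * Build the MPA request in a temporary buffer, copy it into an mbuf chain
 * and stream it over the connection with sosend().  For MPA v2, an
 * mpa_v2_conn_params block carrying IRD/ORD and the peer-to-peer RTR mode
 * is placed ahead of any private data, and private_data_size is enlarged
 * to cover it.
 */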
static int
send_mpa_req(struct c4iw_ep *ep)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	char mpa_rev_to_use = mpa_rev;
	int err = 0;

	if (ep->retry_with_mpa_v1)
		mpa_rev_to_use = 1;
	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL) {
		err = -ENOMEM;
		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",
				__func__, ep, err);
		goto err;
	}

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		(markers_enabled ? MPA_MARKERS : 0) |
		(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;

	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					    sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
			}
		}
		memcpy(mpa->private_data, &mpa_v2_params,
			sizeof(struct mpa_v2_conn_params));

		if (ep->plen) {

			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params),
				ep->mpa_pkt + sizeof(*mpa), ep->plen);
		}
	} else {

		if (ep->plen)
			memcpy(mpa->private_data,
					ep->mpa_pkt + sizeof(*mpa), ep->plen);
		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
	}

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		err = -ENOMEM;
		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",
				__func__, ep, err);
		free(mpa, M_CXGBE);
		goto err;
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
			ep->com.thread);
	if (err) {
		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",
				__func__, ep, err);
		goto err;
	}

	START_EP_TIMER(ep);
	ep->com.state = MPA_REQ_SENT;
	ep->mpa_attr.initiator = 1;
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
	return 0;
err:
	connect_reply_upcall(ep, err);
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
	return err;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	int err;

	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
	    ep->plen);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpalen += sizeof(struct mpa_v2_conn_params);
		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
		    ep->mpa_attr.version, mpalen);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
					    sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
				(peer2peer ? MPA_V2_PEER2PEER_MODEL :
				 0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					(p2p_type ==
					 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					 FW_RI_INIT_P2PTYPE_READ_REQ ?
					 MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
				sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params), pdata, plen);
		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
	if (!err)
		ep->snd_seq += mpalen;
	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
	return err;
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mbuf *m;
	struct mpa_v2_conn_params mpa_v2_params;
	int err;

	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
		    ep->mpa_attr.version);
		mpalen += sizeof(struct mpa_v2_conn_params);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		(markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
			htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);

		if (peer2peer && (ep->mpa_attr.p2p_type !=
			FW_RI_INIT_P2PTYPE_DISABLED)) {

			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {

				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {

				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
		}

		memcpy(mpa->private_data, &mpa_v2_params,
			sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	ep->com.state = MPA_REP_SENT;
	ep->snd_seq += mpalen;
	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
			ep->com.thread);
	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
	return err;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
}

static int
send_abort(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct sockopt sopt;
	int rc;
	struct linger l;

	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
	    states[ep->com.state], ep->hwtid);

	l.l_onoff = 1;
	l.l_linger = 0;

	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	sopt.sopt_td = NULL;
	rc = -sosetopt(so, &sopt);
	if (rc != 0) {
		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
		    __func__, so, rc);
	}

	uninit_iwarp_socket(so);
	soclose(so);
	set_bit(ABORT_CONN, &ep->com.history);

	/*
	 * TBD: the iw_cxgbe driver should receive an ABORT reply for every
	 * ABORT request it has sent.  But the current TOE driver does not
	 * propagate this ABORT reply event (via do_abort_rpl) to iw_cxgbe.
	 * So as a workaround, de-reference 'ep' here instead of doing it in
	 * the (not yet implemented) abort_rpl() handler of the iw_cxgbe
	 * driver.
	 */
	release_ep_resources(ep);
	ep->com.state = DEAD;

	return (0);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
					-ECONNRESET : status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {

		if (!ep->tried_with_mpa_v1) {

			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
			/* this means MPA_v2 is used */
			event.ord = ep->ird;
			event.ird = ep->ord;
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {

			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
			/* this means MPA_v1 is used */
			event.ord = c4iw_max_read_depth;
			event.ird = c4iw_max_read_depth;
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
		set_bit(CONN_RPL_UPCALL, &ep->com.history);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}

	if (status == -ECONNABORTED) {

		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
		return;
	}

	if (status < 0) {

		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
		deref_cm_id(&ep->com);
	}

	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
}
1781 
1782 static int connect_request_upcall(struct c4iw_ep *ep)
1783 {
1784 	struct iw_cm_event event;
1785 	int ret;
1786 
1787 	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
1788 	    ep->tried_with_mpa_v1);
1789 
1790 	memset(&event, 0, sizeof(event));
1791 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
1792 	event.local_addr = ep->com.local_addr;
1793 	event.remote_addr = ep->com.remote_addr;
1794 	event.provider_data = ep;
1795 
1796 	if (!ep->tried_with_mpa_v1) {
1797 		/* this means MPA_v2 is used */
1798 		event.ord = ep->ord;
1799 		event.ird = ep->ird;
1800 		event.private_data_len = ep->plen -
1801 			sizeof(struct mpa_v2_conn_params);
1802 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1803 			sizeof(struct mpa_v2_conn_params);
1804 	} else {
1805 
1806 		/* this means MPA_v1 is used. Send max supported */
1807 		event.ord = c4iw_max_read_depth;
1808 		event.ird = c4iw_max_read_depth;
1809 		event.private_data_len = ep->plen;
1810 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1811 	}
1812 
1813 	c4iw_get_ep(&ep->com);
1814 	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1815 	    &event);
	if (ret) {
1817 		CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
1818 			" IWCM, err:%d", __func__, ep, ret);
1819 		c4iw_put_ep(&ep->com);
1820 	} else
1821 		/* Dereference parent_ep only in success case.
1822 		 * In case of failure, parent_ep is dereferenced by the caller
1823 		 * of process_mpa_request().
1824 		 */
1825 		c4iw_put_ep(&ep->parent_ep->com);
1826 
1827 	set_bit(CONNREQ_UPCALL, &ep->com.history);
1828 	return ret;
1829 }
1830 
1831 static void established_upcall(struct c4iw_ep *ep)
1832 {
1833 	struct iw_cm_event event;
1834 
1835 	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
1836 	memset(&event, 0, sizeof(event));
1837 	event.event = IW_CM_EVENT_ESTABLISHED;
1838 	event.ird = ep->ord;
1839 	event.ord = ep->ird;
1840 
1841 	if (ep->com.cm_id) {
1842 
1843 		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
1844 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1845 		set_bit(ESTAB_UPCALL, &ep->com.history);
1846 	}
1847 	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
1848 }
1849 
1850 
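/*
 * With relaxed IRD negotiation, instead of failing the connection with
 * MPA_INSUFF_IRD the driver adjusts the local IRD/ORD values to what the
 * peer advertised (growing them only within the adapter's max_ordird_qp
 * limit).
 */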
1851 #define RELAXED_IRD_NEGOTIATION 1
1852 
1853 /*
1854  * process_mpa_reply - process streaming mode MPA reply
1855  *
1856  * Returns:
1857  *
1858  * 0 upon success indicating a connect request was delivered to the ULP
 * 0 upon success indicating the connect reply was delivered to the ULP
 * or the MPA reply is incomplete but valid so far.
1861  * 1 if a failure requires the caller to close the connection.
1862  *
1863  * 2 if a failure requires the caller to abort the connection.
1864  */
1865 static int process_mpa_reply(struct c4iw_ep *ep)
1866 {
1867 	struct mpa_message *mpa;
1868 	struct mpa_v2_conn_params *mpa_v2_params;
1869 	u16 plen;
1870 	u16 resp_ird, resp_ord;
1871 	u8 rtr_mismatch = 0, insuff_ird = 0;
1872 	struct c4iw_qp_attributes attrs = {0};
1873 	enum c4iw_qp_attr_mask mask;
1874 	int err;
1875 	struct mbuf *top, *m;
1876 	int flags = MSG_DONTWAIT;
1877 	struct uio uio;
1878 	int disconnect = 0;
1879 
1880 	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);
1881 
1882 	/*
1883 	 * Stop mpa timer.  If it expired, then
1884 	 * we ignore the MPA reply.  process_timeout()
1885 	 * will abort the connection.
1886 	 */
1887 	if (STOP_EP_TIMER(ep))
1888 		return 0;
1889 
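	/*
	 * Pass a non-NULL mp0 so that soreceive() returns the raw mbuf
	 * chain instead of copying into an iovec; uio_resid then only
	 * caps how much may be read in one call.
	 */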
1890 	uio.uio_resid = 1000000;
1891 	uio.uio_td = ep->com.thread;
1892 	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
1893 
1894 	if (err) {
1895 
1896 		if (err == EWOULDBLOCK) {
1897 
1898 			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
1899 			START_EP_TIMER(ep);
1900 			return 0;
1901 		}
1902 		err = -err;
1903 		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
1904 		goto err;
1905 	}
1906 
1907 	if (ep->com.so->so_rcv.sb_mb) {
1908 
1909 		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
1910 		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
1911 		       __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
1912 	}
1913 
1914 	m = top;
1915 
1916 	do {
1917 
1918 		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
1919 		/*
1920 		 * If we get more than the supported amount of private data
1921 		 * then we must fail this connection.
1922 		 */
1923 		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
1924 
1925 			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
1926 			    ep->mpa_pkt_len + m->m_len);
1927 			err = (-EINVAL);
1928 			goto err_stop_timer;
1929 		}
1930 
1931 		/*
1932 		 * copy the new data into our accumulation buffer.
1933 		 */
1934 		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
1935 		ep->mpa_pkt_len += m->m_len;
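		/*
		 * Advance through the chain: m_next links mbufs within a
		 * record, m_nextpkt moves on to the next record.
		 */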
1936 		if (!m->m_next)
1937 			m = m->m_nextpkt;
1938 		else
1939 			m = m->m_next;
1940 	} while (m);
1941 
1942 	m_freem(top);
1943 	/*
1944 	 * if we don't even have the mpa message, then bail.
1945 	 */
1946 	if (ep->mpa_pkt_len < sizeof(*mpa)) {
1947 		return 0;
1948 	}
1949 	mpa = (struct mpa_message *) ep->mpa_pkt;
1950 
1951 	/* Validate MPA header. */
1952 	if (mpa->revision > mpa_rev) {
1953 
1954 		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
1955 		    mpa->revision, mpa_rev);
1956 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
1957 				" Received = %d\n", __func__, mpa_rev, mpa->revision);
1958 		err = -EPROTO;
1959 		goto err_stop_timer;
1960 	}
1961 
1962 	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1963 
1964 		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
1965 		err = -EPROTO;
1966 		goto err_stop_timer;
1967 	}
1968 
1969 	plen = ntohs(mpa->private_data_size);
1970 
1971 	/*
1972 	 * Fail if there's too much private data.
1973 	 */
1974 	if (plen > MPA_MAX_PRIVATE_DATA) {
1975 
1976 		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
1977 		err = -EPROTO;
1978 		goto err_stop_timer;
1979 	}
1980 
1981 	/*
1982 	 * If plen does not account for pkt size
1983 	 */
1984 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1985 
1986 		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
1988 		err = -EPROTO;
1989 		goto err_stop_timer;
1990 	}
1991 
1992 	ep->plen = (u8) plen;
1993 
1994 	/*
1995 	 * If we don't have all the pdata yet, then bail.
1996 	 * We'll continue process when more data arrives.
1997 	 */
1998 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
1999 
2000 		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
2001 		return 0;
2002 	}
2003 
2004 	if (mpa->flags & MPA_REJECT) {
2005 
2006 		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
2007 		err = -ECONNREFUSED;
2008 		goto err_stop_timer;
2009 	}
2010 
2011 	/*
2012 	 * If we get here we have accumulated the entire mpa
2013 	 * start reply message including private data. And
2014 	 * the MPA header is valid.
2015 	 */
2016 	ep->com.state = FPDU_MODE;
	ep->mpa_attr.crc_enabled =
	    ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
2018 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
2019 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2020 	ep->mpa_attr.version = mpa->revision;
2021 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2022 
2023 	if (mpa->revision == 2) {
2024 
2025 		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
2026 		ep->mpa_attr.enhanced_rdma_conn =
2027 			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2028 
2029 		if (ep->mpa_attr.enhanced_rdma_conn) {
2030 
2031 			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
2032 			mpa_v2_params = (struct mpa_v2_conn_params *)
2033 				(ep->mpa_pkt + sizeof(*mpa));
2034 			resp_ird = ntohs(mpa_v2_params->ird) &
2035 				MPA_V2_IRD_ORD_MASK;
2036 			resp_ord = ntohs(mpa_v2_params->ord) &
2037 				MPA_V2_IRD_ORD_MASK;
2038 
2039 			/*
2040 			 * This is a double-check. Ideally, below checks are
2041 			 * not required since ird/ord stuff has been taken
2042 			 * care of in c4iw_accept_cr
2043 			 */
2044 			if (ep->ird < resp_ord) {
2045 				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
2046 				   ep->com.dev->rdev.adap->params.max_ordird_qp)
2047 					ep->ird = resp_ord;
2048 				else
2049 					insuff_ird = 1;
2050 			} else if (ep->ird > resp_ord) {
2051 				ep->ird = resp_ord;
2052 			}
2053 			if (ep->ord > resp_ird) {
2054 				if (RELAXED_IRD_NEGOTIATION)
2055 					ep->ord = resp_ird;
2056 				else
2057 					insuff_ird = 1;
2058 			}
2059 			if (insuff_ird) {
2060 				err = -ENOMEM;
2061 				ep->ird = resp_ord;
2062 				ep->ord = resp_ird;
2063 			}
2064 
2065 			if (ntohs(mpa_v2_params->ird) &
2066 				MPA_V2_PEER2PEER_MODEL) {
2067 
2068 				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
2069 				if (ntohs(mpa_v2_params->ord) &
2070 					MPA_V2_RDMA_WRITE_RTR) {
2071 
2072 					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
2073 					ep->mpa_attr.p2p_type =
2074 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				} else if (ntohs(mpa_v2_params->ord) &
2077 					MPA_V2_RDMA_READ_RTR) {
2078 
2079 					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
2080 					ep->mpa_attr.p2p_type =
2081 						FW_RI_INIT_P2PTYPE_READ_REQ;
2082 				}
2083 			}
2084 		}
2085 	} else {
2086 
2087 		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);
2088 
2089 		if (mpa->revision == 1) {
2090 
2091 			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);
2092 
2093 			if (peer2peer) {
2094 
2095 				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
2096 				ep->mpa_attr.p2p_type = p2p_type;
2097 			}
2098 		}
2099 	}
2100 
2101 	if (set_tcpinfo(ep)) {
2102 
2103 		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
2104 		printf("%s set_tcpinfo error\n", __func__);
2105 		err = -ECONNRESET;
2106 		goto err;
2107 	}
2108 
2109 	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
2110 	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
2111 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2112 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
2113 	    ep->mpa_attr.p2p_type);
2114 
2115 	/*
2116 	 * If responder's RTR does not match with that of initiator, assign
2117 	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
2118 	 * generated when moving QP to RTS state.
2119 	 * A TERM message will be sent after QP has moved to RTS state
2120 	 */
2121 	if ((ep->mpa_attr.version == 2) && peer2peer &&
2122 		(ep->mpa_attr.p2p_type != p2p_type)) {
2123 
2124 		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
2125 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2126 		rtr_mismatch = 1;
2127 	}
2128 
2129 
2130 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2131 	attrs.mpa_attr = ep->mpa_attr;
2132 	attrs.max_ird = ep->ird;
2133 	attrs.max_ord = ep->ord;
2134 	attrs.llp_stream_handle = ep;
2135 	attrs.next_state = C4IW_QP_STATE_RTS;
2136 
2137 	mask = C4IW_QP_ATTR_NEXT_STATE |
2138 		C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
2139 		C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
2140 
2141 	/* bind QP and TID with INIT_WR */
2142 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2143 
2144 	if (err) {
2145 
2146 		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
2147 		goto err;
2148 	}
2149 
2150 	/*
2151 	 * If responder's RTR requirement did not match with what initiator
2152 	 * supports, generate TERM message
2153 	 */
2154 	if (rtr_mismatch) {
2155 
2156 		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
2157 		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
2158 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2159 		attrs.ecode = MPA_NOMATCH_RTR;
2160 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2161 		attrs.send_term = 1;
2162 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2163 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2164 		err = -ENOMEM;
2165 		disconnect = 1;
2166 		goto out;
2167 	}
2168 
2169 	/*
2170 	 * Generate TERM if initiator IRD is not sufficient for responder
2171 	 * provided ORD. Currently, we do the same behaviour even when
2172 	 * responder provided IRD is also not sufficient as regards to
2173 	 * initiator ORD.
2174 	 */
2175 	if (insuff_ird) {
2176 
2177 		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
2178 		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
2179 				__func__);
2180 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2181 		attrs.ecode = MPA_INSUFF_IRD;
2182 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2183 		attrs.send_term = 1;
2184 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2185 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2186 		err = -ENOMEM;
2187 		disconnect = 1;
2188 		goto out;
2189 	}
2190 	goto out;
2191 err_stop_timer:
2192 	STOP_EP_TIMER(ep);
2193 err:
2194 	disconnect = 2;
2195 out:
2196 	connect_reply_upcall(ep, err);
2197 	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
2198 	return disconnect;
2199 }
2200 
2201 /*
2202  * process_mpa_request - process streaming mode MPA request
2203  *
2204  * Returns:
2205  *
2206  * 0 upon success indicating a connect request was delivered to the ULP
2207  * or the mpa request is incomplete but valid so far.
2208  *
2209  * 1 if a failure requires the caller to close the connection.
2210  *
2211  * 2 if a failure requires the caller to abort the connection.
2212  */
2213 static int
2214 process_mpa_request(struct c4iw_ep *ep)
2215 {
2216 	struct mpa_message *mpa;
2217 	struct mpa_v2_conn_params *mpa_v2_params;
2218 	u16 plen;
2219 	int flags = MSG_DONTWAIT;
2220 	int rc;
2221 	struct iovec iov;
2222 	struct uio uio;
2223 	enum c4iw_ep_state state = ep->com.state;
2224 
2225 	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);
2226 
2227 	if (state != MPA_REQ_WAIT)
2228 		return 0;
2229 
2230 	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
2231 	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2232 	uio.uio_iov = &iov;
2233 	uio.uio_iovcnt = 1;
2234 	uio.uio_offset = 0;
2235 	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2236 	uio.uio_segflg = UIO_SYSSPACE;
2237 	uio.uio_rw = UIO_READ;
2238 	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */
2239 
2240 	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
2241 	if (rc == EAGAIN)
2242 		return 0;
2243 	else if (rc)
2244 		goto err_stop_timer;
2245 
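	/*
	 * soreceive() advances uio_offset by the number of bytes copied,
	 * so it doubles as the byte count for this read.
	 */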
	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
2247 	    __func__, ep->com.so));
2248 	ep->mpa_pkt_len += uio.uio_offset;
2249 
2250 	/*
2251 	 * If we get more than the supported amount of private data then we must
2252 	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
2253 	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
2254 	 * byte is filled by the soreceive above.
2255 	 */
2256 
2257 	/* Don't even have the MPA message.  Wait for more data to arrive. */
2258 	if (ep->mpa_pkt_len < sizeof(*mpa))
2259 		return 0;
2260 	mpa = (struct mpa_message *) ep->mpa_pkt;
2261 
2262 	/*
2263 	 * Validate MPA Header.
2264 	 */
2265 	if (mpa->revision > mpa_rev) {
2266 		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
2267 		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
2268 		goto err_stop_timer;
2269 	}
2270 
2271 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
2272 		goto err_stop_timer;
2273 
2274 	/*
2275 	 * Fail if there's too much private data.
2276 	 */
2277 	plen = ntohs(mpa->private_data_size);
2278 	if (plen > MPA_MAX_PRIVATE_DATA)
2279 		goto err_stop_timer;
2280 
2281 	/*
2282 	 * If plen does not account for pkt size
2283 	 */
2284 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
2285 		goto err_stop_timer;
2286 
2287 	ep->plen = (u8) plen;
2288 
2289 	/*
2290 	 * If we don't have all the pdata yet, then bail.
2291 	 */
2292 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
2293 		return 0;
2294 
2295 	/*
2296 	 * If we get here we have accumulated the entire mpa
2297 	 * start reply message including private data.
2298 	 */
2299 	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled =
	    ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
2301 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
2302 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2303 	ep->mpa_attr.version = mpa->revision;
2304 	if (mpa->revision == 1)
2305 		ep->tried_with_mpa_v1 = 1;
2306 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2307 
2308 	if (mpa->revision == 2) {
2309 		ep->mpa_attr.enhanced_rdma_conn =
2310 		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2311 		if (ep->mpa_attr.enhanced_rdma_conn) {
2312 			mpa_v2_params = (struct mpa_v2_conn_params *)
2313 				(ep->mpa_pkt + sizeof(*mpa));
2314 			ep->ird = ntohs(mpa_v2_params->ird) &
2315 				MPA_V2_IRD_ORD_MASK;
2316 			ep->ird = min_t(u32, ep->ird,
2317 					cur_max_read_depth(ep->com.dev));
2318 			ep->ord = ntohs(mpa_v2_params->ord) &
2319 				MPA_V2_IRD_ORD_MASK;
2320 			ep->ord = min_t(u32, ep->ord,
2321 					cur_max_read_depth(ep->com.dev));
2322 			CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u",
2323 				 __func__, ep->ird, ep->ord);
2324 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
2325 				if (peer2peer) {
2326 					if (ntohs(mpa_v2_params->ord) &
2327 							MPA_V2_RDMA_WRITE_RTR)
2328 						ep->mpa_attr.p2p_type =
2329 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2330 					else if (ntohs(mpa_v2_params->ord) &
2331 							MPA_V2_RDMA_READ_RTR)
2332 						ep->mpa_attr.p2p_type =
2333 						FW_RI_INIT_P2PTYPE_READ_REQ;
2334 				}
2335 		}
2336 	} else if (mpa->revision == 1 && peer2peer)
2337 		ep->mpa_attr.p2p_type = p2p_type;
2338 
2339 	if (set_tcpinfo(ep))
2340 		goto err_stop_timer;
2341 
2342 	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
2343 	    "xmit_marker_enabled = %d, version = %d", __func__,
2344 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2345 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
2346 
2347 	ep->com.state = MPA_REQ_RCVD;
2348 	STOP_EP_TIMER(ep);
2349 
2350 	/* drive upcall */
2351 	if (ep->parent_ep->com.state != DEAD)
2352 		if (connect_request_upcall(ep))
2353 			goto err_out;
2354 	return 0;
2355 
2356 err_stop_timer:
2357 	STOP_EP_TIMER(ep);
2358 err_out:
2359 	return 2;
2360 }
2361 
/*
 * iwcm->reject.  Reject an incoming MPA connect request, either by
 * sending an MPA reject message (mpa_rev >= 1) or by aborting the
 * connection outright (mpa_rev == 0).
 */
2367 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2368 {
2369 #ifdef KTR
2370 	int err;
2371 #endif
2372 	struct c4iw_ep *ep = to_ep(cm_id);
2373 	int abort = 0;
2374 
2375 	mutex_lock(&ep->com.mutex);
2376 	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
2377 
2378 	if ((ep->com.state == DEAD) ||
2379 			(ep->com.state != MPA_REQ_RCVD)) {
2380 
2381 		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
2382 		mutex_unlock(&ep->com.mutex);
2383 		c4iw_put_ep(&ep->com);
2384 		return -ECONNRESET;
2385 	}
2386 	set_bit(ULP_REJECT, &ep->com.history);
2387 
2388 	if (mpa_rev == 0) {
2389 
2390 		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
2391 		abort = 1;
	} else {
2394 
2395 		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
2396 		abort = send_mpa_reject(ep, pdata, pdata_len);
2397 	}
2398 	STOP_EP_TIMER(ep);
2399 #ifdef KTR
2400 	err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2401 #else
2402 	c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2403 #endif
2404 	mutex_unlock(&ep->com.mutex);
2405 	c4iw_put_ep(&ep->com);
2406 	CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
2407 	return 0;
2408 }
2409 
2410 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2411 {
2412 	int err;
2413 	struct c4iw_qp_attributes attrs = {0};
2414 	enum c4iw_qp_attr_mask mask;
2415 	struct c4iw_ep *ep = to_ep(cm_id);
2416 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2417 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2418 	int abort = 0;
2419 
2420 	mutex_lock(&ep->com.mutex);
2421 	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
2422 
2423 	if ((ep->com.state == DEAD) ||
2424 			(ep->com.state != MPA_REQ_RCVD)) {
2425 
2426 		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
2427 		err = -ECONNRESET;
2428 		goto err_out;
2429 	}
2430 
2431 	BUG_ON(!qp);
2432 
2433 	set_bit(ULP_ACCEPT, &ep->com.history);
2434 
2435 	if ((conn_param->ord > c4iw_max_read_depth) ||
2436 		(conn_param->ird > c4iw_max_read_depth)) {
2437 
2438 		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
2439 		err = -EINVAL;
2440 		goto err_abort;
2441 	}
2442 
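	/*
	 * Re-check IRD/ORD against what the peer advertised in the MPA v2
	 * request: relaxed negotiation shrinks/grows the values (within the
	 * adapter's max_ordird_qp limit) to make them fit; otherwise the
	 * accept fails with -ENOMEM as a resource mismatch.
	 */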
2443 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2444 
2445 		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
2446 
2447 		if (conn_param->ord > ep->ird) {
2448 			if (RELAXED_IRD_NEGOTIATION) {
2449 				conn_param->ord = ep->ird;
2450 			} else {
2451 				ep->ird = conn_param->ird;
2452 				ep->ord = conn_param->ord;
2453 				send_mpa_reject(ep, conn_param->private_data,
2454 						conn_param->private_data_len);
2455 				err = -ENOMEM;
2456 				goto err_abort;
2457 			}
2458 		}
2459 		if (conn_param->ird < ep->ord) {
2460 			if (RELAXED_IRD_NEGOTIATION &&
2461 			    ep->ord <= h->rdev.adap->params.max_ordird_qp) {
2462 				conn_param->ird = ep->ord;
2463 			} else {
2464 				err = -ENOMEM;
2465 				goto err_abort;
2466 			}
2467 		}
2468 	}
2469 	ep->ird = conn_param->ird;
2470 	ep->ord = conn_param->ord;
2471 
2472 	if (ep->mpa_attr.version == 1) {
2473 		if (peer2peer && ep->ird == 0)
2474 			ep->ird = 1;
2475 	} else {
2476 		if (peer2peer &&
2477 		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
2478 		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
2479 			ep->ird = 1;
2480 	}
2481 
2482 	CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d", __func__, __LINE__,
2483 			ep->ird, ep->ord);
2484 
2485 	ep->com.cm_id = cm_id;
2486 	ref_cm_id(&ep->com);
2487 	ep->com.qp = qp;
2488 	ref_qp(ep);
2489 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2490 
2491 	/* bind QP to EP and move to RTS */
2492 	attrs.mpa_attr = ep->mpa_attr;
2493 	attrs.max_ird = ep->ird;
2494 	attrs.max_ord = ep->ord;
2495 	attrs.llp_stream_handle = ep;
2496 	attrs.next_state = C4IW_QP_STATE_RTS;
2497 
2498 	/* bind QP and TID with INIT_WR */
2499 	mask = C4IW_QP_ATTR_NEXT_STATE |
2500 		C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2501 		C4IW_QP_ATTR_MPA_ATTR |
2502 		C4IW_QP_ATTR_MAX_IRD |
2503 		C4IW_QP_ATTR_MAX_ORD;
2504 
2505 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2506 	if (err) {
2507 		CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err);
		goto err_deref_cm_id;
2509 	}
2510 
2511 	err = send_mpa_reply(ep, conn_param->private_data,
2512 			conn_param->private_data_len);
2513 	if (err) {
2514 		CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err);
		goto err_deref_cm_id;
2516 	}
2517 
2518 	ep->com.state = FPDU_MODE;
2519 	established_upcall(ep);
2520 	mutex_unlock(&ep->com.mutex);
2521 	c4iw_put_ep(&ep->com);
2522 	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2523 	return 0;
err_deref_cm_id:
2525 	deref_cm_id(&ep->com);
2526 err_abort:
2527 	abort = 1;
2528 err_out:
2529 	if (abort)
2530 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2531 	mutex_unlock(&ep->com.mutex);
2532 	c4iw_put_ep(&ep->com);
2533 	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2534 	return err;
2535 }
2536 
2537 static int
2538 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
2539 {
2540 	int ret;
2541 	int size, on;
2542 	struct socket *sock = NULL;
2543 	struct sockopt sopt;
2544 
2545 	ret = sock_create_kern(laddr->ss_family,
2546 			SOCK_STREAM, IPPROTO_TCP, &sock);
2547 	if (ret) {
2548 		CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d",
2549 				__func__, ret);
2550 		return ret;
2551 	}
2552 
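	/*
	 * With the reuseaddr knob set, let the bind below share the
	 * address/port with sockets in TIME_WAIT (SO_REUSEADDR) and with
	 * other listeners on the same port (SO_REUSEPORT).
	 */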
2553 	if (reuseaddr) {
2554 		bzero(&sopt, sizeof(struct sockopt));
2555 		sopt.sopt_dir = SOPT_SET;
2556 		sopt.sopt_level = SOL_SOCKET;
2557 		sopt.sopt_name = SO_REUSEADDR;
2558 		on = 1;
2559 		sopt.sopt_val = &on;
2560 		sopt.sopt_valsize = sizeof(on);
2561 		ret = -sosetopt(sock, &sopt);
2562 		if (ret != 0) {
2563 			log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEADDR) "
2564 				"failed with %d.\n", __func__, sock, ret);
2565 		}
2566 		bzero(&sopt, sizeof(struct sockopt));
2567 		sopt.sopt_dir = SOPT_SET;
2568 		sopt.sopt_level = SOL_SOCKET;
2569 		sopt.sopt_name = SO_REUSEPORT;
2570 		on = 1;
2571 		sopt.sopt_val = &on;
2572 		sopt.sopt_valsize = sizeof(on);
2573 		ret = -sosetopt(sock, &sopt);
2574 		if (ret != 0) {
2575 			log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEPORT) "
2576 				"failed with %d.\n", __func__, sock, ret);
2577 		}
2578 	}
2579 
2580 	ret = -sobind(sock, (struct sockaddr *)laddr, curthread);
2581 	if (ret) {
		CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %d",
2583 				__func__, ret);
2584 		sock_release(sock);
2585 		return ret;
2586 	}
2587 
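	/*
	 * Read the bound address back so that a wildcard port of 0 in
	 * laddr is replaced with the port actually assigned by the bind
	 * above.
	 */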
2588 	size = laddr->ss_family == AF_INET6 ?
2589 		sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in);
2590 	ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0);
2591 	if (ret) {
		CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %d",
2593 				__func__, ret);
2594 		sock_release(sock);
2595 		return ret;
2596 	}
2597 
2598 	*so = sock;
2599 	return 0;
2600 }
2601 
2602 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2603 {
2604 	int err = 0;
2605 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2606 	struct c4iw_ep *ep = NULL;
2607 	if_t nh_ifp;        /* Logical egress interface */
2608 	struct epoch_tracker et;
2609 #ifdef VIMAGE
2610 	struct rdma_cm_id *rdma_id = (struct rdma_cm_id*)cm_id->context;
2611 	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
2612 #endif
2613 
2614 	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2615 
2617 	if ((conn_param->ord > c4iw_max_read_depth) ||
2618 		(conn_param->ird > c4iw_max_read_depth)) {
2619 
2620 		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2621 		err = -EINVAL;
2622 		goto out;
2623 	}
2624 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2625 	cm_id->provider_data = ep;
2626 
2627 	init_timer(&ep->timer);
2628 	ep->plen = conn_param->private_data_len;
2629 
2630 	if (ep->plen) {
2631 
2632 		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2633 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2634 				conn_param->private_data, ep->plen);
2635 	}
2636 	ep->ird = conn_param->ird;
2637 	ep->ord = conn_param->ord;
2638 
2639 	if (peer2peer && ep->ord == 0) {
2640 
2641 		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2642 		ep->ord = 1;
2643 	}
2644 
2645 	ep->com.dev = dev;
2646 	ep->com.cm_id = cm_id;
2647 	ref_cm_id(&ep->com);
2648 	ep->com.qp = get_qhp(dev, conn_param->qpn);
2649 
2650 	if (!ep->com.qp) {
2651 
2652 		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2653 		err = -EINVAL;
2654 		goto fail;
2655 	}
2656 	ref_qp(ep);
2657 	ep->com.thread = curthread;
2658 
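	/*
	 * The egress ifnet lookup must run inside the network epoch and,
	 * with VIMAGE, in the vnet the rdma_cm_id belongs to.
	 */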
2659 	NET_EPOCH_ENTER(et);
2660 	CURVNET_SET(vnet);
2661 	err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp);
2662 	CURVNET_RESTORE();
2663 	NET_EPOCH_EXIT(et);
2664 
2665 	if (err) {
2666 
2667 		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2668 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail;
2671 	}
2672 
2673 	if (!(if_getcapenable(nh_ifp) & IFCAP_TOE) ||
2674 	    TOEDEV(nh_ifp) == NULL) {
2675 		err = -ENOPROTOOPT;
2676 		goto fail;
2677 	}
2678 	ep->com.state = CONNECTING;
2679 	ep->tos = 0;
2680 	ep->com.local_addr = cm_id->local_addr;
2681 	ep->com.remote_addr = cm_id->remote_addr;
2682 
2683 	err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
2684 	if (err)
2685 		goto fail;
2686 
2687 	setiwsockopt(ep->com.so);
2688 	init_iwarp_socket(ep->com.so, &ep->com);
2689 	err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2690 		ep->com.thread);
2691 	if (err)
2692 		goto fail_free_so;
2693 	CTR2(KTR_IW_CXGBE, "%s:ccE, ep %p", __func__, ep);
2694 	return 0;
2695 
2696 fail_free_so:
2697 	uninit_iwarp_socket(ep->com.so);
2698 	ep->com.state = DEAD;
2699 	sock_release(ep->com.so);
2700 fail:
2701 	deref_cm_id(&ep->com);
2702 	c4iw_put_ep(&ep->com);
2703 	ep = NULL;
2704 out:
2705 	CTR2(KTR_IW_CXGBE, "%s:ccE Error %d", __func__, err);
2706 	return err;
2707 }
2708 
2709 /*
2710  * iwcm->create_listen.  Returns -errno on failure.
2711  */
2712 int
2713 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2714 {
2715 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2716 	struct c4iw_listen_ep *lep = NULL;
2717 	struct listen_port_info *port_info = NULL;
2718 	int rc = 0;
2719 
	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id,
2721 			backlog);
2722 	if (c4iw_fatal_error(&dev->rdev)) {
2723 		CTR2(KTR_IW_CXGBE, "%s: cm_id %p, fatal error", __func__,
2724 			       cm_id);
2725 		return -EIO;
2726 	}
2727 	lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
2728 	lep->com.cm_id = cm_id;
2729 	ref_cm_id(&lep->com);
2730 	lep->com.dev = dev;
2731 	lep->backlog = backlog;
2732 	lep->com.local_addr = cm_id->local_addr;
2733 	lep->com.thread = curthread;
2734 	cm_id->provider_data = lep;
2735 	lep->com.state = LISTEN;
2736 
	/* In case of INADDR_ANY, ibcore creates a cmid for each device and
	 * invokes the iw_cxgbe listener callbacks assuming that iw_cxgbe
	 * creates HW listeners for each device separately.  But toecore
	 * expects a single solisten() call with the INADDR_ANY address to
	 * create HW listeners on all devices for a given port number.  So the
	 * iw_cxgbe driver calls solisten() only once for INADDR_ANY (usually
	 * on the first listener callback from ibcore).  All subsequent
	 * INADDR_ANY listener callbacks from ibcore (for the same port) skip
	 * solisten(), as the first callback has already created the listeners
	 * on all devices.
	 */
2748 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr, NULL)) {
2749 		port_info = add_ep_to_listenlist(lep);
2750 		/* skip solisten() if refcnt > 1, as the listeners were
2751 		 * already created by 'Master lep'
2752 		 */
2753 		if (port_info->refcnt > 1) {
			/* As there will be only one listener socket for a TCP
			 * port, copy the master lep's socket pointer to the
			 * other leps belonging to the same TCP port.
			 */
2758 			struct c4iw_listen_ep *head_lep =
2759 					container_of(port_info->lep_list.next,
2760 					struct c4iw_listen_ep, listen_ep_list);
			lep->com.so = head_lep->com.so;
2762 			goto out;
2763 		}
2764 	}
2765 	rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so);
2766 	if (rc) {
2767 		CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d",
2768 				__func__, rc);
2769 		goto fail;
2770 	}
2771 
2772 	rc = -solisten(lep->com.so, backlog, curthread);
2773 	if (rc) {
2774 		CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d",
2775 				__func__, lep->com.so, rc);
2776 		goto fail_free_so;
2777 	}
2778 	init_iwarp_socket(lep->com.so, &lep->com);
2779 out:
2780 	return 0;
2781 
2782 fail_free_so:
2783 	sock_release(lep->com.so);
2784 fail:
2785 	if (port_info)
2786 		rem_ep_from_listenlist(lep);
2787 	deref_cm_id(&lep->com);
2788 	c4iw_put_ep(&lep->com);
2789 	return rc;
2790 }
2791 
2792 int
2793 c4iw_destroy_listen(struct iw_cm_id *cm_id)
2794 {
2795 	struct c4iw_listen_ep *lep = to_listen_ep(cm_id);
2796 
2797 	mutex_lock(&lep->com.mutex);
2798 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id,
2799 	    states[lep->com.state]);
2800 
2801 	lep->com.state = DEAD;
2802 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr,
2803 	    lep->com.so->so_vnet)) {
2804 		/* if no refcount then close listen socket */
2805 		if (!rem_ep_from_listenlist(lep))
2806 			close_socket(lep->com.so);
2807 	} else
2808 		close_socket(lep->com.so);
2809 	deref_cm_id(&lep->com);
2810 	mutex_unlock(&lep->com.mutex);
2811 	c4iw_put_ep(&lep->com);
2812 	return 0;
2813 }
2814 
2815 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2816 {
2817 	int ret;
2818 	mutex_lock(&ep->com.mutex);
2819 	ret = c4iw_ep_disconnect(ep, abrupt, gfp);
2820 	mutex_unlock(&ep->com.mutex);
2821 	return ret;
2822 }
2823 
2824 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2825 {
2826 	int ret = 0;
2827 	int close = 0;
2828 	struct c4iw_rdev *rdev;
2829 
2831 	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2832 
2833 	rdev = &ep->com.dev->rdev;
2834 
2835 	if (c4iw_fatal_error(rdev)) {
2836 		CTR3(KTR_IW_CXGBE, "%s:ced1 fatal error %p %s", __func__, ep,
2837 					states[ep->com.state]);
2838 		if (ep->com.state != DEAD) {
2839 			send_abort(ep);
2840 			ep->com.state = DEAD;
2841 		}
2842 		close_complete_upcall(ep, -ECONNRESET);
2843 		return ECONNRESET;
2844 	}
2845 	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2846 	    states[ep->com.state]);
2847 
2848 	/*
2849 	 * Ref the ep here in case we have fatal errors causing the
2850 	 * ep to be released and freed.
2851 	 */
2852 	c4iw_get_ep(&ep->com);
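	/*
	 * A non-zero 'abrupt' asks for an immediate abort (reset); otherwise
	 * an orderly shutdown is started and the ep timer armed so that an
	 * unresponsive peer cannot pin the ep forever.
	 */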
2853 	switch (ep->com.state) {
2854 
2855 		case MPA_REQ_WAIT:
2856 		case MPA_REQ_SENT:
2857 		case MPA_REQ_RCVD:
2858 		case MPA_REP_SENT:
2859 		case FPDU_MODE:
2860 			close = 1;
2861 			if (abrupt)
2862 				ep->com.state = ABORTING;
2863 			else {
2864 				ep->com.state = CLOSING;
2865 				START_EP_TIMER(ep);
2866 			}
2867 			set_bit(CLOSE_SENT, &ep->com.flags);
2868 			break;
2869 
2870 		case CLOSING:
2871 
2872 			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2873 
2874 				close = 1;
2875 				if (abrupt) {
2876 					STOP_EP_TIMER(ep);
2877 					ep->com.state = ABORTING;
2878 				} else
2879 					ep->com.state = MORIBUND;
2880 			}
2881 			break;
2882 
2883 		case MORIBUND:
2884 		case ABORTING:
2885 		case DEAD:
2886 			CTR3(KTR_IW_CXGBE,
2887 			    "%s ignoring disconnect ep %p state %u", __func__,
2888 			    ep, ep->com.state);
2889 			break;
2890 
2891 		default:
2892 			BUG();
2893 			break;
2894 	}
2895 
2897 	if (close) {
2898 
2899 		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2900 
2901 		if (abrupt) {
2902 
2903 			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2904 			set_bit(EP_DISC_ABORT, &ep->com.history);
2905 			close_complete_upcall(ep, -ECONNRESET);
2906 			send_abort(ep);
2907 		} else {
2908 
2909 			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2910 			set_bit(EP_DISC_CLOSE, &ep->com.history);
2911 
2912 			if (!ep->parent_ep)
2913 				ep->com.state = MORIBUND;
2914 
2915 			CURVNET_SET(ep->com.so->so_vnet);
2916 			ret = sodisconnect(ep->com.so);
2917 			CURVNET_RESTORE();
2918 			if (ret) {
2919 				CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2920 				STOP_EP_TIMER(ep);
2921 				send_abort(ep);
2922 				ep->com.state = DEAD;
2923 				close_complete_upcall(ep, -ECONNRESET);
2924 				set_bit(EP_DISC_FAIL, &ep->com.history);
2925 				if (ep->com.qp) {
2926 					struct c4iw_qp_attributes attrs = {0};
2927 
2928 					attrs.next_state = C4IW_QP_STATE_ERROR;
2929 					ret = c4iw_modify_qp(
2930 							ep->com.dev, ep->com.qp,
2931 							C4IW_QP_ATTR_NEXT_STATE,
2932 							&attrs, 1);
2933 					CTR3(KTR_IW_CXGBE, "%s:ced7 %p ret %d",
2934 						__func__, ep, ret);
2935 				}
2936 			}
2937 		}
2938 	}
2939 	c4iw_put_ep(&ep->com);
2940 	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2941 	return ret;
2942 }
2943 
2944 #ifdef C4IW_EP_REDIRECT
2945 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2946 		struct l2t_entry *l2t)
2947 {
2948 	struct c4iw_ep *ep = ctx;
2949 
2950 	if (ep->dst != old)
2951 		return 0;
2952 
2953 	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2954 			l2t);
2955 	dst_hold(new);
2956 	cxgb4_l2t_release(ep->l2t);
2957 	ep->l2t = l2t;
2958 	dst_release(old);
2959 	ep->dst = new;
2960 	return 1;
2961 }
2962 #endif
2963 
2966 static void ep_timeout(unsigned long arg)
2967 {
2968 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2969 
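	/*
	 * This runs from timer (callout) context where sleeping is not
	 * allowed, so just mark the ep and defer the real work to the
	 * taskqueue.
	 */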
2970 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2971 
2972 		/*
2973 		 * Only insert if it is not already on the list.
2974 		 */
2975 		if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
2976 			CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2977 			add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
2978 		}
2979 	}
2980 }
2981 
2982 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2983 {
2984 	uint64_t val = be64toh(*rpl);
2985 	int ret;
2986 	struct c4iw_wr_wait *wr_waitp;
2987 
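	/*
	 * rpl[0] carries the FW6 status in bits 15:8; rpl[1] is the cookie
	 * stashed in the work request, i.e. the waiting c4iw_wr_wait.
	 */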
2988 	ret = (int)((val >> 8) & 0xff);
2989 	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2990 	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2991 	if (wr_waitp)
2992 		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2993 
2994 	return (0);
2995 }
2996 
2997 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2998 {
2999 	struct cqe_list_entry *cle;
3000 	unsigned long flag;
3001 
	cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
	if (cle == NULL)
		return (ENOMEM);
	cle->rhp = sc->iwarp_softc;
3004 	cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
3005 
3006 	spin_lock_irqsave(&err_cqe_lock, flag);
3007 	list_add_tail(&cle->entry, &err_cqe_list);
3008 	queue_work(c4iw_taskq, &c4iw_task);
3009 	spin_unlock_irqrestore(&err_cqe_lock, flag);
3010 
3011 	return (0);
3012 }
3013 
3014 static int
3015 process_terminate(struct c4iw_ep *ep)
3016 {
3017 	struct c4iw_qp_attributes attrs = {0};
3018 
	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);
3020 
3021 	if (ep && ep->com.qp) {
3022 
3023 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
3024 				ep->hwtid, ep->com.qp->wq.sq.qid);
3025 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
3026 		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
3027 				1);
3028 	} else
3029 		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
				ep ? ep->hwtid : 0);
	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
3032 
3033 	return 0;
3034 }
3035 
3036 int __init c4iw_cm_init(void)
3037 {
3038 
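	/*
	 * Hook into the TOE layer: terminate() handles RDMA TERMINATE CPLs,
	 * fw6_wr_rpl() and fw6_cqe_handler() handle firmware notifications,
	 * and c4iw_ev_handler() handles async notifications.
	 */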
3039 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
3040 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
3041 	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
3042 	t4_register_an_handler(c4iw_ev_handler);
3043 
3044 	TAILQ_INIT(&req_list);
3045 	spin_lock_init(&req_lock);
3046 	INIT_LIST_HEAD(&err_cqe_list);
3047 	spin_lock_init(&err_cqe_lock);
3048 
3049 	INIT_WORK(&c4iw_task, process_req);
3050 
3051 	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
3052 	if (!c4iw_taskq)
3053 		return -ENOMEM;
3054 
3055 	return 0;
3056 }
3057 
3058 void __exit c4iw_cm_term(void)
3059 {
3060 	WARN_ON(!TAILQ_EMPTY(&req_list));
3061 	WARN_ON(!list_empty(&err_cqe_list));
3062 	flush_workqueue(c4iw_taskq);
3063 	destroy_workqueue(c4iw_taskq);
3064 
3065 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
3066 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
3067 	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
3068 	t4_register_an_handler(NULL);
3069 }
3070 #endif
3071