xref: /freebsd/sys/dev/cxgbe/iw_cxgbe/cm.c (revision edf8578117e8844e02c0121147f45e4609b30680)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *	  copyright notice, this list of conditions and the following
18  *	  disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *	  copyright notice, this list of conditions and the following
22  *	  disclaimer in the documentation and/or other materials
23  *	  provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 #include <sys/cdefs.h>
35 #include "opt_inet.h"
36 
37 #ifdef TCP_OFFLOAD
38 #include <sys/types.h>
39 #include <sys/malloc.h>
40 #include <sys/socket.h>
41 #include <sys/socketvar.h>
42 #include <sys/sockio.h>
43 #include <sys/taskqueue.h>
44 #include <netinet/in.h>
45 #include <net/route.h>
46 #include <net/route/nhop.h>
47 
48 #include <netinet/in_systm.h>
49 #include <netinet/in_pcb.h>
50 #include <netinet6/in6_pcb.h>
51 #include <netinet/ip.h>
52 #include <netinet/in_fib.h>
53 #include <netinet6/in6_fib.h>
54 #include <netinet6/scope6_var.h>
55 #include <netinet/ip_var.h>
56 #include <netinet/tcp_var.h>
57 #include <netinet/tcp.h>
58 #include <netinet/tcpip.h>
59 
60 #include <netinet/toecore.h>
61 
62 struct sge_iq;
63 struct rss_header;
64 struct cpl_set_tcb_rpl;
65 #include <linux/types.h>
66 #include "offload.h"
67 #include "tom/t4_tom.h"
68 
69 #define TOEPCB(so)  ((struct toepcb *)(sototcpcb((so))->t_toe))
70 
71 #include "iw_cxgbe.h"
72 #include <linux/module.h>
73 #include <linux/workqueue.h>
74 #include <linux/if_vlan.h>
75 #include <net/netevent.h>
76 #include <rdma/rdma_cm.h>
77 
78 static spinlock_t req_lock;
79 static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
80 static struct work_struct c4iw_task;
81 static struct workqueue_struct *c4iw_taskq;
82 static LIST_HEAD(err_cqe_list);
83 static spinlock_t err_cqe_lock;
84 static LIST_HEAD(listen_port_list);
85 static DEFINE_MUTEX(listen_port_mutex);
86 
87 static void process_req(struct work_struct *ctx);
88 static void start_ep_timer(struct c4iw_ep *ep);
89 static int stop_ep_timer(struct c4iw_ep *ep);
90 static int set_tcpinfo(struct c4iw_ep *ep);
91 static void process_timeout(struct c4iw_ep *ep);
92 static void process_err_cqes(void);
93 static void *alloc_ep(int size, gfp_t flags);
94 static void close_socket(struct socket *so);
95 static int send_mpa_req(struct c4iw_ep *ep);
96 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
97 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
98 static void close_complete_upcall(struct c4iw_ep *ep, int status);
99 static int send_abort(struct c4iw_ep *ep);
100 static void peer_close_upcall(struct c4iw_ep *ep);
101 static void peer_abort_upcall(struct c4iw_ep *ep);
102 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
103 static int connect_request_upcall(struct c4iw_ep *ep);
104 static void established_upcall(struct c4iw_ep *ep);
105 static int process_mpa_reply(struct c4iw_ep *ep);
106 static int process_mpa_request(struct c4iw_ep *ep);
107 static void process_peer_close(struct c4iw_ep *ep);
108 static void process_conn_error(struct c4iw_ep *ep);
109 static void process_close_complete(struct c4iw_ep *ep);
110 static void ep_timeout(unsigned long arg);
111 static void setiwsockopt(struct socket *so);
112 static void init_iwarp_socket(struct socket *so, void *arg);
113 static void uninit_iwarp_socket(struct socket *so);
114 static void process_data(struct c4iw_ep *ep);
115 static void process_connected(struct c4iw_ep *ep);
116 static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
117 static void process_socket_event(struct c4iw_ep *ep);
118 static void release_ep_resources(struct c4iw_ep *ep);
119 static int process_terminate(struct c4iw_ep *ep);
120 static int terminate(struct sge_iq *iq, const struct rss_header *rss,
121     struct mbuf *m);
122 static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
123 static struct listen_port_info *
124 add_ep_to_listenlist(struct c4iw_listen_ep *lep);
125 static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep);
126 static struct c4iw_listen_ep *
127 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so);
128 static int get_ifnet_from_raddr(struct sockaddr_storage *raddr,
129 		if_t *ifp);
130 static void process_newconn(struct c4iw_listen_ep *master_lep,
131 		struct socket *new_so);
132 #define START_EP_TIMER(ep) \
133     do { \
134 	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
135 		__func__, __LINE__, (ep)); \
136 	    start_ep_timer(ep); \
137     } while (0)
138 
139 #define STOP_EP_TIMER(ep) \
140     ({ \
141 	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
142 		__func__, __LINE__, (ep)); \
143 	    stop_ep_timer(ep); \
144     })
145 
146 #define GET_LOCAL_ADDR(pladdr, so) \
147 	do { \
148 		struct sockaddr_storage *__a = NULL; \
149 		struct inpcb *__inp = sotoinpcb(so); \
150 		KASSERT(__inp != NULL, \
151 		   ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
152 		if (__inp->inp_vflag & INP_IPV4) \
153 			in_getsockaddr(so, (struct sockaddr **)&__a); \
154 		else \
155 			in6_getsockaddr(so, (struct sockaddr **)&__a); \
156 		*(pladdr) = *__a; \
157 		free(__a, M_SONAME); \
158 	} while (0)
159 
160 #define GET_REMOTE_ADDR(praddr, so) \
161 	do { \
162 		struct sockaddr_storage *__a = NULL; \
163 		struct inpcb *__inp = sotoinpcb(so); \
164 		KASSERT(__inp != NULL, \
165 		   ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
166 		if (__inp->inp_vflag & INP_IPV4) \
167 			in_getpeeraddr(so, (struct sockaddr **)&__a); \
168 		else \
169 			in6_getpeeraddr(so, (struct sockaddr **)&__a); \
170 		*(praddr) = *__a; \
171 		free(__a, M_SONAME); \
172 	} while (0)
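
/*
 * Example usage (illustrative): copy a socket's endpoint addresses into an
 * ep's common fields, as is done when accepting a new connection:
 *
 *	GET_LOCAL_ADDR(&ep->com.local_addr, so);
 *	GET_REMOTE_ADDR(&ep->com.remote_addr, so);
 */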
173 
174 static char *states[] = {
175 	"idle",
176 	"listen",
177 	"connecting",
178 	"mpa_wait_req",
179 	"mpa_req_sent",
180 	"mpa_req_rcvd",
181 	"mpa_rep_sent",
182 	"fpdu_mode",
183 	"aborting",
184 	"closing",
185 	"moribund",
186 	"dead",
187 	NULL,
188 };
189 
190 static void deref_cm_id(struct c4iw_ep_common *epc)
191 {
192 	epc->cm_id->rem_ref(epc->cm_id);
193 	epc->cm_id = NULL;
194 	set_bit(CM_ID_DEREFED, &epc->history);
195 }
196 
197 static void ref_cm_id(struct c4iw_ep_common *epc)
198 {
199 	set_bit(CM_ID_REFED, &epc->history);
200 	epc->cm_id->add_ref(epc->cm_id);
201 }
202 
203 static void deref_qp(struct c4iw_ep *ep)
204 {
205 	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
206 	clear_bit(QP_REFERENCED, &ep->com.flags);
207 	set_bit(QP_DEREFED, &ep->com.history);
208 }
209 
210 static void ref_qp(struct c4iw_ep *ep)
211 {
212 	set_bit(QP_REFERENCED, &ep->com.flags);
213 	set_bit(QP_REFED, &ep->com.history);
214 	c4iw_qp_add_ref(&ep->com.qp->ibqp);
215 }

216 /* allocated per TCP port while listening */
217 struct listen_port_info {
218 	uint16_t port_num; /* TCP port address */
219 	struct list_head list; /* belongs to listen_port_list */
220 	struct list_head lep_list; /* per port lep list */
221 	uint32_t refcnt; /* number of lep's listening */
222 };
223 
224 /*
225  * The following two lists are used to manage INADDR_ANY listeners:
226  * 1) listen_port_list
227  * 2) lep_list
228  *
229  * Below is an overview of the INADDR_ANY listener lists on a system with a
230  * two-port adapter:
231  *   |------------------|
232  *   |listen_port_list  |
233  *   |------------------|
234  *            |
235  *            |              |-----------|       |-----------|
236  *            |              | port_num:X|       | port_num:X|
237  *            |--------------|-list------|-------|-list------|-------....
238  *                           | lep_list----|     | lep_list----|
239  *                           | refcnt    | |     | refcnt    | |
240  *                           |           | |     |           | |
241  *                           |           | |     |           | |
242  *                           |-----------| |     |-----------| |
243  *                                         |                   |
244  *                                         |                   |
245  *                                         |                   |
246  *                                         |                   |         lep1                  lep2
247  *                                         |                   |    |----------------|    |----------------|
248  *                                         |                   |----| listen_ep_list |----| listen_ep_list |
249  *                                         |                        |----------------|    |----------------|
250  *                                         |
251  *                                         |
252  *                                         |        lep1                  lep2
253  *                                         |   |----------------|    |----------------|
254  *                                         |---| listen_ep_list |----| listen_ep_list |
255  *                                             |----------------|    |----------------|
256  *
257  * Because the adapter has two ports, there are two leps (lep1 & lep2) for
258  * each TCP port number.
259  *
260  * 'lep1' is always marked as the master lep, because solisten() is always
261  * called through the first lep.
262  *
263  */
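/*
 * Look up (or create) the listen_port_info for the lep's local TCP port,
 * bump its refcnt, and link the lep into the per-port lep_list.
 */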
264 static struct listen_port_info *
265 add_ep_to_listenlist(struct c4iw_listen_ep *lep)
266 {
267 	uint16_t port;
268 	struct listen_port_info *port_info = NULL;
269 	struct sockaddr_storage *laddr = &lep->com.local_addr;
270 
271 	port = (laddr->ss_family == AF_INET) ?
272 		((struct sockaddr_in *)laddr)->sin_port :
273 		((struct sockaddr_in6 *)laddr)->sin6_port;
274 
275 	mutex_lock(&listen_port_mutex);
276 
277 	list_for_each_entry(port_info, &listen_port_list, list)
278 		if (port_info->port_num == port)
279 			goto found_port;
280 
281 	port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
282 	port_info->port_num = port;
283 	port_info->refcnt    = 0;
284 
285 	list_add_tail(&port_info->list, &listen_port_list);
286 	INIT_LIST_HEAD(&port_info->lep_list);
287 
288 found_port:
289 	port_info->refcnt++;
290 	list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
291 	mutex_unlock(&listen_port_mutex);
292 	return port_info;
293 }
294 
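/*
 * Undo add_ep_to_listenlist(): unlink the lep from its per-port list and
 * free the port_info when the last listener for the port goes away.
 * Returns the remaining refcnt.
 */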
295 static int
296 rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
297 {
298 	uint16_t port;
299 	struct listen_port_info *port_info = NULL;
300 	struct sockaddr_storage *laddr = &lep->com.local_addr;
301 	int refcnt = 0;
302 
303 	port = (laddr->ss_family == AF_INET) ?
304 		((struct sockaddr_in *)laddr)->sin_port :
305 		((struct sockaddr_in6 *)laddr)->sin6_port;
306 
307 	mutex_lock(&listen_port_mutex);
308 
309 	/* get the port_info structure based on the lep's port address */
310 	list_for_each_entry(port_info, &listen_port_list, list) {
311 		if (port_info->port_num == port) {
312 			port_info->refcnt--;
313 			refcnt = port_info->refcnt;
314 			/* remove the current lep from the listen list */
315 			list_del(&lep->listen_ep_list);
316 			if (port_info->refcnt == 0) {
317 				/* Remove this entry from the list as there
318 				 * are no more listeners for this port_num.
319 				 */
320 				list_del(&port_info->list);
321 				kfree(port_info);
322 			}
323 			break;
324 		}
325 	}
326 	mutex_unlock(&listen_port_mutex);
327 	return refcnt;
328 }
329 
330 /*
331  * Find the lep that belongs to the ifnet on which the SYN frame was received.
332  */
333 struct c4iw_listen_ep *
334 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
335 {
336 	struct adapter *adap = NULL;
337 	struct c4iw_listen_ep *lep = NULL;
338 	if_t ifp = NULL, hw_ifp = NULL;
339 	struct listen_port_info *port_info = NULL;
340 	int i = 0, found_portinfo = 0, found_lep = 0;
341 	uint16_t port;
342 
343 	/*
344 	 * STEP 1: Figure out 'ifp' of the physical interface, not pseudo
345 	 * interfaces like vlan, lagg, etc..
346 	 * TBD: lagg support, lagg + vlan support.
347 	 */
348 	ifp = TOEPCB(so)->l2te->ifp;
349 	if (if_gettype(ifp) == IFT_L2VLAN) {
350 		hw_ifp = VLAN_TRUNKDEV(ifp);
351 		if (hw_ifp == NULL) {
352 			CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of "
353 				"vlan ifnet %p, sock %p, master_lep %p",
354 				__func__, ifp, so, master_lep);
355 			return (NULL);
356 		}
357 	} else
358 		hw_ifp = ifp;
359 
360 	/* STEP 2: Find 'port_info' with listener local port address. */
361 	port = (master_lep->com.local_addr.ss_family == AF_INET) ?
362 		((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
363 		((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;
364 
365 
366 	mutex_lock(&listen_port_mutex);
367 	list_for_each_entry(port_info, &listen_port_list, list)
368 		if (port_info->port_num == port) {
369 			found_portinfo = 1;
370 			break;
371 		}
372 	if (!found_portinfo)
373 		goto out;
374 
375 	/* STEP 3: Traverse through list of lep's that are bound to the current
376 	 * TCP port address and find the lep that belongs to the ifnet on which
377 	 * the SYN frame was received.
378 	 */
379 	list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
380 		adap = lep->com.dev->rdev.adap;
381 		for_each_port(adap, i) {
382 			if (hw_ifp == adap->port[i]->vi[0].ifp) {
383 				found_lep = 1;
384 				goto out;
385 			}
386 		}
387 	}
388 out:
389 	mutex_unlock(&listen_port_mutex);
390 	return (found_lep ? lep : NULL);
391 }
392 
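/*
 * The ep timer fired.  Depending on the connection state this means either
 * that an MPA exchange or close did not finish in time (abort the
 * connection) or that the timer raced with a normal state transition (do
 * nothing).  Drops the reference held by the armed timer.
 */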
393 static void process_timeout(struct c4iw_ep *ep)
394 {
395 	struct c4iw_qp_attributes attrs = {0};
396 	int abort = 1;
397 
398 	CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
399 			ep, ep->hwtid, ep->com.state);
400 	set_bit(TIMEDOUT, &ep->com.history);
401 	switch (ep->com.state) {
402 	case MPA_REQ_SENT:
403 		connect_reply_upcall(ep, -ETIMEDOUT);
404 		break;
405 	case MPA_REQ_WAIT:
406 	case MPA_REQ_RCVD:
407 	case MPA_REP_SENT:
408 	case FPDU_MODE:
409 		break;
410 	case CLOSING:
411 	case MORIBUND:
412 		if (ep->com.cm_id && ep->com.qp) {
413 			attrs.next_state = C4IW_QP_STATE_ERROR;
414 			c4iw_modify_qp(ep->com.dev, ep->com.qp,
415 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
416 		}
417 		close_complete_upcall(ep, -ETIMEDOUT);
418 		break;
419 	case ABORTING:
420 	case DEAD:
421 		/*
422 		 * These states are expected if the ep timed out at the same
423 		 * time as another thread was calling stop_ep_timer().
424 		 * So we silently do nothing for these states.
425 		 */
426 		abort = 0;
427 		break;
428 	default:
429 		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u"
430 				, __func__, ep, ep->hwtid, ep->com.state);
431 		abort = 0;
432 	}
433 	if (abort)
434 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
435 	c4iw_put_ep(&ep->com);
436 	return;
437 }
438 
439 struct cqe_list_entry {
440 	struct list_head entry;
441 	struct c4iw_dev *rhp;
442 	struct t4_cqe err_cqe;
443 };
444 
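/*
 * Drain err_cqe_list, dispatching each deferred error CQE to the event
 * handler with the spinlock dropped around the dispatch.
 */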
445 static void
446 process_err_cqes(void)
447 {
448 	unsigned long flag;
449 	struct cqe_list_entry *cle;
450 
451 	spin_lock_irqsave(&err_cqe_lock, flag);
452 	while (!list_empty(&err_cqe_list)) {
453 		struct list_head *tmp;
454 		tmp = err_cqe_list.next;
455 		list_del(tmp);
456 		tmp->next = tmp->prev = NULL;
457 		spin_unlock_irqrestore(&err_cqe_lock, flag);
458 		cle = list_entry(tmp, struct cqe_list_entry, entry);
459 		c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
460 		free(cle, M_CXGBE);
461 		spin_lock_irqsave(&err_cqe_lock, flag);
462 	}
463 	spin_unlock_irqrestore(&err_cqe_lock, flag);
464 
465 	return;
466 }
467 
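/*
 * Taskqueue handler: drain req_list and process each endpoint's pending
 * events (terminate, timeout, socket) under the ep mutex, then drop the
 * reference taken in add_ep_to_req_list().
 */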
468 static void
469 process_req(struct work_struct *ctx)
470 {
471 	struct c4iw_ep_common *epc;
472 	unsigned long flag;
473 	int ep_events;
474 
475 	process_err_cqes();
476 	spin_lock_irqsave(&req_lock, flag);
477 	while (!TAILQ_EMPTY(&req_list)) {
478 		epc = TAILQ_FIRST(&req_list);
479 		TAILQ_REMOVE(&req_list, epc, entry);
480 		epc->entry.tqe_prev = NULL;
481 		ep_events = epc->ep_events;
482 		epc->ep_events = 0;
483 		spin_unlock_irqrestore(&req_lock, flag);
484 		mutex_lock(&epc->mutex);
485 		CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x",
486 		    __func__, epc->so, epc, states[epc->state], ep_events);
487 		if (ep_events & C4IW_EVENT_TERM)
488 			process_terminate((struct c4iw_ep *)epc);
489 		if (ep_events & C4IW_EVENT_TIMEOUT)
490 			process_timeout((struct c4iw_ep *)epc);
491 		if (ep_events & C4IW_EVENT_SOCKET)
492 			process_socket_event((struct c4iw_ep *)epc);
493 		mutex_unlock(&epc->mutex);
494 		c4iw_put_ep(epc);
495 		process_err_cqes();
496 		spin_lock_irqsave(&req_lock, flag);
497 	}
498 	spin_unlock_irqrestore(&req_lock, flag);
499 }
500 
501 /*
502  * XXX: doesn't belong here in the iWARP driver.
503  * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
504  *      set.  Is this a valid assumption for active open?
505  */
506 static int
507 set_tcpinfo(struct c4iw_ep *ep)
508 {
509 	struct socket *so = ep->com.so;
510 	struct inpcb *inp = sotoinpcb(so);
511 	struct tcpcb *tp;
512 	struct toepcb *toep;
513 	int rc = 0;
514 
515 	INP_WLOCK(inp);
516 	tp = intotcpcb(inp);
517 	if ((tp->t_flags & TF_TOE) == 0) {
518 		rc = EINVAL;
519 		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
520 		    __func__, so, ep);
521 		goto done;
522 	}
523 	toep = TOEPCB(so);
524 
525 	ep->hwtid = toep->tid;
526 	ep->snd_seq = tp->snd_nxt;
527 	ep->rcv_seq = tp->rcv_nxt;
528 done:
529 	INP_WUNLOCK(inp);
530 	return (rc);
532 }
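
/*
 * Resolve the egress ifnet for a remote address with a lookup in the
 * default FIB.  Returns EHOSTUNREACH if there is no route.
 */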
533 static int
534 get_ifnet_from_raddr(struct sockaddr_storage *raddr, if_t *ifp)
535 {
536 	int err = 0;
537 	struct nhop_object *nh;
538 
539 	if (raddr->ss_family == AF_INET) {
540 		struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr;
541 
542 		nh = fib4_lookup(RT_DEFAULT_FIB, raddr4->sin_addr, 0,
543 				NHR_NONE, 0);
544 	} else {
545 		struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr;
546 		struct in6_addr addr6;
547 		uint32_t scopeid;
548 
549 		memset(&addr6, 0, sizeof(addr6));
550 		in6_splitscope((struct in6_addr *)&raddr6->sin6_addr,
551 					&addr6, &scopeid);
552 		nh = fib6_lookup(RT_DEFAULT_FIB, &addr6, scopeid,
553 				NHR_NONE, 0);
554 	}
555 
556 	if (nh == NULL)
557 		err = EHOSTUNREACH;
558 	else
559 		*ifp = nh->nh_ifp;
560 	CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err);
561 	return err;
562 }
563 
564 static void
565 close_socket(struct socket *so)
566 {
567 	uninit_iwarp_socket(so);
568 	soclose(so);
569 }
570 
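/*
 * The peer shut down its half of the connection (we read a FIN): walk the
 * connection state machine, deliver the appropriate upcalls, and decide
 * whether to disconnect and/or release the endpoint.
 */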
571 static void
572 process_peer_close(struct c4iw_ep *ep)
573 {
574 	struct c4iw_qp_attributes attrs = {0};
575 	int disconnect = 1;
576 	int release = 0;
577 
578 	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
579 	    ep->com.so, states[ep->com.state]);
580 
581 	switch (ep->com.state) {
582 
583 		case MPA_REQ_WAIT:
584 			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD",
585 			    __func__, ep);
586 			/* Fallthrough */
587 		case MPA_REQ_SENT:
588 			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
589 			    __func__, ep);
590 			ep->com.state = DEAD;
591 			connect_reply_upcall(ep, -ECONNABORTED);
592 
593 			disconnect = 0;
594 			STOP_EP_TIMER(ep);
595 			close_socket(ep->com.so);
596 			deref_cm_id(&ep->com);
597 			release = 1;
598 			break;
599 
600 		case MPA_REQ_RCVD:
601 
602 			/*
603 			 * We're gonna mark this puppy DEAD, but keep
604 			 * the reference on it until the ULP accepts or
605 			 * rejects the CR.
606 			 */
607 			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
608 			    __func__, ep);
609 			ep->com.state = CLOSING;
610 			break;
611 
612 		case MPA_REP_SENT:
613 			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
614 			    __func__, ep);
615 			ep->com.state = CLOSING;
616 			break;
617 
618 		case FPDU_MODE:
619 			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
620 			    __func__, ep);
621 			START_EP_TIMER(ep);
622 			ep->com.state = CLOSING;
623 			attrs.next_state = C4IW_QP_STATE_CLOSING;
624 			c4iw_modify_qp(ep->com.dev, ep->com.qp,
625 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
626 			peer_close_upcall(ep);
627 			break;
628 
629 		case ABORTING:
630 			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
631 			    __func__, ep);
632 			disconnect = 0;
633 			break;
634 
635 		case CLOSING:
636 			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
637 			    __func__, ep);
638 			ep->com.state = MORIBUND;
639 			disconnect = 0;
640 			break;
641 
642 		case MORIBUND:
643 			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
644 			    ep);
645 			STOP_EP_TIMER(ep);
646 			if (ep->com.cm_id && ep->com.qp) {
647 				attrs.next_state = C4IW_QP_STATE_IDLE;
648 				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
649 						C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
650 			}
651 			close_socket(ep->com.so);
652 			close_complete_upcall(ep, 0);
653 			ep->com.state = DEAD;
654 			release = 1;
655 			disconnect = 0;
656 			break;
657 
658 		case DEAD:
659 			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
660 			    __func__, ep);
661 			disconnect = 0;
662 			break;
663 
664 		default:
665 			panic("%s: ep %p state %d", __func__, ep,
666 			    ep->com.state);
667 			break;
668 	}
669 
670 
671 	if (disconnect) {
672 
673 		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
674 		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
675 	}
676 	if (release) {
677 
678 		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
679 		c4iw_put_ep(&ep->com);
680 	}
681 	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
682 	return;
683 }
684 
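/*
 * The socket reported an error (typically an RST from the peer): move any
 * attached QP to ERROR, notify the ULP, and tear down the endpoint unless
 * an abort is already in progress.
 */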
685 static void
686 process_conn_error(struct c4iw_ep *ep)
687 {
688 	struct c4iw_qp_attributes attrs = {0};
689 	int ret;
690 	int state;
691 
692 	state = ep->com.state;
693 	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
694 	    __func__, ep, ep->com.so, ep->com.so->so_error,
695 	    states[ep->com.state]);
696 
697 	switch (state) {
698 
699 		case MPA_REQ_WAIT:
700 			STOP_EP_TIMER(ep);
701 			c4iw_put_ep(&ep->parent_ep->com);
702 			break;
703 
704 		case MPA_REQ_SENT:
705 			STOP_EP_TIMER(ep);
706 			connect_reply_upcall(ep, -ECONNRESET);
707 			break;
708 
709 		case MPA_REP_SENT:
710 			ep->com.rpl_err = ECONNRESET;
711 			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
712 			break;
713 
714 		case MPA_REQ_RCVD:
715 			break;
716 
717 		case MORIBUND:
718 		case CLOSING:
719 			STOP_EP_TIMER(ep);
720 			/*FALLTHROUGH*/
721 		case FPDU_MODE:
722 
723 			if (ep->com.cm_id && ep->com.qp) {
724 
725 				attrs.next_state = C4IW_QP_STATE_ERROR;
726 				ret = c4iw_modify_qp(ep->com.qp->rhp,
727 					ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
728 					&attrs, 1);
729 				if (ret)
730 					log(LOG_ERR,
731 							"%s - qp <- error failed!\n",
732 							__func__);
733 			}
734 			peer_abort_upcall(ep);
735 			break;
736 
737 		case ABORTING:
738 			break;
739 
740 		case DEAD:
741 			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
742 			    __func__, ep->com.so->so_error);
743 			return;
744 
745 		default:
746 			panic("%s: ep %p state %d", __func__, ep, state);
747 			break;
748 	}
749 
750 	if (state != ABORTING) {
751 		close_socket(ep->com.so);
752 		ep->com.state = DEAD;
753 		c4iw_put_ep(&ep->com);
754 	}
755 	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
756 	return;
757 }
758 
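/*
 * Our half-close completed and the socket is fully disconnected: quiesce
 * the QP, deliver the close-complete upcall, and release the endpoint's
 * resources once it reaches DEAD.
 */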
759 static void
760 process_close_complete(struct c4iw_ep *ep)
761 {
762 	struct c4iw_qp_attributes attrs = {0};
763 	int release = 0;
764 
765 	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
766 	    ep->com.so, states[ep->com.state]);
767 
768 	/* The cm_id may be null if we failed to connect */
769 	set_bit(CLOSE_CON_RPL, &ep->com.history);
770 
771 	switch (ep->com.state) {
772 
773 		case CLOSING:
774 			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
775 			    __func__, ep);
776 			ep->com.state = MORIBUND;
777 			break;
778 
779 		case MORIBUND:
780 			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
781 			    ep);
782 			STOP_EP_TIMER(ep);
783 
784 			if ((ep->com.cm_id) && (ep->com.qp)) {
785 
786 				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
787 				    __func__, ep);
788 				attrs.next_state = C4IW_QP_STATE_IDLE;
789 				c4iw_modify_qp(ep->com.dev,
790 						ep->com.qp,
791 						C4IW_QP_ATTR_NEXT_STATE,
792 						&attrs, 1);
793 			}
794 
795 			close_socket(ep->com.so);
796 			close_complete_upcall(ep, 0);
797 			ep->com.state = DEAD;
798 			release = 1;
799 			break;
800 
801 		case ABORTING:
802 			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
803 			break;
804 
805 		case DEAD:
806 			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
807 			break;
808 		default:
809 			CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
810 					__func__, ep);
811 			panic("%s:pcc7 %p unknown ep state", __func__, ep);
812 			break;
813 	}
814 
815 	if (release) {
816 
817 		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
818 		release_ep_resources(ep);
819 	}
820 	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
821 	return;
822 }
823 
824 static void
825 setiwsockopt(struct socket *so)
826 {
827 	int rc;
828 	struct sockopt sopt;
829 	int on = 1;
830 
831 	sopt.sopt_dir = SOPT_SET;
832 	sopt.sopt_level = IPPROTO_TCP;
833 	sopt.sopt_name = TCP_NODELAY;
834 	sopt.sopt_val = (caddr_t)&on;
835 	sopt.sopt_valsize = sizeof on;
836 	sopt.sopt_td = NULL;
837 	rc = -sosetopt(so, &sopt);
838 	if (rc) {
839 		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
840 		    __func__, so, rc);
841 	}
842 }
843 
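/*
 * Install c4iw_so_upcall() on the socket (the listen upcall for listening
 * sockets, the receive upcall otherwise) and mark the socket non-blocking.
 */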
844 static void
845 init_iwarp_socket(struct socket *so, void *arg)
846 {
847 	if (SOLISTENING(so)) {
848 		SOLISTEN_LOCK(so);
849 		solisten_upcall_set(so, c4iw_so_upcall, arg);
850 		so->so_state |= SS_NBIO;
851 		SOLISTEN_UNLOCK(so);
852 	} else {
853 		SOCKBUF_LOCK(&so->so_rcv);
854 		soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
855 		so->so_state |= SS_NBIO;
856 		SOCKBUF_UNLOCK(&so->so_rcv);
857 	}
858 }
859 
860 static void
861 uninit_iwarp_socket(struct socket *so)
862 {
863 	if (SOLISTENING(so)) {
864 		SOLISTEN_LOCK(so);
865 		solisten_upcall_set(so, NULL, NULL);
866 		SOLISTEN_UNLOCK(so);
867 	} else {
868 		SOCKBUF_LOCK(&so->so_rcv);
869 		soupcall_clear(so, SO_RCV);
870 		SOCKBUF_UNLOCK(&so->so_rcv);
871 	}
872 }
873 
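/*
 * Inbound streaming-mode data: feed it to the MPA state machine.  Data
 * arriving in FPDU_MODE is unexpected and moves the QP to TERMINATE.
 */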
874 static void
875 process_data(struct c4iw_ep *ep)
876 {
877 	int ret = 0;
878 	int disconnect = 0;
879 	struct c4iw_qp_attributes attrs = {0};
880 
881 	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
882 	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));
883 
884 	switch (ep->com.state) {
885 	case MPA_REQ_SENT:
886 		disconnect = process_mpa_reply(ep);
887 		break;
888 	case MPA_REQ_WAIT:
889 		disconnect = process_mpa_request(ep);
890 		if (disconnect)
891 			/* Referenced in process_newconn() */
892 			c4iw_put_ep(&ep->parent_ep->com);
893 		break;
894 	case FPDU_MODE:
895 		MPASS(ep->com.qp != NULL);
896 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
897 		ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
898 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
899 		if (ret != -EINPROGRESS)
900 			disconnect = 1;
901 		break;
902 	default:
903 		log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
904 			    "state %d, so %p, so_state 0x%x, sbused %u\n",
905 			    __func__, ep, ep->com.state, ep->com.so,
906 			    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
907 		break;
908 	}
909 	if (disconnect)
910 		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
911 
912 }
913 
914 static void
915 process_connected(struct c4iw_ep *ep)
916 {
917 	struct socket *so = ep->com.so;
918 
919 	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
920 		if (send_mpa_req(ep))
921 			goto err;
922 	} else {
923 		connect_reply_upcall(ep, -so->so_error);
924 		goto err;
925 	}
926 	return;
927 err:
928 	close_socket(so);
929 	ep->com.state = DEAD;
930 	c4iw_put_ep(&ep->com);
931 	return;
932 }
933 
934 static inline bool c4iw_zero_addr(struct sockaddr *addr)
935 {
936 	struct in6_addr *ip6;
937 
938 	if (addr->sa_family == AF_INET)
939 		return (((struct sockaddr_in *)addr)->sin_addr.s_addr == 0);
940 	else {
941 		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
942 		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
943 				ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
944 	}
945 }
946 
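/*
 * IN_LOOPBACK() consults the current VNET on newer kernels; _IN_LOOPBACK is
 * a VNET-free copy for callers that cannot provide one (vnet == NULL).
 */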
947 #define _IN_LOOPBACK(i)	(((in_addr_t)(i) & 0xff000000) == 0x7f000000)
948 static inline bool c4iw_loopback_addr(struct sockaddr *addr, struct vnet *vnet)
949 {
950 	bool ret;
951 
952 	if (addr->sa_family == AF_INET) {
953 		if (vnet == NULL)
954 			ret = _IN_LOOPBACK(ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
955 		else {
956 			CURVNET_SET_QUIET(vnet);
957 			ret = IN_LOOPBACK(ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
958 			CURVNET_RESTORE();
959 		}
960 	} else {
961 		ret = IN6_IS_ADDR_LOOPBACK(&((struct sockaddr_in6 *) addr)->sin6_addr);
962 	}
963 	return (ret);
964 }
965 #undef _IN_LOOPBACK
966 
967 static inline bool c4iw_any_addr(struct sockaddr *addr, struct vnet *vnet)
968 {
969 	return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr, vnet);
970 }
971 
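/*
 * A connection request arrived on a listening socket: find the real lep
 * for wildcard listeners, allocate and initialize a new ep, soaccept() the
 * new socket, and kick off MPA request processing under the ep mutex.
 */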
972 static void
973 process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
974 {
975 	struct c4iw_listen_ep *real_lep = NULL;
976 	struct c4iw_ep *new_ep = NULL;
977 	struct sockaddr_in *remote = NULL;
978 	int ret = 0;
979 
980 	MPASS(new_so != NULL);
981 
982 	if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr,
983 	    new_so->so_vnet)) {
984 		/* Here we need to find the 'real_lep' that belongs to the
985 		 * incoming socket's network interface, such that the newly
986 		 * created 'ep' can be attached to the real 'lep'.
987 		 */
988 		real_lep = find_real_listen_ep(master_lep, new_so);
989 		if (real_lep == NULL) {
990 			CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
991 					"ep for sock: %p", __func__, new_so);
992 			log(LOG_ERR,"%s: Could not find the real listen ep for "
993 					"sock: %p\n", __func__, new_so);
994 		/* FIXME: properly free 'new_so' in the failure case.
995 		 * Use of soabort() and soclose() is not legal
996 		 * here (before soaccept()).
997 			 */
998 			return;
999 		}
1000 	} else /* for Non-Wildcard address, master_lep is always the real_lep */
1001 		real_lep = master_lep;
1002 
1003 	new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);
1004 
1005 	CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
1006 	    "listening so %p, new so %p", __func__, master_lep, real_lep,
1007 	    new_ep, master_lep->com.so, new_so);
1008 
1009 	new_ep->com.dev = real_lep->com.dev;
1010 	new_ep->com.so = new_so;
1011 	new_ep->com.cm_id = NULL;
1012 	new_ep->com.thread = real_lep->com.thread;
1013 	new_ep->parent_ep = real_lep;
1014 
1015 	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
1016 	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
1017 	c4iw_get_ep(&real_lep->com);
1018 	init_timer(&new_ep->timer);
1019 	new_ep->com.state = MPA_REQ_WAIT;
1020 
1021 	setiwsockopt(new_so);
1022 	ret = soaccept(new_so, (struct sockaddr **)&remote);
1023 	if (ret != 0) {
1024 		CTR4(KTR_IW_CXGBE,
1025 				"%s:listen sock:%p, new sock:%p, ret:%d",
1026 				__func__, master_lep->com.so, new_so, ret);
1027 		if (remote != NULL)
1028 			free(remote, M_SONAME);
1029 		soclose(new_so);
1030 		c4iw_put_ep(&new_ep->com);
1031 		c4iw_put_ep(&real_lep->com);
1032 		return;
1033 	}
1034 	free(remote, M_SONAME);
1035 
1036 	START_EP_TIMER(new_ep);
1037 
1038 	/* An MPA request might already be queued up on the socket, so we
1039 	 * initialize the socket/upcall handler under the lock to prevent
1040 	 * another thread (via process_req()) from processing the MPA request.
1041 	 */
1042 	c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below; this is to
1043 				      avoid freeing the ep before it is unlocked. */
1044 	mutex_lock(&new_ep->com.mutex);
1045 	init_iwarp_socket(new_so, &new_ep->com);
1046 
1047 	ret = process_mpa_request(new_ep);
1048 	if (ret) {
1049 		/* ABORT */
1050 		c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
1051 		c4iw_put_ep(&real_lep->com);
1052 	}
1053 	mutex_unlock(&new_ep->com.mutex);
1054 	c4iw_put_ep(&new_ep->com);
1055 	return;
1056 }
1057 
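/*
 * Record pending events on the ep and queue it (with a reference) on
 * req_list for process_req() to handle in taskqueue context.
 */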
1058 static int
1059 add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
1060 {
1061 	unsigned long flag;
1062 
1063 	spin_lock_irqsave(&req_lock, flag);
1064 	if (ep && ep->com.so) {
1065 		ep->com.ep_events |= new_ep_event;
1066 		if (!ep->com.entry.tqe_prev) {
1067 			c4iw_get_ep(&ep->com);
1068 			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
1069 			queue_work(c4iw_taskq, &c4iw_task);
1070 		}
1071 	}
1072 	spin_unlock_irqrestore(&req_lock, flag);
1073 
1074 	return (0);
1075 }
1076 
1077 static int
1078 c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
1079 {
1080 	struct c4iw_ep *ep = arg;
1081 
1082 	CTR6(KTR_IW_CXGBE,
1083 	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
1084 	    __func__, so, so->so_state, ep, states[ep->com.state],
1085 	    ep->com.entry.tqe_prev);
1086 
1087 	MPASS(ep->com.so == so);
1088 	/*
1089 	 * Wake up any threads waiting in rdma_init()/rdma_fini(),
1090 	 * with locks held.
1091 	 */
1092 	if (so->so_error || (ep->com.dev->rdev.flags & T4_FATAL_ERROR))
1093 		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1094 	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);
1095 
1096 	return (SU_OK);
1097 }
1098 
1099 
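/*
 * CPL_RDMA_TERMINATE handler: resolve the tid to its socket and ep, then
 * defer the actual TERMINATE processing to the taskqueue.
 */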
1100 static int
1101 terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1102 {
1103 	struct adapter *sc = iq->adapter;
1104 	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
1105 	unsigned int tid = GET_TID(cpl);
1106 	struct toepcb *toep = lookup_tid(sc, tid);
1107 	struct socket *so;
1108 	struct c4iw_ep *ep;
1109 
1110 	INP_WLOCK(toep->inp);
1111 	so = inp_inpcbtosocket(toep->inp);
1112 	ep = so->so_rcv.sb_upcallarg;
1113 	INP_WUNLOCK(toep->inp);
1114 
1115 	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
1116 	add_ep_to_req_list(ep, C4IW_EVENT_TERM);
1117 
1118 	return 0;
1119 }
1120 
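/*
 * Dispatch a socket upcall: connect completion, new connections on a
 * listener, socket errors, peer close, close completion, and received data
 * are routed to their respective handlers.
 */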
1121 static void
1122 process_socket_event(struct c4iw_ep *ep)
1123 {
1124 	int state = ep->com.state;
1125 	struct socket *so = ep->com.so;
1126 
1127 	if (ep->com.state == DEAD) {
1128 		CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
1129 		    "ep %p ep_state %s", __func__, ep, states[state]);
1130 		return;
1131 	}
1132 
1133 	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
1134 	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
1135 	    so->so_error, so->so_rcv.sb_state, ep, states[state]);
1136 
1137 	if (state == CONNECTING) {
1138 		process_connected(ep);
1139 		return;
1140 	}
1141 
1142 	if (state == LISTEN) {
1143 		struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
1144 		struct socket *listen_so = so, *new_so = NULL;
1145 		int error = 0;
1146 
1147 		SOLISTEN_LOCK(listen_so);
1148 		do {
1149 			error = solisten_dequeue(listen_so, &new_so,
1150 						SOCK_NONBLOCK);
1151 			if (error) {
1152 				CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
1153 					"error %d", __func__, lep, listen_so,
1154 					error);
1155 				return;
1156 			}
1157 			process_newconn(lep, new_so);
1158 
1159 			/* solisten_dequeue() drops the lock on return, so
1160 			 * reacquire it for sol_qlen and for the next iteration.
1161 			 */
1161 			 */
1162 			SOLISTEN_LOCK(listen_so);
1163 		} while (listen_so->sol_qlen);
1164 		SOLISTEN_UNLOCK(listen_so);
1165 
1166 		return;
1167 	}
1168 
1169 	/* connection error */
1170 	if (so->so_error) {
1171 		process_conn_error(ep);
1172 		return;
1173 	}
1174 
1175 	/* peer close */
1176 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
1177 		process_peer_close(ep);
1178 		/*
1179 		 * check whether socket disconnect event is pending before
1180 		 * returning. Fallthrough if yes.
1181 		 */
1182 		if (!(so->so_state & SS_ISDISCONNECTED))
1183 			return;
1184 	}
1185 
1186 	/* close complete */
1187 	if (so->so_state & SS_ISDISCONNECTED) {
1188 		process_close_complete(ep);
1189 		return;
1190 	}
1191 
1192 	/* rx data */
1193 	if (sbused(&ep->com.so->so_rcv)) {
1194 		process_data(ep);
1195 		return;
1196 	}
1197 
1198 	/* Socket events for 'MPA Request Received' and 'Close Complete'
1199 	 * were already processed earlier in their respective event handlers,
1200 	 * so they are skipped here.
1201 	 * Any other socket event must have been handled above.
1202 	 */
1203 	MPASS((ep->com.state == MPA_REQ_RCVD) || (ep->com.state == MORIBUND));
1204 
1205 	if ((ep->com.state != MPA_REQ_RCVD) && (ep->com.state != MORIBUND))
1206 		log(LOG_ERR, "%s: Unprocessed socket event so %p, "
1207 		"so_state 0x%x, so_err %d, sb_state 0x%x, ep %p, ep_state %s\n",
1208 		__func__, so, so->so_state, so->so_error, so->so_rcv.sb_state,
1209 			ep, states[state]);
1210 
1211 }
1212 
1213 SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
1214     "iw_cxgbe driver parameters");
1215 
1216 static int dack_mode = 0;
1217 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
1218 		"Delayed ack mode (default = 0)");
1219 
1220 int c4iw_max_read_depth = 8;
1221 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
1222 		"Per-connection max ORD/IRD (default = 8)");
1223 
1224 static int enable_tcp_timestamps;
1225 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
1226 		"Enable tcp timestamps (default = 0)");
1227 
1228 static int enable_tcp_sack;
1229 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
1230 		"Enable tcp SACK (default = 0)");
1231 
1232 static int enable_tcp_window_scaling = 1;
1233 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
1234 		"Enable tcp window scaling (default = 1)");
1235 
1236 int c4iw_debug = 0;
1237 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
1238 		"Enable debug logging (default = 0)");
1239 
1240 static int peer2peer = 1;
1241 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
1242 		"Support peer2peer ULPs (default = 1)");
1243 
1244 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
1245 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
1246 		"RDMAP opcode to use for the RTR message: 1 = RDMA_READ, 0 = RDMA_WRITE (default = 1)");
1247 
1248 static int ep_timeout_secs = 60;
1249 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
1250 		"CM Endpoint operation timeout in seconds (default = 60)");
1251 
1252 static int mpa_rev = 1;
1253 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
1254 		"MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
1255 
1256 static int markers_enabled;
1257 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
1258 		"Enable MPA MARKERS (default(0) = disabled)");
1259 
1260 static int crc_enabled = 1;
1261 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
1262 		"Enable MPA CRC (default(1) = enabled)");
1263 
1264 static int rcv_win = 256 * 1024;
1265 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
1266 		"TCP receive window in bytes (default = 256KB)");
1267 
1268 static int snd_win = 128 * 1024;
1269 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
1270 		"TCP send window in bytes (default = 128KB)");
1271 
1272 int use_dsgl = 1;
1273 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, use_dsgl, CTLFLAG_RWTUN, &use_dsgl, 0,
1274 		"Use DSGL for PBL/FastReg (default=1)");
1275 
1276 int inline_threshold = 128;
1277 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, inline_threshold, CTLFLAG_RWTUN, &inline_threshold, 0,
1278 		"inline vs dsgl threshold (default=128)");
1279 
1280 static int reuseaddr = 0;
1281 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, reuseaddr, CTLFLAG_RWTUN, &reuseaddr, 0,
1282 		"Enable SO_REUSEADDR & SO_REUSEPORT socket options on all iWARP client connections (default = 0)");
1283 
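/*
 * Arm the ep timer and take a reference on the ep for it.  stop_ep_timer()
 * returns 0 if it cancelled a pending timer (and dropped that reference),
 * or 1 if the timer had already fired or been stopped.
 */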
1284 static void
1285 start_ep_timer(struct c4iw_ep *ep)
1286 {
1287 
1288 	if (timer_pending(&ep->timer)) {
1289 		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
1290 		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
1291 		    ep);
1292 		return;
1293 	}
1294 	clear_bit(TIMEOUT, &ep->com.flags);
1295 	c4iw_get_ep(&ep->com);
1296 	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
1297 	ep->timer.data = (unsigned long)ep;
1298 	ep->timer.function = ep_timeout;
1299 	add_timer(&ep->timer);
1300 }
1301 
1302 static int
1303 stop_ep_timer(struct c4iw_ep *ep)
1304 {
1305 
1306 	del_timer_sync(&ep->timer);
1307 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
1308 		c4iw_put_ep(&ep->com);
1309 		return 0;
1310 	}
1311 	return 1;
1312 }
1313 
1314 static void *
1315 alloc_ep(int size, gfp_t gfp)
1316 {
1317 	struct c4iw_ep_common *epc;
1318 
1319 	epc = kzalloc(size, gfp);
1320 	if (epc == NULL)
1321 		return (NULL);
1322 
1323 	kref_init(&epc->kref);
1324 	mutex_init(&epc->mutex);
1325 	c4iw_init_wr_wait(&epc->wr_wait);
1326 
1327 	return (epc);
1328 }
1329 
1330 void _c4iw_free_ep(struct kref *kref)
1331 {
1332 	struct c4iw_ep *ep;
1333 #if defined(KTR) || defined(INVARIANTS)
1334 	struct c4iw_ep_common *epc;
1335 #endif
1336 
1337 	ep = container_of(kref, struct c4iw_ep, com.kref);
1338 #if defined(KTR) || defined(INVARIANTS)
1339 	epc = &ep->com;
1340 #endif
1341 	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
1342 	    __func__, epc));
1343 	if (test_bit(QP_REFERENCED, &ep->com.flags))
1344 		deref_qp(ep);
1345 	CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx",
1346 	    __func__, ep, epc->history, epc->flags);
1347 	kfree(ep);
1348 }
1349 
1350 static void release_ep_resources(struct c4iw_ep *ep)
1351 {
1352 	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
1353 	set_bit(RELEASE_RESOURCES, &ep->com.flags);
1354 	c4iw_put_ep(&ep->com);
1355 	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
1356 }
1357 
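/*
 * Build and send a streaming-mode MPA request.  For MPA v2 the private data
 * region carries the enhanced connection parameters first; the resulting
 * wire layout (illustrative) is:
 *
 *	[struct mpa_message][struct mpa_v2_conn_params][ULP private data]
 *
 * with private_data_size covering both trailing parts.
 */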
1358 static int
1359 send_mpa_req(struct c4iw_ep *ep)
1360 {
1361 	int mpalen;
1362 	struct mpa_message *mpa;
1363 	struct mpa_v2_conn_params mpa_v2_params;
1364 	struct mbuf *m;
1365 	char mpa_rev_to_use = mpa_rev;
1366 	int err = 0;
1367 
1368 	if (ep->retry_with_mpa_v1)
1369 		mpa_rev_to_use = 1;
1370 	mpalen = sizeof(*mpa) + ep->plen;
1371 	if (mpa_rev_to_use == 2)
1372 		mpalen += sizeof(struct mpa_v2_conn_params);
1373 
1374 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1375 	if (mpa == NULL) {
1376 		err = -ENOMEM;
1377 		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",
1378 				__func__, ep, err);
1379 		goto err;
1380 	}
1381 
1382 	memset(mpa, 0, mpalen);
1383 	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
1384 	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
1385 		(markers_enabled ? MPA_MARKERS : 0) |
1386 		(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
1387 	mpa->private_data_size = htons(ep->plen);
1388 	mpa->revision = mpa_rev_to_use;
1389 
1390 	if (mpa_rev_to_use == 1) {
1391 		ep->tried_with_mpa_v1 = 1;
1392 		ep->retry_with_mpa_v1 = 0;
1393 	}
1394 
1395 	if (mpa_rev_to_use == 2) {
1396 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1397 					    sizeof(struct mpa_v2_conn_params));
1398 		mpa_v2_params.ird = htons((u16)ep->ird);
1399 		mpa_v2_params.ord = htons((u16)ep->ord);
1400 
1401 		if (peer2peer) {
1402 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1403 
1404 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1405 				mpa_v2_params.ord |=
1406 				    htons(MPA_V2_RDMA_WRITE_RTR);
1407 			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1408 				mpa_v2_params.ord |=
1409 					htons(MPA_V2_RDMA_READ_RTR);
1410 			}
1411 		}
1412 		memcpy(mpa->private_data, &mpa_v2_params,
1413 			sizeof(struct mpa_v2_conn_params));
1414 
1415 		if (ep->plen) {
1416 
1417 			memcpy(mpa->private_data +
1418 				sizeof(struct mpa_v2_conn_params),
1419 				ep->mpa_pkt + sizeof(*mpa), ep->plen);
1420 		}
1421 	} else {
1422 
1423 		if (ep->plen)
1424 			memcpy(mpa->private_data,
1425 					ep->mpa_pkt + sizeof(*mpa), ep->plen);
1426 		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
1427 	}
1428 
1429 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1430 	if (m == NULL) {
1431 		err = -ENOMEM;
1432 		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",
1433 				__func__, ep, err);
1434 		free(mpa, M_CXGBE);
1435 		goto err;
1436 	}
1437 	m_copyback(m, 0, mpalen, (void *)mpa);
1438 	free(mpa, M_CXGBE);
1439 
1440 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1441 			ep->com.thread);
1442 	if (err) {
1443 		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",
1444 				__func__, ep, err);
1445 		goto err;
1446 	}
1447 
1448 	START_EP_TIMER(ep);
1449 	ep->com.state = MPA_REQ_SENT;
1450 	ep->mpa_attr.initiator = 1;
1451 	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1452 	return 0;
1453 err:
1454 	connect_reply_upcall(ep, err);
1455 	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1456 	return err;
1457 }
1458 
1459 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
1460 {
1461 	int mpalen;
1462 	struct mpa_message *mpa;
1463 	struct mpa_v2_conn_params mpa_v2_params;
1464 	struct mbuf *m;
1465 	int err;
1466 
1467 	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
1468 	    ep->plen);
1469 
1470 	mpalen = sizeof(*mpa) + plen;
1471 
1472 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1473 
1474 		mpalen += sizeof(struct mpa_v2_conn_params);
1475 		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
1476 		    ep->mpa_attr.version, mpalen);
1477 	}
1478 
1479 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1480 	if (mpa == NULL)
1481 		return (-ENOMEM);
1482 
1483 	memset(mpa, 0, mpalen);
1484 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1485 	mpa->flags = MPA_REJECT;
1486 	mpa->revision = mpa_rev;
1487 	mpa->private_data_size = htons(plen);
1488 
1489 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1490 
1491 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1492 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1493 					    sizeof(struct mpa_v2_conn_params));
1494 		mpa_v2_params.ird = htons(((u16)ep->ird) |
1495 				(peer2peer ? MPA_V2_PEER2PEER_MODEL :
1496 				 0));
1497 		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
1498 					(p2p_type ==
1499 					 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
1500 					 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
1501 					 FW_RI_INIT_P2PTYPE_READ_REQ ?
1502 					 MPA_V2_RDMA_READ_RTR : 0) : 0));
1503 		memcpy(mpa->private_data, &mpa_v2_params,
1504 				sizeof(struct mpa_v2_conn_params));
1505 
1506 		if (ep->plen)
1507 			memcpy(mpa->private_data +
1508 				sizeof(struct mpa_v2_conn_params), pdata, plen);
1509 		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
1510 		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
1511 	} else
1512 		if (plen)
1513 			memcpy(mpa->private_data, pdata, plen);
1514 
1515 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1516 	if (m == NULL) {
1517 		free(mpa, M_CXGBE);
1518 		return (-ENOMEM);
1519 	}
1520 	m_copyback(m, 0, mpalen, (void *)mpa);
1521 	free(mpa, M_CXGBE);
1522 
1523 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
1524 	if (!err)
1525 		ep->snd_seq += mpalen;
1526 	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
1527 	return err;
1528 }
1529 
1530 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
1531 {
1532 	int mpalen;
1533 	struct mpa_message *mpa;
1534 	struct mbuf *m;
1535 	struct mpa_v2_conn_params mpa_v2_params;
1536 	int err;
1537 
1538 	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);
1539 
1540 	mpalen = sizeof(*mpa) + plen;
1541 
1542 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1543 
1544 		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
1545 		    ep->mpa_attr.version);
1546 		mpalen += sizeof(struct mpa_v2_conn_params);
1547 	}
1548 
1549 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1550 	if (mpa == NULL)
1551 		return (-ENOMEM);
1552 
1553 	memset(mpa, 0, sizeof(*mpa));
1554 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1555 	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
1556 		(markers_enabled ? MPA_MARKERS : 0);
1557 	mpa->revision = ep->mpa_attr.version;
1558 	mpa->private_data_size = htons(plen);
1559 
1560 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1561 
1562 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1563 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1564 					    sizeof(struct mpa_v2_conn_params));
1565 		mpa_v2_params.ird = htons((u16)ep->ird);
1566 		mpa_v2_params.ord = htons((u16)ep->ord);
1567 		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
1568 		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);
1569 
1570 		if (peer2peer && (ep->mpa_attr.p2p_type !=
1571 			FW_RI_INIT_P2PTYPE_DISABLED)) {
1572 
1573 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1574 
1575 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1576 
1577 				mpa_v2_params.ord |=
1578 					htons(MPA_V2_RDMA_WRITE_RTR);
1579 				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
1580 				    __func__, ep, p2p_type, mpa_v2_params.ird,
1581 				    mpa_v2_params.ord);
1582 			}
1583 			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1584 
1585 				mpa_v2_params.ord |=
1586 					htons(MPA_V2_RDMA_READ_RTR);
1587 				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
1588 				    __func__, ep, p2p_type, mpa_v2_params.ird,
1589 				    mpa_v2_params.ord);
1590 			}
1591 		}
1592 
1593 		memcpy(mpa->private_data, &mpa_v2_params,
1594 			sizeof(struct mpa_v2_conn_params));
1595 
1596 		if (ep->plen)
1597 			memcpy(mpa->private_data +
1598 				sizeof(struct mpa_v2_conn_params), pdata, plen);
1599 	} else
1600 		if (plen)
1601 			memcpy(mpa->private_data, pdata, plen);
1602 
1603 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1604 	if (m == NULL) {
1605 		free(mpa, M_CXGBE);
1606 		return (-ENOMEM);
1607 	}
1608 	m_copyback(m, 0, mpalen, (void *)mpa);
1609 	free(mpa, M_CXGBE);
1610 
1611 
1612 	ep->com.state = MPA_REP_SENT;
1613 	ep->snd_seq += mpalen;
1614 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1615 			ep->com.thread);
1616 	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
1617 	return err;
1618 }
1619 
1620 
1621 
1622 static void close_complete_upcall(struct c4iw_ep *ep, int status)
1623 {
1624 	struct iw_cm_event event;
1625 
1626 	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
1627 	memset(&event, 0, sizeof(event));
1628 	event.event = IW_CM_EVENT_CLOSE;
1629 	event.status = status;
1630 
1631 	if (ep->com.cm_id) {
1632 
1633 		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
1634 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1635 		deref_cm_id(&ep->com);
1636 		set_bit(CLOSE_UPCALL, &ep->com.history);
1637 	}
1638 	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
1639 }
1640 
1641 static int
1642 send_abort(struct c4iw_ep *ep)
1643 {
1644 	struct socket *so = ep->com.so;
1645 	struct sockopt sopt;
1646 	int rc;
1647 	struct linger l;
1648 
1649 	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
1650 	    states[ep->com.state], ep->hwtid);
1651 
1652 	l.l_onoff = 1;
1653 	l.l_linger = 0;
1654 
1655 	/* linger_time of 0 forces RST to be sent */
1656 	sopt.sopt_dir = SOPT_SET;
1657 	sopt.sopt_level = SOL_SOCKET;
1658 	sopt.sopt_name = SO_LINGER;
1659 	sopt.sopt_val = (caddr_t)&l;
1660 	sopt.sopt_valsize = sizeof l;
1661 	sopt.sopt_td = NULL;
1662 	rc = -sosetopt(so, &sopt);
1663 	if (rc != 0) {
1664 		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
1665 		    __func__, so, rc);
1666 	}
1667 
1668 	uninit_iwarp_socket(so);
1669 	soclose(so);
1670 	set_bit(ABORT_CONN, &ep->com.history);
1671 
1672 	/*
1673 	 * TBD: the iw_cxgbe driver should receive an ABORT reply for every
1674 	 * ABORT request it has sent.  But the current TOE driver does not
1675 	 * propagate this ABORT reply event (via do_abort_rpl) to iw_cxgbe.
1676 	 * As a workaround, de-reference 'ep' here instead of doing it in the
1677 	 * (not yet implemented) abort_rpl() handler of the iw_cxgbe driver.
1678 	 */
1679 	release_ep_resources(ep);
1680 	ep->com.state = DEAD;
1681 
1682 	return (0);
1683 }
1684 
1685 static void peer_close_upcall(struct c4iw_ep *ep)
1686 {
1687 	struct iw_cm_event event;
1688 
1689 	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
1690 	memset(&event, 0, sizeof(event));
1691 	event.event = IW_CM_EVENT_DISCONNECT;
1692 
1693 	if (ep->com.cm_id) {
1694 
1695 		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
1696 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1697 		set_bit(DISCONN_UPCALL, &ep->com.history);
1698 	}
1699 	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
1700 }
1701 
1702 static void peer_abort_upcall(struct c4iw_ep *ep)
1703 {
1704 	struct iw_cm_event event;
1705 
1706 	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
1707 	memset(&event, 0, sizeof(event));
1708 	event.event = IW_CM_EVENT_CLOSE;
1709 	event.status = -ECONNRESET;
1710 
1711 	if (ep->com.cm_id) {
1712 
1713 		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
1714 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1715 		deref_cm_id(&ep->com);
1716 		set_bit(ABORT_UPCALL, &ep->com.history);
1717 	}
1718 	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
1719 }
1720 
1721 static void connect_reply_upcall(struct c4iw_ep *ep, int status)
1722 {
1723 	struct iw_cm_event event;
1724 
1725 	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
1726 	memset(&event, 0, sizeof(event));
1727 	event.event = IW_CM_EVENT_CONNECT_REPLY;
1728 	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
1729 					-ECONNRESET : status;
1730 	event.local_addr = ep->com.local_addr;
1731 	event.remote_addr = ep->com.remote_addr;
1732 
1733 	if ((status == 0) || (status == -ECONNREFUSED)) {
1734 
1735 		if (!ep->tried_with_mpa_v1) {
1736 
1737 			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
1738 			/* this means MPA_v2 is used */
1739 			event.ord = ep->ird;
1740 			event.ird = ep->ord;
1741 			event.private_data_len = ep->plen -
1742 				sizeof(struct mpa_v2_conn_params);
1743 			event.private_data = ep->mpa_pkt +
1744 				sizeof(struct mpa_message) +
1745 				sizeof(struct mpa_v2_conn_params);
1746 		} else {
1747 
1748 			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
1749 			/* this means MPA_v1 is used */
1750 			event.ord = c4iw_max_read_depth;
1751 			event.ird = c4iw_max_read_depth;
1752 			event.private_data_len = ep->plen;
1753 			event.private_data = ep->mpa_pkt +
1754 				sizeof(struct mpa_message);
1755 		}
1756 	}
1757 
1758 	if (ep->com.cm_id) {
1759 
1760 		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
1761 		set_bit(CONN_RPL_UPCALL, &ep->com.history);
1762 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1763 	}
1764 
1765 	if (status == -ECONNABORTED) {
1766 
1767 		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
1768 		return;
1769 	}
1770 
1771 	if (status < 0) {
1772 
1773 		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
1774 		deref_cm_id(&ep->com);
1775 	}
1776 
1777 	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
1778 }
1779 
1780 static int connect_request_upcall(struct c4iw_ep *ep)
1781 {
1782 	struct iw_cm_event event;
1783 	int ret;
1784 
1785 	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
1786 	    ep->tried_with_mpa_v1);
1787 
1788 	memset(&event, 0, sizeof(event));
1789 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
1790 	event.local_addr = ep->com.local_addr;
1791 	event.remote_addr = ep->com.remote_addr;
1792 	event.provider_data = ep;
1793 
1794 	if (!ep->tried_with_mpa_v1) {
1795 		/* this means MPA_v2 is used */
1796 		event.ord = ep->ord;
1797 		event.ird = ep->ird;
1798 		event.private_data_len = ep->plen -
1799 			sizeof(struct mpa_v2_conn_params);
1800 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1801 			sizeof(struct mpa_v2_conn_params);
1802 	} else {
1803 
1804 		/* this means MPA_v1 is used. Send max supported */
1805 		event.ord = c4iw_max_read_depth;
1806 		event.ird = c4iw_max_read_depth;
1807 		event.private_data_len = ep->plen;
1808 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1809 	}
1810 
1811 	c4iw_get_ep(&ep->com);
1812 	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1813 	    &event);
1814 	if (ret) {
1815 		CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
1816 			" IWCM, err:%d", __func__, ep, ret);
1817 		c4iw_put_ep(&ep->com);
1818 	} else
1819 		/* Dereference parent_ep only in success case.
1820 		 * In case of failure, parent_ep is dereferenced by the caller
1821 		 * of process_mpa_request().
1822 		 */
1823 		c4iw_put_ep(&ep->parent_ep->com);
1824 
1825 	set_bit(CONNREQ_UPCALL, &ep->com.history);
1826 	return ret;
1827 }
1828 
1829 static void established_upcall(struct c4iw_ep *ep)
1830 {
1831 	struct iw_cm_event event;
1832 
1833 	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
1834 	memset(&event, 0, sizeof(event));
1835 	event.event = IW_CM_EVENT_ESTABLISHED;
1836 	event.ird = ep->ord;
1837 	event.ord = ep->ird;
1838 
1839 	if (ep->com.cm_id) {
1840 
1841 		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
1842 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1843 		set_bit(ESTAB_UPCALL, &ep->com.history);
1844 	}
1845 	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
1846 }
1847 
1848 
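/*
 * With RELAXED_IRD_NEGOTIATION an endpoint quietly shrinks its own
 * IRD/ORD to whatever the peer advertised instead of failing the
 * negotiation.  For example, if the local ORD is 8 but the MPA reply
 * carries IRD 4, the local ORD is lowered to 4; with strict negotiation
 * the same mismatch terminates the connection with MPA_INSUFF_IRD.
 */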
1849 #define RELAXED_IRD_NEGOTIATION 1
1850 
1851 /*
1852  * process_mpa_reply - process streaming mode MPA reply
1853  *
1854  * Returns:
1855  *
1856  * 0 upon success indicating a connect reply was delivered to the ULP
1857  * or the mpa reply is incomplete but valid so far.
1858  *
1859  * 1 if a failure requires the caller to close the connection.
1860  *
1861  * 2 if a failure requires the caller to abort the connection.
1862  */
1863 static int process_mpa_reply(struct c4iw_ep *ep)
1864 {
1865 	struct mpa_message *mpa;
1866 	struct mpa_v2_conn_params *mpa_v2_params;
1867 	u16 plen;
1868 	u16 resp_ird, resp_ord;
1869 	u8 rtr_mismatch = 0, insuff_ird = 0;
1870 	struct c4iw_qp_attributes attrs = {0};
1871 	enum c4iw_qp_attr_mask mask;
1872 	int err;
1873 	struct mbuf *top, *m;
1874 	int flags = MSG_DONTWAIT;
1875 	struct uio uio;
1876 	int disconnect = 0;
1877 
1878 	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);
1879 
1880 	/*
1881 	 * Stop mpa timer.  If it expired, then
1882 	 * we ignore the MPA reply.  process_timeout()
1883 	 * will abort the connection.
1884 	 */
1885 	if (STOP_EP_TIMER(ep))
1886 		return 0;
1887 
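	/*
	 * Pull whatever is queued on the socket as an mbuf chain.  With a
	 * non-NULL mp0, soreceive() returns the data in *mp0 and consults
	 * uio_resid only as an upper bound on the amount to receive.
	 */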
1888 	uio.uio_resid = 1000000;
1889 	uio.uio_td = ep->com.thread;
1890 	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
1891 
1892 	if (err) {
1893 
1894 		if (err == EWOULDBLOCK) {
1895 
1896 			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
1897 			START_EP_TIMER(ep);
1898 			return 0;
1899 		}
1900 		err = -err;
1901 		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
1902 		goto err;
1903 	}
1904 
1905 	if (ep->com.so->so_rcv.sb_mb) {
1906 
1907 		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
1908 		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
1909 		       __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
1910 	}
1911 
1912 	m = top;
1913 
1914 	do {
1915 
1916 		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
1917 		/*
1918 		 * If we get more than the supported amount of private data
1919 		 * then we must fail this connection.
1920 		 */
1921 		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
1922 
1923 			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
1924 			    ep->mpa_pkt_len + m->m_len);
1925 			err = (-EINVAL);
1926 			goto err_stop_timer;
1927 		}
1928 
1929 		/*
1930 		 * copy the new data into our accumulation buffer.
1931 		 */
1932 		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
1933 		ep->mpa_pkt_len += m->m_len;
1934 		if (!m->m_next)
1935 			m = m->m_nextpkt;
1936 		else
1937 			m = m->m_next;
1938 	} while (m);
1939 
1940 	m_freem(top);
1941 	/*
1942 	 * if we don't even have the mpa message, then bail.
1943 	 */
1944 	if (ep->mpa_pkt_len < sizeof(*mpa)) {
1945 		return 0;
1946 	}
1947 	mpa = (struct mpa_message *) ep->mpa_pkt;
1948 
1949 	/* Validate MPA header. */
1950 	if (mpa->revision > mpa_rev) {
1951 
1952 		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
1953 		    mpa->revision, mpa_rev);
1954 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
1955 				"Received = %d\n", __func__, mpa_rev, mpa->revision);
1956 		err = -EPROTO;
1957 		goto err_stop_timer;
1958 	}
1959 
1960 	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1961 
1962 		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
1963 		err = -EPROTO;
1964 		goto err_stop_timer;
1965 	}
1966 
1967 	plen = ntohs(mpa->private_data_size);
1968 
1969 	/*
1970 	 * Fail if there's too much private data.
1971 	 */
1972 	if (plen > MPA_MAX_PRIVATE_DATA) {
1973 
1974 		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
1975 		err = -EPROTO;
1976 		goto err_stop_timer;
1977 	}
1978 
1979 	/*
1980 	 * If plen does not account for pkt size
1981 	 */
1982 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1983 
1984 		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
1986 		err = -EPROTO;
1987 		goto err_stop_timer;
1988 	}
1989 
1990 	ep->plen = (u8) plen;
1991 
1992 	/*
1993 	 * If we don't have all the pdata yet, then bail.
1994 	 * We'll continue process when more data arrives.
1995 	 */
1996 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
1997 
1998 		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
1999 		return 0;
2000 	}
2001 
2002 	if (mpa->flags & MPA_REJECT) {
2003 
2004 		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
2005 		err = -ECONNREFUSED;
2006 		goto err_stop_timer;
2007 	}
2008 
2009 	/*
2010 	 * If we get here we have accumulated the entire mpa
2011 	 * start reply message including private data. And
2012 	 * the MPA header is valid.
2013 	 */
2014 	ep->com.state = FPDU_MODE;
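	/*
	 * Note: bitwise OR below, so CRC ends up enabled when either the
	 * peer set MPA_CRC or the local crc_enabled knob is nonzero.
	 */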
2015 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
2016 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
2017 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2018 	ep->mpa_attr.version = mpa->revision;
2019 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2020 
2021 	if (mpa->revision == 2) {
2022 
2023 		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
2024 		ep->mpa_attr.enhanced_rdma_conn =
2025 			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2026 
2027 		if (ep->mpa_attr.enhanced_rdma_conn) {
2028 
2029 			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
2030 			mpa_v2_params = (struct mpa_v2_conn_params *)
2031 				(ep->mpa_pkt + sizeof(*mpa));
2032 			resp_ird = ntohs(mpa_v2_params->ird) &
2033 				MPA_V2_IRD_ORD_MASK;
2034 			resp_ord = ntohs(mpa_v2_params->ord) &
2035 				MPA_V2_IRD_ORD_MASK;
2036 
2037 			/*
2038 			 * This is a double-check. Ideally, below checks are
2039 			 * not required since ird/ord stuff has been taken
2040 			 * care of in c4iw_accept_cr
2041 			 */
2042 			if (ep->ird < resp_ord) {
2043 				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
2044 				   ep->com.dev->rdev.adap->params.max_ordird_qp)
2045 					ep->ird = resp_ord;
2046 				else
2047 					insuff_ird = 1;
2048 			} else if (ep->ird > resp_ord) {
2049 				ep->ird = resp_ord;
2050 			}
2051 			if (ep->ord > resp_ird) {
2052 				if (RELAXED_IRD_NEGOTIATION)
2053 					ep->ord = resp_ird;
2054 				else
2055 					insuff_ird = 1;
2056 			}
2057 			if (insuff_ird) {
2058 				err = -ENOMEM;
2059 				ep->ird = resp_ord;
2060 				ep->ord = resp_ird;
2061 			}
2062 
2063 			if (ntohs(mpa_v2_params->ird) &
2064 				MPA_V2_PEER2PEER_MODEL) {
2065 
2066 				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
2067 				if (ntohs(mpa_v2_params->ord) &
2068 					MPA_V2_RDMA_WRITE_RTR) {
2069 
2070 					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
2071 					ep->mpa_attr.p2p_type =
2072 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2073 				}
2074 				else if (ntohs(mpa_v2_params->ord) &
2075 					MPA_V2_RDMA_READ_RTR) {
2076 
2077 					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
2078 					ep->mpa_attr.p2p_type =
2079 						FW_RI_INIT_P2PTYPE_READ_REQ;
2080 				}
2081 			}
2082 		}
2083 	} else {
2084 
2085 		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);
2086 
2087 		if (mpa->revision == 1) {
2088 
2089 			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);
2090 
2091 			if (peer2peer) {
2092 
2093 				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
2094 				ep->mpa_attr.p2p_type = p2p_type;
2095 			}
2096 		}
2097 	}
2098 
2099 	if (set_tcpinfo(ep)) {
2100 
2101 		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
2102 		printf("%s set_tcpinfo error\n", __func__);
2103 		err = -ECONNRESET;
2104 		goto err;
2105 	}
2106 
2107 	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
2108 	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
2109 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2110 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
2111 	    ep->mpa_attr.p2p_type);
2112 
2113 	/*
2114 	 * If the responder's RTR does not match that of the initiator, assign
2115 	 * FW_RI_INIT_P2PTYPE_DISABLED in the mpa attributes so that no RTR is
2116 	 * generated when moving the QP to RTS state.
2117 	 * A TERM message will be sent after the QP has moved to RTS state.
2118 	 */
2119 	if ((ep->mpa_attr.version == 2) && peer2peer &&
2120 		(ep->mpa_attr.p2p_type != p2p_type)) {
2121 
2122 		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
2123 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2124 		rtr_mismatch = 1;
2125 	}
2126 
2127 
2128 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2129 	attrs.mpa_attr = ep->mpa_attr;
2130 	attrs.max_ird = ep->ird;
2131 	attrs.max_ord = ep->ord;
2132 	attrs.llp_stream_handle = ep;
2133 	attrs.next_state = C4IW_QP_STATE_RTS;
2134 
2135 	mask = C4IW_QP_ATTR_NEXT_STATE |
2136 		C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
2137 		C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
2138 
2139 	/* bind QP and TID with INIT_WR */
2140 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2141 
2142 	if (err) {
2143 
2144 		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
2145 		goto err;
2146 	}
2147 
2148 	/*
2149 	 * If responder's RTR requirement did not match with what initiator
2150 	 * supports, generate TERM message
2151 	 */
2152 	if (rtr_mismatch) {
2153 
2154 		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
2155 		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
2156 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2157 		attrs.ecode = MPA_NOMATCH_RTR;
2158 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2159 		attrs.send_term = 1;
2160 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2161 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2162 		err = -ENOMEM;
2163 		disconnect = 1;
2164 		goto out;
2165 	}
2166 
2167 	/*
2168 	 * Generate TERM if initiator IRD is not sufficient for responder
2169 	 * provided ORD. We currently behave the same way even when the
2170 	 * responder-provided IRD is insufficient with regard to the
2171 	 * initiator's ORD.
2172 	 */
2173 	if (insuff_ird) {
2174 
2175 		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
2176 		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
2177 				__func__);
2178 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2179 		attrs.ecode = MPA_INSUFF_IRD;
2180 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2181 		attrs.send_term = 1;
2182 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2183 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2184 		err = -ENOMEM;
2185 		disconnect = 1;
2186 		goto out;
2187 	}
2188 	goto out;
2189 err_stop_timer:
2190 	STOP_EP_TIMER(ep);
2191 err:
2192 	disconnect = 2;
2193 out:
2194 	connect_reply_upcall(ep, err);
2195 	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
2196 	return disconnect;
2197 }
2198 
2199 /*
2200  * process_mpa_request - process streaming mode MPA request
2201  *
2202  * Returns:
2203  *
2204  * 0 upon success indicating a connect request was delivered to the ULP
2205  * or the mpa request is incomplete but valid so far.
2206  *
2207  * 1 if a failure requires the caller to close the connection.
2208  *
2209  * 2 if a failure requires the caller to abort the connection.
2210  */
2211 static int
2212 process_mpa_request(struct c4iw_ep *ep)
2213 {
2214 	struct mpa_message *mpa;
2215 	struct mpa_v2_conn_params *mpa_v2_params;
2216 	u16 plen;
2217 	int flags = MSG_DONTWAIT;
2218 	int rc;
2219 	struct iovec iov;
2220 	struct uio uio;
2221 	enum c4iw_ep_state state = ep->com.state;
2222 
2223 	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);
2224 
2225 	if (state != MPA_REQ_WAIT)
2226 		return 0;
2227 
2228 	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
2229 	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2230 	uio.uio_iov = &iov;
2231 	uio.uio_iovcnt = 1;
2232 	uio.uio_offset = 0;
2233 	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2234 	uio.uio_segflg = UIO_SYSSPACE;
2235 	uio.uio_rw = UIO_READ;
2236 	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */
2237 
2238 	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
2239 	if (rc == EAGAIN)
2240 		return 0;
2241 	else if (rc)
2242 		goto err_stop_timer;
2243 
2244 	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
2245 	    __func__, ep->com.so));
2246 	ep->mpa_pkt_len += uio.uio_offset;
2247 
2248 	/*
2249 	 * If we get more than the supported amount of private data then we must
2250 	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
2251 	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
2252 	 * byte is filled by the soreceive above.
2253 	 */
2254 
2255 	/* Don't even have the MPA message.  Wait for more data to arrive. */
2256 	if (ep->mpa_pkt_len < sizeof(*mpa))
2257 		return 0;
2258 	mpa = (struct mpa_message *) ep->mpa_pkt;
2259 
2260 	/*
2261 	 * Validate MPA Header.
2262 	 */
2263 	if (mpa->revision > mpa_rev) {
2264 		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
2265 		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
2266 		goto err_stop_timer;
2267 	}
2268 
2269 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
2270 		goto err_stop_timer;
2271 
2272 	/*
2273 	 * Fail if there's too much private data.
2274 	 */
2275 	plen = ntohs(mpa->private_data_size);
2276 	if (plen > MPA_MAX_PRIVATE_DATA)
2277 		goto err_stop_timer;
2278 
2279 	/*
2280 	 * If plen does not account for pkt size
2281 	 */
2282 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
2283 		goto err_stop_timer;
2284 
2285 	ep->plen = (u8) plen;
2286 
2287 	/*
2288 	 * If we don't have all the pdata yet, then bail.
2289 	 */
2290 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
2291 		return 0;
2292 
2293 	/*
2294 	 * If we get here we have accumulated the entire mpa
2295 	 * start reply message including private data.
2296 	 */
2297 	ep->mpa_attr.initiator = 0;
2298 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
2299 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
2300 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2301 	ep->mpa_attr.version = mpa->revision;
2302 	if (mpa->revision == 1)
2303 		ep->tried_with_mpa_v1 = 1;
2304 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2305 
2306 	if (mpa->revision == 2) {
2307 		ep->mpa_attr.enhanced_rdma_conn =
2308 		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2309 		if (ep->mpa_attr.enhanced_rdma_conn) {
2310 			mpa_v2_params = (struct mpa_v2_conn_params *)
2311 				(ep->mpa_pkt + sizeof(*mpa));
2312 			ep->ird = ntohs(mpa_v2_params->ird) &
2313 				MPA_V2_IRD_ORD_MASK;
2314 			ep->ird = min_t(u32, ep->ird,
2315 					cur_max_read_depth(ep->com.dev));
2316 			ep->ord = ntohs(mpa_v2_params->ord) &
2317 				MPA_V2_IRD_ORD_MASK;
2318 			ep->ord = min_t(u32, ep->ord,
2319 					cur_max_read_depth(ep->com.dev));
2320 			CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u",
2321 				 __func__, ep->ird, ep->ord);
2322 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
2323 				if (peer2peer) {
2324 					if (ntohs(mpa_v2_params->ord) &
2325 							MPA_V2_RDMA_WRITE_RTR)
2326 						ep->mpa_attr.p2p_type =
2327 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2328 					else if (ntohs(mpa_v2_params->ord) &
2329 							MPA_V2_RDMA_READ_RTR)
2330 						ep->mpa_attr.p2p_type =
2331 						FW_RI_INIT_P2PTYPE_READ_REQ;
2332 				}
2333 		}
2334 	} else if (mpa->revision == 1 && peer2peer)
2335 		ep->mpa_attr.p2p_type = p2p_type;
2336 
2337 	if (set_tcpinfo(ep))
2338 		goto err_stop_timer;
2339 
2340 	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
2341 	    "xmit_marker_enabled = %d, version = %d", __func__,
2342 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2343 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
2344 
2345 	ep->com.state = MPA_REQ_RCVD;
2346 	STOP_EP_TIMER(ep);
2347 
2348 	/* drive upcall */
2349 	if (ep->parent_ep->com.state != DEAD)
2350 		if (connect_request_upcall(ep))
2351 			goto err_out;
2352 	return 0;
2353 
2354 err_stop_timer:
2355 	STOP_EP_TIMER(ep);
2356 err_out:
2357 	return 2;
2358 }
2359 
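/*
 * A minimal sketch (not the driver's actual rx path) of how a caller is
 * expected to act on the 0/1/2 contract shared by process_mpa_reply() and
 * process_mpa_request():
 *
 *	rc = process_mpa_request(ep);
 *	if (rc == 1)		// failure: close the connection
 *		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 *	else if (rc == 2)	// failure: abort the connection
 *		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
 *	// rc == 0: delivered, or incomplete but valid so far
 */
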
2360 /*
2361  * iwcm->reject.  Reject an incoming MPA connection request, sending the
2362  * caller's private data to the peer in the MPA reject frame.
2363  * Returns -errno on failure.
2364  */
2365 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2366 {
2367 #ifdef KTR
2368 	int err;
2369 #endif
2370 	struct c4iw_ep *ep = to_ep(cm_id);
2371 	int abort = 0;
2372 
2373 	mutex_lock(&ep->com.mutex);
2374 	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
2375 
2376 	if (ep->com.state != MPA_REQ_RCVD) {
2378 
2379 		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
2380 		mutex_unlock(&ep->com.mutex);
2381 		c4iw_put_ep(&ep->com);
2382 		return -ECONNRESET;
2383 	}
2384 	set_bit(ULP_REJECT, &ep->com.history);
2385 
2386 	if (mpa_rev == 0) {
2387 
2388 		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
2389 		abort = 1;
2390 	}
2391 	else {
2392 
2393 		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
2394 		abort = send_mpa_reject(ep, pdata, pdata_len);
2395 	}
2396 	STOP_EP_TIMER(ep);
2397 #ifdef KTR
2398 	err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2399 #else
2400 	c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2401 #endif
2402 	mutex_unlock(&ep->com.mutex);
2403 	c4iw_put_ep(&ep->com);
2404 	CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
2405 	return 0;
2406 }
2407 
2408 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2409 {
2410 	int err;
2411 	struct c4iw_qp_attributes attrs = {0};
2412 	enum c4iw_qp_attr_mask mask;
2413 	struct c4iw_ep *ep = to_ep(cm_id);
2414 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2415 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2416 	int abort = 0;
2417 
2418 	mutex_lock(&ep->com.mutex);
2419 	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
2420 
2421 	if (ep->com.state != MPA_REQ_RCVD) {
2423 
2424 		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
2425 		err = -ECONNRESET;
2426 		goto err_out;
2427 	}
2428 
2429 	BUG_ON(!qp);
2430 
2431 	set_bit(ULP_ACCEPT, &ep->com.history);
2432 
2433 	if ((conn_param->ord > c4iw_max_read_depth) ||
2434 		(conn_param->ird > c4iw_max_read_depth)) {
2435 
2436 		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
2437 		err = -EINVAL;
2438 		goto err_abort;
2439 	}
2440 
2441 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2442 
2443 		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
2444 
2445 		if (conn_param->ord > ep->ird) {
2446 			if (RELAXED_IRD_NEGOTIATION) {
2447 				conn_param->ord = ep->ird;
2448 			} else {
2449 				ep->ird = conn_param->ird;
2450 				ep->ord = conn_param->ord;
2451 				send_mpa_reject(ep, conn_param->private_data,
2452 						conn_param->private_data_len);
2453 				err = -ENOMEM;
2454 				goto err_abort;
2455 			}
2456 		}
2457 		if (conn_param->ird < ep->ord) {
2458 			if (RELAXED_IRD_NEGOTIATION &&
2459 			    ep->ord <= h->rdev.adap->params.max_ordird_qp) {
2460 				conn_param->ird = ep->ord;
2461 			} else {
2462 				err = -ENOMEM;
2463 				goto err_abort;
2464 			}
2465 		}
2466 	}
2467 	ep->ird = conn_param->ird;
2468 	ep->ord = conn_param->ord;
2469 
2470 	if (ep->mpa_attr.version == 1) {
2471 		if (peer2peer && ep->ird == 0)
2472 			ep->ird = 1;
2473 	} else {
2474 		if (peer2peer &&
2475 		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
2476 		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
2477 			ep->ird = 1;
2478 	}
2479 
2480 	CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d", __func__, __LINE__,
2481 			ep->ird, ep->ord);
2482 
2483 	ep->com.cm_id = cm_id;
2484 	ref_cm_id(&ep->com);
2485 	ep->com.qp = qp;
2486 	ref_qp(ep);
2487 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2488 
2489 	/* bind QP to EP and move to RTS */
2490 	attrs.mpa_attr = ep->mpa_attr;
2491 	attrs.max_ird = ep->ird;
2492 	attrs.max_ord = ep->ord;
2493 	attrs.llp_stream_handle = ep;
2494 	attrs.next_state = C4IW_QP_STATE_RTS;
2495 
2496 	/* bind QP and TID with INIT_WR */
2497 	mask = C4IW_QP_ATTR_NEXT_STATE |
2498 		C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2499 		C4IW_QP_ATTR_MPA_ATTR |
2500 		C4IW_QP_ATTR_MAX_IRD |
2501 		C4IW_QP_ATTR_MAX_ORD;
2502 
2503 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2504 	if (err) {
2505 		CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err);
2506 		goto err_deref_cm_id;
2507 	}
2508 
2509 	err = send_mpa_reply(ep, conn_param->private_data,
2510 			conn_param->private_data_len);
2511 	if (err) {
2512 		CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err);
2513 		goto err_deref_cm_id;
2514 	}
2515 
2516 	ep->com.state = FPDU_MODE;
2517 	established_upcall(ep);
2518 	mutex_unlock(&ep->com.mutex);
2519 	c4iw_put_ep(&ep->com);
2520 	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2521 	return 0;
2522 err_deref_cm_id:
2523 	deref_cm_id(&ep->com);
2524 err_abort:
2525 	abort = 1;
2526 err_out:
2527 	if (abort)
2528 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2529 	mutex_unlock(&ep->com.mutex);
2530 	c4iw_put_ep(&ep->com);
2531 	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2532 	return err;
2533 }
2534 
2535 static int
2536 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
2537 {
2538 	int ret;
2539 	int size, on;
2540 	struct socket *sock = NULL;
2541 	struct sockopt sopt;
2542 
2543 	ret = sock_create_kern(laddr->ss_family,
2544 			SOCK_STREAM, IPPROTO_TCP, &sock);
2545 	if (ret) {
2546 		CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d",
2547 				__func__, ret);
2548 		return ret;
2549 	}
2550 
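	/*
	 * Optionally set SO_REUSEADDR and SO_REUSEPORT (controlled by the
	 * driver's "reuseaddr" knob) so a listener can rebind a port that
	 * still has connections lingering in TIME_WAIT.
	 */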
2551 	if (reuseaddr) {
2552 		bzero(&sopt, sizeof(struct sockopt));
2553 		sopt.sopt_dir = SOPT_SET;
2554 		sopt.sopt_level = SOL_SOCKET;
2555 		sopt.sopt_name = SO_REUSEADDR;
2556 		on = 1;
2557 		sopt.sopt_val = &on;
2558 		sopt.sopt_valsize = sizeof(on);
2559 		ret = -sosetopt(sock, &sopt);
2560 		if (ret != 0) {
2561 			log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEADDR) "
2562 				"failed with %d.\n", __func__, sock, ret);
2563 		}
2564 		bzero(&sopt, sizeof(struct sockopt));
2565 		sopt.sopt_dir = SOPT_SET;
2566 		sopt.sopt_level = SOL_SOCKET;
2567 		sopt.sopt_name = SO_REUSEPORT;
2568 		on = 1;
2569 		sopt.sopt_val = &on;
2570 		sopt.sopt_valsize = sizeof(on);
2571 		ret = -sosetopt(sock, &sopt);
2572 		if (ret != 0) {
2573 			log(LOG_ERR, "%s: sosetopt(%p, SO_REUSEPORT) "
2574 				"failed with %d.\n", __func__, sock, ret);
2575 		}
2576 	}
2577 
2578 	ret = -sobind(sock, (struct sockaddr *)laddr, curthread);
2579 	if (ret) {
2580 		CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %d",
2581 				__func__, ret);
2582 		sock_release(sock);
2583 		return ret;
2584 	}
2585 
2586 	size = laddr->ss_family == AF_INET6 ?
2587 		sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in);
2588 	ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0);
2589 	if (ret) {
2590 		CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %d",
2591 				__func__, ret);
2592 		sock_release(sock);
2593 		return ret;
2594 	}
2595 
2596 	*so = sock;
2597 	return 0;
2598 }
2599 
2600 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2601 {
2602 	int err = 0;
2603 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2604 	struct c4iw_ep *ep = NULL;
2605 	if_t nh_ifp;        /* Logical egress interface */
2606 	struct epoch_tracker et;
2607 #ifdef VIMAGE
2608 	struct rdma_cm_id *rdma_id = (struct rdma_cm_id*)cm_id->context;
2609 	struct vnet *vnet = rdma_id->route.addr.dev_addr.net;
2610 #endif
2611 
2612 	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2613 
2615 	if ((conn_param->ord > c4iw_max_read_depth) ||
2616 		(conn_param->ird > c4iw_max_read_depth)) {
2617 
2618 		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2619 		err = -EINVAL;
2620 		goto out;
2621 	}
2622 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2623 	cm_id->provider_data = ep;
2624 
2625 	init_timer(&ep->timer);
2626 	ep->plen = conn_param->private_data_len;
2627 
2628 	if (ep->plen) {
2629 
2630 		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2631 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2632 				conn_param->private_data, ep->plen);
2633 	}
2634 	ep->ird = conn_param->ird;
2635 	ep->ord = conn_param->ord;
2636 
2637 	if (peer2peer && ep->ord == 0) {
2638 
2639 		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2640 		ep->ord = 1;
2641 	}
2642 
2643 	ep->com.dev = dev;
2644 	ep->com.cm_id = cm_id;
2645 	ref_cm_id(&ep->com);
2646 	ep->com.qp = get_qhp(dev, conn_param->qpn);
2647 
2648 	if (!ep->com.qp) {
2649 
2650 		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2651 		err = -EINVAL;
2652 		goto fail;
2653 	}
2654 	ref_qp(ep);
2655 	ep->com.thread = curthread;
2656 
2657 	NET_EPOCH_ENTER(et);
2658 	CURVNET_SET(vnet);
2659 	err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp);
2660 	CURVNET_RESTORE();
2661 	NET_EPOCH_EXIT(et);
2662 
2663 	if (err) {
2664 
2665 		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2666 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2667 		err = -EHOSTUNREACH;
2668 		goto fail;
2669 	}
2670 
2671 	if (!(if_getcapenable(nh_ifp) & IFCAP_TOE) ||
2672 	    TOEDEV(nh_ifp) == NULL) {
2673 		err = -ENOPROTOOPT;
2674 		goto fail;
2675 	}
2676 	ep->com.state = CONNECTING;
2677 	ep->tos = 0;
2678 	ep->com.local_addr = cm_id->local_addr;
2679 	ep->com.remote_addr = cm_id->remote_addr;
2680 
2681 	err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
2682 	if (err)
2683 		goto fail;
2684 
2685 	setiwsockopt(ep->com.so);
2686 	init_iwarp_socket(ep->com.so, &ep->com);
2687 	err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2688 		ep->com.thread);
2689 	if (err)
2690 		goto fail_free_so;
2691 	CTR2(KTR_IW_CXGBE, "%s:ccE, ep %p", __func__, ep);
2692 	return 0;
2693 
2694 fail_free_so:
2695 	uninit_iwarp_socket(ep->com.so);
2696 	ep->com.state = DEAD;
2697 	sock_release(ep->com.so);
2698 fail:
2699 	deref_cm_id(&ep->com);
2700 	c4iw_put_ep(&ep->com);
2701 	ep = NULL;
2702 out:
2703 	CTR2(KTR_IW_CXGBE, "%s:ccE Error %d", __func__, err);
2704 	return err;
2705 }
2706 
2707 /*
2708  * iwcm->create_listen.  Returns -errno on failure.
2709  */
2710 int
2711 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2712 {
2713 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2714 	struct c4iw_listen_ep *lep = NULL;
2715 	struct listen_port_info *port_info = NULL;
2716 	int rc = 0;
2717 
2718 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id,
2719 			backlog);
2720 	if (c4iw_fatal_error(&dev->rdev)) {
2721 		CTR2(KTR_IW_CXGBE, "%s: cm_id %p, fatal error", __func__,
2722 			       cm_id);
2723 		return -EIO;
2724 	}
2725 	lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
2726 	lep->com.cm_id = cm_id;
2727 	ref_cm_id(&lep->com);
2728 	lep->com.dev = dev;
2729 	lep->backlog = backlog;
2730 	lep->com.local_addr = cm_id->local_addr;
2731 	lep->com.thread = curthread;
2732 	cm_id->provider_data = lep;
2733 	lep->com.state = LISTEN;
2734 
2735 	/* In case of INADDR_ANY, ibcore creates a cm_id for each device and
2736 	 * invokes the iw_cxgbe listener callbacks assuming that iw_cxgbe
2737 	 * creates HW listeners for each device separately. But toecore expects
2738 	 * a single solisten() call with the INADDR_ANY address to create HW
2739 	 * listeners on all devices for a given port number. So the iw_cxgbe
2740 	 * driver calls solisten() only once for INADDR_ANY (usually on the
2741 	 * first listener callback from ibcore). All subsequent INADDR_ANY
2742 	 * listener callbacks from ibcore (for the same port address) skip
2743 	 * solisten(), as the first callback has already created listeners for
2744 	 * all other devices (via solisten()).
2745 	 */
2746 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr, NULL)) {
2747 		port_info = add_ep_to_listenlist(lep);
2748 		/* skip solisten() if refcnt > 1, as the listeners were
2749 		 * already created by 'Master lep'
2750 		 */
2751 		if (port_info->refcnt > 1) {
2752 			/* As there will be only one listener socket for a TCP
2753 			 * port, copy the master lep's socket pointer to other
2754 			 * leps that belong to the same TCP port.
2755 			 */
2756 			struct c4iw_listen_ep *head_lep =
2757 					container_of(port_info->lep_list.next,
2758 					struct c4iw_listen_ep, listen_ep_list);
2759 			lep->com.so = head_lep->com.so;
2760 			goto out;
2761 		}
2762 	}
2763 	rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so);
2764 	if (rc) {
2765 		CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d",
2766 				__func__, rc);
2767 		goto fail;
2768 	}
2769 
2770 	rc = -solisten(lep->com.so, backlog, curthread);
2771 	if (rc) {
2772 		CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d",
2773 				__func__, lep->com.so, rc);
2774 		goto fail_free_so;
2775 	}
2776 	init_iwarp_socket(lep->com.so, &lep->com);
2777 out:
2778 	return 0;
2779 
2780 fail_free_so:
2781 	sock_release(lep->com.so);
2782 fail:
2783 	if (port_info)
2784 		rem_ep_from_listenlist(lep);
2785 	deref_cm_id(&lep->com);
2786 	c4iw_put_ep(&lep->com);
2787 	return rc;
2788 }
2789 
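/*
 * Rough picture of the INADDR_ANY listener bookkeeping used above (one
 * listen_port_info per TCP port; only the first, "master" lep owns the
 * listen socket):
 *
 *	listen_port_list -> port_info (port N, refcnt 3)
 *	                      lep_list -> master lep (owns com.so)
 *	                               -> lep (borrows master's com.so)
 *	                               -> lep (borrows master's com.so)
 */
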
2790 int
2791 c4iw_destroy_listen(struct iw_cm_id *cm_id)
2792 {
2793 	struct c4iw_listen_ep *lep = to_listen_ep(cm_id);
2794 
2795 	mutex_lock(&lep->com.mutex);
2796 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id,
2797 	    states[lep->com.state]);
2798 
2799 	lep->com.state = DEAD;
2800 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr,
2801 	    lep->com.so->so_vnet)) {
2802 		/* if no refcount then close listen socket */
2803 		if (!rem_ep_from_listenlist(lep))
2804 			close_socket(lep->com.so);
2805 	} else
2806 		close_socket(lep->com.so);
2807 	deref_cm_id(&lep->com);
2808 	mutex_unlock(&lep->com.mutex);
2809 	c4iw_put_ep(&lep->com);
2810 	return 0;
2811 }
2812 
2813 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2814 {
2815 	int ret;
2816 	mutex_lock(&ep->com.mutex);
2817 	ret = c4iw_ep_disconnect(ep, abrupt, gfp);
2818 	mutex_unlock(&ep->com.mutex);
2819 	return ret;
2820 }
2821 
2822 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2823 {
2824 	int ret = 0;
2825 	int close = 0;
2826 	struct c4iw_rdev *rdev;
2827 
2829 	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2830 
2831 	rdev = &ep->com.dev->rdev;
2832 
2833 	if (c4iw_fatal_error(rdev)) {
2834 		CTR3(KTR_IW_CXGBE, "%s:ced1 fatal error %p %s", __func__, ep,
2835 					states[ep->com.state]);
2836 		if (ep->com.state != DEAD) {
2837 			send_abort(ep);
2838 			ep->com.state = DEAD;
2839 		}
2840 		close_complete_upcall(ep, -ECONNRESET);
2841 		return ECONNRESET;
2842 	}
2843 	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2844 	    states[ep->com.state]);
2845 
2846 	/*
2847 	 * Ref the ep here in case we have fatal errors causing the
2848 	 * ep to be released and freed.
2849 	 */
2850 	c4iw_get_ep(&ep->com);
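	/*
	 * Connected states move to ABORTING (abrupt) or to CLOSING with the
	 * ep timer armed (graceful).  A disconnect that crosses a peer close
	 * in CLOSING advances to MORIBUND, while MORIBUND/ABORTING/DEAD mean
	 * a close is already in progress and the request is ignored.
	 */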
2851 	switch (ep->com.state) {
2852 
2853 		case MPA_REQ_WAIT:
2854 		case MPA_REQ_SENT:
2855 		case MPA_REQ_RCVD:
2856 		case MPA_REP_SENT:
2857 		case FPDU_MODE:
2858 			close = 1;
2859 			if (abrupt)
2860 				ep->com.state = ABORTING;
2861 			else {
2862 				ep->com.state = CLOSING;
2863 				START_EP_TIMER(ep);
2864 			}
2865 			set_bit(CLOSE_SENT, &ep->com.flags);
2866 			break;
2867 
2868 		case CLOSING:
2869 
2870 			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2871 
2872 				close = 1;
2873 				if (abrupt) {
2874 					STOP_EP_TIMER(ep);
2875 					ep->com.state = ABORTING;
2876 				} else
2877 					ep->com.state = MORIBUND;
2878 			}
2879 			break;
2880 
2881 		case MORIBUND:
2882 		case ABORTING:
2883 		case DEAD:
2884 			CTR3(KTR_IW_CXGBE,
2885 			    "%s ignoring disconnect ep %p state %u", __func__,
2886 			    ep, ep->com.state);
2887 			break;
2888 
2889 		default:
2890 			BUG();
2891 			break;
2892 	}
2893 
2895 	if (close) {
2896 
2897 		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2898 
2899 		if (abrupt) {
2900 
2901 			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2902 			set_bit(EP_DISC_ABORT, &ep->com.history);
2903 			close_complete_upcall(ep, -ECONNRESET);
2904 			send_abort(ep);
2905 		} else {
2906 
2907 			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2908 			set_bit(EP_DISC_CLOSE, &ep->com.history);
2909 
2910 			if (!ep->parent_ep)
2911 				ep->com.state = MORIBUND;
2912 
2913 			CURVNET_SET(ep->com.so->so_vnet);
2914 			ret = sodisconnect(ep->com.so);
2915 			CURVNET_RESTORE();
2916 			if (ret) {
2917 				CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2918 				STOP_EP_TIMER(ep);
2919 				send_abort(ep);
2920 				ep->com.state = DEAD;
2921 				close_complete_upcall(ep, -ECONNRESET);
2922 				set_bit(EP_DISC_FAIL, &ep->com.history);
2923 				if (ep->com.qp) {
2924 					struct c4iw_qp_attributes attrs = {0};
2925 
2926 					attrs.next_state = C4IW_QP_STATE_ERROR;
2927 					ret = c4iw_modify_qp(
2928 							ep->com.dev, ep->com.qp,
2929 							C4IW_QP_ATTR_NEXT_STATE,
2930 							&attrs, 1);
2931 					CTR3(KTR_IW_CXGBE, "%s:ced7 %p ret %d",
2932 						__func__, ep, ret);
2933 				}
2934 			}
2935 		}
2936 	}
2937 	c4iw_put_ep(&ep->com);
2938 	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2939 	return ret;
2940 }
2941 
2942 #ifdef C4IW_EP_REDIRECT
2943 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2944 		struct l2t_entry *l2t)
2945 {
2946 	struct c4iw_ep *ep = ctx;
2947 
2948 	if (ep->dst != old)
2949 		return 0;
2950 
2951 	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2952 			l2t);
2953 	dst_hold(new);
2954 	cxgb4_l2t_release(ep->l2t);
2955 	ep->l2t = l2t;
2956 	dst_release(old);
2957 	ep->dst = new;
2958 	return 1;
2959 }
2960 #endif
2961 
2962 
2963 
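/*
 * Timer callback.  Only flag the timeout here; the real work is deferred
 * to the c4iw_taskq (process_timeout() via process_req()) so that it runs
 * in a context that is allowed to sleep.
 */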
2964 static void ep_timeout(unsigned long arg)
2965 {
2966 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2967 
2968 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2969 
2970 		/*
2971 		 * Only insert if it is not already on the list.
2972 		 */
2973 		if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
2974 			CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2975 			add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
2976 		}
2977 	}
2978 }
2979 
2980 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2981 {
2982 	uint64_t val = be64toh(*rpl);
2983 	int ret;
2984 	struct c4iw_wr_wait *wr_waitp;
2985 
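	/*
	 * Byte 1 of the first 64-bit beat carries the firmware return code;
	 * the second beat is the cookie, i.e. the c4iw_wr_wait that the
	 * issuer of the work request is sleeping on.
	 */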
2986 	ret = (int)((val >> 8) & 0xff);
2987 	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2988 	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2989 	if (wr_waitp)
2990 		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2991 
2992 	return (0);
2993 }
2994 
2995 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2996 {
2997 	struct cqe_list_entry *cle;
2998 	unsigned long flag;
2999 
3000 	cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
	if (cle == NULL) {
		log(LOG_ERR, "%s: failed to allocate cqe_list_entry, "
		    "dropping error CQE\n", __func__);
		return (0);
	}
3001 	cle->rhp = sc->iwarp_softc;
3002 	cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
3003 
3004 	spin_lock_irqsave(&err_cqe_lock, flag);
3005 	list_add_tail(&cle->entry, &err_cqe_list);
3006 	queue_work(c4iw_taskq, &c4iw_task);
3007 	spin_unlock_irqrestore(&err_cqe_lock, flag);
3008 
3009 	return (0);
3010 }
3011 
3012 static int
3013 process_terminate(struct c4iw_ep *ep)
3014 {
3015 	struct c4iw_qp_attributes attrs = {0};
3016 
3017 	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);
3018 
3019 	if (ep && ep->com.qp) {
3020 
3021 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
3022 				ep->hwtid, ep->com.qp->wq.sq.qid);
3023 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
3024 		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
3025 				1);
3026 	} else
3027 		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
3028 								ep ? ep->hwtid : 0);
3029 	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
3030 
3031 	return 0;
3032 }
3033 
3034 int __init c4iw_cm_init(void)
3035 {
3036 
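	/*
	 * Hook the iWARP CM into the base cxgbe driver's CPL, firmware
	 * message and async-notification dispatch paths.
	 */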
3037 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
3038 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
3039 	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
3040 	t4_register_an_handler(c4iw_ev_handler);
3041 
3042 	TAILQ_INIT(&req_list);
3043 	spin_lock_init(&req_lock);
3044 	INIT_LIST_HEAD(&err_cqe_list);
3045 	spin_lock_init(&err_cqe_lock);
3046 
3047 	INIT_WORK(&c4iw_task, process_req);
3048 
3049 	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
3050 	if (!c4iw_taskq)
3051 		return -ENOMEM;
3052 
3053 	return 0;
3054 }
3055 
3056 void __exit c4iw_cm_term(void)
3057 {
3058 	WARN_ON(!TAILQ_EMPTY(&req_list));
3059 	WARN_ON(!list_empty(&err_cqe_list));
3060 	flush_workqueue(c4iw_taskq);
3061 	destroy_workqueue(c4iw_taskq);
3062 
3063 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
3064 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
3065 	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
3066 	t4_register_an_handler(NULL);
3067 }
3068 #endif
3069