xref: /freebsd/sys/dev/cxgbe/iw_cxgbe/cm.c (revision f5147e312f43a9050468de539aeafa072caa1a60)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *	  copyright notice, this list of conditions and the following
18  *	  disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *	  copyright notice, this list of conditions and the following
22  *	  disclaimer in the documentation and/or other materials
23  *	  provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "opt_inet.h"
38 
39 #ifdef TCP_OFFLOAD
40 #include <sys/types.h>
41 #include <sys/malloc.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/sockio.h>
45 #include <sys/taskqueue.h>
46 #include <netinet/in.h>
47 #include <net/route.h>
48 
49 #include <netinet/in_systm.h>
50 #include <netinet/in_pcb.h>
51 #include <netinet6/in6_pcb.h>
52 #include <netinet/ip.h>
53 #include <netinet/in_fib.h>
54 #include <netinet6/in6_fib.h>
55 #include <netinet6/scope6_var.h>
56 #include <netinet/ip_var.h>
57 #include <netinet/tcp_var.h>
58 #include <netinet/tcp.h>
59 #include <netinet/tcpip.h>
60 
61 #include <netinet/toecore.h>
62 
63 struct sge_iq;
64 struct rss_header;
65 struct cpl_set_tcb_rpl;
66 #include <linux/types.h>
67 #include "offload.h"
68 #include "tom/t4_tom.h"
69 
70 #define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))
71 
72 #include "iw_cxgbe.h"
73 #include <linux/module.h>
74 #include <linux/workqueue.h>
75 #include <linux/notifier.h>
76 #include <linux/inetdevice.h>
77 #include <linux/if_vlan.h>
78 #include <net/netevent.h>
79 
80 static spinlock_t req_lock;
81 static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
82 static struct work_struct c4iw_task;
83 static struct workqueue_struct *c4iw_taskq;
84 static LIST_HEAD(err_cqe_list);
85 static spinlock_t err_cqe_lock;
86 static LIST_HEAD(listen_port_list);
87 static DEFINE_MUTEX(listen_port_mutex);
88 
89 static void process_req(struct work_struct *ctx);
90 static void start_ep_timer(struct c4iw_ep *ep);
91 static int stop_ep_timer(struct c4iw_ep *ep);
92 static int set_tcpinfo(struct c4iw_ep *ep);
93 static void process_timeout(struct c4iw_ep *ep);
94 static void process_err_cqes(void);
95 static void *alloc_ep(int size, gfp_t flags);
96 static void close_socket(struct socket *so);
97 static int send_mpa_req(struct c4iw_ep *ep);
98 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
99 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
100 static void close_complete_upcall(struct c4iw_ep *ep, int status);
101 static int send_abort(struct c4iw_ep *ep);
102 static void peer_close_upcall(struct c4iw_ep *ep);
103 static void peer_abort_upcall(struct c4iw_ep *ep);
104 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
105 static int connect_request_upcall(struct c4iw_ep *ep);
106 static void established_upcall(struct c4iw_ep *ep);
107 static int process_mpa_reply(struct c4iw_ep *ep);
108 static int process_mpa_request(struct c4iw_ep *ep);
109 static void process_peer_close(struct c4iw_ep *ep);
110 static void process_conn_error(struct c4iw_ep *ep);
111 static void process_close_complete(struct c4iw_ep *ep);
112 static void ep_timeout(unsigned long arg);
113 static void setiwsockopt(struct socket *so);
114 static void init_iwarp_socket(struct socket *so, void *arg);
115 static void uninit_iwarp_socket(struct socket *so);
116 static void process_data(struct c4iw_ep *ep);
117 static void process_connected(struct c4iw_ep *ep);
118 static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
119 static void process_socket_event(struct c4iw_ep *ep);
120 static void release_ep_resources(struct c4iw_ep *ep);
121 static int process_terminate(struct c4iw_ep *ep);
122 static int terminate(struct sge_iq *iq, const struct rss_header *rss,
123     struct mbuf *m);
124 static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
125 static struct listen_port_info *
126 add_ep_to_listenlist(struct c4iw_listen_ep *lep);
127 static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep);
128 static struct c4iw_listen_ep *
129 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so);
130 static int get_ifnet_from_raddr(struct sockaddr_storage *raddr,
131 		struct ifnet **ifp);
132 static void process_newconn(struct c4iw_listen_ep *master_lep,
133 		struct socket *new_so);
134 #define START_EP_TIMER(ep) \
135     do { \
136 	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
137 		__func__, __LINE__, (ep)); \
138 	    start_ep_timer(ep); \
139     } while (0)
140 
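/*
 * Unlike START_EP_TIMER, this is a GCC statement expression: it evaluates to
 * stop_ep_timer()'s return value, so callers can test whether the timer had
 * already been stopped (see the STOP_EP_TIMER() uses below).
 */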
141 #define STOP_EP_TIMER(ep) \
142     ({ \
143 	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
144 		__func__, __LINE__, (ep)); \
145 	    stop_ep_timer(ep); \
146     })
147 
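/*
 * in_getsockaddr()/in6_getsockaddr() (and the peer variants) return a
 * sockaddr allocated with M_SONAME, so both macros below copy the result
 * into the caller's storage and free the allocation before returning.
 */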
148 #define GET_LOCAL_ADDR(pladdr, so) \
149 	do { \
150 		struct sockaddr_storage *__a = NULL; \
151 		struct inpcb *__inp = sotoinpcb(so); \
152 		KASSERT(__inp != NULL, \
153 		   ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
154 		if (__inp->inp_vflag & INP_IPV4) \
155 			in_getsockaddr(so, (struct sockaddr **)&__a); \
156 		else \
157 			in6_getsockaddr(so, (struct sockaddr **)&__a); \
158 		*(pladdr) = *__a; \
159 		free(__a, M_SONAME); \
160 	} while (0)
161 
162 #define GET_REMOTE_ADDR(praddr, so) \
163 	do { \
164 		struct sockaddr_storage *__a = NULL; \
165 		struct inpcb *__inp = sotoinpcb(so); \
166 		KASSERT(__inp != NULL, \
167 		   ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
168 		if (__inp->inp_vflag & INP_IPV4) \
169 			in_getpeeraddr(so, (struct sockaddr **)&__a); \
170 		else \
171 			in6_getpeeraddr(so, (struct sockaddr **)&__a); \
172 		*(praddr) = *__a; \
173 		free(__a, M_SONAME); \
174 	} while (0)
175 
176 #ifdef KTR
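/* Human-readable ep state names, indexed by ep->com.state in CTR traces. */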
177 static char *states[] = {
178 	"idle",
179 	"listen",
180 	"connecting",
181 	"mpa_wait_req",
182 	"mpa_req_sent",
183 	"mpa_req_rcvd",
184 	"mpa_rep_sent",
185 	"fpdu_mode",
186 	"aborting",
187 	"closing",
188 	"moribund",
189 	"dead",
190 	NULL,
191 };
192 #endif
193 
194 static void deref_cm_id(struct c4iw_ep_common *epc)
195 {
196 	epc->cm_id->rem_ref(epc->cm_id);
197 	epc->cm_id = NULL;
198 	set_bit(CM_ID_DEREFED, &epc->history);
199 }
200 
201 static void ref_cm_id(struct c4iw_ep_common *epc)
202 {
203 	set_bit(CM_ID_REFED, &epc->history);
204 	epc->cm_id->add_ref(epc->cm_id);
205 }
206 
207 static void deref_qp(struct c4iw_ep *ep)
208 {
209 	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
210 	clear_bit(QP_REFERENCED, &ep->com.flags);
211 	set_bit(QP_DEREFED, &ep->com.history);
212 }
213 
214 static void ref_qp(struct c4iw_ep *ep)
215 {
216 	set_bit(QP_REFERENCED, &ep->com.flags);
217 	set_bit(QP_REFED, &ep->com.history);
218 	c4iw_qp_add_ref(&ep->com.qp->ibqp);
219 }
220 /* allocated per TCP port while listening */
221 struct listen_port_info {
222 	uint16_t port_num; /* TCP port address */
223 	struct list_head list; /* belongs to listen_port_list */
224 	struct list_head lep_list; /* per port lep list */
225 	uint32_t refcnt; /* number of lep's listening */
226 };
227 
228 /*
229  * The following two lists are used to manage INADDR_ANY listeners:
230  * 1) listen_port_list
231  * 2) lep_list
232  *
233  * Below is an overview of the INADDR_ANY listener lists on a system with a
234  * two-port adapter:
235  *   |------------------|
236  *   |listen_port_list  |
237  *   |------------------|
238  *            |
239  *            |              |-----------|       |-----------|
240  *            |              | port_num:X|       | port_num:X|
241  *            |--------------|-list------|-------|-list------|-------....
242  *                           | lep_list----|     | lep_list----|
243  *                           | refcnt    | |     | refcnt    | |
244  *                           |           | |     |           | |
245  *                           |           | |     |           | |
246  *                           |-----------| |     |-----------| |
247  *                                         |                   |
248  *                                         |                   |
249  *                                         |                   |
250  *                                         |                   |         lep1                  lep2
251  *                                         |                   |    |----------------|    |----------------|
252  *                                         |                   |----| listen_ep_list |----| listen_ep_list |
253  *                                         |                        |----------------|    |----------------|
254  *                                         |
255  *                                         |
256  *                                         |        lep1                  lep2
257  *                                         |   |----------------|    |----------------|
258  *                                         |---| listen_ep_list |----| listen_ep_list |
259  *                                             |----------------|    |----------------|
260  *
261  * Because the adapter has two ports, there are two leps (lep1 & lep2) for
262  * each TCP port number.
263  *
264  * Here 'lep1' is always marked as the master lep, because solisten() is
265  * always called through the first lep.
266  *
267  */
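/*
 * Illustrative lifecycle of a port_info entry: the first
 * add_ep_to_listenlist() call for a TCP port allocates it (refcnt 0 -> 1),
 * later listeners on the same port only bump refcnt, and
 * rem_ep_from_listenlist() frees it once the last lep for that port is
 * removed (refcnt == 0).
 */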
268 static struct listen_port_info *
269 add_ep_to_listenlist(struct c4iw_listen_ep *lep)
270 {
271 	uint16_t port;
272 	struct listen_port_info *port_info = NULL;
273 	struct sockaddr_storage *laddr = &lep->com.local_addr;
274 
275 	port = (laddr->ss_family == AF_INET) ?
276 		((struct sockaddr_in *)laddr)->sin_port :
277 		((struct sockaddr_in6 *)laddr)->sin6_port;
278 
279 	mutex_lock(&listen_port_mutex);
280 
281 	list_for_each_entry(port_info, &listen_port_list, list)
282 		if (port_info->port_num == port)
283 			goto found_port;
284 
285 	port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
286 	port_info->port_num = port;
287 	port_info->refcnt = 0;
288 
289 	list_add_tail(&port_info->list, &listen_port_list);
290 	INIT_LIST_HEAD(&port_info->lep_list);
291 
292 found_port:
293 	port_info->refcnt++;
294 	list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
295 	mutex_unlock(&listen_port_mutex);
296 	return port_info;
297 }
298 
299 static int
300 rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
301 {
302 	uint16_t port;
303 	struct listen_port_info *port_info = NULL;
304 	struct sockaddr_storage *laddr = &lep->com.local_addr;
305 	int refcnt = 0;
306 
307 	port = (laddr->ss_family == AF_INET) ?
308 		((struct sockaddr_in *)laddr)->sin_port :
309 		((struct sockaddr_in6 *)laddr)->sin6_port;
310 
311 	mutex_lock(&listen_port_mutex);
312 
313 	/* get the port_info structure based on the lep's port address */
314 	list_for_each_entry(port_info, &listen_port_list, list) {
315 		if (port_info->port_num == port) {
316 			port_info->refcnt--;
317 			refcnt = port_info->refcnt;
318 			/* remove the current lep from the listen list */
319 			list_del(&lep->listen_ep_list);
320 			if (port_info->refcnt == 0) {
321 				/* Remove this entry from the list as there
322 				 * are no more listeners for this port_num.
323 				 */
324 				list_del(&port_info->list);
325 				kfree(port_info);
326 			}
327 			break;
328 		}
329 	}
330 	mutex_unlock(&listen_port_mutex);
331 	return refcnt;
332 }
333 
334 /*
335  * Find the lep that belongs to the ifnet on which the SYN frame was received.
336  */
337 struct c4iw_listen_ep *
338 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
339 {
340 	struct adapter *adap = NULL;
341 	struct c4iw_listen_ep *lep = NULL;
342 	struct ifnet *ifp = NULL, *hw_ifp = NULL;
343 	struct listen_port_info *port_info = NULL;
344 	int i = 0, found_portinfo = 0, found_lep = 0;
345 	uint16_t port;
346 
347 	/*
348 	 * STEP 1: Figure out the 'ifp' of the physical interface, not of a
349 	 * pseudo interface like vlan, lagg, etc.
350 	 * TBD: lagg support, lagg + vlan support.
351 	 */
352 	ifp = TOEPCB(so)->l2te->ifp;
353 	if (ifp->if_type == IFT_L2VLAN) {
354 		hw_ifp = VLAN_TRUNKDEV(ifp);
355 		if (hw_ifp == NULL) {
356 			CTR4(KTR_IW_CXGBE, "%s: Failed to get parent ifnet of "
357 				"vlan ifnet %p, sock %p, master_lep %p",
358 				__func__, ifp, so, master_lep);
359 			return (NULL);
360 		}
361 	} else
362 		hw_ifp = ifp;
363 
364 	/* STEP 2: Find 'port_info' with listener local port address. */
365 	port = (master_lep->com.local_addr.ss_family == AF_INET) ?
366 		((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
367 		((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;
368 
369 
370 	mutex_lock(&listen_port_mutex);
371 	list_for_each_entry(port_info, &listen_port_list, list)
372 		if (port_info->port_num == port) {
373 			found_portinfo = 1;
374 			break;
375 		}
376 	if (!found_portinfo)
377 		goto out;
378 
379 	/* STEP 3: Traverse through list of lep's that are bound to the current
380 	 * TCP port address and find the lep that belongs to the ifnet on which
381 	 * the SYN frame was received.
382 	 */
383 	list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
384 		adap = lep->com.dev->rdev.adap;
385 		for_each_port(adap, i) {
386 			if (hw_ifp == adap->port[i]->vi[0].ifp) {
387 				found_lep = 1;
388 				goto out;
389 			}
390 		}
391 	}
392 out:
393 	mutex_unlock(&listen_port_mutex);
394 	return (found_lep ? lep : NULL);
395 }
396 
397 static void process_timeout(struct c4iw_ep *ep)
398 {
399 	struct c4iw_qp_attributes attrs = {0};
400 	int abort = 1;
401 
402 	CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
403 			ep, ep->hwtid, ep->com.state);
404 	set_bit(TIMEDOUT, &ep->com.history);
405 	switch (ep->com.state) {
406 	case MPA_REQ_SENT:
407 		connect_reply_upcall(ep, -ETIMEDOUT);
408 		break;
409 	case MPA_REQ_WAIT:
410 	case MPA_REQ_RCVD:
411 	case MPA_REP_SENT:
412 	case FPDU_MODE:
413 		break;
414 	case CLOSING:
415 	case MORIBUND:
416 		if (ep->com.cm_id && ep->com.qp) {
417 			attrs.next_state = C4IW_QP_STATE_ERROR;
418 			c4iw_modify_qp(ep->com.dev, ep->com.qp,
419 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
420 		}
421 		close_complete_upcall(ep, -ETIMEDOUT);
422 		break;
423 	case ABORTING:
424 	case DEAD:
425 		/*
426 		 * These states are expected if the ep timed out at the same
427 		 * time as another thread was calling stop_ep_timer().
428 		 * So we silently do nothing for these states.
429 		 */
430 		abort = 0;
431 		break;
432 	default:
433 		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u",
434 		    __func__, ep, ep->hwtid, ep->com.state);
435 		abort = 0;
436 	}
437 	if (abort)
438 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
439 	c4iw_put_ep(&ep->com);
440 	return;
441 }
442 
443 struct cqe_list_entry {
444 	struct list_head entry;
445 	struct c4iw_dev *rhp;
446 	struct t4_cqe err_cqe;
447 };
448 
449 static void
450 process_err_cqes(void)
451 {
452 	unsigned long flag;
453 	struct cqe_list_entry *cle;
454 
455 	spin_lock_irqsave(&err_cqe_lock, flag);
456 	while (!list_empty(&err_cqe_list)) {
457 		struct list_head *tmp;
458 		tmp = err_cqe_list.next;
459 		list_del(tmp);
460 		tmp->next = tmp->prev = NULL;
461 		spin_unlock_irqrestore(&err_cqe_lock, flag);
462 		cle = list_entry(tmp, struct cqe_list_entry, entry);
463 		c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
464 		free(cle, M_CXGBE);
465 		spin_lock_irqsave(&err_cqe_lock, flag);
466 	}
467 	spin_unlock_irqrestore(&err_cqe_lock, flag);
468 
469 	return;
470 }
471 
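/*
 * Single worker behind c4iw_taskq: drains err_cqe_list, then handles the
 * TERM/TIMEOUT/SOCKET events queued on req_list, one ep at a time under that
 * ep's mutex, dropping the reference taken by add_ep_to_req_list().
 */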
472 static void
473 process_req(struct work_struct *ctx)
474 {
475 	struct c4iw_ep_common *epc;
476 	unsigned long flag;
477 	int ep_events;
478 
479 	process_err_cqes();
480 	spin_lock_irqsave(&req_lock, flag);
481 	while (!TAILQ_EMPTY(&req_list)) {
482 		epc = TAILQ_FIRST(&req_list);
483 		TAILQ_REMOVE(&req_list, epc, entry);
484 		epc->entry.tqe_prev = NULL;
485 		ep_events = epc->ep_events;
486 		epc->ep_events = 0;
487 		spin_unlock_irqrestore(&req_lock, flag);
488 		mutex_lock(&epc->mutex);
489 		CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x",
490 		    __func__, epc->so, epc, states[epc->state], ep_events);
491 		if (ep_events & C4IW_EVENT_TERM)
492 			process_terminate((struct c4iw_ep *)epc);
493 		if (ep_events & C4IW_EVENT_TIMEOUT)
494 			process_timeout((struct c4iw_ep *)epc);
495 		if (ep_events & C4IW_EVENT_SOCKET)
496 			process_socket_event((struct c4iw_ep *)epc);
497 		mutex_unlock(&epc->mutex);
498 		c4iw_put_ep(epc);
499 		process_err_cqes();
500 		spin_lock_irqsave(&req_lock, flag);
501 	}
502 	spin_unlock_irqrestore(&req_lock, flag);
503 }
504 
505 /*
506  * XXX: doesn't belong here in the iWARP driver.
507  * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
508  *      set.  Is this a valid assumption for active open?
509  */
510 static int
511 set_tcpinfo(struct c4iw_ep *ep)
512 {
513 	struct socket *so = ep->com.so;
514 	struct inpcb *inp = sotoinpcb(so);
515 	struct tcpcb *tp;
516 	struct toepcb *toep;
517 	int rc = 0;
518 
519 	INP_WLOCK(inp);
520 	tp = intotcpcb(inp);
521 	if ((tp->t_flags & TF_TOE) == 0) {
522 		rc = EINVAL;
523 		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
524 		    __func__, so, ep);
525 		goto done;
526 	}
527 	toep = TOEPCB(so);
528 
529 	ep->hwtid = toep->tid;
530 	ep->snd_seq = tp->snd_nxt;
531 	ep->rcv_seq = tp->rcv_nxt;
532 	ep->emss = max(tp->t_maxseg, 128);
533 done:
534 	INP_WUNLOCK(inp);
535 	return (rc);
536 }
537 
538 static int
539 get_ifnet_from_raddr(struct sockaddr_storage *raddr, struct ifnet **ifp)
540 {
541 	int err = 0;
542 
543 	if (raddr->ss_family == AF_INET) {
544 		struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr;
545 		struct nhop4_extended nh4 = {0};
546 
547 		err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, raddr4->sin_addr,
548 				NHR_REF, 0, &nh4);
549 		*ifp = nh4.nh_ifp;
550 		if (err)
551 			fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);
552 	} else {
553 		struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr;
554 		struct nhop6_extended nh6 = {0};
555 		struct in6_addr addr6;
556 		uint32_t scopeid;
557 
558 		memset(&addr6, 0, sizeof(addr6));
559 		in6_splitscope((struct in6_addr *)&raddr6->sin6_addr,
560 					&addr6, &scopeid);
561 		err = fib6_lookup_nh_ext(RT_DEFAULT_FIB, &addr6, scopeid,
562 				NHR_REF, 0, &nh6);
563 		*ifp = nh6.nh_ifp;
564 		if (err)
565 			fib6_free_nh_ext(RT_DEFAULT_FIB, &nh6);
566 	}
567 
568 	CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err);
569 	return err;
570 }
571 
572 static void
573 close_socket(struct socket *so)
574 {
575 	uninit_iwarp_socket(so);
576 	soclose(so);
577 }
578 
579 static void
580 process_peer_close(struct c4iw_ep *ep)
581 {
582 	struct c4iw_qp_attributes attrs = {0};
583 	int disconnect = 1;
584 	int release = 0;
585 
586 	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
587 	    ep->com.so, states[ep->com.state]);
588 
589 	switch (ep->com.state) {
590 
591 		case MPA_REQ_WAIT:
592 			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD",
593 			    __func__, ep);
594 			/* Fallthrough */
595 		case MPA_REQ_SENT:
596 			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
597 			    __func__, ep);
598 			ep->com.state = DEAD;
599 			connect_reply_upcall(ep, -ECONNABORTED);
600 
601 			disconnect = 0;
602 			STOP_EP_TIMER(ep);
603 			close_socket(ep->com.so);
604 			deref_cm_id(&ep->com);
605 			release = 1;
606 			break;
607 
608 		case MPA_REQ_RCVD:
609 
610 			/*
611 			 * We're gonna mark this puppy DEAD, but keep
612 			 * the reference on it until the ULP accepts or
613 			 * rejects the CR.
614 			 */
615 			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
616 			    __func__, ep);
617 			ep->com.state = CLOSING;
618 			break;
619 
620 		case MPA_REP_SENT:
621 			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
622 			    __func__, ep);
623 			ep->com.state = CLOSING;
624 			break;
625 
626 		case FPDU_MODE:
627 			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
628 			    __func__, ep);
629 			START_EP_TIMER(ep);
630 			ep->com.state = CLOSING;
631 			attrs.next_state = C4IW_QP_STATE_CLOSING;
632 			c4iw_modify_qp(ep->com.dev, ep->com.qp,
633 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
634 			peer_close_upcall(ep);
635 			break;
636 
637 		case ABORTING:
638 			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
639 			    __func__, ep);
640 			disconnect = 0;
641 			break;
642 
643 		case CLOSING:
644 			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
645 			    __func__, ep);
646 			ep->com.state = MORIBUND;
647 			disconnect = 0;
648 			break;
649 
650 		case MORIBUND:
651 			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
652 			    ep);
653 			STOP_EP_TIMER(ep);
654 			if (ep->com.cm_id && ep->com.qp) {
655 				attrs.next_state = C4IW_QP_STATE_IDLE;
656 				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
657 						C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
658 			}
659 			close_socket(ep->com.so);
660 			close_complete_upcall(ep, 0);
661 			ep->com.state = DEAD;
662 			release = 1;
663 			disconnect = 0;
664 			break;
665 
666 		case DEAD:
667 			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
668 			    __func__, ep);
669 			disconnect = 0;
670 			break;
671 
672 		default:
673 			panic("%s: ep %p state %d", __func__, ep,
674 			    ep->com.state);
675 			break;
676 	}
677 
678 
679 	if (disconnect) {
680 
681 		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
682 		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
683 	}
684 	if (release) {
685 
686 		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
687 		c4iw_put_ep(&ep->com);
688 	}
689 	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
690 	return;
691 }
692 
693 static void
694 process_conn_error(struct c4iw_ep *ep)
695 {
696 	struct c4iw_qp_attributes attrs = {0};
697 	int ret;
698 	int state;
699 
700 	state = ep->com.state;
701 	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
702 	    __func__, ep, ep->com.so, ep->com.so->so_error,
703 	    states[ep->com.state]);
704 
705 	switch (state) {
706 
707 		case MPA_REQ_WAIT:
708 			STOP_EP_TIMER(ep);
709 			c4iw_put_ep(&ep->parent_ep->com);
710 			break;
711 
712 		case MPA_REQ_SENT:
713 			STOP_EP_TIMER(ep);
714 			connect_reply_upcall(ep, -ECONNRESET);
715 			break;
716 
717 		case MPA_REP_SENT:
718 			ep->com.rpl_err = ECONNRESET;
719 			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
720 			break;
721 
722 		case MPA_REQ_RCVD:
723 			break;
724 
725 		case MORIBUND:
726 		case CLOSING:
727 			STOP_EP_TIMER(ep);
728 			/*FALLTHROUGH*/
729 		case FPDU_MODE:
730 
731 			if (ep->com.cm_id && ep->com.qp) {
732 
733 				attrs.next_state = C4IW_QP_STATE_ERROR;
734 				ret = c4iw_modify_qp(ep->com.qp->rhp,
735 					ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
736 					&attrs, 1);
737 				if (ret)
738 					log(LOG_ERR,
739 							"%s - qp <- error failed!\n",
740 							__func__);
741 			}
742 			peer_abort_upcall(ep);
743 			break;
744 
745 		case ABORTING:
746 			break;
747 
748 		case DEAD:
749 			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
750 			    __func__, ep->com.so->so_error);
751 			return;
752 
753 		default:
754 			panic("%s: ep %p state %d", __func__, ep, state);
755 			break;
756 	}
757 
758 	if (state != ABORTING) {
759 		close_socket(ep->com.so);
760 		ep->com.state = DEAD;
761 		c4iw_put_ep(&ep->com);
762 	}
763 	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
764 	return;
765 }
766 
767 static void
768 process_close_complete(struct c4iw_ep *ep)
769 {
770 	struct c4iw_qp_attributes attrs = {0};
771 	int release = 0;
772 
773 	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
774 	    ep->com.so, states[ep->com.state]);
775 
776 	/* The cm_id may be null if we failed to connect */
777 	set_bit(CLOSE_CON_RPL, &ep->com.history);
778 
779 	switch (ep->com.state) {
780 
781 		case CLOSING:
782 			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
783 			    __func__, ep);
784 			ep->com.state = MORIBUND;
785 			break;
786 
787 		case MORIBUND:
788 			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
789 			    ep);
790 			STOP_EP_TIMER(ep);
791 
792 			if ((ep->com.cm_id) && (ep->com.qp)) {
793 
794 				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
795 				    __func__, ep);
796 				attrs.next_state = C4IW_QP_STATE_IDLE;
797 				c4iw_modify_qp(ep->com.dev,
798 						ep->com.qp,
799 						C4IW_QP_ATTR_NEXT_STATE,
800 						&attrs, 1);
801 			}
802 
803 			close_socket(ep->com.so);
804 			close_complete_upcall(ep, 0);
805 			ep->com.state = DEAD;
806 			release = 1;
807 			break;
808 
809 		case ABORTING:
810 			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
811 			break;
812 
813 		case DEAD:
814 			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
815 			break;
816 		default:
817 			CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
818 					__func__, ep);
819 			panic("%s:pcc6 %p unknown ep state", __func__, ep);
820 			break;
821 	}
822 
823 	if (release) {
824 
825 		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
826 		release_ep_resources(ep);
827 	}
828 	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
829 	return;
830 }
831 
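/*
 * Enable TCP_NODELAY on every iWARP socket: MPA exchanges are small,
 * latency-sensitive messages that should not sit in the Nagle buffer.
 */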
832 static void
833 setiwsockopt(struct socket *so)
834 {
835 	int rc;
836 	struct sockopt sopt;
837 	int on = 1;
838 
839 	sopt.sopt_dir = SOPT_SET;
840 	sopt.sopt_level = IPPROTO_TCP;
841 	sopt.sopt_name = TCP_NODELAY;
842 	sopt.sopt_val = (caddr_t)&on;
843 	sopt.sopt_valsize = sizeof on;
844 	sopt.sopt_td = NULL;
845 	rc = sosetopt(so, &sopt);
846 	if (rc) {
847 		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
848 		    __func__, so, rc);
849 	}
850 }
851 
852 static void
853 init_iwarp_socket(struct socket *so, void *arg)
854 {
855 	if (SOLISTENING(so)) {
856 		SOLISTEN_LOCK(so);
857 		solisten_upcall_set(so, c4iw_so_upcall, arg);
858 		so->so_state |= SS_NBIO;
859 		SOLISTEN_UNLOCK(so);
860 	} else {
861 		SOCKBUF_LOCK(&so->so_rcv);
862 		soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
863 		so->so_state |= SS_NBIO;
864 		SOCKBUF_UNLOCK(&so->so_rcv);
865 	}
866 }
867 
868 static void
869 uninit_iwarp_socket(struct socket *so)
870 {
871 	if (SOLISTENING(so)) {
872 		SOLISTEN_LOCK(so);
873 		solisten_upcall_set(so, NULL, NULL);
874 		SOLISTEN_UNLOCK(so);
875 	} else {
876 		SOCKBUF_LOCK(&so->so_rcv);
877 		soupcall_clear(so, SO_RCV);
878 		SOCKBUF_UNLOCK(&so->so_rcv);
879 	}
880 }
881 
882 static void
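/*
 * process_mpa_reply()/process_mpa_request() return 0 (done, or message still
 * incomplete), 1 (caller must close the connection) or 2 (caller must abort);
 * see the comment above process_mpa_reply().  The tail of process_data()
 * maps that onto c4iw_ep_disconnect(ep, disconnect == 2, ...).
 */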
883 process_data(struct c4iw_ep *ep)
884 {
885 	int disconnect = 0;
886 
887 	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
888 	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));
889 
890 	switch (ep->com.state) {
891 	case MPA_REQ_SENT:
892 		disconnect = process_mpa_reply(ep);
893 		break;
894 	case MPA_REQ_WAIT:
895 		disconnect = process_mpa_request(ep);
896 		if (disconnect)
897 			/* Referenced in process_newconn() */
898 			c4iw_put_ep(&ep->parent_ep->com);
899 		break;
900 	default:
901 		if (sbused(&ep->com.so->so_rcv))
902 			log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
903 			    "state %d, so %p, so_state 0x%x, sbused %u\n",
904 			    __func__, ep, ep->com.state, ep->com.so,
905 			    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
906 		break;
907 	}
908 	if (disconnect)
909 		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
910 
911 }
912 
913 static void
914 process_connected(struct c4iw_ep *ep)
915 {
916 	struct socket *so = ep->com.so;
917 
918 	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
919 		if (send_mpa_req(ep))
920 			goto err;
921 	} else {
922 		connect_reply_upcall(ep, -so->so_error);
923 		goto err;
924 	}
925 	return;
926 err:
927 	close_socket(so);
928 	ep->com.state = DEAD;
929 	c4iw_put_ep(&ep->com);
930 	return;
931 }
932 
933 static inline int c4iw_zero_addr(struct sockaddr *addr)
934 {
935 	struct in6_addr *ip6;
936 
937 	if (addr->sa_family == AF_INET)
938 		return IN_ZERONET(
939 			ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
940 	else {
941 		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
942 		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
943 				ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
944 	}
945 }
946 
947 static inline int c4iw_loopback_addr(struct sockaddr *addr)
948 {
949 	if (addr->sa_family == AF_INET)
950 		return IN_LOOPBACK(
951 			ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
952 	else
953 		return IN6_IS_ADDR_LOOPBACK(
954 				&((struct sockaddr_in6 *) addr)->sin6_addr);
955 }
956 
957 static inline int c4iw_any_addr(struct sockaddr *addr)
958 {
959 	return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr);
960 }
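/*
 * "any" here covers both wildcard and loopback listen addresses, i.e. the
 * cases where process_newconn() cannot use master_lep directly and must call
 * find_real_listen_ep() to locate the lep for the receiving interface.
 */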
961 
962 static void
963 process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
964 {
965 	struct c4iw_listen_ep *real_lep = NULL;
966 	struct c4iw_ep *new_ep = NULL;
967 	struct sockaddr_in *remote = NULL;
968 	int ret = 0;
969 
970 	MPASS(new_so != NULL);
971 
972 	if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr)) {
973 		/* Here we need to find the 'real_lep' that belongs to the
974 		 * incoming socket's network interface, such that the newly
975 		 * created 'ep' can be attached to the real 'lep'.
976 		 */
977 		real_lep = find_real_listen_ep(master_lep, new_so);
978 		if (real_lep == NULL) {
979 			CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
980 					"ep for sock: %p", __func__, new_so);
981 			log(LOG_ERR,"%s: Could not find the real listen ep for "
982 					"sock: %p\n", __func__, new_so);
983 			/* FIXME: properly free 'new_so' in the failure case.
984 			 * Use of soabort() and soclose() is not legal
985 			 * here (before soaccept()).
986 			 */
987 			return;
988 		}
989 	} else /* for Non-Wildcard address, master_lep is always the real_lep */
990 		real_lep = master_lep;
991 
992 	new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);
993 
994 	CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
995 	    "listening so %p, new so %p", __func__, master_lep, real_lep,
996 	    new_ep, master_lep->com.so, new_so);
997 
998 	new_ep->com.dev = real_lep->com.dev;
999 	new_ep->com.so = new_so;
1000 	new_ep->com.cm_id = NULL;
1001 	new_ep->com.thread = real_lep->com.thread;
1002 	new_ep->parent_ep = real_lep;
1003 
1004 	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
1005 	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
1006 	c4iw_get_ep(&real_lep->com);
1007 	init_timer(&new_ep->timer);
1008 	new_ep->com.state = MPA_REQ_WAIT;
1009 	START_EP_TIMER(new_ep);
1010 
1011 	setiwsockopt(new_so);
1012 	ret = soaccept(new_so, (struct sockaddr **)&remote);
1013 	if (ret != 0) {
1014 		CTR4(KTR_IW_CXGBE,
1015 				"%s:listen sock:%p, new sock:%p, ret:%d\n",
1016 				__func__, master_lep->com.so, new_so, ret);
1017 		if (remote != NULL)
1018 			free(remote, M_SONAME);
1019 		uninit_iwarp_socket(new_so);
1020 		soclose(new_so);
1021 		c4iw_put_ep(&new_ep->com);
1022 		c4iw_put_ep(&real_lep->com);
1023 		return;
1024 	}
1025 	free(remote, M_SONAME);
1026 
1027 	/* An MPA request might already be queued up on the socket, so we
1028 	 * initialize the socket/upcall handler under the ep lock to prevent
1029 	 * another thread (via process_req()) from processing it simultaneously.
1030 	 */
1031 	c4iw_get_ep(&new_ep->com); /* Dropped at the end below; this keeps the
1032 				      ep from being freed before it is unlocked. */
1033 	mutex_lock(&new_ep->com.mutex);
1034 	init_iwarp_socket(new_so, &new_ep->com);
1035 
1036 	ret = process_mpa_request(new_ep);
1037 	if (ret) {
1038 		/* ABORT */
1039 		c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
1040 		c4iw_put_ep(&real_lep->com);
1041 	}
1042 	mutex_unlock(&new_ep->com.mutex);
1043 	c4iw_put_ep(&new_ep->com);
1044 	return;
1045 }
1046 
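/*
 * Queue an ep for process_req().  A NULL entry.tqe_prev means the epc is not
 * yet on req_list; the reference taken here is dropped by process_req() once
 * the queued events have been handled.
 */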
1047 static int
1048 add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
1049 {
1050 	unsigned long flag;
1051 
1052 	spin_lock_irqsave(&req_lock, flag);
1053 	if (ep && ep->com.so) {
1054 		ep->com.ep_events |= new_ep_event;
1055 		if (!ep->com.entry.tqe_prev) {
1056 			c4iw_get_ep(&ep->com);
1057 			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
1058 			queue_work(c4iw_taskq, &c4iw_task);
1059 		}
1060 	}
1061 	spin_unlock_irqrestore(&req_lock, flag);
1062 
1063 	return (0);
1064 }
1065 
1066 static int
1067 c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
1068 {
1069 	struct c4iw_ep *ep = arg;
1070 
1071 	CTR6(KTR_IW_CXGBE,
1072 	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
1073 	    __func__, so, so->so_state, ep, states[ep->com.state],
1074 	    ep->com.entry.tqe_prev);
1075 
1076 	MPASS(ep->com.so == so);
1077 	/*
1078 	 * Wake up any threads waiting in rdma_init()/rdma_fini(),
1079 	 * with locks held.
1080 	 */
1081 	if (so->so_error)
1082 		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1083 	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);
1084 
1085 	return (SU_OK);
1086 }
1087 
1088 
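/*
 * CPL_RDMA_TERMINATE handler.  This runs in the driver's rx path, so it only
 * resolves the tid to an ep (via the socket upcall argument) and defers the
 * real work to process_terminate() on the taskqueue.
 */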
1089 static int
1090 terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1091 {
1092 	struct adapter *sc = iq->adapter;
1093 	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
1094 	unsigned int tid = GET_TID(cpl);
1095 	struct toepcb *toep = lookup_tid(sc, tid);
1096 	struct socket *so;
1097 	struct c4iw_ep *ep;
1098 
1099 	INP_WLOCK(toep->inp);
1100 	so = inp_inpcbtosocket(toep->inp);
1101 	ep = so->so_rcv.sb_upcallarg;
1102 	INP_WUNLOCK(toep->inp);
1103 
1104 	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
1105 	add_ep_to_req_list(ep, C4IW_EVENT_TERM);
1106 
1107 	return 0;
1108 }
1109 
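/*
 * Dispatch a socket upcall to the handler for the current ep state: new
 * connections for listeners, then (in order) connection errors, peer close,
 * full disconnect, and finally inbound MPA/streaming data.
 */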
1110 static void
1111 process_socket_event(struct c4iw_ep *ep)
1112 {
1113 	int state = ep->com.state;
1114 	struct socket *so = ep->com.so;
1115 
1116 	if (ep->com.state == DEAD) {
1117 		CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
1118 			"ep %p ep_state %s", __func__, ep, states[state]);
1119 		return;
1120 	}
1121 
1122 	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
1123 	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
1124 	    so->so_error, so->so_rcv.sb_state, ep, states[state]);
1125 
1126 	if (state == CONNECTING) {
1127 		process_connected(ep);
1128 		return;
1129 	}
1130 
1131 	if (state == LISTEN) {
1132 		struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
1133 		struct socket *listen_so = so, *new_so = NULL;
1134 		int error = 0;
1135 
1136 		SOLISTEN_LOCK(listen_so);
1137 		do {
1138 			error = solisten_dequeue(listen_so, &new_so,
1139 						SOCK_NONBLOCK);
1140 			if (error) {
1141 				CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
1142 					"error %d", __func__, lep, listen_so,
1143 					error);
1144 				return;
1145 			}
1146 			process_newconn(lep, new_so);
1147 
1148 			/* solisten_dequeue() drops the lock before returning,
1149 			 * so reacquire it for sol_qlen and the next iteration.
1150 			 */
1151 			SOLISTEN_LOCK(listen_so);
1152 		} while (listen_so->sol_qlen);
1153 		SOLISTEN_UNLOCK(listen_so);
1154 
1155 		return;
1156 	}
1157 
1158 	/* connection error */
1159 	if (so->so_error) {
1160 		process_conn_error(ep);
1161 		return;
1162 	}
1163 
1164 	/* peer close */
1165 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
1166 		process_peer_close(ep);
1167 		/*
1168 		 * check whether socket disconnect event is pending before
1169 		 * returning. Fallthrough if yes.
1170 		 */
1171 		if (!(so->so_state & SS_ISDISCONNECTED))
1172 			return;
1173 	}
1174 
1175 	/* close complete */
1176 	if (so->so_state & SS_ISDISCONNECTED) {
1177 		process_close_complete(ep);
1178 		return;
1179 	}
1180 
1181 	/* rx data */
1182 	process_data(ep);
1183 }
1184 
1185 SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");
1186 
1187 static int dack_mode = 0;
1188 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
1189 		"Delayed ack mode (default = 0)");
1190 
1191 int c4iw_max_read_depth = 8;
1192 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
1193 		"Per-connection max ORD/IRD (default = 8)");
1194 
1195 static int enable_tcp_timestamps;
1196 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
1197 		"Enable tcp timestamps (default = 0)");
1198 
1199 static int enable_tcp_sack;
1200 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
1201 		"Enable tcp SACK (default = 0)");
1202 
1203 static int enable_tcp_window_scaling = 1;
1204 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
1205 		"Enable tcp window scaling (default = 1)");
1206 
1207 int c4iw_debug = 0;
1208 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
1209 		"Enable debug logging (default = 0)");
1210 
1211 static int peer2peer = 1;
1212 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
1213 		"Support peer2peer ULPs (default = 1)");
1214 
1215 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
1216 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
1217 		"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");
1218 
1219 static int ep_timeout_secs = 60;
1220 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
1221 		"CM Endpoint operation timeout in seconds (default = 60)");
1222 
1223 static int mpa_rev = 1;
1224 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
1225 		"MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
1226 
1227 static int markers_enabled;
1228 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
1229 		"Enable MPA MARKERS (default(0) = disabled)");
1230 
1231 static int crc_enabled = 1;
1232 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
1233 		"Enable MPA CRC (default(1) = enabled)");
1234 
1235 static int rcv_win = 256 * 1024;
1236 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
1237 		"TCP receive window in bytes (default = 256KB)");
1238 
1239 static int snd_win = 128 * 1024;
1240 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
1241 		"TCP send window in bytes (default = 128KB)");
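/*
 * All of the knobs above are CTLFLAG_RWTUN: they can be set as loader
 * tunables or changed at runtime, e.g. (illustrative):
 *
 *   sysctl hw.iw_cxgbe.rcv_win=524288
 *   sysctl hw.iw_cxgbe.mpa_rev=2
 */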
1242 
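/*
 * The TIMEOUT flag and the ep reference are paired: start_ep_timer() clears
 * TIMEOUT and takes a reference; stop_ep_timer() (or the timeout path via
 * process_timeout()) sets TIMEOUT and drops it.  stop_ep_timer() returns 1
 * if the timer had already been stopped or had fired, 0 otherwise.
 */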
1243 static void
1244 start_ep_timer(struct c4iw_ep *ep)
1245 {
1246 
1247 	if (timer_pending(&ep->timer)) {
1248 		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
1249 		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
1250 		    ep);
1251 		return;
1252 	}
1253 	clear_bit(TIMEOUT, &ep->com.flags);
1254 	c4iw_get_ep(&ep->com);
1255 	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
1256 	ep->timer.data = (unsigned long)ep;
1257 	ep->timer.function = ep_timeout;
1258 	add_timer(&ep->timer);
1259 }
1260 
1261 static int
1262 stop_ep_timer(struct c4iw_ep *ep)
1263 {
1264 
1265 	del_timer_sync(&ep->timer);
1266 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
1267 		c4iw_put_ep(&ep->com);
1268 		return 0;
1269 	}
1270 	return 1;
1271 }
1272 
1273 static void *
1274 alloc_ep(int size, gfp_t gfp)
1275 {
1276 	struct c4iw_ep_common *epc;
1277 
1278 	epc = kzalloc(size, gfp);
1279 	if (epc == NULL)
1280 		return (NULL);
1281 
1282 	kref_init(&epc->kref);
1283 	mutex_init(&epc->mutex);
1284 	c4iw_init_wr_wait(&epc->wr_wait);
1285 
1286 	return (epc);
1287 }
1288 
1289 void _c4iw_free_ep(struct kref *kref)
1290 {
1291 	struct c4iw_ep *ep;
1292 	struct c4iw_ep_common *epc;
1293 
1294 	ep = container_of(kref, struct c4iw_ep, com.kref);
1295 	epc = &ep->com;
1296 	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
1297 	    __func__, epc));
1298 	if (test_bit(QP_REFERENCED, &ep->com.flags))
1299 		deref_qp(ep);
1300 	CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx",
1301 	    __func__, ep, epc->history, epc->flags);
1302 	kfree(ep);
1303 }
1304 
1305 static void release_ep_resources(struct c4iw_ep *ep)
1306 {
1307 	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
1308 	set_bit(RELEASE_RESOURCES, &ep->com.flags);
1309 	c4iw_put_ep(&ep->com);
1310 	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
1311 }
1312 
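/*
 * Wire layout of the streaming-mode MPA request built below:
 *
 *   struct mpa_message           key/flags/revision/private_data_size
 *   [struct mpa_v2_conn_params]  MPA rev 2 only: ird/ord plus RTR bits
 *   [private data]               ep->plen bytes supplied by the ULP
 *
 * private_data_size counts the v2 params (if present) plus the private data.
 */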
1313 static int
1314 send_mpa_req(struct c4iw_ep *ep)
1315 {
1316 	int mpalen;
1317 	struct mpa_message *mpa;
1318 	struct mpa_v2_conn_params mpa_v2_params;
1319 	struct mbuf *m;
1320 	char mpa_rev_to_use = mpa_rev;
1321 	int err = 0;
1322 
1323 	if (ep->retry_with_mpa_v1)
1324 		mpa_rev_to_use = 1;
1325 	mpalen = sizeof(*mpa) + ep->plen;
1326 	if (mpa_rev_to_use == 2)
1327 		mpalen += sizeof(struct mpa_v2_conn_params);
1328 
1329 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1330 	if (mpa == NULL) {
1331 		err = -ENOMEM;
1332 		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",
1333 				__func__, ep, err);
1334 		goto err;
1335 	}
1336 
1337 	memset(mpa, 0, mpalen);
1338 	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
1339 	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
1340 		(markers_enabled ? MPA_MARKERS : 0) |
1341 		(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
1342 	mpa->private_data_size = htons(ep->plen);
1343 	mpa->revision = mpa_rev_to_use;
1344 
1345 	if (mpa_rev_to_use == 1) {
1346 		ep->tried_with_mpa_v1 = 1;
1347 		ep->retry_with_mpa_v1 = 0;
1348 	}
1349 
1350 	if (mpa_rev_to_use == 2) {
1351 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1352 					    sizeof(struct mpa_v2_conn_params));
1353 		mpa_v2_params.ird = htons((u16)ep->ird);
1354 		mpa_v2_params.ord = htons((u16)ep->ord);
1355 
1356 		if (peer2peer) {
1357 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1358 
1359 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1360 				mpa_v2_params.ord |=
1361 				    htons(MPA_V2_RDMA_WRITE_RTR);
1362 			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1363 				mpa_v2_params.ord |=
1364 					htons(MPA_V2_RDMA_READ_RTR);
1365 			}
1366 		}
1367 		memcpy(mpa->private_data, &mpa_v2_params,
1368 			sizeof(struct mpa_v2_conn_params));
1369 
1370 		if (ep->plen) {
1371 
1372 			memcpy(mpa->private_data +
1373 				sizeof(struct mpa_v2_conn_params),
1374 				ep->mpa_pkt + sizeof(*mpa), ep->plen);
1375 		}
1376 	} else {
1377 
1378 		if (ep->plen)
1379 			memcpy(mpa->private_data,
1380 					ep->mpa_pkt + sizeof(*mpa), ep->plen);
1381 		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
1382 	}
1383 
1384 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1385 	if (m == NULL) {
1386 		err = -ENOMEM;
1387 		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",
1388 				__func__, ep, err);
1389 		free(mpa, M_CXGBE);
1390 		goto err;
1391 	}
1392 	m_copyback(m, 0, mpalen, (void *)mpa);
1393 	free(mpa, M_CXGBE);
1394 
1395 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1396 			ep->com.thread);
1397 	if (err) {
1398 		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",
1399 				__func__, ep, err);
1400 		goto err;
1401 	}
1402 
1403 	START_EP_TIMER(ep);
1404 	ep->com.state = MPA_REQ_SENT;
1405 	ep->mpa_attr.initiator = 1;
1406 	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1407 	return 0;
1408 err:
1409 	connect_reply_upcall(ep, err);
1410 	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1411 	return err;
1412 }
1413 
1414 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
1415 {
1416 	int mpalen;
1417 	struct mpa_message *mpa;
1418 	struct mpa_v2_conn_params mpa_v2_params;
1419 	struct mbuf *m;
1420 	int err;
1421 
1422 	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
1423 	    ep->plen);
1424 
1425 	mpalen = sizeof(*mpa) + plen;
1426 
1427 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1428 
1429 		mpalen += sizeof(struct mpa_v2_conn_params);
1430 		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
1431 		    ep->mpa_attr.version, mpalen);
1432 	}
1433 
1434 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1435 	if (mpa == NULL)
1436 		return (-ENOMEM);
1437 
1438 	memset(mpa, 0, mpalen);
1439 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1440 	mpa->flags = MPA_REJECT;
1441 	mpa->revision = mpa_rev;
1442 	mpa->private_data_size = htons(plen);
1443 
1444 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1445 
1446 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1447 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1448 					    sizeof(struct mpa_v2_conn_params));
1449 		mpa_v2_params.ird = htons(((u16)ep->ird) |
1450 				(peer2peer ? MPA_V2_PEER2PEER_MODEL :
1451 				 0));
1452 		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
1453 					(p2p_type ==
1454 					 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
1455 					 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
1456 					 FW_RI_INIT_P2PTYPE_READ_REQ ?
1457 					 MPA_V2_RDMA_READ_RTR : 0) : 0));
1458 		memcpy(mpa->private_data, &mpa_v2_params,
1459 				sizeof(struct mpa_v2_conn_params));
1460 
1461 		if (ep->plen)
1462 			memcpy(mpa->private_data +
1463 				sizeof(struct mpa_v2_conn_params), pdata, plen);
1464 		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
1465 		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
1466 	} else
1467 		if (plen)
1468 			memcpy(mpa->private_data, pdata, plen);
1469 
1470 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1471 	if (m == NULL) {
1472 		free(mpa, M_CXGBE);
1473 		return (-ENOMEM);
1474 	}
1475 	m_copyback(m, 0, mpalen, (void *)mpa);
1476 	free(mpa, M_CXGBE);
1477 
1478 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
1479 	if (!err)
1480 		ep->snd_seq += mpalen;
1481 	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
1482 	return err;
1483 }
1484 
1485 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
1486 {
1487 	int mpalen;
1488 	struct mpa_message *mpa;
1489 	struct mbuf *m;
1490 	struct mpa_v2_conn_params mpa_v2_params;
1491 	int err;
1492 
1493 	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);
1494 
1495 	mpalen = sizeof(*mpa) + plen;
1496 
1497 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1498 
1499 		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
1500 		    ep->mpa_attr.version);
1501 		mpalen += sizeof(struct mpa_v2_conn_params);
1502 	}
1503 
1504 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1505 	if (mpa == NULL)
1506 		return (-ENOMEM);
1507 
1508 	memset(mpa, 0, sizeof(*mpa));
1509 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1510 	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
1511 		(markers_enabled ? MPA_MARKERS : 0);
1512 	mpa->revision = ep->mpa_attr.version;
1513 	mpa->private_data_size = htons(plen);
1514 
1515 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1516 
1517 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1518 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1519 					    sizeof(struct mpa_v2_conn_params));
1520 		mpa_v2_params.ird = htons((u16)ep->ird);
1521 		mpa_v2_params.ord = htons((u16)ep->ord);
1522 		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
1523 		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);
1524 
1525 		if (peer2peer && (ep->mpa_attr.p2p_type !=
1526 			FW_RI_INIT_P2PTYPE_DISABLED)) {
1527 
1528 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1529 
1530 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1531 
1532 				mpa_v2_params.ord |=
1533 					htons(MPA_V2_RDMA_WRITE_RTR);
1534 				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
1535 				    __func__, ep, p2p_type, mpa_v2_params.ird,
1536 				    mpa_v2_params.ord);
1537 			}
1538 			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1539 
1540 				mpa_v2_params.ord |=
1541 					htons(MPA_V2_RDMA_READ_RTR);
1542 				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
1543 				    __func__, ep, p2p_type, mpa_v2_params.ird,
1544 				    mpa_v2_params.ord);
1545 			}
1546 		}
1547 
1548 		memcpy(mpa->private_data, &mpa_v2_params,
1549 			sizeof(struct mpa_v2_conn_params));
1550 
1551 		if (ep->plen)
1552 			memcpy(mpa->private_data +
1553 				sizeof(struct mpa_v2_conn_params), pdata, plen);
1554 	} else
1555 		if (plen)
1556 			memcpy(mpa->private_data, pdata, plen);
1557 
1558 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1559 	if (m == NULL) {
1560 		free(mpa, M_CXGBE);
1561 		return (-ENOMEM);
1562 	}
1563 	m_copyback(m, 0, mpalen, (void *)mpa);
1564 	free(mpa, M_CXGBE);
1565 
1566 
1567 	ep->com.state = MPA_REP_SENT;
1568 	ep->snd_seq += mpalen;
1569 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1570 			ep->com.thread);
1571 	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
1572 	return err;
1573 }
1574 
1577 static void close_complete_upcall(struct c4iw_ep *ep, int status)
1578 {
1579 	struct iw_cm_event event;
1580 
1581 	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
1582 	memset(&event, 0, sizeof(event));
1583 	event.event = IW_CM_EVENT_CLOSE;
1584 	event.status = status;
1585 
1586 	if (ep->com.cm_id) {
1587 
1588 		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
1589 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1590 		deref_cm_id(&ep->com);
1591 		set_bit(CLOSE_UPCALL, &ep->com.history);
1592 	}
1593 	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
1594 }
1595 
1596 static int
1597 send_abort(struct c4iw_ep *ep)
1598 {
1599 	struct socket *so = ep->com.so;
1600 	struct sockopt sopt;
1601 	int rc;
1602 	struct linger l;
1603 
1604 	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
1605 	    states[ep->com.state], ep->hwtid);
1606 
1607 	l.l_onoff = 1;
1608 	l.l_linger = 0;
1609 
1610 	/* linger_time of 0 forces RST to be sent */
1611 	sopt.sopt_dir = SOPT_SET;
1612 	sopt.sopt_level = SOL_SOCKET;
1613 	sopt.sopt_name = SO_LINGER;
1614 	sopt.sopt_val = (caddr_t)&l;
1615 	sopt.sopt_valsize = sizeof l;
1616 	sopt.sopt_td = NULL;
1617 	rc = sosetopt(so, &sopt);
1618 	if (rc != 0) {
1619 		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
1620 		    __func__, so, rc);
1621 	}
1622 
1623 	uninit_iwarp_socket(so);
1624 	soclose(so);
1625 	set_bit(ABORT_CONN, &ep->com.history);
1626 
1627 	/*
1628 	 * TBD: iw_cxgbe driver should receive ABORT reply for every ABORT
1629 	 * request it has sent. But the current TOE driver is not propagating
1630 	 * this ABORT reply event (via do_abort_rpl) to iw_cxgbe. So as a work-
1631 	 * around, de-reference 'ep' here instead of doing it in the abort_rpl()
1632 	 * handler (not yet implemented) of the iw_cxgbe driver.
1633 	 */
1634 	release_ep_resources(ep);
1635 
1636 	return (0);
1637 }
1638 
1639 static void peer_close_upcall(struct c4iw_ep *ep)
1640 {
1641 	struct iw_cm_event event;
1642 
1643 	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
1644 	memset(&event, 0, sizeof(event));
1645 	event.event = IW_CM_EVENT_DISCONNECT;
1646 
1647 	if (ep->com.cm_id) {
1648 
1649 		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
1650 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1651 		set_bit(DISCONN_UPCALL, &ep->com.history);
1652 	}
1653 	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
1654 }
1655 
1656 static void peer_abort_upcall(struct c4iw_ep *ep)
1657 {
1658 	struct iw_cm_event event;
1659 
1660 	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
1661 	memset(&event, 0, sizeof(event));
1662 	event.event = IW_CM_EVENT_CLOSE;
1663 	event.status = -ECONNRESET;
1664 
1665 	if (ep->com.cm_id) {
1666 
1667 		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
1668 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1669 		deref_cm_id(&ep->com);
1670 		set_bit(ABORT_UPCALL, &ep->com.history);
1671 	}
1672 	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
1673 }
1674 
1675 static void connect_reply_upcall(struct c4iw_ep *ep, int status)
1676 {
1677 	struct iw_cm_event event;
1678 
1679 	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
1680 	memset(&event, 0, sizeof(event));
1681 	event.event = IW_CM_EVENT_CONNECT_REPLY;
1682 	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
1683 					-ECONNRESET : status;
1684 	event.local_addr = ep->com.local_addr;
1685 	event.remote_addr = ep->com.remote_addr;
1686 
1687 	if ((status == 0) || (status == -ECONNREFUSED)) {
1688 
1689 		if (!ep->tried_with_mpa_v1) {
1690 
1691 			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
1692 			/* this means MPA_v2 is used */
1693 			event.ord = ep->ird;
1694 			event.ird = ep->ord;
1695 			event.private_data_len = ep->plen -
1696 				sizeof(struct mpa_v2_conn_params);
1697 			event.private_data = ep->mpa_pkt +
1698 				sizeof(struct mpa_message) +
1699 				sizeof(struct mpa_v2_conn_params);
1700 		} else {
1701 
1702 			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
1703 			/* this means MPA_v1 is used */
1704 			event.ord = c4iw_max_read_depth;
1705 			event.ird = c4iw_max_read_depth;
1706 			event.private_data_len = ep->plen;
1707 			event.private_data = ep->mpa_pkt +
1708 				sizeof(struct mpa_message);
1709 		}
1710 	}
1711 
1712 	if (ep->com.cm_id) {
1713 
1714 		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
1715 		set_bit(CONN_RPL_UPCALL, &ep->com.history);
1716 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1717 	}
1718 
1719 	if (status == -ECONNABORTED) {
1720 
1721 		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
1722 		return;
1723 	}
1724 
1725 	if (status < 0) {
1726 
1727 		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
1728 		deref_cm_id(&ep->com);
1729 	}
1730 
1731 	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
1732 }
1733 
1734 static int connect_request_upcall(struct c4iw_ep *ep)
1735 {
1736 	struct iw_cm_event event;
1737 	int ret;
1738 
1739 	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
1740 	    ep->tried_with_mpa_v1);
1741 
1742 	memset(&event, 0, sizeof(event));
1743 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
1744 	event.local_addr = ep->com.local_addr;
1745 	event.remote_addr = ep->com.remote_addr;
1746 	event.provider_data = ep;
1747 
1748 	if (!ep->tried_with_mpa_v1) {
1749 		/* this means MPA_v2 is used */
1750 		event.ord = ep->ord;
1751 		event.ird = ep->ird;
1752 		event.private_data_len = ep->plen -
1753 			sizeof(struct mpa_v2_conn_params);
1754 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1755 			sizeof(struct mpa_v2_conn_params);
1756 	} else {
1757 
1758 		/* this means MPA_v1 is used. Send max supported */
1759 		event.ord = c4iw_max_read_depth;
1760 		event.ird = c4iw_max_read_depth;
1761 		event.private_data_len = ep->plen;
1762 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1763 	}
1764 
1765 	c4iw_get_ep(&ep->com);
1766 	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1767 	    &event);
1768 	if (ret) {
1769 		CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
1770 			" IWCM, err:%d", __func__, ep, ret);
1771 		c4iw_put_ep(&ep->com);
1772 	} else
1773 		/* Dereference parent_ep only in success case.
1774 		 * In case of failure, parent_ep is dereferenced by the caller
1775 		 * of process_mpa_request().
1776 		 */
1777 		c4iw_put_ep(&ep->parent_ep->com);
1778 
1779 	set_bit(CONNREQ_UPCALL, &ep->com.history);
1780 	return ret;
1781 }
1782 
1783 static void established_upcall(struct c4iw_ep *ep)
1784 {
1785 	struct iw_cm_event event;
1786 
1787 	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
1788 	memset(&event, 0, sizeof(event));
1789 	event.event = IW_CM_EVENT_ESTABLISHED;
1790 	event.ird = ep->ord;
1791 	event.ord = ep->ird;
1792 
1793 	if (ep->com.cm_id) {
1794 
1795 		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
1796 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1797 		set_bit(ESTAB_UPCALL, &ep->com.history);
1798 	}
1799 	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
1800 }
1801 
1802 
1803 #define RELAXED_IRD_NEGOTIATION 1
1804 
1805 /*
1806  * process_mpa_reply - process streaming mode MPA reply
1807  *
1808  * Returns:
1809  *
1810  * 0 upon success indicating a connect request was delivered to the ULP
1811  * or the mpa request is incomplete but valid so far.
1812  *
1813  * 1 if a failure requires the caller to close the connection.
1814  *
1815  * 2 if a failure requires the caller to abort the connection.
1816  */
1817 static int process_mpa_reply(struct c4iw_ep *ep)
1818 {
1819 	struct mpa_message *mpa;
1820 	struct mpa_v2_conn_params *mpa_v2_params;
1821 	u16 plen;
1822 	u16 resp_ird, resp_ord;
1823 	u8 rtr_mismatch = 0, insuff_ird = 0;
1824 	struct c4iw_qp_attributes attrs = {0};
1825 	enum c4iw_qp_attr_mask mask;
1826 	int err;
1827 	struct mbuf *top, *m;
1828 	int flags = MSG_DONTWAIT;
1829 	struct uio uio;
1830 	int disconnect = 0;
1831 
1832 	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);
1833 
1834 	/*
1835 	 * Stop mpa timer.  If it expired, then
1836 	 * we ignore the MPA reply.  process_timeout()
1837 	 * will abort the connection.
1838 	 */
1839 	if (STOP_EP_TIMER(ep))
1840 		return 0;
1841 
1842 	uio.uio_resid = 1000000;
1843 	uio.uio_td = ep->com.thread;
1844 	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
1845 
1846 	if (err) {
1847 
1848 		if (err == EWOULDBLOCK) {
1849 
1850 			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
1851 			START_EP_TIMER(ep);
1852 			return 0;
1853 		}
1854 		err = -err;
1855 		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
1856 		goto err;
1857 	}
1858 
1859 	if (ep->com.so->so_rcv.sb_mb) {
1860 
1861 		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
1862 		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
1863 		       __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
1864 	}
1865 
1866 	m = top;
1867 
1868 	do {
1869 
1870 		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
1871 		/*
1872 		 * If we get more than the supported amount of private data
1873 		 * then we must fail this connection.
1874 		 */
1875 		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
1876 
1877 			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
1878 			    ep->mpa_pkt_len + m->m_len);
1879 			err = (-EINVAL);
1880 			goto err_stop_timer;
1881 		}
1882 
1883 		/*
1884 		 * copy the new data into our accumulation buffer.
1885 		 */
1886 		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
1887 		ep->mpa_pkt_len += m->m_len;
1888 		if (!m->m_next)
1889 			m = m->m_nextpkt;
1890 		else
1891 			m = m->m_next;
1892 	} while (m);
1893 
1894 	m_freem(top);
1895 	/*
1896 	 * if we don't even have the mpa message, then bail.
1897 	 */
1898 	if (ep->mpa_pkt_len < sizeof(*mpa)) {
1899 		return 0;
1900 	}
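	/*
	 * Wire format accumulated in ep->mpa_pkt (MPA v2 shown; v1 omits
	 * the conn params):
	 *   struct mpa_message | struct mpa_v2_conn_params | private data
	 * plen counts everything after the MPA header.
	 */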
1901 	mpa = (struct mpa_message *) ep->mpa_pkt;
1902 
1903 	/* Validate MPA header. */
1904 	if (mpa->revision > mpa_rev) {
1905 
1906 		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
1907 		    mpa->revision, mpa_rev);
1908 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
1909 				" Received = %d\n", __func__, mpa_rev, mpa->revision);
1910 		err = -EPROTO;
1911 		goto err_stop_timer;
1912 	}
1913 
1914 	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1915 
1916 		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
1917 		err = -EPROTO;
1918 		goto err_stop_timer;
1919 	}
1920 
1921 	plen = ntohs(mpa->private_data_size);
1922 
1923 	/*
1924 	 * Fail if there's too much private data.
1925 	 */
1926 	if (plen > MPA_MAX_PRIVATE_DATA) {
1927 
1928 		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
1929 		err = -EPROTO;
1930 		goto err_stop_timer;
1931 	}
1932 
1933 	/*
	 * Fail if plen does not account for the packet size.
1935 	 */
1936 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1937 
1938 		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
1942 	}
1943 
1944 	ep->plen = (u8) plen;
1945 
1946 	/*
1947 	 * If we don't have all the pdata yet, then bail.
1948 	 * We'll continue process when more data arrives.
1949 	 */
1950 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
1951 
1952 		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
1953 		return 0;
1954 	}
1955 
1956 	if (mpa->flags & MPA_REJECT) {
1957 
1958 		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
1959 		err = -ECONNREFUSED;
1960 		goto err_stop_timer;
1961 	}
1962 
1963 	/*
1964 	 * If we get here we have accumulated the entire mpa
1965 	 * start reply message including private data. And
1966 	 * the MPA header is valid.
1967 	 */
1968 	ep->com.state = FPDU_MODE;
1969 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1970 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
1971 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1972 	ep->mpa_attr.version = mpa->revision;
1973 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1974 
1975 	if (mpa->revision == 2) {
1976 
1977 		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
1978 		ep->mpa_attr.enhanced_rdma_conn =
1979 			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1980 
1981 		if (ep->mpa_attr.enhanced_rdma_conn) {
1982 
1983 			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
1984 			mpa_v2_params = (struct mpa_v2_conn_params *)
1985 				(ep->mpa_pkt + sizeof(*mpa));
1986 			resp_ird = ntohs(mpa_v2_params->ird) &
1987 				MPA_V2_IRD_ORD_MASK;
1988 			resp_ord = ntohs(mpa_v2_params->ord) &
1989 				MPA_V2_IRD_ORD_MASK;
1990 
1991 			/*
1992 			 * This is a double-check. Ideally, below checks are
1993 			 * not required since ird/ord stuff has been taken
1994 			 * care of in c4iw_accept_cr
1995 			 */
1996 			if (ep->ird < resp_ord) {
1997 				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
1998 				   ep->com.dev->rdev.adap->params.max_ordird_qp)
1999 					ep->ird = resp_ord;
2000 				else
2001 					insuff_ird = 1;
2002 			} else if (ep->ird > resp_ord) {
2003 				ep->ird = resp_ord;
2004 			}
2005 			if (ep->ord > resp_ird) {
2006 				if (RELAXED_IRD_NEGOTIATION)
2007 					ep->ord = resp_ird;
2008 				else
2009 					insuff_ird = 1;
2010 			}
2011 			if (insuff_ird) {
2012 				err = -ENOMEM;
2013 				ep->ird = resp_ord;
2014 				ep->ord = resp_ird;
2015 			}
2016 
2017 			if (ntohs(mpa_v2_params->ird) &
2018 				MPA_V2_PEER2PEER_MODEL) {
2019 
2020 				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
2021 				if (ntohs(mpa_v2_params->ord) &
2022 					MPA_V2_RDMA_WRITE_RTR) {
2023 
2024 					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
2025 					ep->mpa_attr.p2p_type =
2026 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2027 				}
2028 				else if (ntohs(mpa_v2_params->ord) &
2029 					MPA_V2_RDMA_READ_RTR) {
2030 
2031 					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
2032 					ep->mpa_attr.p2p_type =
2033 						FW_RI_INIT_P2PTYPE_READ_REQ;
2034 				}
2035 			}
2036 		}
2037 	} else {
2038 
2039 		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);
2040 
2041 		if (mpa->revision == 1) {
2042 
2043 			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);
2044 
2045 			if (peer2peer) {
2046 
2047 				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
2048 				ep->mpa_attr.p2p_type = p2p_type;
2049 			}
2050 		}
2051 	}
2052 
2053 	if (set_tcpinfo(ep)) {
2054 
2055 		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
2056 		printf("%s set_tcpinfo error\n", __func__);
2057 		err = -ECONNRESET;
2058 		goto err;
2059 	}
2060 
2061 	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
2062 	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
2063 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2064 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
2065 	    ep->mpa_attr.p2p_type);
2066 
2067 	/*
2068 	 * If responder's RTR does not match with that of initiator, assign
2069 	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
2070 	 * generated when moving QP to RTS state.
2071 	 * A TERM message will be sent after QP has moved to RTS state
2072 	 */
2073 	if ((ep->mpa_attr.version == 2) && peer2peer &&
2074 		(ep->mpa_attr.p2p_type != p2p_type)) {
2075 
2076 		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
2077 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2078 		rtr_mismatch = 1;
2079 	}
2080 
2081 
2082 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2083 	attrs.mpa_attr = ep->mpa_attr;
2084 	attrs.max_ird = ep->ird;
2085 	attrs.max_ord = ep->ord;
2086 	attrs.llp_stream_handle = ep;
2087 	attrs.next_state = C4IW_QP_STATE_RTS;
2088 
2089 	mask = C4IW_QP_ATTR_NEXT_STATE |
2090 		C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
2091 		C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
2092 
2093 	/* bind QP and TID with INIT_WR */
2094 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2095 
2096 	if (err) {
2097 
2098 		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
2099 		goto err;
2100 	}
2101 
2102 	/*
2103 	 * If responder's RTR requirement did not match with what initiator
2104 	 * supports, generate TERM message
2105 	 */
2106 	if (rtr_mismatch) {
2107 
2108 		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
2109 		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
2110 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2111 		attrs.ecode = MPA_NOMATCH_RTR;
2112 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2113 		attrs.send_term = 1;
2114 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2115 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2116 		err = -ENOMEM;
2117 		disconnect = 1;
2118 		goto out;
2119 	}
2120 
2121 	/*
2122 	 * Generate TERM if initiator IRD is not sufficient for responder
2123 	 * provided ORD. Currently, we do the same behaviour even when
2124 	 * responder provided IRD is also not sufficient as regards to
2125 	 * initiator ORD.
2126 	 */
2127 	if (insuff_ird) {
2128 
2129 		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
2130 		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
2131 				__func__);
2132 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2133 		attrs.ecode = MPA_INSUFF_IRD;
2134 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2135 		attrs.send_term = 1;
2136 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2137 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2138 		err = -ENOMEM;
2139 		disconnect = 1;
2140 		goto out;
2141 	}
2142 	goto out;
2143 err_stop_timer:
2144 	STOP_EP_TIMER(ep);
2145 err:
2146 	disconnect = 2;
2147 out:
2148 	connect_reply_upcall(ep, err);
2149 	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
2150 	return disconnect;
2151 }
2152 
2153 /*
2154  * process_mpa_request - process streaming mode MPA request
2155  *
2156  * Returns:
2157  *
2158  * 0 upon success indicating a connect request was delivered to the ULP
2159  * or the mpa request is incomplete but valid so far.
2160  *
2161  * 1 if a failure requires the caller to close the connection.
2162  *
2163  * 2 if a failure requires the caller to abort the connection.
2164  */
2165 static int
2166 process_mpa_request(struct c4iw_ep *ep)
2167 {
2168 	struct mpa_message *mpa;
2169 	struct mpa_v2_conn_params *mpa_v2_params;
2170 	u16 plen;
2171 	int flags = MSG_DONTWAIT;
2172 	int rc;
2173 	struct iovec iov;
2174 	struct uio uio;
2175 	enum c4iw_ep_state state = ep->com.state;
2176 
2177 	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);
2178 
2179 	if (state != MPA_REQ_WAIT)
2180 		return 0;
2181 
2182 	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
2183 	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2184 	uio.uio_iov = &iov;
2185 	uio.uio_iovcnt = 1;
2186 	uio.uio_offset = 0;
2187 	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2188 	uio.uio_segflg = UIO_SYSSPACE;
2189 	uio.uio_rw = UIO_READ;
2190 	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */
2191 
2192 	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
2193 	if (rc == EAGAIN)
2194 		return 0;
2195 	else if (rc)
2196 		goto err_stop_timer;
2197 
2198 	KASSERT(uio.uio_offset > 0, ("%s: sorecieve on so %p read no data",
2199 	    __func__, ep->com.so));
2200 	ep->mpa_pkt_len += uio.uio_offset;
2201 
2202 	/*
2203 	 * If we get more than the supported amount of private data then we must
2204 	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
2205 	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
2206 	 * byte is filled by the soreceive above.
2207 	 */
2208 
2209 	/* Don't even have the MPA message.  Wait for more data to arrive. */
2210 	if (ep->mpa_pkt_len < sizeof(*mpa))
2211 		return 0;
2212 	mpa = (struct mpa_message *) ep->mpa_pkt;
2213 
2214 	/*
2215 	 * Validate MPA Header.
2216 	 */
2217 	if (mpa->revision > mpa_rev) {
2218 		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
2219 		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
2220 		goto err_stop_timer;
2221 	}
2222 
2223 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
2224 		goto err_stop_timer;
2225 
2226 	/*
2227 	 * Fail if there's too much private data.
2228 	 */
2229 	plen = ntohs(mpa->private_data_size);
2230 	if (plen > MPA_MAX_PRIVATE_DATA)
2231 		goto err_stop_timer;
2232 
2233 	/*
	 * Fail if plen does not account for the packet size.
2235 	 */
2236 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
2237 		goto err_stop_timer;
2238 
2239 	ep->plen = (u8) plen;
2240 
2241 	/*
2242 	 * If we don't have all the pdata yet, then bail.
2243 	 */
2244 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
2245 		return 0;
2246 
2247 	/*
2248 	 * If we get here we have accumulated the entire mpa
2249 	 * start reply message including private data.
2250 	 */
2251 	ep->mpa_attr.initiator = 0;
2252 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
2253 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
2254 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2255 	ep->mpa_attr.version = mpa->revision;
2256 	if (mpa->revision == 1)
2257 		ep->tried_with_mpa_v1 = 1;
2258 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2259 
2260 	if (mpa->revision == 2) {
2261 		ep->mpa_attr.enhanced_rdma_conn =
2262 		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2263 		if (ep->mpa_attr.enhanced_rdma_conn) {
2264 			mpa_v2_params = (struct mpa_v2_conn_params *)
2265 				(ep->mpa_pkt + sizeof(*mpa));
2266 			ep->ird = ntohs(mpa_v2_params->ird) &
2267 				MPA_V2_IRD_ORD_MASK;
2268 			ep->ird = min_t(u32, ep->ird,
2269 					cur_max_read_depth(ep->com.dev));
2270 			ep->ord = ntohs(mpa_v2_params->ord) &
2271 				MPA_V2_IRD_ORD_MASK;
2272 			ep->ord = min_t(u32, ep->ord,
2273 					cur_max_read_depth(ep->com.dev));
			CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u",
2275 				 __func__, ep->ird, ep->ord);
2276 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
2277 				if (peer2peer) {
2278 					if (ntohs(mpa_v2_params->ord) &
2279 							MPA_V2_RDMA_WRITE_RTR)
2280 						ep->mpa_attr.p2p_type =
2281 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2282 					else if (ntohs(mpa_v2_params->ord) &
2283 							MPA_V2_RDMA_READ_RTR)
2284 						ep->mpa_attr.p2p_type =
2285 						FW_RI_INIT_P2PTYPE_READ_REQ;
2286 				}
2287 		}
2288 	} else if (mpa->revision == 1 && peer2peer)
2289 		ep->mpa_attr.p2p_type = p2p_type;
2290 
2291 	if (set_tcpinfo(ep))
2292 		goto err_stop_timer;
2293 
2294 	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
2295 	    "xmit_marker_enabled = %d, version = %d", __func__,
2296 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2297 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
2298 
2299 	ep->com.state = MPA_REQ_RCVD;
2300 	STOP_EP_TIMER(ep);
2301 
2302 	/* drive upcall */
2303 	if (ep->parent_ep->com.state != DEAD)
2304 		if (connect_request_upcall(ep))
2305 			goto err_out;
2306 	return 0;
2307 
2308 err_stop_timer:
2309 	STOP_EP_TIMER(ep);
2310 err_out:
2311 	return 2;
2312 }
2313 
/*
 * c4iw_reject_cr - reject a pending connect request from the ULP.  Sends
 * an MPA reject with the provided private data (or just aborts when MPA
 * is disabled, i.e. mpa_rev == 0), then disconnects the endpoint.
 */
2319 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2320 {
2321 	int err;
2322 	struct c4iw_ep *ep = to_ep(cm_id);
2323 	int abort = 0;
2324 
2325 	mutex_lock(&ep->com.mutex);
2326 	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
2327 
	if (ep->com.state != MPA_REQ_RCVD) {
2330 
2331 		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
2332 		mutex_unlock(&ep->com.mutex);
2333 		c4iw_put_ep(&ep->com);
2334 		return -ECONNRESET;
2335 	}
2336 	set_bit(ULP_REJECT, &ep->com.history);
2337 
2338 	if (mpa_rev == 0) {
2339 
2340 		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
2341 		abort = 1;
2342 	}
2343 	else {
2344 
2345 		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
2346 		abort = send_mpa_reject(ep, pdata, pdata_len);
2347 	}
2348 	STOP_EP_TIMER(ep);
2349 	err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2350 	mutex_unlock(&ep->com.mutex);
2351 	c4iw_put_ep(&ep->com);
2352 	CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
2353 	return 0;
2354 }
2355 
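/*
 * c4iw_accept_cr - accept a pending connect request from the ULP.
 * Validates the ep state and the requested ird/ord, renegotiates ird/ord
 * for MPA v2 enhanced connections, binds the QP to the ep and moves it to
 * RTS, sends the MPA reply, and finally posts the ESTABLISHED upcall.
 */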
2356 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2357 {
2358 	int err;
2359 	struct c4iw_qp_attributes attrs = {0};
2360 	enum c4iw_qp_attr_mask mask;
2361 	struct c4iw_ep *ep = to_ep(cm_id);
2362 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2363 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2364 	int abort = 0;
2365 
2366 	mutex_lock(&ep->com.mutex);
2367 	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
2368 
	if (ep->com.state != MPA_REQ_RCVD) {
2371 
2372 		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
2373 		err = -ECONNRESET;
2374 		goto err_out;
2375 	}
2376 
2377 	BUG_ON(!qp);
2378 
2379 	set_bit(ULP_ACCEPT, &ep->com.history);
2380 
2381 	if ((conn_param->ord > c4iw_max_read_depth) ||
2382 		(conn_param->ird > c4iw_max_read_depth)) {
2383 
2384 		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
2385 		err = -EINVAL;
2386 		goto err_abort;
2387 	}
2388 
2389 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2390 
2391 		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
2392 
2393 		if (conn_param->ord > ep->ird) {
2394 			if (RELAXED_IRD_NEGOTIATION) {
2395 				conn_param->ord = ep->ird;
2396 			} else {
2397 				ep->ird = conn_param->ird;
2398 				ep->ord = conn_param->ord;
2399 				send_mpa_reject(ep, conn_param->private_data,
2400 						conn_param->private_data_len);
2401 				err = -ENOMEM;
2402 				goto err_abort;
2403 			}
2404 		}
2405 		if (conn_param->ird < ep->ord) {
2406 			if (RELAXED_IRD_NEGOTIATION &&
2407 			    ep->ord <= h->rdev.adap->params.max_ordird_qp) {
2408 				conn_param->ird = ep->ord;
2409 			} else {
2410 				err = -ENOMEM;
2411 				goto err_abort;
2412 			}
2413 		}
2414 	}
2415 	ep->ird = conn_param->ird;
2416 	ep->ord = conn_param->ord;
2417 
2418 	if (ep->mpa_attr.version == 1) {
2419 		if (peer2peer && ep->ird == 0)
2420 			ep->ird = 1;
2421 	} else {
2422 		if (peer2peer &&
2423 		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
2424 		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
2425 			ep->ird = 1;
2426 	}
2427 
2428 	CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d\n", __func__, __LINE__,
2429 			ep->ird, ep->ord);
2430 
2431 	ep->com.cm_id = cm_id;
2432 	ref_cm_id(&ep->com);
2433 	ep->com.qp = qp;
2434 	ref_qp(ep);
2435 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2436 
2437 	/* bind QP to EP and move to RTS */
2438 	attrs.mpa_attr = ep->mpa_attr;
2439 	attrs.max_ird = ep->ird;
2440 	attrs.max_ord = ep->ord;
2441 	attrs.llp_stream_handle = ep;
2442 	attrs.next_state = C4IW_QP_STATE_RTS;
2443 
2444 	/* bind QP and TID with INIT_WR */
2445 	mask = C4IW_QP_ATTR_NEXT_STATE |
2446 		C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2447 		C4IW_QP_ATTR_MPA_ATTR |
2448 		C4IW_QP_ATTR_MAX_IRD |
2449 		C4IW_QP_ATTR_MAX_ORD;
2450 
2451 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2452 	if (err) {
2453 		CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err);
		goto err_deref_cm_id;
2455 	}
2456 
2457 	err = send_mpa_reply(ep, conn_param->private_data,
2458 			conn_param->private_data_len);
2459 	if (err) {
2460 		CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err);
		goto err_deref_cm_id;
2462 	}
2463 
2464 	ep->com.state = FPDU_MODE;
2465 	established_upcall(ep);
2466 	mutex_unlock(&ep->com.mutex);
2467 	c4iw_put_ep(&ep->com);
2468 	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2469 	return 0;
err_deref_cm_id:
2471 	deref_cm_id(&ep->com);
2472 err_abort:
2473 	abort = 1;
2474 err_out:
2475 	if (abort)
2476 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2477 	mutex_unlock(&ep->com.mutex);
2478 	c4iw_put_ep(&ep->com);
2479 	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2480 	return err;
2481 }
2482 
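/*
 * c4iw_sock_create - create a kernel TCP socket and bind it to laddr.
 * sock_getname() then refreshes laddr with the address actually bound,
 * e.g. to pick up a kernel-assigned ephemeral port.
 */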
2483 static int
2484 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
2485 {
2486 	int ret;
2487 	int size;
2488 	struct socket *sock = NULL;
2489 
2490 	ret = sock_create_kern(laddr->ss_family,
2491 			SOCK_STREAM, IPPROTO_TCP, &sock);
2492 	if (ret) {
2493 		CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d",
2494 				__func__, ret);
2495 		return ret;
2496 	}
2497 
2498 	ret = sobind(sock, (struct sockaddr *)laddr, curthread);
2499 	if (ret) {
2500 		CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %p",
2501 				__func__, ret);
2502 		sock_release(sock);
2503 		return ret;
2504 	}
2505 
2506 	size = laddr->ss_family == AF_INET6 ?
2507 		sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in);
2508 	ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0);
2509 	if (ret) {
2510 		CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %p",
2511 				__func__, ret);
2512 		sock_release(sock);
2513 		return ret;
2514 	}
2515 
2516 	*so = sock;
2517 	return 0;
2518 }
2519 
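/*
 * c4iw_connect - active open.  Allocates an ep, resolves the egress
 * interface (which must be TOE-capable), creates and connects a kernel
 * socket, and lets the socket upcalls drive the MPA exchange from there.
 * Returns -errno on failure.
 */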
2520 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2521 {
2522 	int err = 0;
2523 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2524 	struct c4iw_ep *ep = NULL;
2525 	struct ifnet    *nh_ifp;        /* Logical egress interface */
2526 
2527 	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2528 
2529 
2530 	if ((conn_param->ord > c4iw_max_read_depth) ||
2531 		(conn_param->ird > c4iw_max_read_depth)) {
2532 
2533 		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2534 		err = -EINVAL;
2535 		goto out;
2536 	}
2537 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2538 
2539 	init_timer(&ep->timer);
2540 	ep->plen = conn_param->private_data_len;
2541 
2542 	if (ep->plen) {
2543 
2544 		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2545 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2546 				conn_param->private_data, ep->plen);
2547 	}
2548 	ep->ird = conn_param->ird;
2549 	ep->ord = conn_param->ord;
2550 
2551 	if (peer2peer && ep->ord == 0) {
2552 
2553 		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2554 		ep->ord = 1;
2555 	}
2556 
2557 	ep->com.dev = dev;
2558 	ep->com.cm_id = cm_id;
2559 	ref_cm_id(&ep->com);
2560 	ep->com.qp = get_qhp(dev, conn_param->qpn);
2561 
2562 	if (!ep->com.qp) {
2563 
2564 		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2565 		err = -EINVAL;
2566 		goto fail;
2567 	}
2568 	ref_qp(ep);
2569 	ep->com.thread = curthread;
2570 
2571 	err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp);
2572 	if (err) {
2573 
2574 		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2575 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail;
2578 	}
2579 
2580 	if (!(nh_ifp->if_capenable & IFCAP_TOE) ||
2581 	    TOEDEV(nh_ifp) == NULL) {
2582 		err = -ENOPROTOOPT;
2583 		goto fail;
2584 	}
2585 	ep->com.state = CONNECTING;
2586 	ep->tos = 0;
2587 	ep->com.local_addr = cm_id->local_addr;
2588 	ep->com.remote_addr = cm_id->remote_addr;
2589 
2590 	err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
2591 	if (err)
2592 		goto fail;
2593 
2594 	setiwsockopt(ep->com.so);
2595 	err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2596 		ep->com.thread);
	if (err)
		goto fail_free_so;
	init_iwarp_socket(ep->com.so, &ep->com);
	goto out;
2602 
2603 fail_free_so:
2604 	sock_release(ep->com.so);
2605 fail:
2606 	deref_cm_id(&ep->com);
2607 	c4iw_put_ep(&ep->com);
2608 	ep = NULL;
2609 out:
2610 	CTR2(KTR_IW_CXGBE, "%s:ccE ret:%d", __func__, err);
2611 	return err;
2612 }
2613 
2614 /*
2615  * iwcm->create_listen.  Returns -errno on failure.
2616  */
2617 int
2618 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2619 {
2620 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2621 	struct c4iw_listen_ep *lep = NULL;
2622 	struct listen_port_info *port_info = NULL;
2623 	int rc = 0;
2624 
2625 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %s", __func__, cm_id,
2626 			backlog);
2627 	lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
2628 	lep->com.cm_id = cm_id;
2629 	ref_cm_id(&lep->com);
2630 	lep->com.dev = dev;
2631 	lep->backlog = backlog;
2632 	lep->com.local_addr = cm_id->local_addr;
2633 	lep->com.thread = curthread;
2634 	cm_id->provider_data = lep;
2635 	lep->com.state = LISTEN;
2636 
	/* In case of INADDR_ANY, ibcore creates a cmid for each device and
	 * invokes the iw_cxgbe listener callbacks assuming that iw_cxgbe
	 * creates HW listeners for each device separately. But toecore
	 * expects a single solisten() call with the INADDR_ANY address to
	 * create HW listeners on all devices for a given port number. So the
	 * iw_cxgbe driver calls solisten() only once for INADDR_ANY (usually
	 * at the first listener callback from ibcore). All subsequent
	 * INADDR_ANY listener callbacks from ibcore (for the same port
	 * address) do not invoke solisten(), as the first listener callback
	 * has already created listeners for all other devices (via
	 * solisten).
	 */
2648 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2649 		port_info = add_ep_to_listenlist(lep);
2650 		/* skip solisten() if refcnt > 1, as the listeners were
		 * already created by the 'Master lep'
2652 		 */
2653 		if (port_info->refcnt > 1) {
2654 			/* As there will be only one listener socket for a TCP
2655 			 * port, copy Master lep's socket pointer to other lep's
2656 			 * that are belonging to same TCP port.
2657 			 */
2658 			struct c4iw_listen_ep *head_lep =
2659 					container_of(port_info->lep_list.next,
2660 					struct c4iw_listen_ep, listen_ep_list);
			lep->com.so = head_lep->com.so;
2662 			goto out;
2663 		}
2664 	}
2665 	rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so);
2666 	if (rc) {
2667 		CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d",
2668 				__func__, rc);
2669 		goto fail;
2670 	}
2671 
2672 	rc = solisten(lep->com.so, backlog, curthread);
2673 	if (rc) {
2674 		CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d",
2675 				__func__, lep->com.so, rc);
2676 		goto fail_free_so;
2677 	}
2678 	init_iwarp_socket(lep->com.so, &lep->com);
2679 out:
2680 	return 0;
2681 
2682 fail_free_so:
2683 	sock_release(lep->com.so);
2684 fail:
2685 	if (port_info)
2686 		rem_ep_from_listenlist(lep);
2687 	deref_cm_id(&lep->com);
2688 	c4iw_put_ep(&lep->com);
2689 	return rc;
2690 }
2691 
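/*
 * iwcm->destroy_listen.  For a wildcard (INADDR_ANY) listener the socket
 * is shared, so it is closed only when the last lep for the port is
 * removed from the listen list.
 */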
2692 int
2693 c4iw_destroy_listen(struct iw_cm_id *cm_id)
2694 {
2695 	struct c4iw_listen_ep *lep = to_listen_ep(cm_id);
2696 
2697 	mutex_lock(&lep->com.mutex);
2698 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id,
2699 	    states[lep->com.state]);
2700 
2701 	lep->com.state = DEAD;
2702 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2703 		/* if no refcount then close listen socket */
2704 		if (!rem_ep_from_listenlist(lep))
2705 			close_socket(lep->com.so);
2706 	} else
2707 		close_socket(lep->com.so);
2708 	deref_cm_id(&lep->com);
2709 	mutex_unlock(&lep->com.mutex);
2710 	c4iw_put_ep(&lep->com);
2711 	return 0;
2712 }
2713 
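/* Locked wrapper around c4iw_ep_disconnect(). */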
2714 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2715 {
2716 	int ret;
2717 	mutex_lock(&ep->com.mutex);
2718 	ret = c4iw_ep_disconnect(ep, abrupt, gfp);
2719 	mutex_unlock(&ep->com.mutex);
2720 	return ret;
2721 }
2722 
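/*
 * c4iw_ep_disconnect - begin connection teardown.  An abrupt disconnect
 * sends an abort; a graceful one starts the ep timer and initiates a FIN
 * via sodisconnect().  Returns nonzero only when a fatal error forces the
 * ep straight to DEAD.
 */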
2723 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2724 {
2725 	int ret = 0;
2726 	int close = 0;
2727 	int fatal = 0;
2728 	struct c4iw_rdev *rdev;
2729 
2730 
2731 	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2732 
2733 	rdev = &ep->com.dev->rdev;
2734 
2735 	if (c4iw_fatal_error(rdev)) {
2736 
2737 		CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
2738 		fatal = 1;
2739 		close_complete_upcall(ep, -ECONNRESET);
2740 		send_abort(ep);
2741 		ep->com.state = DEAD;
2742 	}
2743 	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2744 	    states[ep->com.state]);
2745 
2746 	/*
2747 	 * Ref the ep here in case we have fatal errors causing the
2748 	 * ep to be released and freed.
2749 	 */
2750 	c4iw_get_ep(&ep->com);
2751 	switch (ep->com.state) {
2752 
2753 		case MPA_REQ_WAIT:
2754 		case MPA_REQ_SENT:
2755 		case MPA_REQ_RCVD:
2756 		case MPA_REP_SENT:
2757 		case FPDU_MODE:
2758 			close = 1;
2759 			if (abrupt)
2760 				ep->com.state = ABORTING;
2761 			else {
2762 				ep->com.state = CLOSING;
2763 				START_EP_TIMER(ep);
2764 			}
2765 			set_bit(CLOSE_SENT, &ep->com.flags);
2766 			break;
2767 
2768 		case CLOSING:
2769 
2770 			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2771 
2772 				close = 1;
2773 				if (abrupt) {
2774 					STOP_EP_TIMER(ep);
2775 					ep->com.state = ABORTING;
2776 				} else
2777 					ep->com.state = MORIBUND;
2778 			}
2779 			break;
2780 
2781 		case MORIBUND:
2782 		case ABORTING:
2783 		case DEAD:
2784 			CTR3(KTR_IW_CXGBE,
2785 			    "%s ignoring disconnect ep %p state %u", __func__,
2786 			    ep, ep->com.state);
2787 			break;
2788 
2789 		default:
2790 			BUG();
2791 			break;
2792 	}
2793 
2794 
2795 	if (close) {
2796 
2797 		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2798 
2799 		if (abrupt) {
2800 
2801 			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2802 			set_bit(EP_DISC_ABORT, &ep->com.history);
2803 			close_complete_upcall(ep, -ECONNRESET);
2804 			ret = send_abort(ep);
2805 			if (ret)
2806 				fatal = 1;
2807 		} else {
2808 
2809 			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2810 			set_bit(EP_DISC_CLOSE, &ep->com.history);
2811 
2812 			if (!ep->parent_ep)
2813 				ep->com.state = MORIBUND;
2814 			sodisconnect(ep->com.so);
2815 		}
2816 
2817 	}
2818 
2819 	if (fatal) {
2820 		set_bit(EP_DISC_FAIL, &ep->com.history);
2821 		if (!abrupt) {
2822 			STOP_EP_TIMER(ep);
2823 			close_complete_upcall(ep, -EIO);
2824 		}
2825 		if (ep->com.qp) {
2826 			struct c4iw_qp_attributes attrs = {0};
2827 
2828 			attrs.next_state = C4IW_QP_STATE_ERROR;
2829 			ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
2830 						C4IW_QP_ATTR_NEXT_STATE,
2831 						&attrs, 1);
2832 			if (ret) {
2833 				CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep);
2834 				printf("%s - qp <- error failed!\n", __func__);
2835 			}
2836 		}
2837 		release_ep_resources(ep);
2838 		ep->com.state = DEAD;
2839 		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2840 	}
2841 	c4iw_put_ep(&ep->com);
2842 	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2843 	return ret;
2844 }
2845 
2846 #ifdef C4IW_EP_REDIRECT
2847 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2848 		struct l2t_entry *l2t)
2849 {
2850 	struct c4iw_ep *ep = ctx;
2851 
2852 	if (ep->dst != old)
2853 		return 0;
2854 
2855 	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2856 			l2t);
2857 	dst_hold(new);
2858 	cxgb4_l2t_release(ep->l2t);
2859 	ep->l2t = l2t;
2860 	dst_release(old);
2861 	ep->dst = new;
2862 	return 1;
2863 }
2864 #endif
2865 
2866 
2867 
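/*
 * ep_timeout - ep timer callback.  Runs in timer context where we must
 * not sleep, so it only queues a C4IW_EVENT_TIMEOUT for the taskqueue;
 * process_timeout() does the real work.
 */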
2868 static void ep_timeout(unsigned long arg)
2869 {
2870 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2871 
2872 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2873 
2874 		/*
2875 		 * Only insert if it is not already on the list.
2876 		 */
2877 		if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
2878 			CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2879 			add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
2880 		}
2881 	}
2882 }
2883 
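/*
 * fw6_wr_rpl - handle a FW6 work request reply.  Byte 1 of the first
 * message word carries the completion status and the second word is the
 * c4iw_wr_wait cookie; wake the waiter with -status (0 on success).
 */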
2884 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2885 {
2886 	uint64_t val = be64toh(*rpl);
2887 	int ret;
2888 	struct c4iw_wr_wait *wr_waitp;
2889 
2890 	ret = (int)((val >> 8) & 0xff);
2891 	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2892 	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2893 	if (wr_waitp)
2894 		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2895 
2896 	return (0);
2897 }
2898 
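/*
 * fw6_cqe_handler - handle a FW6 error CQE notification.  This may be
 * called from a context that must not sleep, so the CQE is copied into a
 * list entry and handed to process_err_cqes() via the taskqueue.
 */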
2899 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2900 {
2901 	struct cqe_list_entry *cle;
2902 	unsigned long flag;
2903 
	cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
	if (cle == NULL) {
		/* No memory; drop this error CQE notification. */
		return (0);
	}
	cle->rhp = sc->iwarp_softc;
2906 	cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
2907 
2908 	spin_lock_irqsave(&err_cqe_lock, flag);
2909 	list_add_tail(&cle->entry, &err_cqe_list);
2910 	queue_work(c4iw_taskq, &c4iw_task);
2911 	spin_unlock_irqrestore(&err_cqe_lock, flag);
2912 
2913 	return (0);
2914 }
2915 
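/*
 * process_terminate - handle an incoming RDMA TERMINATE by moving the
 * associated QP (if any) to the TERMINATE state.
 */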
2916 static int
2917 process_terminate(struct c4iw_ep *ep)
2918 {
2919 	struct c4iw_qp_attributes attrs = {0};
2920 
2921 	CTR2(KTR_IW_CXGBE, "%s:tB %p %d", __func__, ep);
2922 
2923 	if (ep && ep->com.qp) {
2924 
2925 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
2926 				ep->hwtid, ep->com.qp->wq.sq.qid);
2927 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2928 		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
2929 				1);
2930 	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
				ep ? ep->hwtid : 0);
2933 	CTR2(KTR_IW_CXGBE, "%s:tE %p %d", __func__, ep);
2934 
2935 	return 0;
2936 }
2937 
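/*
 * Module init: register the CPL/FW6 handlers with the base cxgbe driver
 * and create the single-threaded taskqueue that serializes all deferred
 * CM work.
 */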
2938 int __init c4iw_cm_init(void)
2939 {
2940 
2941 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
2942 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
2943 	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
2944 	t4_register_an_handler(c4iw_ev_handler);
2945 
2946 	TAILQ_INIT(&req_list);
2947 	spin_lock_init(&req_lock);
2948 	INIT_LIST_HEAD(&err_cqe_list);
2949 	spin_lock_init(&err_cqe_lock);
2950 
2951 	INIT_WORK(&c4iw_task, process_req);
2952 
2953 	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
2954 	if (!c4iw_taskq)
2955 		return -ENOMEM;
2956 
2957 	return 0;
2958 }
2959 
2960 void __exit c4iw_cm_term(void)
2961 {
2962 	WARN_ON(!TAILQ_EMPTY(&req_list));
2963 	WARN_ON(!list_empty(&err_cqe_list));
2964 	flush_workqueue(c4iw_taskq);
2965 	destroy_workqueue(c4iw_taskq);
2966 
2967 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
2968 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
2969 	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
2970 	t4_register_an_handler(NULL);
2971 }
2972 #endif
2973