xref: /freebsd/sys/dev/cxgbe/iw_cxgbe/cm.c (revision 5e53a4f90f82c4345f277dd87cc9292f26e04a29)
1 /*
2  * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *	  copyright notice, this list of conditions and the following
16  *	  disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *	  copyright notice, this list of conditions and the following
20  *	  disclaimer in the documentation and/or other materials
21  *	  provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 
35 #include "opt_inet.h"
36 
37 #ifdef TCP_OFFLOAD
38 #include <sys/types.h>
39 #include <sys/malloc.h>
40 #include <sys/socket.h>
41 #include <sys/socketvar.h>
42 #include <sys/sockio.h>
43 #include <sys/taskqueue.h>
44 #include <netinet/in.h>
45 #include <net/route.h>
46 
47 #include <netinet/in_systm.h>
48 #include <netinet/in_pcb.h>
49 #include <netinet6/in6_pcb.h>
50 #include <netinet/ip.h>
51 #include <netinet/in_fib.h>
52 #include <netinet6/in6_fib.h>
53 #include <netinet6/scope6_var.h>
54 #include <netinet/ip_var.h>
55 #include <netinet/tcp_var.h>
56 #include <netinet/tcp.h>
57 #include <netinet/tcpip.h>
58 
59 #include <netinet/toecore.h>
60 
61 struct sge_iq;
62 struct rss_header;
63 struct cpl_set_tcb_rpl;
64 #include <linux/types.h>
65 #include "offload.h"
66 #include "tom/t4_tom.h"
67 
68 #define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))
69 
70 #include "iw_cxgbe.h"
71 #include <linux/module.h>
72 #include <linux/workqueue.h>
73 #include <linux/notifier.h>
74 #include <linux/inetdevice.h>
75 #include <linux/if_vlan.h>
76 #include <net/netevent.h>
77 
78 static spinlock_t req_lock;
79 static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
80 static struct work_struct c4iw_task;
81 static struct workqueue_struct *c4iw_taskq;
82 static LIST_HEAD(err_cqe_list);
83 static spinlock_t err_cqe_lock;
84 static LIST_HEAD(listen_port_list);
85 static DEFINE_MUTEX(listen_port_mutex);
86 
87 static void process_req(struct work_struct *ctx);
88 static void start_ep_timer(struct c4iw_ep *ep);
89 static int stop_ep_timer(struct c4iw_ep *ep);
90 static int set_tcpinfo(struct c4iw_ep *ep);
91 static void process_timeout(struct c4iw_ep *ep);
92 static void process_err_cqes(void);
93 static void *alloc_ep(int size, gfp_t flags);
94 static void close_socket(struct socket *so);
95 static int send_mpa_req(struct c4iw_ep *ep);
96 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
97 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
98 static void close_complete_upcall(struct c4iw_ep *ep, int status);
99 static int send_abort(struct c4iw_ep *ep);
100 static void peer_close_upcall(struct c4iw_ep *ep);
101 static void peer_abort_upcall(struct c4iw_ep *ep);
102 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
103 static int connect_request_upcall(struct c4iw_ep *ep);
104 static void established_upcall(struct c4iw_ep *ep);
105 static int process_mpa_reply(struct c4iw_ep *ep);
106 static int process_mpa_request(struct c4iw_ep *ep);
107 static void process_peer_close(struct c4iw_ep *ep);
108 static void process_conn_error(struct c4iw_ep *ep);
109 static void process_close_complete(struct c4iw_ep *ep);
110 static void ep_timeout(unsigned long arg);
111 static void setiwsockopt(struct socket *so);
112 static void init_iwarp_socket(struct socket *so, void *arg);
113 static void uninit_iwarp_socket(struct socket *so);
114 static void process_data(struct c4iw_ep *ep);
115 static void process_connected(struct c4iw_ep *ep);
116 static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
117 static void process_socket_event(struct c4iw_ep *ep);
118 static void release_ep_resources(struct c4iw_ep *ep);
119 static int process_terminate(struct c4iw_ep *ep);
120 static int terminate(struct sge_iq *iq, const struct rss_header *rss,
121     struct mbuf *m);
122 static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
123 static struct listen_port_info *
124 add_ep_to_listenlist(struct c4iw_listen_ep *lep);
125 static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep);
126 static struct c4iw_listen_ep *
127 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so);
128 static int get_ifnet_from_raddr(struct sockaddr_storage *raddr,
129 		struct ifnet **ifp);
130 static void process_newconn(struct c4iw_listen_ep *master_lep,
131 		struct socket *new_so);
132 #define START_EP_TIMER(ep) \
133     do { \
134 	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
135 		__func__, __LINE__, (ep)); \
136 	    start_ep_timer(ep); \
137     } while (0)
138 
139 #define STOP_EP_TIMER(ep) \
140     ({ \
141 	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
142 		__func__, __LINE__, (ep)); \
143 	    stop_ep_timer(ep); \
144     })
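
/*
 * Note: unlike START_EP_TIMER(), STOP_EP_TIMER() expands to a GCC statement
 * expression, so its value is stop_ep_timer()'s return and callers can test
 * whether the timeout had already fired.  Illustrative sketch (this pattern
 * is used in process_mpa_reply()):
 *
 *	if (STOP_EP_TIMER(ep))
 *		return 0;
 */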
145 
146 #define GET_LOCAL_ADDR(pladdr, so) \
147 	do { \
148 		struct sockaddr_storage *__a = NULL; \
149 		struct  inpcb *__inp = sotoinpcb(so); \
150 		KASSERT(__inp != NULL, \
151 		   ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
152 		if (__inp->inp_vflag & INP_IPV4) \
153 			in_getsockaddr(so, (struct sockaddr **)&__a); \
154 		else \
155 			in6_getsockaddr(so, (struct sockaddr **)&__a); \
156 		*(pladdr) = *__a; \
157 		free(__a, M_SONAME); \
158 	} while (0)
159 
160 #define GET_REMOTE_ADDR(praddr, so) \
161 	do { \
162 		struct sockaddr_storage *__a = NULL; \
163 		struct  inpcb *__inp = sotoinpcb(so); \
164 		KASSERT(__inp != NULL, \
165 		   ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
166 		if (__inp->inp_vflag & INP_IPV4) \
167 			in_getpeeraddr(so, (struct sockaddr **)&__a); \
168 		else \
169 			in6_getpeeraddr(so, (struct sockaddr **)&__a); \
170 		*(praddr) = *__a; \
171 		free(__a, M_SONAME); \
172 	} while (0)
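
/*
 * Illustrative usage sketch (not driver code): both macros copy the socket's
 * address into caller-provided storage and free the M_SONAME sockaddr that
 * in_getsockaddr()/in6_getsockaddr() (or the peer variants) allocate, as
 * done in process_newconn():
 *
 *	struct sockaddr_storage laddr, raddr;
 *
 *	GET_LOCAL_ADDR(&laddr, so);
 *	GET_REMOTE_ADDR(&raddr, so);
 */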
173 
174 #ifdef KTR
175 static char *states[] = {
176 	"idle",
177 	"listen",
178 	"connecting",
179 	"mpa_wait_req",
180 	"mpa_req_sent",
181 	"mpa_req_rcvd",
182 	"mpa_rep_sent",
183 	"fpdu_mode",
184 	"aborting",
185 	"closing",
186 	"moribund",
187 	"dead",
188 	NULL,
189 };
190 #endif
191 
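/*
 * cm_id reference helpers: take/drop a reference on the ep's iw_cm_id via
 * its add_ref/rem_ref callbacks and record the transition in the epc
 * history bitmap for debugging.
 */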
192 static void deref_cm_id(struct c4iw_ep_common *epc)
193 {
194 	epc->cm_id->rem_ref(epc->cm_id);
195 	epc->cm_id = NULL;
196 	set_bit(CM_ID_DEREFED, &epc->history);
197 }
198 
199 static void ref_cm_id(struct c4iw_ep_common *epc)
200 {
201 	set_bit(CM_ID_REFED, &epc->history);
202 	epc->cm_id->add_ref(epc->cm_id);
203 }
204 
205 static void deref_qp(struct c4iw_ep *ep)
206 {
207 	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
208 	clear_bit(QP_REFERENCED, &ep->com.flags);
209 	set_bit(QP_DEREFED, &ep->com.history);
210 }
211 
212 static void ref_qp(struct c4iw_ep *ep)
213 {
214 	set_bit(QP_REFERENCED, &ep->com.flags);
215 	set_bit(QP_REFED, &ep->com.history);
216 	c4iw_qp_add_ref(&ep->com.qp->ibqp);
217 }

218 /* allocated per TCP port while listening */
219 struct listen_port_info {
220 	uint16_t port_num; /* TCP port number */
221 	struct list_head list; /* belongs to listen_port_list */
222 	struct list_head lep_list; /* per port lep list */
223 	uint32_t refcnt; /* number of lep's listening */
224 };
225 
226 /*
227  * The following two lists are used to manage INADDR_ANY listeners:
228  * 1) listen_port_list
229  * 2) lep_list
230  *
231  * Below is an overview of the INADDR_ANY listener lists on a system with a
232  * two-port adapter:
233  *   |------------------|
234  *   |listen_port_list  |
235  *   |------------------|
236  *            |
237  *            |              |-----------|       |-----------|
238  *            |              | port_num:X|       | port_num:X|
239  *            |--------------|-list------|-------|-list------|-------....
240  *                           | lep_list----|     | lep_list----|
241  *                           | refcnt    | |     | refcnt    | |
242  *                           |           | |     |           | |
243  *                           |           | |     |           | |
244  *                           |-----------| |     |-----------| |
245  *                                         |                   |
246  *                                         |                   |
247  *                                         |                   |
248  *                                         |                   |         lep1                  lep2
249  *                                         |                   |    |----------------|    |----------------|
250  *                                         |                   |----| listen_ep_list |----| listen_ep_list |
251  *                                         |                        |----------------|    |----------------|
252  *                                         |
253  *                                         |
254  *                                         |        lep1                  lep2
255  *                                         |   |----------------|    |----------------|
256  *                                         |---| listen_ep_list |----| listen_ep_list |
257  *                                             |----------------|    |----------------|
258  *
259  * Because this is a two-port adapter, there are two leps (lep1 & lep2) for
260  * each TCP port number.
261  *
262  * Here 'lep1' is always marked as the master lep, because solisten() is
263  * always called through the first lep.
264  *
265  */
266 static struct listen_port_info *
267 add_ep_to_listenlist(struct c4iw_listen_ep *lep)
268 {
269 	uint16_t port;
270 	struct listen_port_info *port_info = NULL;
271 	struct sockaddr_storage *laddr = &lep->com.local_addr;
272 
273 	port = (laddr->ss_family == AF_INET) ?
274 		((struct sockaddr_in *)laddr)->sin_port :
275 		((struct sockaddr_in6 *)laddr)->sin6_port;
276 
277 	mutex_lock(&listen_port_mutex);
278 
279 	list_for_each_entry(port_info, &listen_port_list, list)
280 		if (port_info->port_num == port)
281 			goto found_port;
282 
283 	port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
284 	port_info->port_num = port;
285 	port_info->refcnt    = 0;
286 
287 	list_add_tail(&port_info->list, &listen_port_list);
288 	INIT_LIST_HEAD(&port_info->lep_list);
289 
290 found_port:
291 	port_info->refcnt++;
292 	list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
293 	mutex_unlock(&listen_port_mutex);
294 	return port_info;
295 }
296 
297 static int
298 rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
299 {
300 	uint16_t port;
301 	struct listen_port_info *port_info = NULL;
302 	struct sockaddr_storage *laddr = &lep->com.local_addr;
303 	int refcnt = 0;
304 
305 	port = (laddr->ss_family == AF_INET) ?
306 		((struct sockaddr_in *)laddr)->sin_port :
307 		((struct sockaddr_in6 *)laddr)->sin6_port;
308 
309 	mutex_lock(&listen_port_mutex);
310 
311 	/* get the port_info structure based on the lep's port address */
312 	list_for_each_entry(port_info, &listen_port_list, list) {
313 		if (port_info->port_num == port) {
314 			port_info->refcnt--;
315 			refcnt = port_info->refcnt;
316 			/* remove the current lep from the listen list */
317 			list_del(&lep->listen_ep_list);
318 			if (port_info->refcnt == 0) {
319 				/* Remove this entry from the list as there
320 				 * are no more listeners for this port_num.
321 				 */
322 				list_del(&port_info->list);
323 				kfree(port_info);
324 			}
325 			break;
326 		}
327 	}
328 	mutex_unlock(&listen_port_mutex);
329 	return refcnt;
330 }
331 
332 /*
333  * Find the lep that belongs to the ifnet on which the SYN frame was received.
334  */
335 struct c4iw_listen_ep *
336 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
337 {
338 	struct adapter *adap = NULL;
339 	struct c4iw_listen_ep *lep = NULL;
340 	struct sockaddr_storage remote = { 0 };
341 	struct ifnet *new_conn_ifp = NULL;
342 	struct listen_port_info *port_info = NULL;
343 	int err = 0, i = 0,
344 	    found_portinfo = 0, found_lep = 0;
345 	uint16_t port;
346 
347 	/* STEP 1: get 'ifnet' based on socket's remote address */
348 	GET_REMOTE_ADDR(&remote, so);
349 
350 	err = get_ifnet_from_raddr(&remote, &new_conn_ifp);
351 	if (err) {
352 		CTR4(KTR_IW_CXGBE, "%s: Failed to get ifnet, sock %p, "
353 				"master_lep %p err %d",
354 				__func__, so, master_lep, err);
355 		return (NULL);
356 	}
357 
358 	/* STEP 2: Find 'port_info' with listener local port address. */
359 	port = (master_lep->com.local_addr.ss_family == AF_INET) ?
360 		((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
361 		((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;
362 
363 
364 	mutex_lock(&listen_port_mutex);
365 	list_for_each_entry(port_info, &listen_port_list, list)
366 		if (port_info->port_num == port) {
367 			found_portinfo = 1;
368 			break;
369 		}
370 	if (!found_portinfo)
371 		goto out;
372 
373 	/* STEP 3: Traverse through list of lep's that are bound to the current
374 	 * TCP port address and find the lep that belongs to the ifnet on which
375 	 * the SYN frame was received.
376 	 */
377 	list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
378 		adap = lep->com.dev->rdev.adap;
379 		for_each_port(adap, i) {
380 			if (new_conn_ifp == adap->port[i]->vi[0].ifp) {
381 				found_lep = 1;
382 				goto out;
383 			}
384 		}
385 	}
386 out:
387 	mutex_unlock(&listen_port_mutex);
388 	return found_lep ? lep : (NULL);
389 }
390 
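/*
 * Handle an expired ep timer: deliver a -ETIMEDOUT upcall and/or move the
 * QP to ERROR depending on the connection state, abort the connection
 * unless another thread already raced into ABORTING/DEAD, and finally drop
 * the reference the timer held.
 */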
391 static void process_timeout(struct c4iw_ep *ep)
392 {
393 	struct c4iw_qp_attributes attrs = {0};
394 	int abort = 1;
395 
396 	CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
397 			ep, ep->hwtid, ep->com.state);
398 	set_bit(TIMEDOUT, &ep->com.history);
399 	switch (ep->com.state) {
400 	case MPA_REQ_SENT:
401 		connect_reply_upcall(ep, -ETIMEDOUT);
402 		break;
403 	case MPA_REQ_WAIT:
404 	case MPA_REQ_RCVD:
405 	case MPA_REP_SENT:
406 	case FPDU_MODE:
407 		break;
408 	case CLOSING:
409 	case MORIBUND:
410 		if (ep->com.cm_id && ep->com.qp) {
411 			attrs.next_state = C4IW_QP_STATE_ERROR;
412 			c4iw_modify_qp(ep->com.dev, ep->com.qp,
413 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
414 		}
415 		close_complete_upcall(ep, -ETIMEDOUT);
416 		break;
417 	case ABORTING:
418 	case DEAD:
419 		/*
420 		 * These states are expected if the ep timed out at the same
421 		 * time as another thread was calling stop_ep_timer().
422 		 * So we silently do nothing for these states.
423 		 */
424 		abort = 0;
425 		break;
426 	default:
427 		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u",
428 				__func__, ep, ep->hwtid, ep->com.state);
429 		abort = 0;
430 	}
431 	if (abort)
432 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
433 	c4iw_put_ep(&ep->com);
434 	return;
435 }
436 
437 struct cqe_list_entry {
438 	struct list_head entry;
439 	struct c4iw_dev *rhp;
440 	struct t4_cqe err_cqe;
441 };
442 
443 static void
444 process_err_cqes(void)
445 {
446 	unsigned long flag;
447 	struct cqe_list_entry *cle;
448 
449 	spin_lock_irqsave(&err_cqe_lock, flag);
450 	while (!list_empty(&err_cqe_list)) {
451 		struct list_head *tmp;
452 		tmp = err_cqe_list.next;
453 		list_del(tmp);
454 		tmp->next = tmp->prev = NULL;
455 		spin_unlock_irqrestore(&err_cqe_lock, flag);
456 		cle = list_entry(tmp, struct cqe_list_entry, entry);
457 		c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
458 		free(cle, M_CXGBE);
459 		spin_lock_irqsave(&err_cqe_lock, flag);
460 	}
461 	spin_unlock_irqrestore(&err_cqe_lock, flag);
462 
463 	return;
464 }
465 
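/*
 * Taskqueue handler: drain the deferred error-CQE list, then service every
 * ep queued on req_list, dispatching its pending TERM/TIMEOUT/SOCKET events
 * with the ep mutex held and dropping the reference the list held.
 */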
466 static void
467 process_req(struct work_struct *ctx)
468 {
469 	struct c4iw_ep_common *epc;
470 	unsigned long flag;
471 	int ep_events;
472 
473 	process_err_cqes();
474 	spin_lock_irqsave(&req_lock, flag);
475 	while (!TAILQ_EMPTY(&req_list)) {
476 		epc = TAILQ_FIRST(&req_list);
477 		TAILQ_REMOVE(&req_list, epc, entry);
478 		epc->entry.tqe_prev = NULL;
479 		ep_events = epc->ep_events;
480 		epc->ep_events = 0;
481 		spin_unlock_irqrestore(&req_lock, flag);
482 		mutex_lock(&epc->mutex);
483 		CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x",
484 		    __func__, epc->so, epc, states[epc->state], ep_events);
485 		if (ep_events & C4IW_EVENT_TERM)
486 			process_terminate((struct c4iw_ep *)epc);
487 		if (ep_events & C4IW_EVENT_TIMEOUT)
488 			process_timeout((struct c4iw_ep *)epc);
489 		if (ep_events & C4IW_EVENT_SOCKET)
490 			process_socket_event((struct c4iw_ep *)epc);
491 		mutex_unlock(&epc->mutex);
492 		c4iw_put_ep(epc);
493 		process_err_cqes();
494 		spin_lock_irqsave(&req_lock, flag);
495 	}
496 	spin_unlock_irqrestore(&req_lock, flag);
497 }
498 
499 /*
500  * XXX: doesn't belong here in the iWARP driver.
501  * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
502  *      set.  Is this a valid assumption for active open?
503  */
504 static int
505 set_tcpinfo(struct c4iw_ep *ep)
506 {
507 	struct socket *so = ep->com.so;
508 	struct inpcb *inp = sotoinpcb(so);
509 	struct tcpcb *tp;
510 	struct toepcb *toep;
511 	int rc = 0;
512 
513 	INP_WLOCK(inp);
514 	tp = intotcpcb(inp);
515 	if ((tp->t_flags & TF_TOE) == 0) {
516 		rc = EINVAL;
517 		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
518 		    __func__, so, ep);
519 		goto done;
520 	}
521 	toep = TOEPCB(so);
522 
523 	ep->hwtid = toep->tid;
524 	ep->snd_seq = tp->snd_nxt;
525 	ep->rcv_seq = tp->rcv_nxt;
526 	ep->emss = max(tp->t_maxseg, 128);
527 done:
528 	INP_WUNLOCK(inp);
529 	return (rc);
530 }
531 
532 static int
533 get_ifnet_from_raddr(struct sockaddr_storage *raddr, struct ifnet **ifp)
534 {
535 	int err = 0;
536 
537 	if (raddr->ss_family == AF_INET) {
538 		struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr;
539 		struct nhop4_extended nh4 = {0};
540 
541 		err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, raddr4->sin_addr,
542 				NHR_REF, 0, &nh4);
543 		*ifp = nh4.nh_ifp;
544 		if (err)
545 			fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);
546 	} else {
547 		struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr;
548 		struct nhop6_extended nh6 = {0};
549 		struct in6_addr addr6;
550 		uint32_t scopeid;
551 
552 		memset(&addr6, 0, sizeof(addr6));
553 		in6_splitscope((struct in6_addr *)&raddr6->sin6_addr,
554 					&addr6, &scopeid);
555 		err = fib6_lookup_nh_ext(RT_DEFAULT_FIB, &addr6, scopeid,
556 				NHR_REF, 0, &nh6);
557 		*ifp = nh6.nh_ifp;
558 		if (err)
559 			fib6_free_nh_ext(RT_DEFAULT_FIB, &nh6);
560 	}
561 
562 	CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err);
563 	return err;
564 }
565 
566 static void
567 close_socket(struct socket *so)
568 {
569 	uninit_iwarp_socket(so);
570 	soclose(so);
571 }
572 
573 static void
574 process_peer_close(struct c4iw_ep *ep)
575 {
576 	struct c4iw_qp_attributes attrs = {0};
577 	int disconnect = 1;
578 	int release = 0;
579 
580 	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
581 	    ep->com.so, states[ep->com.state]);
582 
583 	switch (ep->com.state) {
584 
585 		case MPA_REQ_WAIT:
586 			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD",
587 			    __func__, ep);
588 			/* Fallthrough */
589 		case MPA_REQ_SENT:
590 			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
591 			    __func__, ep);
592 			ep->com.state = DEAD;
593 			connect_reply_upcall(ep, -ECONNABORTED);
594 
595 			disconnect = 0;
596 			STOP_EP_TIMER(ep);
597 			close_socket(ep->com.so);
598 			deref_cm_id(&ep->com);
599 			release = 1;
600 			break;
601 
602 		case MPA_REQ_RCVD:
603 
604 			/*
605 			 * We're gonna mark this puppy DEAD, but keep
606 			 * the reference on it until the ULP accepts or
607 			 * rejects the CR.
608 			 */
609 			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
610 			    __func__, ep);
611 			ep->com.state = CLOSING;
612 			break;
613 
614 		case MPA_REP_SENT:
615 			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
616 			    __func__, ep);
617 			ep->com.state = CLOSING;
618 			break;
619 
620 		case FPDU_MODE:
621 			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
622 			    __func__, ep);
623 			START_EP_TIMER(ep);
624 			ep->com.state = CLOSING;
625 			attrs.next_state = C4IW_QP_STATE_CLOSING;
626 			c4iw_modify_qp(ep->com.dev, ep->com.qp,
627 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
628 			peer_close_upcall(ep);
629 			break;
630 
631 		case ABORTING:
632 			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
633 			    __func__, ep);
634 			disconnect = 0;
635 			break;
636 
637 		case CLOSING:
638 			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
639 			    __func__, ep);
640 			ep->com.state = MORIBUND;
641 			disconnect = 0;
642 			break;
643 
644 		case MORIBUND:
645 			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
646 			    ep);
647 			STOP_EP_TIMER(ep);
648 			if (ep->com.cm_id && ep->com.qp) {
649 				attrs.next_state = C4IW_QP_STATE_IDLE;
650 				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
651 						C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
652 			}
653 			close_socket(ep->com.so);
654 			close_complete_upcall(ep, 0);
655 			ep->com.state = DEAD;
656 			release = 1;
657 			disconnect = 0;
658 			break;
659 
660 		case DEAD:
661 			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
662 			    __func__, ep);
663 			disconnect = 0;
664 			break;
665 
666 		default:
667 			panic("%s: ep %p state %d", __func__, ep,
668 			    ep->com.state);
669 			break;
670 	}
671 
672 
673 	if (disconnect) {
674 
675 		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
676 		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
677 	}
678 	if (release) {
679 
680 		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
681 		c4iw_put_ep(&ep->com);
682 	}
683 	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
684 	return;
685 }
686 
687 static void
688 process_conn_error(struct c4iw_ep *ep)
689 {
690 	struct c4iw_qp_attributes attrs = {0};
691 	int ret;
692 	int state;
693 
694 	state = ep->com.state;
695 	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
696 	    __func__, ep, ep->com.so, ep->com.so->so_error,
697 	    states[ep->com.state]);
698 
699 	switch (state) {
700 
701 		case MPA_REQ_WAIT:
702 			STOP_EP_TIMER(ep);
703 			c4iw_put_ep(&ep->parent_ep->com);
704 			break;
705 
706 		case MPA_REQ_SENT:
707 			STOP_EP_TIMER(ep);
708 			connect_reply_upcall(ep, -ECONNRESET);
709 			break;
710 
711 		case MPA_REP_SENT:
712 			ep->com.rpl_err = ECONNRESET;
713 			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
714 			break;
715 
716 		case MPA_REQ_RCVD:
717 			break;
718 
719 		case MORIBUND:
720 		case CLOSING:
721 			STOP_EP_TIMER(ep);
722 			/*FALLTHROUGH*/
723 		case FPDU_MODE:
724 
725 			if (ep->com.cm_id && ep->com.qp) {
726 
727 				attrs.next_state = C4IW_QP_STATE_ERROR;
728 				ret = c4iw_modify_qp(ep->com.qp->rhp,
729 					ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
730 					&attrs, 1);
731 				if (ret)
732 					log(LOG_ERR,
733 							"%s - qp <- error failed!\n",
734 							__func__);
735 			}
736 			peer_abort_upcall(ep);
737 			break;
738 
739 		case ABORTING:
740 			break;
741 
742 		case DEAD:
743 			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
744 			    __func__, ep->com.so->so_error);
745 			return;
746 
747 		default:
748 			panic("%s: ep %p state %d", __func__, ep, state);
749 			break;
750 	}
751 
752 	if (state != ABORTING) {
753 		close_socket(ep->com.so);
754 		ep->com.state = DEAD;
755 		c4iw_put_ep(&ep->com);
756 	}
757 	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
758 	return;
759 }
760 
761 static void
762 process_close_complete(struct c4iw_ep *ep)
763 {
764 	struct c4iw_qp_attributes attrs = {0};
765 	int release = 0;
766 
767 	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
768 	    ep->com.so, states[ep->com.state]);
769 
770 	/* The cm_id may be null if we failed to connect */
771 	set_bit(CLOSE_CON_RPL, &ep->com.history);
772 
773 	switch (ep->com.state) {
774 
775 		case CLOSING:
776 			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
777 			    __func__, ep);
778 			ep->com.state = MORIBUND;
779 			break;
780 
781 		case MORIBUND:
782 			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
783 			    ep);
784 			STOP_EP_TIMER(ep);
785 
786 			if ((ep->com.cm_id) && (ep->com.qp)) {
787 
788 				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
789 				    __func__, ep);
790 				attrs.next_state = C4IW_QP_STATE_IDLE;
791 				c4iw_modify_qp(ep->com.dev,
792 						ep->com.qp,
793 						C4IW_QP_ATTR_NEXT_STATE,
794 						&attrs, 1);
795 			}
796 
797 			close_socket(ep->com.so);
798 			close_complete_upcall(ep, 0);
799 			ep->com.state = DEAD;
800 			release = 1;
801 			break;
802 
803 		case ABORTING:
804 			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
805 			break;
806 
807 		case DEAD:
808 			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
809 			break;
810 		default:
811 			CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
812 					__func__, ep);
813 			panic("%s:pcc6 %p unknown ep state", __func__, ep);
814 			break;
815 	}
816 
817 	if (release) {
818 
819 		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
820 		release_ep_resources(ep);
821 	}
822 	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
823 	return;
824 }
825 
826 static void
827 setiwsockopt(struct socket *so)
828 {
829 	int rc;
830 	struct sockopt sopt;
831 	int on = 1;
832 
833 	sopt.sopt_dir = SOPT_SET;
834 	sopt.sopt_level = IPPROTO_TCP;
835 	sopt.sopt_name = TCP_NODELAY;
836 	sopt.sopt_val = (caddr_t)&on;
837 	sopt.sopt_valsize = sizeof on;
838 	sopt.sopt_td = NULL;
839 	rc = sosetopt(so, &sopt);
840 	if (rc) {
841 		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
842 		    __func__, so, rc);
843 	}
844 }
845 
846 static void
847 init_iwarp_socket(struct socket *so, void *arg)
848 {
849 	if (SOLISTENING(so)) {
850 		SOLISTEN_LOCK(so);
851 		solisten_upcall_set(so, c4iw_so_upcall, arg);
852 		so->so_state |= SS_NBIO;
853 		SOLISTEN_UNLOCK(so);
854 	} else {
855 		SOCKBUF_LOCK(&so->so_rcv);
856 		soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
857 		so->so_state |= SS_NBIO;
858 		SOCKBUF_UNLOCK(&so->so_rcv);
859 	}
860 }
861 
862 static void
863 uninit_iwarp_socket(struct socket *so)
864 {
865 	if (SOLISTENING(so)) {
866 		SOLISTEN_LOCK(so);
867 		solisten_upcall_set(so, NULL, NULL);
868 		SOLISTEN_UNLOCK(so);
869 	} else {
870 		SOCKBUF_LOCK(&so->so_rcv);
871 		soupcall_clear(so, SO_RCV);
872 		SOCKBUF_UNLOCK(&so->so_rcv);
873 	}
874 }
875 
876 static void
877 process_data(struct c4iw_ep *ep)
878 {
879 	int disconnect = 0;
880 
881 	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
882 	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));
883 
884 	switch (ep->com.state) {
885 	case MPA_REQ_SENT:
886 		disconnect = process_mpa_reply(ep);
887 		break;
888 	case MPA_REQ_WAIT:
889 		disconnect = process_mpa_request(ep);
890 		if (disconnect)
891 			/* Referenced in process_newconn() */
892 			c4iw_put_ep(&ep->parent_ep->com);
893 		break;
894 	default:
895 		if (sbused(&ep->com.so->so_rcv))
896 			log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
897 			    "state %d, so %p, so_state 0x%x, sbused %u\n",
898 			    __func__, ep, ep->com.state, ep->com.so,
899 			    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
900 		break;
901 	}
902 	if (disconnect)
903 		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
904 
905 }
906 
907 static void
908 process_connected(struct c4iw_ep *ep)
909 {
910 	struct socket *so = ep->com.so;
911 
912 	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
913 		if (send_mpa_req(ep))
914 			goto err;
915 	} else {
916 		connect_reply_upcall(ep, -so->so_error);
917 		goto err;
918 	}
919 	return;
920 err:
921 	close_socket(so);
922 	ep->com.state = DEAD;
923 	c4iw_put_ep(&ep->com);
924 	return;
925 }
926 
927 static inline int c4iw_zero_addr(struct sockaddr *addr)
928 {
929 	struct in6_addr *ip6;
930 
931 	if (addr->sa_family == AF_INET)
932 		return IN_ZERONET(
933 			ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
934 	else {
935 		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
936 		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
937 				ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
938 	}
939 }
940 
941 static inline int c4iw_loopback_addr(struct sockaddr *addr)
942 {
943 	if (addr->sa_family == AF_INET)
944 		return IN_LOOPBACK(
945 			ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
946 	else
947 		return IN6_IS_ADDR_LOOPBACK(
948 				&((struct sockaddr_in6 *) addr)->sin6_addr);
949 }
950 
951 static inline int c4iw_any_addr(struct sockaddr *addr)
952 {
953 	return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr);
954 }
955 
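/*
 * Handle a new child connection on a listening ep: resolve the real lep for
 * wildcard listeners, allocate and initialize the child ep, soaccept() the
 * new socket, and process any already-queued MPA request under the ep mutex.
 */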
956 static void
957 process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
958 {
959 	struct c4iw_listen_ep *real_lep = NULL;
960 	struct c4iw_ep *new_ep = NULL;
961 	struct sockaddr_in *remote = NULL;
962 	int ret = 0;
963 
964 	MPASS(new_so != NULL);
965 
966 	if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr)) {
967 		/* Here we need to find the 'real_lep' that belongs to the
968 		 * incoming socket's network interface, so that the newly
969 		 * created 'ep' can be attached to the real 'lep'.
970 		 */
971 		real_lep = find_real_listen_ep(master_lep, new_so);
972 		if (real_lep == NULL) {
973 			CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
974 					"ep for sock: %p", __func__, new_so);
975 			log(LOG_ERR,"%s: Could not find the real listen ep for "
976 					"sock: %p\n", __func__, new_so);
977 			/* FIXME: properly free the 'new_so' in the failure
978 			 * case.  Use of soabort() and soclose() is not legal
979 			 * here (before soaccept()).
980 			 */
981 			return;
982 		}
983 	} else /* for a non-wildcard address, master_lep is always the real_lep */
984 		real_lep = master_lep;
985 
986 	new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);
987 
988 	CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
989 	    "listening so %p, new so %p", __func__, master_lep, real_lep,
990 	    new_ep, master_lep->com.so, new_so);
991 
992 	new_ep->com.dev = real_lep->com.dev;
993 	new_ep->com.so = new_so;
994 	new_ep->com.cm_id = NULL;
995 	new_ep->com.thread = real_lep->com.thread;
996 	new_ep->parent_ep = real_lep;
997 
998 	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
999 	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
1000 	c4iw_get_ep(&real_lep->com);
1001 	init_timer(&new_ep->timer);
1002 	new_ep->com.state = MPA_REQ_WAIT;
1003 	START_EP_TIMER(new_ep);
1004 
1005 	setiwsockopt(new_so);
1006 	ret = soaccept(new_so, (struct sockaddr **)&remote);
1007 	if (ret != 0) {
1008 		CTR4(KTR_IW_CXGBE,
1009 				"%s:listen sock:%p, new sock:%p, ret:%d\n",
1010 				__func__, master_lep->com.so, new_so, ret);
1011 		if (remote != NULL)
1012 			free(remote, M_SONAME);
1013 		uninit_iwarp_socket(new_so);
1014 		soclose(new_so);
1015 		c4iw_put_ep(&new_ep->com);
1016 		c4iw_put_ep(&real_lep->com);
1017 		return;
1018 	}
1019 	free(remote, M_SONAME);
1020 
1021 	/* An MPA request might have been queued up on the socket already, so we
1022 	 * initialize the socket/upcall handler under the lock to prevent another
1023 	 * thread (via process_req()) from processing the MPA request simultaneously.
1024 	 */
1025 	c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below; this is to
1026 				      avoid freeing the ep before it is unlocked. */
1027 	mutex_lock(&new_ep->com.mutex);
1028 	init_iwarp_socket(new_so, &new_ep->com);
1029 
1030 	ret = process_mpa_request(new_ep);
1031 	if (ret) {
1032 		/* ABORT */
1033 		c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
1034 		c4iw_put_ep(&real_lep->com);
1035 	}
1036 	mutex_unlock(&new_ep->com.mutex);
1037 	c4iw_put_ep(&new_ep->com);
1038 	return;
1039 }
1040 
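/*
 * Record new events on the ep and, if the ep is not already on req_list,
 * take a reference, queue it, and kick the taskqueue.  process_req() drops
 * that reference after handling the events.
 */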
1041 static int
1042 add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
1043 {
1044 	unsigned long flag;
1045 
1046 	spin_lock_irqsave(&req_lock, flag);
1047 	if (ep && ep->com.so) {
1048 		ep->com.ep_events |= new_ep_event;
1049 		if (!ep->com.entry.tqe_prev) {
1050 			c4iw_get_ep(&ep->com);
1051 			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
1052 			queue_work(c4iw_taskq, &c4iw_task);
1053 		}
1054 	}
1055 	spin_unlock_irqrestore(&req_lock, flag);
1056 
1057 	return (0);
1058 }
1059 
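/*
 * Socket upcall registered by init_iwarp_socket(); it runs with socket
 * buffer locks held, so it only wakes rdma_init()/rdma_fini() waiters on a
 * socket error and defers all other work to the taskqueue via req_list.
 */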
1060 static int
1061 c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
1062 {
1063 	struct c4iw_ep *ep = arg;
1064 
1065 	CTR6(KTR_IW_CXGBE,
1066 	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
1067 	    __func__, so, so->so_state, ep, states[ep->com.state],
1068 	    ep->com.entry.tqe_prev);
1069 
1070 	MPASS(ep->com.so == so);
1071 	/*
1072 	 * Wake up any threads waiting in rdma_init()/rdma_fini(),
1073 	 * with locks held.
1074 	 */
1075 	if (so->so_error)
1076 		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1077 	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);
1078 
1079 	return (SU_OK);
1080 }
1081 
1082 
1083 static int
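/*
 * CPL_RDMA_TERMINATE handler: resolve the tid from the CPL to its
 * toepcb/socket, fetch the ep from the receive buffer's upcall argument,
 * and queue a TERM event for the taskqueue.
 */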
1084 terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1085 {
1086 	struct adapter *sc = iq->adapter;
1087 	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
1088 	unsigned int tid = GET_TID(cpl);
1089 	struct toepcb *toep = lookup_tid(sc, tid);
1090 	struct socket *so;
1091 	struct c4iw_ep *ep;
1092 
1093 	INP_WLOCK(toep->inp);
1094 	so = inp_inpcbtosocket(toep->inp);
1095 	ep = so->so_rcv.sb_upcallarg;
1096 	INP_WUNLOCK(toep->inp);
1097 
1098 	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
1099 	add_ep_to_req_list(ep, C4IW_EVENT_TERM);
1100 
1101 	return 0;
1102 }
1103 
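/*
 * Demultiplex a deferred socket event: accept new connections on listening
 * eps, and otherwise translate the socket state (connected, error, peer
 * close, disconnected, inbound data) into the matching processing routine.
 */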
1104 static void
1105 process_socket_event(struct c4iw_ep *ep)
1106 {
1107 	int state = ep->com.state;
1108 	struct socket *so = ep->com.so;
1109 
1110 	if (ep->com.state == DEAD) {
1111 		CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
1112 			"ep %p ep_state %s", __func__, ep, states[state]);
1113 		return;
1114 	}
1115 
1116 	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
1117 	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
1118 	    so->so_error, so->so_rcv.sb_state, ep, states[state]);
1119 
1120 	if (state == CONNECTING) {
1121 		process_connected(ep);
1122 		return;
1123 	}
1124 
1125 	if (state == LISTEN) {
1126 		struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
1127 		struct socket *listen_so = so, *new_so = NULL;
1128 		int error = 0;
1129 
1130 		SOLISTEN_LOCK(listen_so);
1131 		do {
1132 			error = solisten_dequeue(listen_so, &new_so,
1133 						SOCK_NONBLOCK);
1134 			if (error) {
1135 				CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
1136 					"error %d", __func__, lep, listen_so,
1137 					error);
1138 				return;
1139 			}
1140 			process_newconn(lep, new_so);
1141 
1142 			/* solisten_dequeue() unlocks the socket on return, so
1143 			 * acquire the lock again for sol_qlen and the next iteration.
1144 			 */
1145 			SOLISTEN_LOCK(listen_so);
1146 		} while (listen_so->sol_qlen);
1147 		SOLISTEN_UNLOCK(listen_so);
1148 
1149 		return;
1150 	}
1151 
1152 	/* connection error */
1153 	if (so->so_error) {
1154 		process_conn_error(ep);
1155 		return;
1156 	}
1157 
1158 	/* peer close */
1159 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
1160 		process_peer_close(ep);
1161 		/*
1162 		 * check whether socket disconnect event is pending before
1163 		 * returning. Fallthrough if yes.
1164 		 */
1165 		if (!(so->so_state & SS_ISDISCONNECTED))
1166 			return;
1167 	}
1168 
1169 	/* close complete */
1170 	if (so->so_state & SS_ISDISCONNECTED) {
1171 		process_close_complete(ep);
1172 		return;
1173 	}
1174 
1175 	/* rx data */
1176 	process_data(ep);
1177 }
1178 
1179 SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");
1180 
1181 static int dack_mode = 0;
1182 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
1183 		"Delayed ack mode (default = 0)");
1184 
1185 int c4iw_max_read_depth = 8;
1186 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
1187 		"Per-connection max ORD/IRD (default = 8)");
1188 
1189 static int enable_tcp_timestamps;
1190 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
1191 		"Enable tcp timestamps (default = 0)");
1192 
1193 static int enable_tcp_sack;
1194 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
1195 		"Enable tcp SACK (default = 0)");
1196 
1197 static int enable_tcp_window_scaling = 1;
1198 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
1199 		"Enable tcp window scaling (default = 1)");
1200 
1201 int c4iw_debug = 0;
1202 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
1203 		"Enable debug logging (default = 0)");
1204 
1205 static int peer2peer = 1;
1206 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
1207 		"Support peer2peer ULPs (default = 1)");
1208 
1209 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
1210 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
1211 		"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");
1212 
1213 static int ep_timeout_secs = 60;
1214 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
1215 		"CM Endpoint operation timeout in seconds (default = 60)");
1216 
1217 static int mpa_rev = 1;
1218 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
1219 		"MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
1220 
1221 static int markers_enabled;
1222 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
1223 		"Enable MPA MARKERS (default(0) = disabled)");
1224 
1225 static int crc_enabled = 1;
1226 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
1227 		"Enable MPA CRC (default(1) = enabled)");
1228 
1229 static int rcv_win = 256 * 1024;
1230 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
1231 		"TCP receive window in bytes (default = 256KB)");
1232 
1233 static int snd_win = 128 * 1024;
1234 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
1235 		"TCP send window in bytes (default = 128KB)");
1236 
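/*
 * Arm the ep timer (ep_timeout_secs); takes an ep reference that is
 * released by stop_ep_timer() or by the timeout path.
 */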
1237 static void
1238 start_ep_timer(struct c4iw_ep *ep)
1239 {
1240 
1241 	if (timer_pending(&ep->timer)) {
1242 		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
1243 		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
1244 		    ep);
1245 		return;
1246 	}
1247 	clear_bit(TIMEOUT, &ep->com.flags);
1248 	c4iw_get_ep(&ep->com);
1249 	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
1250 	ep->timer.data = (unsigned long)ep;
1251 	ep->timer.function = ep_timeout;
1252 	add_timer(&ep->timer);
1253 }
1254 
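/*
 * Disarm the ep timer.  Returns 0 if the timer was stopped here (the
 * timer's ep reference is dropped), or 1 if the timeout had already fired
 * and owns the final put.
 */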
1255 static int
1256 stop_ep_timer(struct c4iw_ep *ep)
1257 {
1258 
1259 	del_timer_sync(&ep->timer);
1260 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
1261 		c4iw_put_ep(&ep->com);
1262 		return 0;
1263 	}
1264 	return 1;
1265 }
1266 
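/* Allocate a zeroed ep and initialize its kref, mutex, and wr_wait. */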
1267 static void *
1268 alloc_ep(int size, gfp_t gfp)
1269 {
1270 	struct c4iw_ep_common *epc;
1271 
1272 	epc = kzalloc(size, gfp);
1273 	if (epc == NULL)
1274 		return (NULL);
1275 
1276 	kref_init(&epc->kref);
1277 	mutex_init(&epc->mutex);
1278 	c4iw_init_wr_wait(&epc->wr_wait);
1279 
1280 	return (epc);
1281 }
1282 
1283 void _c4iw_free_ep(struct kref *kref)
1284 {
1285 	struct c4iw_ep *ep;
1286 	struct c4iw_ep_common *epc;
1287 
1288 	ep = container_of(kref, struct c4iw_ep, com.kref);
1289 	epc = &ep->com;
1290 	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
1291 	    __func__, epc));
1292 	if (test_bit(QP_REFERENCED, &ep->com.flags))
1293 		deref_qp(ep);
1294 	CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx",
1295 	    __func__, ep, epc->history, epc->flags);
1296 	kfree(ep);
1297 }
1298 
1299 static void release_ep_resources(struct c4iw_ep *ep)
1300 {
1301 	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
1302 	set_bit(RELEASE_RESOURCES, &ep->com.flags);
1303 	c4iw_put_ep(&ep->com);
1304 	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
1305 }
1306 
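/*
 * Build an MPA request (v1, or v2 with an mpa_v2_conn_params trailer and
 * optional private data) and stream it with sosend(), then arm the ep timer
 * and move to MPA_REQ_SENT.
 */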
1307 static int
1308 send_mpa_req(struct c4iw_ep *ep)
1309 {
1310 	int mpalen;
1311 	struct mpa_message *mpa;
1312 	struct mpa_v2_conn_params mpa_v2_params;
1313 	struct mbuf *m;
1314 	char mpa_rev_to_use = mpa_rev;
1315 	int err = 0;
1316 
1317 	if (ep->retry_with_mpa_v1)
1318 		mpa_rev_to_use = 1;
1319 	mpalen = sizeof(*mpa) + ep->plen;
1320 	if (mpa_rev_to_use == 2)
1321 		mpalen += sizeof(struct mpa_v2_conn_params);
1322 
1323 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1324 	if (mpa == NULL) {
1325 		err = -ENOMEM;
1326 		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",
1327 				__func__, ep, err);
1328 		goto err;
1329 	}
1330 
1331 	memset(mpa, 0, mpalen);
1332 	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
1333 	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
1334 		(markers_enabled ? MPA_MARKERS : 0) |
1335 		(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
1336 	mpa->private_data_size = htons(ep->plen);
1337 	mpa->revision = mpa_rev_to_use;
1338 
1339 	if (mpa_rev_to_use == 1) {
1340 		ep->tried_with_mpa_v1 = 1;
1341 		ep->retry_with_mpa_v1 = 0;
1342 	}
1343 
1344 	if (mpa_rev_to_use == 2) {
1345 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1346 					    sizeof(struct mpa_v2_conn_params));
1347 		mpa_v2_params.ird = htons((u16)ep->ird);
1348 		mpa_v2_params.ord = htons((u16)ep->ord);
1349 
1350 		if (peer2peer) {
1351 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1352 
1353 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1354 				mpa_v2_params.ord |=
1355 				    htons(MPA_V2_RDMA_WRITE_RTR);
1356 			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1357 				mpa_v2_params.ord |=
1358 					htons(MPA_V2_RDMA_READ_RTR);
1359 			}
1360 		}
1361 		memcpy(mpa->private_data, &mpa_v2_params,
1362 			sizeof(struct mpa_v2_conn_params));
1363 
1364 		if (ep->plen) {
1365 
1366 			memcpy(mpa->private_data +
1367 				sizeof(struct mpa_v2_conn_params),
1368 				ep->mpa_pkt + sizeof(*mpa), ep->plen);
1369 		}
1370 	} else {
1371 
1372 		if (ep->plen)
1373 			memcpy(mpa->private_data,
1374 					ep->mpa_pkt + sizeof(*mpa), ep->plen);
1375 		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
1376 	}
1377 
1378 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1379 	if (m == NULL) {
1380 		err = -ENOMEM;
1381 		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",
1382 				__func__, ep, err);
1383 		free(mpa, M_CXGBE);
1384 		goto err;
1385 	}
1386 	m_copyback(m, 0, mpalen, (void *)mpa);
1387 	free(mpa, M_CXGBE);
1388 
1389 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1390 			ep->com.thread);
1391 	if (err) {
1392 		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",
1393 				__func__, ep, err);
1394 		goto err;
1395 	}
1396 
1397 	START_EP_TIMER(ep);
1398 	ep->com.state = MPA_REQ_SENT;
1399 	ep->mpa_attr.initiator = 1;
1400 	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1401 	return 0;
1402 err:
1403 	connect_reply_upcall(ep, err);
1404 	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1405 	return err;
1406 }
1407 
1408 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
1409 {
1410 	int mpalen;
1411 	struct mpa_message *mpa;
1412 	struct mpa_v2_conn_params mpa_v2_params;
1413 	struct mbuf *m;
1414 	int err;
1415 
1416 	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
1417 	    ep->plen);
1418 
1419 	mpalen = sizeof(*mpa) + plen;
1420 
1421 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1422 
1423 		mpalen += sizeof(struct mpa_v2_conn_params);
1424 		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
1425 		    ep->mpa_attr.version, mpalen);
1426 	}
1427 
1428 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1429 	if (mpa == NULL)
1430 		return (-ENOMEM);
1431 
1432 	memset(mpa, 0, mpalen);
1433 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1434 	mpa->flags = MPA_REJECT;
1435 	mpa->revision = mpa_rev;
1436 	mpa->private_data_size = htons(plen);
1437 
1438 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1439 
1440 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1441 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1442 					    sizeof(struct mpa_v2_conn_params));
1443 		mpa_v2_params.ird = htons(((u16)ep->ird) |
1444 				(peer2peer ? MPA_V2_PEER2PEER_MODEL :
1445 				 0));
1446 		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
1447 					(p2p_type ==
1448 					 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
1449 					 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
1450 					 FW_RI_INIT_P2PTYPE_READ_REQ ?
1451 					 MPA_V2_RDMA_READ_RTR : 0) : 0));
1452 		memcpy(mpa->private_data, &mpa_v2_params,
1453 				sizeof(struct mpa_v2_conn_params));
1454 
1455 		if (ep->plen)
1456 			memcpy(mpa->private_data +
1457 				sizeof(struct mpa_v2_conn_params), pdata, plen);
1458 		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
1459 		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
1460 	} else
1461 		if (plen)
1462 			memcpy(mpa->private_data, pdata, plen);
1463 
1464 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1465 	if (m == NULL) {
1466 		free(mpa, M_CXGBE);
1467 		return (-ENOMEM);
1468 	}
1469 	m_copyback(m, 0, mpalen, (void *)mpa);
1470 	free(mpa, M_CXGBE);
1471 
1472 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
1473 	if (!err)
1474 		ep->snd_seq += mpalen;
1475 	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
1476 	return err;
1477 }
1478 
1479 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
1480 {
1481 	int mpalen;
1482 	struct mpa_message *mpa;
1483 	struct mbuf *m;
1484 	struct mpa_v2_conn_params mpa_v2_params;
1485 	int err;
1486 
1487 	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);
1488 
1489 	mpalen = sizeof(*mpa) + plen;
1490 
1491 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1492 
1493 		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
1494 		    ep->mpa_attr.version);
1495 		mpalen += sizeof(struct mpa_v2_conn_params);
1496 	}
1497 
1498 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1499 	if (mpa == NULL)
1500 		return (-ENOMEM);
1501 
1502 	memset(mpa, 0, sizeof(*mpa));
1503 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1504 	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
1505 		(markers_enabled ? MPA_MARKERS : 0);
1506 	mpa->revision = ep->mpa_attr.version;
1507 	mpa->private_data_size = htons(plen);
1508 
1509 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1510 
1511 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1512 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1513 					    sizeof(struct mpa_v2_conn_params));
1514 		mpa_v2_params.ird = htons((u16)ep->ird);
1515 		mpa_v2_params.ord = htons((u16)ep->ord);
1516 		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
1517 		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);
1518 
1519 		if (peer2peer && (ep->mpa_attr.p2p_type !=
1520 			FW_RI_INIT_P2PTYPE_DISABLED)) {
1521 
1522 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1523 
1524 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1525 
1526 				mpa_v2_params.ord |=
1527 					htons(MPA_V2_RDMA_WRITE_RTR);
1528 				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
1529 				    __func__, ep, p2p_type, mpa_v2_params.ird,
1530 				    mpa_v2_params.ord);
1531 			}
1532 			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1533 
1534 				mpa_v2_params.ord |=
1535 					htons(MPA_V2_RDMA_READ_RTR);
1536 				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
1537 				    __func__, ep, p2p_type, mpa_v2_params.ird,
1538 				    mpa_v2_params.ord);
1539 			}
1540 		}
1541 
1542 		memcpy(mpa->private_data, &mpa_v2_params,
1543 			sizeof(struct mpa_v2_conn_params));
1544 
1545 		if (ep->plen)
1546 			memcpy(mpa->private_data +
1547 				sizeof(struct mpa_v2_conn_params), pdata, plen);
1548 	} else
1549 		if (plen)
1550 			memcpy(mpa->private_data, pdata, plen);
1551 
1552 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1553 	if (m == NULL) {
1554 		free(mpa, M_CXGBE);
1555 		return (-ENOMEM);
1556 	}
1557 	m_copyback(m, 0, mpalen, (void *)mpa);
1558 	free(mpa, M_CXGBE);
1559 
1560 
1561 	ep->com.state = MPA_REP_SENT;
1562 	ep->snd_seq += mpalen;
1563 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1564 			ep->com.thread);
1565 	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
1566 	return err;
1567 }
1568 
1569 
1570 
1571 static void close_complete_upcall(struct c4iw_ep *ep, int status)
1572 {
1573 	struct iw_cm_event event;
1574 
1575 	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
1576 	memset(&event, 0, sizeof(event));
1577 	event.event = IW_CM_EVENT_CLOSE;
1578 	event.status = status;
1579 
1580 	if (ep->com.cm_id) {
1581 
1582 		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
1583 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1584 		deref_cm_id(&ep->com);
1585 		set_bit(CLOSE_UPCALL, &ep->com.history);
1586 	}
1587 	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
1588 }
1589 
1590 static int
1591 send_abort(struct c4iw_ep *ep)
1592 {
1593 	struct socket *so = ep->com.so;
1594 	struct sockopt sopt;
1595 	int rc;
1596 	struct linger l;
1597 
1598 	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
1599 	    states[ep->com.state], ep->hwtid);
1600 
1601 	l.l_onoff = 1;
1602 	l.l_linger = 0;
1603 
1604 	/* linger_time of 0 forces RST to be sent */
1605 	sopt.sopt_dir = SOPT_SET;
1606 	sopt.sopt_level = SOL_SOCKET;
1607 	sopt.sopt_name = SO_LINGER;
1608 	sopt.sopt_val = (caddr_t)&l;
1609 	sopt.sopt_valsize = sizeof l;
1610 	sopt.sopt_td = NULL;
1611 	rc = sosetopt(so, &sopt);
1612 	if (rc != 0) {
1613 		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
1614 		    __func__, so, rc);
1615 	}
1616 
1617 	uninit_iwarp_socket(so);
1618 	soclose(so);
1619 	set_bit(ABORT_CONN, &ep->com.history);
1620 
1621 	/*
1622 	 * TBD: the iw_cxgbe driver should receive an ABORT reply for every ABORT
1623 	 * request it has sent.  But the current TOE driver does not propagate
1624 	 * this ABORT reply event (via do_abort_rpl) to iw_cxgbe.  So as a
1625 	 * workaround, de-reference 'ep' here instead of doing it in the (not
1626 	 * yet implemented) abort_rpl() handler of the iw_cxgbe driver.
1627 	 */
1628 	release_ep_resources(ep);
1629 
1630 	return (0);
1631 }
1632 
1633 static void peer_close_upcall(struct c4iw_ep *ep)
1634 {
1635 	struct iw_cm_event event;
1636 
1637 	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
1638 	memset(&event, 0, sizeof(event));
1639 	event.event = IW_CM_EVENT_DISCONNECT;
1640 
1641 	if (ep->com.cm_id) {
1642 
1643 		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
1644 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1645 		set_bit(DISCONN_UPCALL, &ep->com.history);
1646 	}
1647 	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
1648 }
1649 
1650 static void peer_abort_upcall(struct c4iw_ep *ep)
1651 {
1652 	struct iw_cm_event event;
1653 
1654 	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
1655 	memset(&event, 0, sizeof(event));
1656 	event.event = IW_CM_EVENT_CLOSE;
1657 	event.status = -ECONNRESET;
1658 
1659 	if (ep->com.cm_id) {
1660 
1661 		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
1662 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1663 		deref_cm_id(&ep->com);
1664 		set_bit(ABORT_UPCALL, &ep->com.history);
1665 	}
1666 	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
1667 }
1668 
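/*
 * Deliver IW_CM_EVENT_CONNECT_REPLY to the ULP.  For any failure status
 * other than -ECONNABORTED (where the abort path drops it via
 * peer_abort_upcall()), the cm_id reference is also dropped here.
 */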
1669 static void connect_reply_upcall(struct c4iw_ep *ep, int status)
1670 {
1671 	struct iw_cm_event event;
1672 
1673 	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
1674 	memset(&event, 0, sizeof(event));
1675 	event.event = IW_CM_EVENT_CONNECT_REPLY;
1676 	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
1677 					-ECONNRESET : status;
1678 	event.local_addr = ep->com.local_addr;
1679 	event.remote_addr = ep->com.remote_addr;
1680 
1681 	if ((status == 0) || (status == -ECONNREFUSED)) {
1682 
1683 		if (!ep->tried_with_mpa_v1) {
1684 
1685 			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
1686 			/* this means MPA_v2 is used */
1687 			event.ord = ep->ird;
1688 			event.ird = ep->ord;
1689 			event.private_data_len = ep->plen -
1690 				sizeof(struct mpa_v2_conn_params);
1691 			event.private_data = ep->mpa_pkt +
1692 				sizeof(struct mpa_message) +
1693 				sizeof(struct mpa_v2_conn_params);
1694 		} else {
1695 
1696 			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
1697 			/* this means MPA_v1 is used */
1698 			event.ord = c4iw_max_read_depth;
1699 			event.ird = c4iw_max_read_depth;
1700 			event.private_data_len = ep->plen;
1701 			event.private_data = ep->mpa_pkt +
1702 				sizeof(struct mpa_message);
1703 		}
1704 	}
1705 
1706 	if (ep->com.cm_id) {
1707 
1708 		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
1709 		set_bit(CONN_RPL_UPCALL, &ep->com.history);
1710 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1711 	}
1712 
1713 	if (status == -ECONNABORTED) {
1714 
1715 		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
1716 		return;
1717 	}
1718 
1719 	if (status < 0) {
1720 
1721 		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
1722 		deref_cm_id(&ep->com);
1723 	}
1724 
1725 	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
1726 }
1727 
1728 static int connect_request_upcall(struct c4iw_ep *ep)
1729 {
1730 	struct iw_cm_event event;
1731 	int ret;
1732 
1733 	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
1734 	    ep->tried_with_mpa_v1);
1735 
1736 	memset(&event, 0, sizeof(event));
1737 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
1738 	event.local_addr = ep->com.local_addr;
1739 	event.remote_addr = ep->com.remote_addr;
1740 	event.provider_data = ep;
1741 
1742 	if (!ep->tried_with_mpa_v1) {
1743 		/* this means MPA_v2 is used */
1744 		event.ord = ep->ord;
1745 		event.ird = ep->ird;
1746 		event.private_data_len = ep->plen -
1747 			sizeof(struct mpa_v2_conn_params);
1748 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1749 			sizeof(struct mpa_v2_conn_params);
1750 	} else {
1751 
1752 		/* this means MPA_v1 is used. Send max supported */
1753 		event.ord = c4iw_max_read_depth;
1754 		event.ird = c4iw_max_read_depth;
1755 		event.private_data_len = ep->plen;
1756 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1757 	}
1758 
1759 	c4iw_get_ep(&ep->com);
1760 	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1761 	    &event);
1762 	if (ret) {
1763 		CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
1764 			" IWCM, err:%d", __func__, ep, ret);
1765 		c4iw_put_ep(&ep->com);
1766 	} else
1767 		/* Dereference parent_ep only in the success case.
1768 		 * In case of failure, parent_ep is dereferenced by the caller
1769 		 * of process_mpa_request().
1770 		 */
1771 		c4iw_put_ep(&ep->parent_ep->com);
1772 
1773 	set_bit(CONNREQ_UPCALL, &ep->com.history);
1774 	return ret;
1775 }
1776 
1777 static void established_upcall(struct c4iw_ep *ep)
1778 {
1779 	struct iw_cm_event event;
1780 
1781 	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
1782 	memset(&event, 0, sizeof(event));
1783 	event.event = IW_CM_EVENT_ESTABLISHED;
1784 	event.ird = ep->ord;
1785 	event.ord = ep->ird;
1786 
1787 	if (ep->com.cm_id) {
1788 
1789 		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
1790 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1791 		set_bit(ESTAB_UPCALL, &ep->com.history);
1792 	}
1793 	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
1794 }
1795 
1796 
1797 #define RELAXED_IRD_NEGOTIATION 1
1798 
/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect reply was delivered to the ULP
 * or the mpa reply is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
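/*
 * A minimal sketch (illustrative only, under the assumption that the
 * rx-data path is the caller) of how the return value above is meant to
 * be consumed:
 *
 *	rc = process_mpa_reply(ep);
 *	if (rc)
 *		c4iw_ep_disconnect(ep, rc == 2, GFP_KERNEL);
 *
 * i.e. any nonzero return disconnects, and 2 requests an abortive close.
 */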
1811 static int process_mpa_reply(struct c4iw_ep *ep)
1812 {
1813 	struct mpa_message *mpa;
1814 	struct mpa_v2_conn_params *mpa_v2_params;
1815 	u16 plen;
1816 	u16 resp_ird, resp_ord;
1817 	u8 rtr_mismatch = 0, insuff_ird = 0;
1818 	struct c4iw_qp_attributes attrs = {0};
1819 	enum c4iw_qp_attr_mask mask;
1820 	int err;
1821 	struct mbuf *top, *m;
1822 	int flags = MSG_DONTWAIT;
1823 	struct uio uio;
1824 	int disconnect = 0;
1825 
1826 	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);
1827 
1828 	/*
1829 	 * Stop mpa timer.  If it expired, then
1830 	 * we ignore the MPA reply.  process_timeout()
1831 	 * will abort the connection.
1832 	 */
1833 	if (STOP_EP_TIMER(ep))
1834 		return 0;
1835 
1836 	uio.uio_resid = 1000000;
1837 	uio.uio_td = ep->com.thread;
1838 	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
1839 
1840 	if (err) {
1841 
1842 		if (err == EWOULDBLOCK) {
1843 
1844 			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
1845 			START_EP_TIMER(ep);
1846 			return 0;
1847 		}
1848 		err = -err;
1849 		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
1850 		goto err;
1851 	}
1852 
1853 	if (ep->com.so->so_rcv.sb_mb) {
1854 
1855 		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
1856 		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
1857 		       __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
1858 	}
1859 
1860 	m = top;
1861 
1862 	do {
1863 
1864 		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
1865 		/*
1866 		 * If we get more than the supported amount of private data
1867 		 * then we must fail this connection.
1868 		 */
1869 		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
1870 
1871 			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
1872 			    ep->mpa_pkt_len + m->m_len);
1873 			err = (-EINVAL);
1874 			goto err_stop_timer;
1875 		}
1876 
1877 		/*
1878 		 * copy the new data into our accumulation buffer.
1879 		 */
1880 		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
1881 		ep->mpa_pkt_len += m->m_len;
1882 		if (!m->m_next)
1883 			m = m->m_nextpkt;
1884 		else
1885 			m = m->m_next;
1886 	} while (m);
1887 
1888 	m_freem(top);
1889 	/*
1890 	 * if we don't even have the mpa message, then bail.
1891 	 */
1892 	if (ep->mpa_pkt_len < sizeof(*mpa)) {
1893 		return 0;
1894 	}
1895 	mpa = (struct mpa_message *) ep->mpa_pkt;
1896 
1897 	/* Validate MPA header. */
1898 	if (mpa->revision > mpa_rev) {
1899 
1900 		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
1901 		    mpa->revision, mpa_rev);
1902 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
1903 				" Received = %d\n", __func__, mpa_rev, mpa->revision);
1904 		err = -EPROTO;
1905 		goto err_stop_timer;
1906 	}
1907 
1908 	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1909 
1910 		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
1911 		err = -EPROTO;
1912 		goto err_stop_timer;
1913 	}
1914 
1915 	plen = ntohs(mpa->private_data_size);
1916 
1917 	/*
1918 	 * Fail if there's too much private data.
1919 	 */
1920 	if (plen > MPA_MAX_PRIVATE_DATA) {
1921 
1922 		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
1923 		err = -EPROTO;
1924 		goto err_stop_timer;
1925 	}
1926 
1927 	/*
1928 	 * If plen does not account for pkt size
1929 	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}
1937 
1938 	ep->plen = (u8) plen;
1939 
1940 	/*
1941 	 * If we don't have all the pdata yet, then bail.
1942 	 * We'll continue process when more data arrives.
1943 	 */
1944 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
1945 
1946 		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
1947 		return 0;
1948 	}
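
	/*
	 * Here the reply is exactly framed: mpa_pkt_len == sizeof(*mpa) +
	 * plen (the '>' case failed above and the '<' case returned early).
	 * E.g., assuming a 20-byte on-the-wire struct mpa_message (16-byte
	 * key, flags, revision, 16-bit private_data_size), a reply with 4
	 * bytes of private data is complete at 24 accumulated bytes.
	 */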
1949 
1950 	if (mpa->flags & MPA_REJECT) {
1951 
1952 		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
1953 		err = -ECONNREFUSED;
1954 		goto err_stop_timer;
1955 	}
1956 
1957 	/*
1958 	 * If we get here we have accumulated the entire mpa
1959 	 * start reply message including private data. And
1960 	 * the MPA header is valid.
1961 	 */
1962 	ep->com.state = FPDU_MODE;
	ep->mpa_attr.crc_enabled =
	    ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
1964 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
1965 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1966 	ep->mpa_attr.version = mpa->revision;
1967 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1968 
1969 	if (mpa->revision == 2) {
1970 
1971 		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
1972 		ep->mpa_attr.enhanced_rdma_conn =
1973 			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1974 
1975 		if (ep->mpa_attr.enhanced_rdma_conn) {
1976 
1977 			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
1978 			mpa_v2_params = (struct mpa_v2_conn_params *)
1979 				(ep->mpa_pkt + sizeof(*mpa));
1980 			resp_ird = ntohs(mpa_v2_params->ird) &
1981 				MPA_V2_IRD_ORD_MASK;
1982 			resp_ord = ntohs(mpa_v2_params->ord) &
1983 				MPA_V2_IRD_ORD_MASK;
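
			/*
			 * In MPA v2, the 16-bit ird/ord fields carry the
			 * counts in the low-order bits (MPA_V2_IRD_ORD_MASK);
			 * the high-order bits are control flags
			 * (MPA_V2_PEER2PEER_MODEL in ird, and the
			 * MPA_V2_RDMA_{WRITE,READ}_RTR bits in ord), masked
			 * off here and tested separately below.
			 */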
1984 
1985 			/*
1986 			 * This is a double-check. Ideally, below checks are
1987 			 * not required since ird/ord stuff has been taken
1988 			 * care of in c4iw_accept_cr
1989 			 */
1990 			if (ep->ird < resp_ord) {
1991 				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
1992 				   ep->com.dev->rdev.adap->params.max_ordird_qp)
1993 					ep->ird = resp_ord;
1994 				else
1995 					insuff_ird = 1;
1996 			} else if (ep->ird > resp_ord) {
1997 				ep->ird = resp_ord;
1998 			}
1999 			if (ep->ord > resp_ird) {
2000 				if (RELAXED_IRD_NEGOTIATION)
2001 					ep->ord = resp_ird;
2002 				else
2003 					insuff_ird = 1;
2004 			}
2005 			if (insuff_ird) {
2006 				err = -ENOMEM;
2007 				ep->ird = resp_ord;
2008 				ep->ord = resp_ird;
2009 			}
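			/*
			 * Worked example: if we advertised ird 8 and the
			 * peer responded with resp_ord 16, relaxed
			 * negotiation raises ep->ird to 16 provided 16 <=
			 * max_ordird_qp; otherwise insuff_ird is set and a
			 * TERM with MPA_INSUFF_IRD is generated further
			 * below.
			 */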
2010 
2011 			if (ntohs(mpa_v2_params->ird) &
2012 				MPA_V2_PEER2PEER_MODEL) {
2013 
2014 				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
2015 				if (ntohs(mpa_v2_params->ord) &
2016 					MPA_V2_RDMA_WRITE_RTR) {
2017 
2018 					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
2019 					ep->mpa_attr.p2p_type =
2020 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2021 				}
2022 				else if (ntohs(mpa_v2_params->ord) &
2023 					MPA_V2_RDMA_READ_RTR) {
2024 
2025 					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
2026 					ep->mpa_attr.p2p_type =
2027 						FW_RI_INIT_P2PTYPE_READ_REQ;
2028 				}
2029 			}
2030 		}
2031 	} else {
2032 
2033 		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);
2034 
2035 		if (mpa->revision == 1) {
2036 
2037 			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);
2038 
2039 			if (peer2peer) {
2040 
2041 				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
2042 				ep->mpa_attr.p2p_type = p2p_type;
2043 			}
2044 		}
2045 	}
2046 
2047 	if (set_tcpinfo(ep)) {
2048 
2049 		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
2050 		printf("%s set_tcpinfo error\n", __func__);
2051 		err = -ECONNRESET;
2052 		goto err;
2053 	}
2054 
2055 	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
2056 	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
2057 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2058 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
2059 	    ep->mpa_attr.p2p_type);
2060 
2061 	/*
2062 	 * If responder's RTR does not match with that of initiator, assign
2063 	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
2064 	 * generated when moving QP to RTS state.
2065 	 * A TERM message will be sent after QP has moved to RTS state
2066 	 */
2067 	if ((ep->mpa_attr.version == 2) && peer2peer &&
2068 		(ep->mpa_attr.p2p_type != p2p_type)) {
2069 
2070 		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
2071 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2072 		rtr_mismatch = 1;
2073 	}
2074 
2075 
2076 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2077 	attrs.mpa_attr = ep->mpa_attr;
2078 	attrs.max_ird = ep->ird;
2079 	attrs.max_ord = ep->ord;
2080 	attrs.llp_stream_handle = ep;
2081 	attrs.next_state = C4IW_QP_STATE_RTS;
2082 
2083 	mask = C4IW_QP_ATTR_NEXT_STATE |
2084 		C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
2085 		C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
2086 
2087 	/* bind QP and TID with INIT_WR */
2088 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2089 
2090 	if (err) {
2091 
2092 		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
2093 		goto err;
2094 	}
2095 
2096 	/*
2097 	 * If responder's RTR requirement did not match with what initiator
2098 	 * supports, generate TERM message
2099 	 */
2100 	if (rtr_mismatch) {
2101 
2102 		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
2103 		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
2104 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2105 		attrs.ecode = MPA_NOMATCH_RTR;
2106 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2107 		attrs.send_term = 1;
2108 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2109 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2110 		err = -ENOMEM;
2111 		disconnect = 1;
2112 		goto out;
2113 	}
2114 
2115 	/*
2116 	 * Generate TERM if initiator IRD is not sufficient for responder
2117 	 * provided ORD. Currently, we do the same behaviour even when
2118 	 * responder provided IRD is also not sufficient as regards to
2119 	 * initiator ORD.
2120 	 */
2121 	if (insuff_ird) {
2122 
2123 		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
2124 		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
2125 				__func__);
2126 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2127 		attrs.ecode = MPA_INSUFF_IRD;
2128 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2129 		attrs.send_term = 1;
2130 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2131 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2132 		err = -ENOMEM;
2133 		disconnect = 1;
2134 		goto out;
2135 	}
2136 	goto out;
2137 err_stop_timer:
2138 	STOP_EP_TIMER(ep);
2139 err:
2140 	disconnect = 2;
2141 out:
2142 	connect_reply_upcall(ep, err);
2143 	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
2144 	return disconnect;
2145 }
2146 
/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
2159 static int
2160 process_mpa_request(struct c4iw_ep *ep)
2161 {
2162 	struct mpa_message *mpa;
2163 	struct mpa_v2_conn_params *mpa_v2_params;
2164 	u16 plen;
2165 	int flags = MSG_DONTWAIT;
2166 	int rc;
2167 	struct iovec iov;
2168 	struct uio uio;
2169 	enum c4iw_ep_state state = ep->com.state;
2170 
2171 	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);
2172 
2173 	if (state != MPA_REQ_WAIT)
2174 		return 0;
2175 
2176 	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
2177 	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2178 	uio.uio_iov = &iov;
2179 	uio.uio_iovcnt = 1;
2180 	uio.uio_offset = 0;
2181 	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2182 	uio.uio_segflg = UIO_SYSSPACE;
2183 	uio.uio_rw = UIO_READ;
2184 	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */
2185 
2186 	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
2187 	if (rc == EAGAIN)
2188 		return 0;
2189 	else if (rc)
2190 		goto err_stop_timer;
2191 
	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
	    __func__, ep->com.so));
2194 	ep->mpa_pkt_len += uio.uio_offset;
2195 
2196 	/*
2197 	 * If we get more than the supported amount of private data then we must
2198 	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
2199 	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
2200 	 * byte is filled by the soreceive above.
2201 	 */
2202 
2203 	/* Don't even have the MPA message.  Wait for more data to arrive. */
2204 	if (ep->mpa_pkt_len < sizeof(*mpa))
2205 		return 0;
2206 	mpa = (struct mpa_message *) ep->mpa_pkt;
2207 
2208 	/*
2209 	 * Validate MPA Header.
2210 	 */
2211 	if (mpa->revision > mpa_rev) {
2212 		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
2213 		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
2214 		goto err_stop_timer;
2215 	}
2216 
2217 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
2218 		goto err_stop_timer;
2219 
2220 	/*
2221 	 * Fail if there's too much private data.
2222 	 */
2223 	plen = ntohs(mpa->private_data_size);
2224 	if (plen > MPA_MAX_PRIVATE_DATA)
2225 		goto err_stop_timer;
2226 
2227 	/*
2228 	 * If plen does not account for pkt size
2229 	 */
2230 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
2231 		goto err_stop_timer;
2232 
2233 	ep->plen = (u8) plen;
2234 
2235 	/*
2236 	 * If we don't have all the pdata yet, then bail.
2237 	 */
2238 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
2239 		return 0;
2240 
2241 	/*
2242 	 * If we get here we have accumulated the entire mpa
2243 	 * start reply message including private data.
2244 	 */
2245 	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled =
	    ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
2247 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
2248 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2249 	ep->mpa_attr.version = mpa->revision;
2250 	if (mpa->revision == 1)
2251 		ep->tried_with_mpa_v1 = 1;
2252 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2253 
2254 	if (mpa->revision == 2) {
2255 		ep->mpa_attr.enhanced_rdma_conn =
2256 		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2257 		if (ep->mpa_attr.enhanced_rdma_conn) {
2258 			mpa_v2_params = (struct mpa_v2_conn_params *)
2259 				(ep->mpa_pkt + sizeof(*mpa));
2260 			ep->ird = ntohs(mpa_v2_params->ird) &
2261 				MPA_V2_IRD_ORD_MASK;
2262 			ep->ird = min_t(u32, ep->ird,
2263 					cur_max_read_depth(ep->com.dev));
2264 			ep->ord = ntohs(mpa_v2_params->ord) &
2265 				MPA_V2_IRD_ORD_MASK;
2266 			ep->ord = min_t(u32, ep->ord,
2267 					cur_max_read_depth(ep->com.dev));
2268 			CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u\n",
2269 				 __func__, ep->ird, ep->ord);
2270 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
2271 				if (peer2peer) {
2272 					if (ntohs(mpa_v2_params->ord) &
2273 							MPA_V2_RDMA_WRITE_RTR)
2274 						ep->mpa_attr.p2p_type =
2275 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2276 					else if (ntohs(mpa_v2_params->ord) &
2277 							MPA_V2_RDMA_READ_RTR)
2278 						ep->mpa_attr.p2p_type =
2279 						FW_RI_INIT_P2PTYPE_READ_REQ;
2280 				}
2281 		}
2282 	} else if (mpa->revision == 1 && peer2peer)
2283 		ep->mpa_attr.p2p_type = p2p_type;
2284 
2285 	if (set_tcpinfo(ep))
2286 		goto err_stop_timer;
2287 
2288 	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
2289 	    "xmit_marker_enabled = %d, version = %d", __func__,
2290 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2291 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
2292 
2293 	ep->com.state = MPA_REQ_RCVD;
2294 	STOP_EP_TIMER(ep);
2295 
2296 	/* drive upcall */
2297 	if (ep->parent_ep->com.state != DEAD)
2298 		if (connect_request_upcall(ep))
2299 			goto err_out;
2300 	return 0;
2301 
2302 err_stop_timer:
2303 	STOP_EP_TIMER(ep);
2304 err_out:
2305 	return 2;
2306 }
2307 
/*
 * iwcm->reject.  Reject a connection request that was delivered to the ULP
 * via connect_request_upcall.  Unless MPA v0 is in use (mpa_rev == 0), an
 * MPA reject message carrying the private data is sent before the
 * connection is torn down.
 */
2313 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2314 {
2315 	int err;
2316 	struct c4iw_ep *ep = to_ep(cm_id);
2317 	int abort = 0;
2318 
2319 	mutex_lock(&ep->com.mutex);
2320 	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
2321 
	if (ep->com.state != MPA_REQ_RCVD) {
2324 
2325 		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
2326 		mutex_unlock(&ep->com.mutex);
2327 		c4iw_put_ep(&ep->com);
2328 		return -ECONNRESET;
2329 	}
2330 	set_bit(ULP_REJECT, &ep->com.history);
2331 
2332 	if (mpa_rev == 0) {
2333 
2334 		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
2335 		abort = 1;
2336 	}
2337 	else {
2338 
2339 		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
2340 		abort = send_mpa_reject(ep, pdata, pdata_len);
2341 	}
2342 	STOP_EP_TIMER(ep);
2343 	err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2344 	mutex_unlock(&ep->com.mutex);
2345 	c4iw_put_ep(&ep->com);
2346 	CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
2347 	return 0;
2348 }
2349 
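/*
 * iwcm->accept.  Complete the passive-side connection: validate the
 * requested ird/ord against what the peer advertised, bind the QP to the
 * TID and move it to RTS, then send the MPA reply carrying the private
 * data.
 */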
2350 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2351 {
2352 	int err;
2353 	struct c4iw_qp_attributes attrs = {0};
2354 	enum c4iw_qp_attr_mask mask;
2355 	struct c4iw_ep *ep = to_ep(cm_id);
2356 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2357 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2358 	int abort = 0;
2359 
2360 	mutex_lock(&ep->com.mutex);
2361 	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
2362 
	if (ep->com.state != MPA_REQ_RCVD) {
2365 
2366 		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
2367 		err = -ECONNRESET;
2368 		goto err_out;
2369 	}
2370 
2371 	BUG_ON(!qp);
2372 
2373 	set_bit(ULP_ACCEPT, &ep->com.history);
2374 
2375 	if ((conn_param->ord > c4iw_max_read_depth) ||
2376 		(conn_param->ird > c4iw_max_read_depth)) {
2377 
2378 		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
2379 		err = -EINVAL;
2380 		goto err_abort;
2381 	}
2382 
2383 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2384 
2385 		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
2386 
2387 		if (conn_param->ord > ep->ird) {
2388 			if (RELAXED_IRD_NEGOTIATION) {
2389 				conn_param->ord = ep->ird;
2390 			} else {
2391 				ep->ird = conn_param->ird;
2392 				ep->ord = conn_param->ord;
2393 				send_mpa_reject(ep, conn_param->private_data,
2394 						conn_param->private_data_len);
2395 				err = -ENOMEM;
2396 				goto err_abort;
2397 			}
2398 		}
2399 		if (conn_param->ird < ep->ord) {
2400 			if (RELAXED_IRD_NEGOTIATION &&
2401 			    ep->ord <= h->rdev.adap->params.max_ordird_qp) {
2402 				conn_param->ird = ep->ord;
2403 			} else {
2404 				err = -ENOMEM;
2405 				goto err_abort;
2406 			}
2407 		}
2408 	}
2409 	ep->ird = conn_param->ird;
2410 	ep->ord = conn_param->ord;
2411 
2412 	if (ep->mpa_attr.version == 1) {
2413 		if (peer2peer && ep->ird == 0)
2414 			ep->ird = 1;
2415 	} else {
2416 		if (peer2peer &&
2417 		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
2418 		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
2419 			ep->ird = 1;
2420 	}
2421 
2422 	CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d\n", __func__, __LINE__,
2423 			ep->ird, ep->ord);
2424 
2425 	ep->com.cm_id = cm_id;
2426 	ref_cm_id(&ep->com);
2427 	ep->com.qp = qp;
2428 	ref_qp(ep);
2429 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2430 
2431 	/* bind QP to EP and move to RTS */
2432 	attrs.mpa_attr = ep->mpa_attr;
2433 	attrs.max_ird = ep->ird;
2434 	attrs.max_ord = ep->ord;
2435 	attrs.llp_stream_handle = ep;
2436 	attrs.next_state = C4IW_QP_STATE_RTS;
2437 
2438 	/* bind QP and TID with INIT_WR */
2439 	mask = C4IW_QP_ATTR_NEXT_STATE |
2440 		C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2441 		C4IW_QP_ATTR_MPA_ATTR |
2442 		C4IW_QP_ATTR_MAX_IRD |
2443 		C4IW_QP_ATTR_MAX_ORD;
2444 
2445 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2446 	if (err) {
2447 		CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err);
		goto err_deref_cm_id;
2449 	}
2450 
2451 	err = send_mpa_reply(ep, conn_param->private_data,
2452 			conn_param->private_data_len);
2453 	if (err) {
2454 		CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err);
		goto err_deref_cm_id;
2456 	}
2457 
2458 	ep->com.state = FPDU_MODE;
2459 	established_upcall(ep);
2460 	mutex_unlock(&ep->com.mutex);
2461 	c4iw_put_ep(&ep->com);
2462 	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2463 	return 0;
err_deref_cm_id:
2465 	deref_cm_id(&ep->com);
2466 err_abort:
2467 	abort = 1;
2468 err_out:
2469 	if (abort)
2470 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2471 	mutex_unlock(&ep->com.mutex);
2472 	c4iw_put_ep(&ep->com);
2473 	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2474 	return err;
2475 }
2476 
2477 static int
2478 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
2479 {
2480 	int ret;
2481 	int size;
2482 	struct socket *sock = NULL;
2483 
2484 	ret = sock_create_kern(laddr->ss_family,
2485 			SOCK_STREAM, IPPROTO_TCP, &sock);
2486 	if (ret) {
2487 		CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d",
2488 				__func__, ret);
2489 		return ret;
2490 	}
2491 
2492 	ret = sobind(sock, (struct sockaddr *)laddr, curthread);
2493 	if (ret) {
		CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %d",
				__func__, ret);
2496 		sock_release(sock);
2497 		return ret;
2498 	}
2499 
2500 	size = laddr->ss_family == AF_INET6 ?
2501 		sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in);
2502 	ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0);
2503 	if (ret) {
		CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %d",
				__func__, ret);
2506 		sock_release(sock);
2507 		return ret;
2508 	}
2509 
2510 	*so = sock;
2511 	return 0;
2512 }
2513 
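/*
 * iwcm->connect.  Active open: resolve the egress ifnet, verify that it is
 * TOE-capable, create and bind a TCP socket, and kick off a nonblocking
 * soconnect(); MPA negotiation is driven later from the socket upcalls.
 */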
2514 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2515 {
2516 	int err = 0;
2517 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2518 	struct c4iw_ep *ep = NULL;
2519 	struct ifnet    *nh_ifp;        /* Logical egress interface */
2520 
2521 	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2522 
2523 
2524 	if ((conn_param->ord > c4iw_max_read_depth) ||
2525 		(conn_param->ird > c4iw_max_read_depth)) {
2526 
2527 		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2528 		err = -EINVAL;
2529 		goto out;
2530 	}
2531 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2532 
2533 	init_timer(&ep->timer);
2534 	ep->plen = conn_param->private_data_len;
2535 
2536 	if (ep->plen) {
2537 
2538 		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2539 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2540 				conn_param->private_data, ep->plen);
2541 	}
2542 	ep->ird = conn_param->ird;
2543 	ep->ord = conn_param->ord;
2544 
2545 	if (peer2peer && ep->ord == 0) {
2546 
2547 		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2548 		ep->ord = 1;
2549 	}
2550 
2551 	ep->com.dev = dev;
2552 	ep->com.cm_id = cm_id;
2553 	ref_cm_id(&ep->com);
2554 	ep->com.qp = get_qhp(dev, conn_param->qpn);
2555 
2556 	if (!ep->com.qp) {
2557 
2558 		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2559 		err = -EINVAL;
2560 		goto fail;
2561 	}
2562 	ref_qp(ep);
2563 	ep->com.thread = curthread;
2564 
2565 	err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp);
2566 	if (err) {
2567 
2568 		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2569 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
		err = -EHOSTUNREACH;
		goto fail;
2572 	}
2573 
2574 	if (!(nh_ifp->if_capenable & IFCAP_TOE) ||
2575 	    TOEDEV(nh_ifp) == NULL) {
2576 		err = -ENOPROTOOPT;
2577 		goto fail;
2578 	}
2579 	ep->com.state = CONNECTING;
2580 	ep->tos = 0;
2581 	ep->com.local_addr = cm_id->local_addr;
2582 	ep->com.remote_addr = cm_id->remote_addr;
2583 
2584 	err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
2585 	if (err)
2586 		goto fail;
2587 
2588 	setiwsockopt(ep->com.so);
2589 	err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2590 		ep->com.thread);
	if (err)
		goto fail_free_so;
	init_iwarp_socket(ep->com.so, &ep->com);
	goto out;
2596 
2597 fail_free_so:
2598 	sock_release(ep->com.so);
2599 fail:
2600 	deref_cm_id(&ep->com);
2601 	c4iw_put_ep(&ep->com);
2602 	ep = NULL;
2603 out:
2604 	CTR2(KTR_IW_CXGBE, "%s:ccE ret:%d", __func__, err);
2605 	return err;
2606 }
2607 
2608 /*
2609  * iwcm->create_listen.  Returns -errno on failure.
2610  */
2611 int
2612 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2613 {
2614 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2615 	struct c4iw_listen_ep *lep = NULL;
2616 	struct listen_port_info *port_info = NULL;
2617 	int rc = 0;
2618 
	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id,
			backlog);
2621 	lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
2622 	lep->com.cm_id = cm_id;
2623 	ref_cm_id(&lep->com);
2624 	lep->com.dev = dev;
2625 	lep->backlog = backlog;
2626 	lep->com.local_addr = cm_id->local_addr;
2627 	lep->com.thread = curthread;
2628 	cm_id->provider_data = lep;
2629 	lep->com.state = LISTEN;
2630 
	/* In case of INADDR_ANY, ibcore creates a cmid for each device and
	 * invokes iw_cxgbe listener callbacks assuming that iw_cxgbe creates
	 * HW listeners for each device separately. But toecore expects a
	 * single solisten() call with the INADDR_ANY address to create HW
	 * listeners on all devices for a given port number. So the iw_cxgbe
	 * driver calls solisten() only once for INADDR_ANY (usually at the
	 * first listener callback from ibcore). All subsequent INADDR_ANY
	 * listener callbacks from ibcore (for the same port address) skip
	 * solisten(), as the first listener callback has already created
	 * listeners for all other devices (via solisten).
	 */
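	/*
	 * The resulting flow (teardown is in c4iw_destroy_listen() below):
	 *
	 *	1st INADDR_ANY listen -> add_ep_to_listenlist(), refcnt == 1,
	 *				 solisten() creates the HW listeners.
	 *	Nth INADDR_ANY listen -> refcnt > 1, reuse the master lep's
	 *				 socket; solisten() is skipped.
	 *	destroy_listen	      -> rem_ep_from_listenlist(); the socket
	 *				 is closed only by the last listener.
	 */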
2642 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2643 		port_info = add_ep_to_listenlist(lep);
		/* skip solisten() if refcnt > 1, as the listeners were
		 * already created by the 'Master lep'
		 */
		if (port_info->refcnt > 1) {
			/* As there will be only one listener socket for a TCP
			 * port, copy the Master lep's socket pointer to the
			 * other leps belonging to the same TCP port.
			 */
2652 			struct c4iw_listen_ep *head_lep =
2653 					container_of(port_info->lep_list.next,
2654 					struct c4iw_listen_ep, listen_ep_list);
			lep->com.so = head_lep->com.so;
2656 			goto out;
2657 		}
2658 	}
2659 	rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so);
2660 	if (rc) {
2661 		CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d",
2662 				__func__, rc);
2663 		goto fail;
2664 	}
2665 
2666 	rc = solisten(lep->com.so, backlog, curthread);
2667 	if (rc) {
2668 		CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d",
2669 				__func__, lep->com.so, rc);
2670 		goto fail_free_so;
2671 	}
2672 	init_iwarp_socket(lep->com.so, &lep->com);
2673 out:
2674 	return 0;
2675 
2676 fail_free_so:
2677 	sock_release(lep->com.so);
2678 fail:
2679 	if (port_info)
2680 		rem_ep_from_listenlist(lep);
2681 	deref_cm_id(&lep->com);
2682 	c4iw_put_ep(&lep->com);
2683 	return rc;
2684 }
2685 
2686 int
2687 c4iw_destroy_listen(struct iw_cm_id *cm_id)
2688 {
2689 	struct c4iw_listen_ep *lep = to_listen_ep(cm_id);
2690 
2691 	mutex_lock(&lep->com.mutex);
2692 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id,
2693 	    states[lep->com.state]);
2694 
2695 	lep->com.state = DEAD;
2696 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2697 		/* if no refcount then close listen socket */
2698 		if (!rem_ep_from_listenlist(lep))
2699 			close_socket(lep->com.so);
2700 	} else
2701 		close_socket(lep->com.so);
2702 	deref_cm_id(&lep->com);
2703 	mutex_unlock(&lep->com.mutex);
2704 	c4iw_put_ep(&lep->com);
2705 	return 0;
2706 }
2707 
2708 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2709 {
2710 	int ret;
2711 	mutex_lock(&ep->com.mutex);
2712 	ret = c4iw_ep_disconnect(ep, abrupt, gfp);
2713 	mutex_unlock(&ep->com.mutex);
2714 	return ret;
2715 }
2716 
2717 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2718 {
2719 	int ret = 0;
2720 	int close = 0;
2721 	int fatal = 0;
2722 	struct c4iw_rdev *rdev;
2723 
2724 
2725 	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2726 
2727 	rdev = &ep->com.dev->rdev;
2728 
2729 	if (c4iw_fatal_error(rdev)) {
2730 
2731 		CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
2732 		fatal = 1;
2733 		close_complete_upcall(ep, -ECONNRESET);
2734 		send_abort(ep);
2735 		ep->com.state = DEAD;
2736 	}
2737 	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2738 	    states[ep->com.state]);
2739 
2740 	/*
2741 	 * Ref the ep here in case we have fatal errors causing the
2742 	 * ep to be released and freed.
2743 	 */
2744 	c4iw_get_ep(&ep->com);
2745 	switch (ep->com.state) {
2746 
2747 		case MPA_REQ_WAIT:
2748 		case MPA_REQ_SENT:
2749 		case MPA_REQ_RCVD:
2750 		case MPA_REP_SENT:
2751 		case FPDU_MODE:
2752 			close = 1;
2753 			if (abrupt)
2754 				ep->com.state = ABORTING;
2755 			else {
2756 				ep->com.state = CLOSING;
2757 				START_EP_TIMER(ep);
2758 			}
2759 			set_bit(CLOSE_SENT, &ep->com.flags);
2760 			break;
2761 
2762 		case CLOSING:
2763 
2764 			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2765 
2766 				close = 1;
2767 				if (abrupt) {
2768 					STOP_EP_TIMER(ep);
2769 					ep->com.state = ABORTING;
2770 				} else
2771 					ep->com.state = MORIBUND;
2772 			}
2773 			break;
2774 
2775 		case MORIBUND:
2776 		case ABORTING:
2777 		case DEAD:
2778 			CTR3(KTR_IW_CXGBE,
2779 			    "%s ignoring disconnect ep %p state %u", __func__,
2780 			    ep, ep->com.state);
2781 			break;
2782 
2783 		default:
2784 			BUG();
2785 			break;
2786 	}
2787 
2788 
2789 	if (close) {
2790 
2791 		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2792 
2793 		if (abrupt) {
2794 
2795 			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2796 			set_bit(EP_DISC_ABORT, &ep->com.history);
2797 			close_complete_upcall(ep, -ECONNRESET);
2798 			ret = send_abort(ep);
2799 			if (ret)
2800 				fatal = 1;
2801 		} else {
2802 
2803 			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2804 			set_bit(EP_DISC_CLOSE, &ep->com.history);
2805 
2806 			if (!ep->parent_ep)
2807 				ep->com.state = MORIBUND;
2808 			sodisconnect(ep->com.so);
2809 		}
2810 
2811 	}
2812 
2813 	if (fatal) {
2814 		set_bit(EP_DISC_FAIL, &ep->com.history);
2815 		if (!abrupt) {
2816 			STOP_EP_TIMER(ep);
2817 			close_complete_upcall(ep, -EIO);
2818 		}
2819 		if (ep->com.qp) {
2820 			struct c4iw_qp_attributes attrs = {0};
2821 
2822 			attrs.next_state = C4IW_QP_STATE_ERROR;
2823 			ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
2824 						C4IW_QP_ATTR_NEXT_STATE,
2825 						&attrs, 1);
2826 			if (ret) {
2827 				CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep);
2828 				printf("%s - qp <- error failed!\n", __func__);
2829 			}
2830 		}
2831 		release_ep_resources(ep);
2832 		ep->com.state = DEAD;
2833 		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2834 	}
2835 	c4iw_put_ep(&ep->com);
2836 	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2837 	return ret;
2838 }
2839 
2840 #ifdef C4IW_EP_REDIRECT
2841 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2842 		struct l2t_entry *l2t)
2843 {
2844 	struct c4iw_ep *ep = ctx;
2845 
2846 	if (ep->dst != old)
2847 		return 0;
2848 
2849 	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2850 			l2t);
2851 	dst_hold(new);
2852 	cxgb4_l2t_release(ep->l2t);
2853 	ep->l2t = l2t;
2854 	dst_release(old);
2855 	ep->dst = new;
2856 	return 1;
2857 }
2858 #endif
2861 
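/*
 * Timer callback.  Callout context is not suitable for the real work, so
 * the ep is queued on req_list and process_timeout() runs later from the
 * c4iw_taskq worker via process_req().
 */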
2862 static void ep_timeout(unsigned long arg)
2863 {
2864 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2865 
2866 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2867 
2868 		/*
2869 		 * Only insert if it is not already on the list.
2870 		 */
2871 		if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
2872 			CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2873 			add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
2874 		}
2875 	}
2876 }
2877 
2878 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2879 {
2880 	uint64_t val = be64toh(*rpl);
2881 	int ret;
2882 	struct c4iw_wr_wait *wr_waitp;
2883 
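	/*
	 * Byte 1 (the second-lowest byte) of the first reply word carries
	 * the FW6 completion status; the second word echoes back the
	 * c4iw_wr_wait cookie that was posted with the work request.
	 */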
2884 	ret = (int)((val >> 8) & 0xff);
2885 	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2886 	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2887 	if (wr_waitp)
2888 		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2889 
2890 	return (0);
2891 }
2892 
2893 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2894 {
2895 	struct cqe_list_entry *cle;
2896 	unsigned long flag;
2897 
	cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
	if (cle == NULL)
		return (0);	/* XXX: error CQE is lost on allocation failure */
	cle->rhp = sc->iwarp_softc;
	cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
2901 
2902 	spin_lock_irqsave(&err_cqe_lock, flag);
2903 	list_add_tail(&cle->entry, &err_cqe_list);
2904 	queue_work(c4iw_taskq, &c4iw_task);
2905 	spin_unlock_irqrestore(&err_cqe_lock, flag);
2906 
2907 	return (0);
2908 }
2909 
2910 static int
2911 process_terminate(struct c4iw_ep *ep)
2912 {
2913 	struct c4iw_qp_attributes attrs = {0};
2914 
	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);
2916 
2917 	if (ep && ep->com.qp) {
2918 
2919 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
2920 				ep->hwtid, ep->com.qp->wq.sq.qid);
2921 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2922 		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
2923 				1);
	} else
		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
						ep ? ep->hwtid : 0);
	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
2928 
2929 	return 0;
2930 }
2931 
2932 int __init c4iw_cm_init(void)
2933 {
2934 
2935 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
2936 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
2937 	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
2938 	t4_register_an_handler(c4iw_ev_handler);
2939 
2940 	TAILQ_INIT(&req_list);
2941 	spin_lock_init(&req_lock);
2942 	INIT_LIST_HEAD(&err_cqe_list);
2943 	spin_lock_init(&err_cqe_lock);
2944 
2945 	INIT_WORK(&c4iw_task, process_req);
2946 
2947 	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
2948 	if (!c4iw_taskq)
2949 		return -ENOMEM;
2950 
2951 	return 0;
2952 }
2953 
2954 void __exit c4iw_cm_term(void)
2955 {
2956 	WARN_ON(!TAILQ_EMPTY(&req_list));
2957 	WARN_ON(!list_empty(&err_cqe_list));
2958 	flush_workqueue(c4iw_taskq);
2959 	destroy_workqueue(c4iw_taskq);
2960 
2961 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
2962 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
2963 	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
2964 	t4_register_an_handler(NULL);
2965 }
2966 #endif
2967