1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *	  copyright notice, this list of conditions and the following
18  *	  disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *	  copyright notice, this list of conditions and the following
22  *	  disclaimer in the documentation and/or other materials
23  *	  provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include "opt_inet.h"
38 
39 #ifdef TCP_OFFLOAD
40 #include <sys/types.h>
41 #include <sys/malloc.h>
42 #include <sys/socket.h>
43 #include <sys/socketvar.h>
44 #include <sys/sockio.h>
45 #include <sys/taskqueue.h>
46 #include <netinet/in.h>
47 #include <net/route.h>
48 
49 #include <netinet/in_systm.h>
50 #include <netinet/in_pcb.h>
51 #include <netinet6/in6_pcb.h>
52 #include <netinet/ip.h>
53 #include <netinet/in_fib.h>
54 #include <netinet6/in6_fib.h>
55 #include <netinet6/scope6_var.h>
56 #include <netinet/ip_var.h>
57 #include <netinet/tcp_var.h>
58 #include <netinet/tcp.h>
59 #include <netinet/tcpip.h>
60 
61 #include <netinet/toecore.h>
62 
63 struct sge_iq;
64 struct rss_header;
65 struct cpl_set_tcb_rpl;
66 #include <linux/types.h>
67 #include "offload.h"
68 #include "tom/t4_tom.h"
69 
70 #define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))
71 
72 #include "iw_cxgbe.h"
73 #include <linux/module.h>
74 #include <linux/workqueue.h>
75 #include <linux/notifier.h>
76 #include <linux/inetdevice.h>
77 #include <linux/if_vlan.h>
78 #include <net/netevent.h>
79 
80 static spinlock_t req_lock;
81 static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
82 static struct work_struct c4iw_task;
83 static struct workqueue_struct *c4iw_taskq;
84 static LIST_HEAD(err_cqe_list);
85 static spinlock_t err_cqe_lock;
86 static LIST_HEAD(listen_port_list);
87 static DEFINE_MUTEX(listen_port_mutex);
88 
89 static void process_req(struct work_struct *ctx);
90 static void start_ep_timer(struct c4iw_ep *ep);
91 static int stop_ep_timer(struct c4iw_ep *ep);
92 static int set_tcpinfo(struct c4iw_ep *ep);
93 static void process_timeout(struct c4iw_ep *ep);
94 static void process_err_cqes(void);
95 static void *alloc_ep(int size, gfp_t flags);
96 static void close_socket(struct socket *so);
97 static int send_mpa_req(struct c4iw_ep *ep);
98 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
99 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
100 static void close_complete_upcall(struct c4iw_ep *ep, int status);
101 static int send_abort(struct c4iw_ep *ep);
102 static void peer_close_upcall(struct c4iw_ep *ep);
103 static void peer_abort_upcall(struct c4iw_ep *ep);
104 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
105 static int connect_request_upcall(struct c4iw_ep *ep);
106 static void established_upcall(struct c4iw_ep *ep);
107 static int process_mpa_reply(struct c4iw_ep *ep);
108 static int process_mpa_request(struct c4iw_ep *ep);
109 static void process_peer_close(struct c4iw_ep *ep);
110 static void process_conn_error(struct c4iw_ep *ep);
111 static void process_close_complete(struct c4iw_ep *ep);
112 static void ep_timeout(unsigned long arg);
113 static void setiwsockopt(struct socket *so);
114 static void init_iwarp_socket(struct socket *so, void *arg);
115 static void uninit_iwarp_socket(struct socket *so);
116 static void process_data(struct c4iw_ep *ep);
117 static void process_connected(struct c4iw_ep *ep);
118 static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
119 static void process_socket_event(struct c4iw_ep *ep);
120 static void release_ep_resources(struct c4iw_ep *ep);
121 static int process_terminate(struct c4iw_ep *ep);
122 static int terminate(struct sge_iq *iq, const struct rss_header *rss,
123     struct mbuf *m);
124 static int add_ep_to_req_list(struct c4iw_ep *ep, int ep_events);
125 static struct listen_port_info *
126 add_ep_to_listenlist(struct c4iw_listen_ep *lep);
127 static int rem_ep_from_listenlist(struct c4iw_listen_ep *lep);
128 static struct c4iw_listen_ep *
129 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so);
130 static int get_ifnet_from_raddr(struct sockaddr_storage *raddr,
131 		struct ifnet **ifp);
132 static void process_newconn(struct c4iw_listen_ep *master_lep,
133 		struct socket *new_so);
134 #define START_EP_TIMER(ep) \
135     do { \
136 	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
137 		__func__, __LINE__, (ep)); \
138 	    start_ep_timer(ep); \
139     } while (0)
140 
141 #define STOP_EP_TIMER(ep) \
142     ({ \
143 	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
144 		__func__, __LINE__, (ep)); \
145 	    stop_ep_timer(ep); \
146     })
147 
148 #define GET_LOCAL_ADDR(pladdr, so) \
149 	do { \
150 		struct sockaddr_storage *__a = NULL; \
 151 		struct inpcb *__inp = sotoinpcb(so); \
152 		KASSERT(__inp != NULL, \
153 		   ("GET_LOCAL_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
154 		if (__inp->inp_vflag & INP_IPV4) \
155 			in_getsockaddr(so, (struct sockaddr **)&__a); \
156 		else \
157 			in6_getsockaddr(so, (struct sockaddr **)&__a); \
158 		*(pladdr) = *__a; \
159 		free(__a, M_SONAME); \
160 	} while (0)
161 
162 #define GET_REMOTE_ADDR(praddr, so) \
163 	do { \
164 		struct sockaddr_storage *__a = NULL; \
 165 		struct inpcb *__inp = sotoinpcb(so); \
166 		KASSERT(__inp != NULL, \
167 		   ("GET_REMOTE_ADDR(%s):so:%p, inp = NULL", __func__, so)); \
168 		if (__inp->inp_vflag & INP_IPV4) \
169 			in_getpeeraddr(so, (struct sockaddr **)&__a); \
170 		else \
171 			in6_getpeeraddr(so, (struct sockaddr **)&__a); \
172 		*(praddr) = *__a; \
173 		free(__a, M_SONAME); \
174 	} while (0)
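/*
 * Usage sketch (illustrative only, mirroring the calls in process_newconn()
 * below): both macros copy the socket's address into caller-provided storage
 * and free the sockaddr that the kernel allocates internally (M_SONAME).
 *
 *	struct sockaddr_storage local, remote;
 *
 *	GET_LOCAL_ADDR(&local, so);
 *	GET_REMOTE_ADDR(&remote, so);
 */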
175 
176 #ifdef KTR
177 static char *states[] = {
178 	"idle",
179 	"listen",
180 	"connecting",
181 	"mpa_wait_req",
182 	"mpa_req_sent",
183 	"mpa_req_rcvd",
184 	"mpa_rep_sent",
185 	"fpdu_mode",
186 	"aborting",
187 	"closing",
188 	"moribund",
189 	"dead",
190 	NULL,
191 };
192 #endif
193 
194 static void deref_cm_id(struct c4iw_ep_common *epc)
195 {
196       epc->cm_id->rem_ref(epc->cm_id);
197       epc->cm_id = NULL;
198       set_bit(CM_ID_DEREFED, &epc->history);
199 }
200 
201 static void ref_cm_id(struct c4iw_ep_common *epc)
202 {
203       set_bit(CM_ID_REFED, &epc->history);
204       epc->cm_id->add_ref(epc->cm_id);
205 }
206 
207 static void deref_qp(struct c4iw_ep *ep)
208 {
209 	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
210 	clear_bit(QP_REFERENCED, &ep->com.flags);
211 	set_bit(QP_DEREFED, &ep->com.history);
212 }
213 
214 static void ref_qp(struct c4iw_ep *ep)
215 {
216 	set_bit(QP_REFERENCED, &ep->com.flags);
217 	set_bit(QP_REFED, &ep->com.history);
218 	c4iw_qp_add_ref(&ep->com.qp->ibqp);
219 }
220 /* allocated per TCP port while listening */
221 struct listen_port_info {
 222 	uint16_t port_num; /* TCP port number */
223 	struct list_head list; /* belongs to listen_port_list */
224 	struct list_head lep_list; /* per port lep list */
225 	uint32_t refcnt; /* number of lep's listening */
226 };
227 
228 /*
 229  * The following two lists are used to manage INADDR_ANY listeners:
 230  * 1) listen_port_list
 231  * 2) lep_list
232  *
 233  * Below is an overview of the INADDR_ANY listener lists on a system with a
 234  * two-port adapter:
235  *   |------------------|
236  *   |listen_port_list  |
237  *   |------------------|
238  *            |
239  *            |              |-----------|       |-----------|
240  *            |              | port_num:X|       | port_num:X|
241  *            |--------------|-list------|-------|-list------|-------....
242  *                           | lep_list----|     | lep_list----|
243  *                           | refcnt    | |     | refcnt    | |
244  *                           |           | |     |           | |
245  *                           |           | |     |           | |
246  *                           |-----------| |     |-----------| |
247  *                                         |                   |
248  *                                         |                   |
249  *                                         |                   |
250  *                                         |                   |         lep1                  lep2
251  *                                         |                   |    |----------------|    |----------------|
252  *                                         |                   |----| listen_ep_list |----| listen_ep_list |
253  *                                         |                        |----------------|    |----------------|
254  *                                         |
255  *                                         |
256  *                                         |        lep1                  lep2
257  *                                         |   |----------------|    |----------------|
258  *                                         |---| listen_ep_list |----| listen_ep_list |
259  *                                             |----------------|    |----------------|
260  *
 261  * Because this is a two-port adapter, there are two leps (lep1 & lep2) for
 262  * each TCP port number.
 263  *
 264  * Here 'lep1' is always marked as the master lep, because solisten() is
 265  * always called through the first lep.
266  *
267  */
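/*
 * Illustrative sketch only: the expected refcnt lifecycle for a wildcard
 * listen on one TCP port of a two-port adapter, using the helpers defined
 * below ('lep1' and 'lep2' are hypothetical endpoints).
 *
 *	struct listen_port_info *pi;
 *
 *	pi = add_ep_to_listenlist(lep1);	// creates port_info; refcnt = 1
 *	pi = add_ep_to_listenlist(lep2);	// reuses port_info; refcnt = 2
 *
 *	// rem_ep_from_listenlist() returns the remaining refcnt; the
 *	// port_info itself is freed once the count drops to 0.
 *	(void)rem_ep_from_listenlist(lep2);	// refcnt 2 -> 1
 *	(void)rem_ep_from_listenlist(lep1);	// refcnt 1 -> 0, port_info freed
 */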
268 static struct listen_port_info *
269 add_ep_to_listenlist(struct c4iw_listen_ep *lep)
270 {
271 	uint16_t port;
272 	struct listen_port_info *port_info = NULL;
273 	struct sockaddr_storage *laddr = &lep->com.local_addr;
274 
275 	port = (laddr->ss_family == AF_INET) ?
276 		((struct sockaddr_in *)laddr)->sin_port :
277 		((struct sockaddr_in6 *)laddr)->sin6_port;
278 
279 	mutex_lock(&listen_port_mutex);
280 
281 	list_for_each_entry(port_info, &listen_port_list, list)
282 		if (port_info->port_num == port)
283 			goto found_port;
284 
285 	port_info = malloc(sizeof(*port_info), M_CXGBE, M_WAITOK);
286 	port_info->port_num = port;
287 	port_info->refcnt    = 0;
288 
289 	list_add_tail(&port_info->list, &listen_port_list);
290 	INIT_LIST_HEAD(&port_info->lep_list);
291 
292 found_port:
293 	port_info->refcnt++;
294 	list_add_tail(&lep->listen_ep_list, &port_info->lep_list);
295 	mutex_unlock(&listen_port_mutex);
296 	return port_info;
297 }
298 
299 static int
300 rem_ep_from_listenlist(struct c4iw_listen_ep *lep)
301 {
302 	uint16_t port;
303 	struct listen_port_info *port_info = NULL;
304 	struct sockaddr_storage *laddr = &lep->com.local_addr;
305 	int refcnt = 0;
306 
307 	port = (laddr->ss_family == AF_INET) ?
308 		((struct sockaddr_in *)laddr)->sin_port :
309 		((struct sockaddr_in6 *)laddr)->sin6_port;
310 
311 	mutex_lock(&listen_port_mutex);
312 
313 	/* get the port_info structure based on the lep's port address */
314 	list_for_each_entry(port_info, &listen_port_list, list) {
315 		if (port_info->port_num == port) {
316 			port_info->refcnt--;
317 			refcnt = port_info->refcnt;
318 			/* remove the current lep from the listen list */
319 			list_del(&lep->listen_ep_list);
320 			if (port_info->refcnt == 0) {
321 				/* Remove this entry from the list as there
322 				 * are no more listeners for this port_num.
323 				 */
324 				list_del(&port_info->list);
325 				kfree(port_info);
326 			}
327 			break;
328 		}
329 	}
330 	mutex_unlock(&listen_port_mutex);
331 	return refcnt;
332 }
333 
334 /*
335  * Find the lep that belongs to the ifnet on which the SYN frame was received.
336  */
337 struct c4iw_listen_ep *
338 find_real_listen_ep(struct c4iw_listen_ep *master_lep, struct socket *so)
339 {
340 	struct adapter *adap = NULL;
341 	struct c4iw_listen_ep *lep = NULL;
342 	struct sockaddr_storage remote = { 0 };
343 	struct ifnet *new_conn_ifp = NULL;
344 	struct listen_port_info *port_info = NULL;
345 	int err = 0, i = 0,
346 	    found_portinfo = 0, found_lep = 0;
347 	uint16_t port;
348 
349 	/* STEP 1: get 'ifnet' based on socket's remote address */
350 	GET_REMOTE_ADDR(&remote, so);
351 
352 	err = get_ifnet_from_raddr(&remote, &new_conn_ifp);
353 	if (err) {
354 		CTR4(KTR_IW_CXGBE, "%s: Failed to get ifnet, sock %p, "
355 				"master_lep %p err %d",
356 				__func__, so, master_lep, err);
357 		return (NULL);
358 	}
359 
 360 	/* STEP 2: Find 'port_info' with the listener's local port address. */
361 	port = (master_lep->com.local_addr.ss_family == AF_INET) ?
362 		((struct sockaddr_in *)&master_lep->com.local_addr)->sin_port :
363 		((struct sockaddr_in6 *)&master_lep->com.local_addr)->sin6_port;
364 
365 
366 	mutex_lock(&listen_port_mutex);
367 	list_for_each_entry(port_info, &listen_port_list, list)
368 		if (port_info->port_num == port) {
 369 			found_portinfo = 1;
370 			break;
371 		}
372 	if (!found_portinfo)
373 		goto out;
374 
 375 	/* STEP 3: Traverse the list of leps bound to the current TCP port and
 376 	 * find the lep that belongs to the ifnet on which the SYN frame was
 377 	 * received.
378 	 */
379 	list_for_each_entry(lep, &port_info->lep_list, listen_ep_list) {
380 		adap = lep->com.dev->rdev.adap;
381 		for_each_port(adap, i) {
382 			if (new_conn_ifp == adap->port[i]->vi[0].ifp) {
 383 				found_lep = 1;
384 				goto out;
385 			}
386 		}
387 	}
388 out:
389 	mutex_unlock(&listen_port_mutex);
390 	return found_lep ? lep : (NULL);
391 }
392 
393 static void process_timeout(struct c4iw_ep *ep)
394 {
395 	struct c4iw_qp_attributes attrs = {0};
396 	int abort = 1;
397 
398 	CTR4(KTR_IW_CXGBE, "%s ep :%p, tid:%u, state %d", __func__,
399 			ep, ep->hwtid, ep->com.state);
400 	set_bit(TIMEDOUT, &ep->com.history);
401 	switch (ep->com.state) {
402 	case MPA_REQ_SENT:
403 		connect_reply_upcall(ep, -ETIMEDOUT);
404 		break;
405 	case MPA_REQ_WAIT:
406 	case MPA_REQ_RCVD:
407 	case MPA_REP_SENT:
408 	case FPDU_MODE:
409 		break;
410 	case CLOSING:
411 	case MORIBUND:
412 		if (ep->com.cm_id && ep->com.qp) {
413 			attrs.next_state = C4IW_QP_STATE_ERROR;
414 			c4iw_modify_qp(ep->com.dev, ep->com.qp,
415 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
416 		}
417 		close_complete_upcall(ep, -ETIMEDOUT);
418 		break;
419 	case ABORTING:
420 	case DEAD:
421 		/*
422 		 * These states are expected if the ep timed out at the same
423 		 * time as another thread was calling stop_ep_timer().
424 		 * So we silently do nothing for these states.
425 		 */
426 		abort = 0;
427 		break;
428 	default:
 429 		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u\n",
 430 				__func__, ep, ep->hwtid, ep->com.state);
431 		abort = 0;
432 	}
433 	if (abort)
434 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
435 	c4iw_put_ep(&ep->com);
436 	return;
437 }
438 
439 struct cqe_list_entry {
440 	struct list_head entry;
441 	struct c4iw_dev *rhp;
442 	struct t4_cqe err_cqe;
443 };
444 
445 static void
446 process_err_cqes(void)
447 {
448 	unsigned long flag;
449 	struct cqe_list_entry *cle;
450 
451 	spin_lock_irqsave(&err_cqe_lock, flag);
452 	while (!list_empty(&err_cqe_list)) {
453 		struct list_head *tmp;
454 		tmp = err_cqe_list.next;
455 		list_del(tmp);
456 		tmp->next = tmp->prev = NULL;
457 		spin_unlock_irqrestore(&err_cqe_lock, flag);
458 		cle = list_entry(tmp, struct cqe_list_entry, entry);
459 		c4iw_ev_dispatch(cle->rhp, &cle->err_cqe);
460 		free(cle, M_CXGBE);
461 		spin_lock_irqsave(&err_cqe_lock, flag);
462 	}
463 	spin_unlock_irqrestore(&err_cqe_lock, flag);
464 
465 	return;
466 }
467 
468 static void
469 process_req(struct work_struct *ctx)
470 {
471 	struct c4iw_ep_common *epc;
472 	unsigned long flag;
473 	int ep_events;
474 
475 	process_err_cqes();
476 	spin_lock_irqsave(&req_lock, flag);
477 	while (!TAILQ_EMPTY(&req_list)) {
478 		epc = TAILQ_FIRST(&req_list);
479 		TAILQ_REMOVE(&req_list, epc, entry);
480 		epc->entry.tqe_prev = NULL;
481 		ep_events = epc->ep_events;
482 		epc->ep_events = 0;
483 		spin_unlock_irqrestore(&req_lock, flag);
484 		mutex_lock(&epc->mutex);
485 		CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, ep_state %s events 0x%x",
486 		    __func__, epc->so, epc, states[epc->state], ep_events);
487 		if (ep_events & C4IW_EVENT_TERM)
488 			process_terminate((struct c4iw_ep *)epc);
489 		if (ep_events & C4IW_EVENT_TIMEOUT)
490 			process_timeout((struct c4iw_ep *)epc);
491 		if (ep_events & C4IW_EVENT_SOCKET)
492 			process_socket_event((struct c4iw_ep *)epc);
493 		mutex_unlock(&epc->mutex);
494 		c4iw_put_ep(epc);
495 		process_err_cqes();
496 		spin_lock_irqsave(&req_lock, flag);
497 	}
498 	spin_unlock_irqrestore(&req_lock, flag);
499 }
500 
501 /*
502  * XXX: doesn't belong here in the iWARP driver.
503  * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
504  *      set.  Is this a valid assumption for active open?
505  */
506 static int
507 set_tcpinfo(struct c4iw_ep *ep)
508 {
509 	struct socket *so = ep->com.so;
510 	struct inpcb *inp = sotoinpcb(so);
511 	struct tcpcb *tp;
512 	struct toepcb *toep;
513 	int rc = 0;
514 
515 	INP_WLOCK(inp);
516 	tp = intotcpcb(inp);
517 	if ((tp->t_flags & TF_TOE) == 0) {
518 		rc = EINVAL;
519 		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
520 		    __func__, so, ep);
521 		goto done;
522 	}
523 	toep = TOEPCB(so);
524 
525 	ep->hwtid = toep->tid;
526 	ep->snd_seq = tp->snd_nxt;
527 	ep->rcv_seq = tp->rcv_nxt;
528 	ep->emss = max(tp->t_maxseg, 128);
529 done:
530 	INP_WUNLOCK(inp);
531 	return (rc);
 532 }
 533 
534 static int
535 get_ifnet_from_raddr(struct sockaddr_storage *raddr, struct ifnet **ifp)
536 {
537 	int err = 0;
538 
539 	if (raddr->ss_family == AF_INET) {
540 		struct sockaddr_in *raddr4 = (struct sockaddr_in *)raddr;
541 		struct nhop4_extended nh4 = {0};
542 
543 		err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, raddr4->sin_addr,
544 				NHR_REF, 0, &nh4);
545 		*ifp = nh4.nh_ifp;
546 		if (err)
547 			fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);
548 	} else {
549 		struct sockaddr_in6 *raddr6 = (struct sockaddr_in6 *)raddr;
550 		struct nhop6_extended nh6 = {0};
551 		struct in6_addr addr6;
552 		uint32_t scopeid;
553 
554 		memset(&addr6, 0, sizeof(addr6));
555 		in6_splitscope((struct in6_addr *)&raddr6->sin6_addr,
556 					&addr6, &scopeid);
557 		err = fib6_lookup_nh_ext(RT_DEFAULT_FIB, &addr6, scopeid,
558 				NHR_REF, 0, &nh6);
559 		*ifp = nh6.nh_ifp;
560 		if (err)
561 			fib6_free_nh_ext(RT_DEFAULT_FIB, &nh6);
562 	}
563 
564 	CTR2(KTR_IW_CXGBE, "%s: return: %d", __func__, err);
565 	return err;
566 }
567 
568 static void
569 close_socket(struct socket *so)
570 {
571 	uninit_iwarp_socket(so);
572 	soclose(so);
573 }
574 
575 static void
576 process_peer_close(struct c4iw_ep *ep)
577 {
578 	struct c4iw_qp_attributes attrs = {0};
579 	int disconnect = 1;
580 	int release = 0;
581 
582 	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
583 	    ep->com.so, states[ep->com.state]);
584 
585 	switch (ep->com.state) {
586 
587 		case MPA_REQ_WAIT:
588 			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT DEAD",
589 			    __func__, ep);
590 			/* Fallthrough */
591 		case MPA_REQ_SENT:
592 			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
593 			    __func__, ep);
594 			ep->com.state = DEAD;
595 			connect_reply_upcall(ep, -ECONNABORTED);
596 
597 			disconnect = 0;
598 			STOP_EP_TIMER(ep);
599 			close_socket(ep->com.so);
600 			deref_cm_id(&ep->com);
601 			release = 1;
602 			break;
603 
604 		case MPA_REQ_RCVD:
605 
606 			/*
607 			 * We're gonna mark this puppy DEAD, but keep
608 			 * the reference on it until the ULP accepts or
609 			 * rejects the CR.
610 			 */
611 			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
612 			    __func__, ep);
613 			ep->com.state = CLOSING;
614 			break;
615 
616 		case MPA_REP_SENT:
617 			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
618 			    __func__, ep);
619 			ep->com.state = CLOSING;
620 			break;
621 
622 		case FPDU_MODE:
623 			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
624 			    __func__, ep);
625 			START_EP_TIMER(ep);
626 			ep->com.state = CLOSING;
627 			attrs.next_state = C4IW_QP_STATE_CLOSING;
628 			c4iw_modify_qp(ep->com.dev, ep->com.qp,
629 					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
630 			peer_close_upcall(ep);
631 			break;
632 
633 		case ABORTING:
634 			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
635 			    __func__, ep);
636 			disconnect = 0;
637 			break;
638 
639 		case CLOSING:
640 			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
641 			    __func__, ep);
642 			ep->com.state = MORIBUND;
643 			disconnect = 0;
644 			break;
645 
646 		case MORIBUND:
647 			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
648 			    ep);
649 			STOP_EP_TIMER(ep);
650 			if (ep->com.cm_id && ep->com.qp) {
651 				attrs.next_state = C4IW_QP_STATE_IDLE;
652 				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
653 						C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
654 			}
655 			close_socket(ep->com.so);
656 			close_complete_upcall(ep, 0);
657 			ep->com.state = DEAD;
658 			release = 1;
659 			disconnect = 0;
660 			break;
661 
662 		case DEAD:
663 			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
664 			    __func__, ep);
665 			disconnect = 0;
666 			break;
667 
668 		default:
669 			panic("%s: ep %p state %d", __func__, ep,
670 			    ep->com.state);
671 			break;
672 	}
673 
674 
675 	if (disconnect) {
676 
677 		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
678 		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
679 	}
680 	if (release) {
681 
682 		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
683 		c4iw_put_ep(&ep->com);
684 	}
685 	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
686 	return;
687 }
688 
689 static void
690 process_conn_error(struct c4iw_ep *ep)
691 {
692 	struct c4iw_qp_attributes attrs = {0};
693 	int ret;
694 	int state;
695 
696 	state = ep->com.state;
697 	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
698 	    __func__, ep, ep->com.so, ep->com.so->so_error,
699 	    states[ep->com.state]);
700 
701 	switch (state) {
702 
703 		case MPA_REQ_WAIT:
704 			STOP_EP_TIMER(ep);
705 			c4iw_put_ep(&ep->parent_ep->com);
706 			break;
707 
708 		case MPA_REQ_SENT:
709 			STOP_EP_TIMER(ep);
710 			connect_reply_upcall(ep, -ECONNRESET);
711 			break;
712 
713 		case MPA_REP_SENT:
714 			ep->com.rpl_err = ECONNRESET;
715 			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
716 			break;
717 
718 		case MPA_REQ_RCVD:
719 			break;
720 
721 		case MORIBUND:
722 		case CLOSING:
723 			STOP_EP_TIMER(ep);
724 			/*FALLTHROUGH*/
725 		case FPDU_MODE:
726 
727 			if (ep->com.cm_id && ep->com.qp) {
728 
729 				attrs.next_state = C4IW_QP_STATE_ERROR;
730 				ret = c4iw_modify_qp(ep->com.qp->rhp,
731 					ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
732 					&attrs, 1);
733 				if (ret)
734 					log(LOG_ERR,
735 							"%s - qp <- error failed!\n",
736 							__func__);
737 			}
738 			peer_abort_upcall(ep);
739 			break;
740 
741 		case ABORTING:
742 			break;
743 
744 		case DEAD:
745 			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
746 			    __func__, ep->com.so->so_error);
747 			return;
748 
749 		default:
750 			panic("%s: ep %p state %d", __func__, ep, state);
751 			break;
752 	}
753 
754 	if (state != ABORTING) {
755 		close_socket(ep->com.so);
756 		ep->com.state = DEAD;
757 		c4iw_put_ep(&ep->com);
758 	}
759 	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
760 	return;
761 }
762 
763 static void
764 process_close_complete(struct c4iw_ep *ep)
765 {
766 	struct c4iw_qp_attributes attrs = {0};
767 	int release = 0;
768 
769 	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
770 	    ep->com.so, states[ep->com.state]);
771 
772 	/* The cm_id may be null if we failed to connect */
773 	set_bit(CLOSE_CON_RPL, &ep->com.history);
774 
775 	switch (ep->com.state) {
776 
777 		case CLOSING:
778 			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
779 			    __func__, ep);
780 			ep->com.state = MORIBUND;
781 			break;
782 
783 		case MORIBUND:
784 			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
785 			    ep);
786 			STOP_EP_TIMER(ep);
787 
788 			if ((ep->com.cm_id) && (ep->com.qp)) {
789 
790 				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
791 				    __func__, ep);
792 				attrs.next_state = C4IW_QP_STATE_IDLE;
793 				c4iw_modify_qp(ep->com.dev,
794 						ep->com.qp,
795 						C4IW_QP_ATTR_NEXT_STATE,
796 						&attrs, 1);
797 			}
798 
799 			close_socket(ep->com.so);
800 			close_complete_upcall(ep, 0);
801 			ep->com.state = DEAD;
802 			release = 1;
803 			break;
804 
805 		case ABORTING:
806 			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
807 			break;
808 
809 		case DEAD:
810 			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
811 			break;
812 		default:
813 			CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
814 					__func__, ep);
815 			panic("%s:pcc6 %p unknown ep state", __func__, ep);
816 			break;
817 	}
818 
819 	if (release) {
820 
821 		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
822 		release_ep_resources(ep);
823 	}
824 	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
825 	return;
826 }
827 
828 static void
829 setiwsockopt(struct socket *so)
830 {
831 	int rc;
832 	struct sockopt sopt;
833 	int on = 1;
834 
835 	sopt.sopt_dir = SOPT_SET;
836 	sopt.sopt_level = IPPROTO_TCP;
837 	sopt.sopt_name = TCP_NODELAY;
838 	sopt.sopt_val = (caddr_t)&on;
839 	sopt.sopt_valsize = sizeof on;
840 	sopt.sopt_td = NULL;
841 	rc = sosetopt(so, &sopt);
842 	if (rc) {
843 		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
844 		    __func__, so, rc);
845 	}
846 }
847 
848 static void
849 init_iwarp_socket(struct socket *so, void *arg)
850 {
851 	if (SOLISTENING(so)) {
852 		SOLISTEN_LOCK(so);
853 		solisten_upcall_set(so, c4iw_so_upcall, arg);
854 		so->so_state |= SS_NBIO;
855 		SOLISTEN_UNLOCK(so);
856 	} else {
857 		SOCKBUF_LOCK(&so->so_rcv);
858 		soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
859 		so->so_state |= SS_NBIO;
860 		SOCKBUF_UNLOCK(&so->so_rcv);
861 	}
862 }
863 
864 static void
865 uninit_iwarp_socket(struct socket *so)
866 {
867 	if (SOLISTENING(so)) {
868 		SOLISTEN_LOCK(so);
869 		solisten_upcall_set(so, NULL, NULL);
870 		SOLISTEN_UNLOCK(so);
871 	} else {
872 		SOCKBUF_LOCK(&so->so_rcv);
873 		soupcall_clear(so, SO_RCV);
874 		SOCKBUF_UNLOCK(&so->so_rcv);
875 	}
876 }
877 
878 static void
879 process_data(struct c4iw_ep *ep)
880 {
881 	int disconnect = 0;
882 
883 	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
884 	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));
885 
886 	switch (ep->com.state) {
887 	case MPA_REQ_SENT:
888 		disconnect = process_mpa_reply(ep);
889 		break;
890 	case MPA_REQ_WAIT:
891 		disconnect = process_mpa_request(ep);
892 		if (disconnect)
 893 			/* Referenced in process_newconn() */
894 			c4iw_put_ep(&ep->parent_ep->com);
895 		break;
896 	default:
897 		if (sbused(&ep->com.so->so_rcv))
898 			log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
899 			    "state %d, so %p, so_state 0x%x, sbused %u\n",
900 			    __func__, ep, ep->com.state, ep->com.so,
901 			    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
902 		break;
903 	}
904 	if (disconnect)
905 		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
906 
907 }
908 
909 static void
910 process_connected(struct c4iw_ep *ep)
911 {
912 	struct socket *so = ep->com.so;
913 
914 	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
915 		if (send_mpa_req(ep))
916 			goto err;
917 	} else {
918 		connect_reply_upcall(ep, -so->so_error);
919 		goto err;
920 	}
921 	return;
922 err:
923 	close_socket(so);
924 	ep->com.state = DEAD;
925 	c4iw_put_ep(&ep->com);
926 	return;
927 }
928 
929 static inline int c4iw_zero_addr(struct sockaddr *addr)
930 {
931 	struct in6_addr *ip6;
932 
933 	if (addr->sa_family == AF_INET)
934 		return IN_ZERONET(
935 			ntohl(((struct sockaddr_in *)addr)->sin_addr.s_addr));
936 	else {
937 		ip6 = &((struct sockaddr_in6 *) addr)->sin6_addr;
938 		return (ip6->s6_addr32[0] | ip6->s6_addr32[1] |
939 				ip6->s6_addr32[2] | ip6->s6_addr32[3]) == 0;
940 	}
941 }
942 
943 static inline int c4iw_loopback_addr(struct sockaddr *addr)
944 {
945 	if (addr->sa_family == AF_INET)
946 		return IN_LOOPBACK(
947 			ntohl(((struct sockaddr_in *) addr)->sin_addr.s_addr));
948 	else
949 		return IN6_IS_ADDR_LOOPBACK(
950 				&((struct sockaddr_in6 *) addr)->sin6_addr);
951 }
952 
953 static inline int c4iw_any_addr(struct sockaddr *addr)
954 {
955 	return c4iw_zero_addr(addr) || c4iw_loopback_addr(addr);
956 }
957 
958 static void
959 process_newconn(struct c4iw_listen_ep *master_lep, struct socket *new_so)
960 {
961 	struct c4iw_listen_ep *real_lep = NULL;
962 	struct c4iw_ep *new_ep = NULL;
963 	struct sockaddr_in *remote = NULL;
964 	int ret = 0;
965 
966 	MPASS(new_so != NULL);
967 
968 	if (c4iw_any_addr((struct sockaddr *)&master_lep->com.local_addr)) {
969 		/* Here we need to find the 'real_lep' that belongs to the
 970 		 * incoming socket's network interface, such that the newly
971 		 * created 'ep' can be attached to the real 'lep'.
972 		 */
973 		real_lep = find_real_listen_ep(master_lep, new_so);
974 		if (real_lep == NULL) {
975 			CTR2(KTR_IW_CXGBE, "%s: Could not find the real listen "
976 					"ep for sock: %p", __func__, new_so);
977 			log(LOG_ERR,"%s: Could not find the real listen ep for "
978 					"sock: %p\n", __func__, new_so);
979 			/* FIXME: properly free the 'new_so' in failure case.
 980 			 * Use of soabort() and soclose() is not legal
 981 			 * here (before soaccept()).
982 			 */
983 			return;
984 		}
 985 	} else /* for a non-wildcard address, master_lep is always the real_lep */
986 		real_lep = master_lep;
987 
988 	new_ep = alloc_ep(sizeof(*new_ep), GFP_KERNEL);
989 
990 	CTR6(KTR_IW_CXGBE, "%s: master_lep %p, real_lep: %p, new ep %p, "
991 	    "listening so %p, new so %p", __func__, master_lep, real_lep,
992 	    new_ep, master_lep->com.so, new_so);
993 
994 	new_ep->com.dev = real_lep->com.dev;
995 	new_ep->com.so = new_so;
996 	new_ep->com.cm_id = NULL;
997 	new_ep->com.thread = real_lep->com.thread;
998 	new_ep->parent_ep = real_lep;
999 
1000 	GET_LOCAL_ADDR(&new_ep->com.local_addr, new_so);
1001 	GET_REMOTE_ADDR(&new_ep->com.remote_addr, new_so);
1002 	c4iw_get_ep(&real_lep->com);
1003 	init_timer(&new_ep->timer);
1004 	new_ep->com.state = MPA_REQ_WAIT;
1005 	START_EP_TIMER(new_ep);
1006 
1007 	setiwsockopt(new_so);
1008 	ret = soaccept(new_so, (struct sockaddr **)&remote);
1009 	if (ret != 0) {
1010 		CTR4(KTR_IW_CXGBE,
1011 				"%s:listen sock:%p, new sock:%p, ret:%d\n",
1012 				__func__, master_lep->com.so, new_so, ret);
1013 		if (remote != NULL)
1014 			free(remote, M_SONAME);
1015 		uninit_iwarp_socket(new_so);
1016 		soclose(new_so);
1017 		c4iw_put_ep(&new_ep->com);
1018 		c4iw_put_ep(&real_lep->com);
1019 		return;
1020 	}
1021 	free(remote, M_SONAME);
1022 
 1023 	/* An MPA request might have been queued up on the socket already, so we
 1024 	 * initialize the socket/upcall handler under the lock to prevent another
 1025 	 * thread (via process_req()) from processing the MPA request simultaneously.
1026 	 */
 1027 	c4iw_get_ep(&new_ep->com); /* Dereferenced at the end below; this is to
 1028 				      avoid freeing the ep before it is unlocked. */
1029 	mutex_lock(&new_ep->com.mutex);
1030 	init_iwarp_socket(new_so, &new_ep->com);
1031 
1032 	ret = process_mpa_request(new_ep);
1033 	if (ret) {
1034 		/* ABORT */
1035 		c4iw_ep_disconnect(new_ep, 1, GFP_KERNEL);
1036 		c4iw_put_ep(&real_lep->com);
1037 	}
1038 	mutex_unlock(&new_ep->com.mutex);
1039 	c4iw_put_ep(&new_ep->com);
1040 	return;
1041 }
1042 
1043 static int
1044 add_ep_to_req_list(struct c4iw_ep *ep, int new_ep_event)
1045 {
1046 	unsigned long flag;
1047 
1048 	spin_lock_irqsave(&req_lock, flag);
1049 	if (ep && ep->com.so) {
1050 		ep->com.ep_events |= new_ep_event;
1051 		if (!ep->com.entry.tqe_prev) {
1052 			c4iw_get_ep(&ep->com);
1053 			TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
1054 			queue_work(c4iw_taskq, &c4iw_task);
1055 		}
1056 	}
1057 	spin_unlock_irqrestore(&req_lock, flag);
1058 
1059 	return (0);
1060 }
1061 
1062 static int
1063 c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
1064 {
1065 	struct c4iw_ep *ep = arg;
1066 
1067 	CTR6(KTR_IW_CXGBE,
1068 	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
1069 	    __func__, so, so->so_state, ep, states[ep->com.state],
1070 	    ep->com.entry.tqe_prev);
1071 
1072 	MPASS(ep->com.so == so);
1073 	/*
1074 	 * Wake up any threads waiting in rdma_init()/rdma_fini(),
1075 	 * with locks held.
1076 	 */
1077 	if (so->so_error)
1078 		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
1079 	add_ep_to_req_list(ep, C4IW_EVENT_SOCKET);
1080 
1081 	return (SU_OK);
1082 }
1083 
1084 
1085 static int
1086 terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
1087 {
1088 	struct adapter *sc = iq->adapter;
1089 	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
1090 	unsigned int tid = GET_TID(cpl);
1091 	struct toepcb *toep = lookup_tid(sc, tid);
1092 	struct socket *so;
1093 	struct c4iw_ep *ep;
1094 
1095 	INP_WLOCK(toep->inp);
1096 	so = inp_inpcbtosocket(toep->inp);
1097 	ep = so->so_rcv.sb_upcallarg;
1098 	INP_WUNLOCK(toep->inp);
1099 
1100 	CTR3(KTR_IW_CXGBE, "%s: so %p, ep %p", __func__, so, ep);
1101 	add_ep_to_req_list(ep, C4IW_EVENT_TERM);
1102 
1103 	return 0;
1104 }
1105 
1106 static void
1107 process_socket_event(struct c4iw_ep *ep)
1108 {
1109 	int state = ep->com.state;
1110 	struct socket *so = ep->com.so;
1111 
1112 	if (ep->com.state == DEAD) {
1113 		CTR3(KTR_IW_CXGBE, "%s: Pending socket event discarded "
1114 			"ep %p ep_state %s", __func__, ep, states[state]);
1115 		return;
1116 	}
1117 
1118 	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
1119 	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
1120 	    so->so_error, so->so_rcv.sb_state, ep, states[state]);
1121 
1122 	if (state == CONNECTING) {
1123 		process_connected(ep);
1124 		return;
1125 	}
1126 
1127 	if (state == LISTEN) {
1128 		struct c4iw_listen_ep *lep = (struct c4iw_listen_ep *)ep;
1129 		struct socket *listen_so = so, *new_so = NULL;
1130 		int error = 0;
1131 
1132 		SOLISTEN_LOCK(listen_so);
1133 		do {
1134 			error = solisten_dequeue(listen_so, &new_so,
1135 						SOCK_NONBLOCK);
1136 			if (error) {
1137 				CTR4(KTR_IW_CXGBE, "%s: lep %p listen_so %p "
1138 					"error %d", __func__, lep, listen_so,
1139 					error);
1140 				return;
1141 			}
1142 			process_newconn(lep, new_so);
1143 
 1144 			/* solisten_dequeue() unlocks on return, so acquire the
 1145 			 * lock again for sol_qlen and for the next iteration.
1146 			 */
1147 			SOLISTEN_LOCK(listen_so);
1148 		} while (listen_so->sol_qlen);
1149 		SOLISTEN_UNLOCK(listen_so);
1150 
1151 		return;
1152 	}
1153 
1154 	/* connection error */
1155 	if (so->so_error) {
1156 		process_conn_error(ep);
1157 		return;
1158 	}
1159 
1160 	/* peer close */
1161 	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
1162 		process_peer_close(ep);
1163 		/*
1164 		 * check whether socket disconnect event is pending before
1165 		 * returning. Fallthrough if yes.
1166 		 */
1167 		if (!(so->so_state & SS_ISDISCONNECTED))
1168 			return;
1169 	}
1170 
1171 	/* close complete */
1172 	if (so->so_state & SS_ISDISCONNECTED) {
1173 		process_close_complete(ep);
1174 		return;
1175 	}
1176 
1177 	/* rx data */
1178 	process_data(ep);
1179 }
1180 
1181 SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");
1182 
1183 static int dack_mode = 0;
1184 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
1185 		"Delayed ack mode (default = 0)");
1186 
1187 int c4iw_max_read_depth = 8;
1188 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
1189 		"Per-connection max ORD/IRD (default = 8)");
1190 
1191 static int enable_tcp_timestamps;
1192 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
1193 		"Enable tcp timestamps (default = 0)");
1194 
1195 static int enable_tcp_sack;
1196 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
1197 		"Enable tcp SACK (default = 0)");
1198 
1199 static int enable_tcp_window_scaling = 1;
1200 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
1201 		"Enable tcp window scaling (default = 1)");
1202 
1203 int c4iw_debug = 0;
1204 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
1205 		"Enable debug logging (default = 0)");
1206 
1207 static int peer2peer = 1;
1208 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
1209 		"Support peer2peer ULPs (default = 1)");
1210 
1211 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
1212 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
1213 		"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");
1214 
1215 static int ep_timeout_secs = 60;
1216 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
1217 		"CM Endpoint operation timeout in seconds (default = 60)");
1218 
1219 static int mpa_rev = 1;
1220 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
1221 		"MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
1222 
1223 static int markers_enabled;
1224 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
1225 		"Enable MPA MARKERS (default(0) = disabled)");
1226 
1227 static int crc_enabled = 1;
1228 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
1229 		"Enable MPA CRC (default(1) = enabled)");
1230 
1231 static int rcv_win = 256 * 1024;
1232 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
1233 		"TCP receive window in bytes (default = 256KB)");
1234 
1235 static int snd_win = 128 * 1024;
1236 SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
1237 		"TCP send window in bytes (default = 128KB)");
1238 
1239 static void
1240 start_ep_timer(struct c4iw_ep *ep)
1241 {
1242 
1243 	if (timer_pending(&ep->timer)) {
1244 		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
1245 		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
1246 		    ep);
1247 		return;
1248 	}
1249 	clear_bit(TIMEOUT, &ep->com.flags);
1250 	c4iw_get_ep(&ep->com);
1251 	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
1252 	ep->timer.data = (unsigned long)ep;
1253 	ep->timer.function = ep_timeout;
1254 	add_timer(&ep->timer);
1255 }
1256 
1257 static int
1258 stop_ep_timer(struct c4iw_ep *ep)
1259 {
1260 
1261 	del_timer_sync(&ep->timer);
1262 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
1263 		c4iw_put_ep(&ep->com);
1264 		return 0;
1265 	}
1266 	return 1;
1267 }
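/*
 * Usage sketch (illustrative): a non-zero return from stop_ep_timer() means
 * the timeout already fired and process_timeout() will drop the final
 * reference, so callers typically just bail out, e.g. as process_mpa_reply()
 * does below:
 *
 *	if (STOP_EP_TIMER(ep))
 *		return 0;	// timed out; process_timeout() handles the ep
 */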
1268 
1269 static void *
1270 alloc_ep(int size, gfp_t gfp)
1271 {
1272 	struct c4iw_ep_common *epc;
1273 
1274 	epc = kzalloc(size, gfp);
1275 	if (epc == NULL)
1276 		return (NULL);
1277 
1278 	kref_init(&epc->kref);
1279 	mutex_init(&epc->mutex);
1280 	c4iw_init_wr_wait(&epc->wr_wait);
1281 
1282 	return (epc);
1283 }
1284 
1285 void _c4iw_free_ep(struct kref *kref)
1286 {
1287 	struct c4iw_ep *ep;
1288 	struct c4iw_ep_common *epc;
1289 
1290 	ep = container_of(kref, struct c4iw_ep, com.kref);
1291 	epc = &ep->com;
1292 	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
1293 	    __func__, epc));
1294 	if (test_bit(QP_REFERENCED, &ep->com.flags))
1295 		deref_qp(ep);
1296 	CTR4(KTR_IW_CXGBE, "%s: ep %p, history 0x%lx, flags 0x%lx",
1297 	    __func__, ep, epc->history, epc->flags);
1298 	kfree(ep);
1299 }
1300 
1301 static void release_ep_resources(struct c4iw_ep *ep)
1302 {
1303 	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
1304 	set_bit(RELEASE_RESOURCES, &ep->com.flags);
1305 	c4iw_put_ep(&ep->com);
1306 	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
1307 }
1308 
1309 static int
1310 send_mpa_req(struct c4iw_ep *ep)
1311 {
1312 	int mpalen;
1313 	struct mpa_message *mpa;
1314 	struct mpa_v2_conn_params mpa_v2_params;
1315 	struct mbuf *m;
1316 	char mpa_rev_to_use = mpa_rev;
1317 	int err = 0;
1318 
1319 	if (ep->retry_with_mpa_v1)
1320 		mpa_rev_to_use = 1;
1321 	mpalen = sizeof(*mpa) + ep->plen;
1322 	if (mpa_rev_to_use == 2)
1323 		mpalen += sizeof(struct mpa_v2_conn_params);
1324 
1325 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1326 	if (mpa == NULL) {
1327 		err = -ENOMEM;
1328 		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p , error: %d",
1329 				__func__, ep, err);
1330 		goto err;
1331 	}
1332 
1333 	memset(mpa, 0, mpalen);
1334 	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
1335 	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
1336 		(markers_enabled ? MPA_MARKERS : 0) |
1337 		(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
1338 	mpa->private_data_size = htons(ep->plen);
1339 	mpa->revision = mpa_rev_to_use;
1340 
1341 	if (mpa_rev_to_use == 1) {
1342 		ep->tried_with_mpa_v1 = 1;
1343 		ep->retry_with_mpa_v1 = 0;
1344 	}
1345 
1346 	if (mpa_rev_to_use == 2) {
1347 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1348 					    sizeof(struct mpa_v2_conn_params));
1349 		mpa_v2_params.ird = htons((u16)ep->ird);
1350 		mpa_v2_params.ord = htons((u16)ep->ord);
1351 
1352 		if (peer2peer) {
1353 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1354 
1355 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1356 				mpa_v2_params.ord |=
1357 				    htons(MPA_V2_RDMA_WRITE_RTR);
1358 			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1359 				mpa_v2_params.ord |=
1360 					htons(MPA_V2_RDMA_READ_RTR);
1361 			}
1362 		}
1363 		memcpy(mpa->private_data, &mpa_v2_params,
1364 			sizeof(struct mpa_v2_conn_params));
1365 
1366 		if (ep->plen) {
1367 
1368 			memcpy(mpa->private_data +
1369 				sizeof(struct mpa_v2_conn_params),
1370 				ep->mpa_pkt + sizeof(*mpa), ep->plen);
1371 		}
1372 	} else {
1373 
1374 		if (ep->plen)
1375 			memcpy(mpa->private_data,
1376 					ep->mpa_pkt + sizeof(*mpa), ep->plen);
1377 		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
1378 	}
1379 
1380 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1381 	if (m == NULL) {
1382 		err = -ENOMEM;
1383 		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p , error: %d",
1384 				__func__, ep, err);
1385 		free(mpa, M_CXGBE);
1386 		goto err;
1387 	}
1388 	m_copyback(m, 0, mpalen, (void *)mpa);
1389 	free(mpa, M_CXGBE);
1390 
1391 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1392 			ep->com.thread);
1393 	if (err) {
1394 		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p , error: %d",
1395 				__func__, ep, err);
1396 		goto err;
1397 	}
1398 
1399 	START_EP_TIMER(ep);
1400 	ep->com.state = MPA_REQ_SENT;
1401 	ep->mpa_attr.initiator = 1;
1402 	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1403 	return 0;
1404 err:
1405 	connect_reply_upcall(ep, err);
1406 	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
1407 	return err;
1408 }
1409 
1410 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
1411 {
 1412 	int mpalen;
1413 	struct mpa_message *mpa;
1414 	struct mpa_v2_conn_params mpa_v2_params;
1415 	struct mbuf *m;
1416 	int err;
1417 
1418 	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
1419 	    ep->plen);
1420 
1421 	mpalen = sizeof(*mpa) + plen;
1422 
1423 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1424 
1425 		mpalen += sizeof(struct mpa_v2_conn_params);
1426 		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
1427 		    ep->mpa_attr.version, mpalen);
1428 	}
1429 
1430 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1431 	if (mpa == NULL)
1432 		return (-ENOMEM);
1433 
1434 	memset(mpa, 0, mpalen);
1435 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1436 	mpa->flags = MPA_REJECT;
1437 	mpa->revision = mpa_rev;
1438 	mpa->private_data_size = htons(plen);
1439 
1440 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1441 
1442 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1443 		mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
1444 					    sizeof(struct mpa_v2_conn_params));
1445 		mpa_v2_params.ird = htons(((u16)ep->ird) |
1446 				(peer2peer ? MPA_V2_PEER2PEER_MODEL :
1447 				 0));
1448 		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
1449 					(p2p_type ==
1450 					 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
1451 					 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
1452 					 FW_RI_INIT_P2PTYPE_READ_REQ ?
1453 					 MPA_V2_RDMA_READ_RTR : 0) : 0));
1454 		memcpy(mpa->private_data, &mpa_v2_params,
1455 				sizeof(struct mpa_v2_conn_params));
1456 
1457 		if (ep->plen)
1458 			memcpy(mpa->private_data +
1459 				sizeof(struct mpa_v2_conn_params), pdata, plen);
1460 		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
1461 		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
1462 	} else
1463 		if (plen)
1464 			memcpy(mpa->private_data, pdata, plen);
1465 
1466 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1467 	if (m == NULL) {
1468 		free(mpa, M_CXGBE);
1469 		return (-ENOMEM);
1470 	}
1471 	m_copyback(m, 0, mpalen, (void *)mpa);
1472 	free(mpa, M_CXGBE);
1473 
1474 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
1475 	if (!err)
1476 		ep->snd_seq += mpalen;
1477 	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
1478 	return err;
1479 }
1480 
1481 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
1482 {
1483 	int mpalen;
1484 	struct mpa_message *mpa;
1485 	struct mbuf *m;
1486 	struct mpa_v2_conn_params mpa_v2_params;
1487 	int err;
1488 
1489 	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);
1490 
1491 	mpalen = sizeof(*mpa) + plen;
1492 
1493 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1494 
1495 		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
1496 		    ep->mpa_attr.version);
1497 		mpalen += sizeof(struct mpa_v2_conn_params);
1498 	}
1499 
1500 	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
1501 	if (mpa == NULL)
1502 		return (-ENOMEM);
1503 
1504 	memset(mpa, 0, sizeof(*mpa));
1505 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
1506 	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
1507 		(markers_enabled ? MPA_MARKERS : 0);
1508 	mpa->revision = ep->mpa_attr.version;
1509 	mpa->private_data_size = htons(plen);
1510 
1511 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1512 
1513 		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
1514 		mpa->private_data_size +=
1515 			htons(sizeof(struct mpa_v2_conn_params));
1516 		mpa_v2_params.ird = htons((u16)ep->ird);
1517 		mpa_v2_params.ord = htons((u16)ep->ord);
1518 		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
1519 		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);
1520 
1521 		if (peer2peer && (ep->mpa_attr.p2p_type !=
1522 			FW_RI_INIT_P2PTYPE_DISABLED)) {
1523 
1524 			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);
1525 
1526 			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
1527 
1528 				mpa_v2_params.ord |=
1529 					htons(MPA_V2_RDMA_WRITE_RTR);
1530 				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
1531 				    __func__, ep, p2p_type, mpa_v2_params.ird,
1532 				    mpa_v2_params.ord);
1533 			}
1534 			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
1535 
1536 				mpa_v2_params.ord |=
1537 					htons(MPA_V2_RDMA_READ_RTR);
1538 				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
1539 				    __func__, ep, p2p_type, mpa_v2_params.ird,
1540 				    mpa_v2_params.ord);
1541 			}
1542 		}
1543 
1544 		memcpy(mpa->private_data, &mpa_v2_params,
1545 			sizeof(struct mpa_v2_conn_params));
1546 
1547 		if (ep->plen)
1548 			memcpy(mpa->private_data +
1549 				sizeof(struct mpa_v2_conn_params), pdata, plen);
1550 	} else
1551 		if (plen)
1552 			memcpy(mpa->private_data, pdata, plen);
1553 
1554 	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
1555 	if (m == NULL) {
1556 		free(mpa, M_CXGBE);
1557 		return (-ENOMEM);
1558 	}
1559 	m_copyback(m, 0, mpalen, (void *)mpa);
1560 	free(mpa, M_CXGBE);
1561 
1562 
1563 	ep->com.state = MPA_REP_SENT;
1564 	ep->snd_seq += mpalen;
1565 	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
1566 			ep->com.thread);
1567 	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
1568 	return err;
1569 }
1570 
1571 
1572 
1573 static void close_complete_upcall(struct c4iw_ep *ep, int status)
1574 {
1575 	struct iw_cm_event event;
1576 
1577 	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
1578 	memset(&event, 0, sizeof(event));
1579 	event.event = IW_CM_EVENT_CLOSE;
1580 	event.status = status;
1581 
1582 	if (ep->com.cm_id) {
1583 
 1584 		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
1585 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1586 		deref_cm_id(&ep->com);
1587 		set_bit(CLOSE_UPCALL, &ep->com.history);
1588 	}
1589 	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
1590 }
1591 
1592 static int
1593 send_abort(struct c4iw_ep *ep)
1594 {
1595 	struct socket *so = ep->com.so;
1596 	struct sockopt sopt;
1597 	int rc;
1598 	struct linger l;
1599 
1600 	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
1601 	    states[ep->com.state], ep->hwtid);
1602 
1603 	l.l_onoff = 1;
1604 	l.l_linger = 0;
1605 
1606 	/* linger_time of 0 forces RST to be sent */
1607 	sopt.sopt_dir = SOPT_SET;
1608 	sopt.sopt_level = SOL_SOCKET;
1609 	sopt.sopt_name = SO_LINGER;
1610 	sopt.sopt_val = (caddr_t)&l;
1611 	sopt.sopt_valsize = sizeof l;
1612 	sopt.sopt_td = NULL;
1613 	rc = sosetopt(so, &sopt);
1614 	if (rc != 0) {
1615 		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
1616 		    __func__, so, rc);
1617 	}
1618 
1619 	uninit_iwarp_socket(so);
1620 	soclose(so);
1621 	set_bit(ABORT_CONN, &ep->com.history);
1622 
1623 	/*
 1624 	 * TBD: the iw_cxgbe driver should receive an ABORT reply for every ABORT
 1625 	 * request it has sent.  But the current TOE driver does not propagate
 1626 	 * this ABORT reply event (via do_abort_rpl) to iw_cxgbe.  So as a work-
 1627 	 * around, de-reference 'ep' here instead of doing it in the abort_rpl()
 1628 	 * handler (not yet implemented) of the iw_cxgbe driver.
1629 	 */
1630 	release_ep_resources(ep);
1631 
1632 	return (0);
1633 }
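/*
 * Aside (illustrative only): the SO_LINGER setting above is the in-kernel
 * analogue of the classic userspace idiom for resetting a TCP connection:
 *
 *	struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *	setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *	close(fd);	// sends RST instead of a normal FIN close
 */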
1634 
1635 static void peer_close_upcall(struct c4iw_ep *ep)
1636 {
1637 	struct iw_cm_event event;
1638 
1639 	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
1640 	memset(&event, 0, sizeof(event));
1641 	event.event = IW_CM_EVENT_DISCONNECT;
1642 
1643 	if (ep->com.cm_id) {
1644 
1645 		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
1646 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1647 		set_bit(DISCONN_UPCALL, &ep->com.history);
1648 	}
1649 	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
1650 }
1651 
1652 static void peer_abort_upcall(struct c4iw_ep *ep)
1653 {
1654 	struct iw_cm_event event;
1655 
1656 	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
1657 	memset(&event, 0, sizeof(event));
1658 	event.event = IW_CM_EVENT_CLOSE;
1659 	event.status = -ECONNRESET;
1660 
1661 	if (ep->com.cm_id) {
1662 
1663 		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
1664 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1665 		deref_cm_id(&ep->com);
1666 		set_bit(ABORT_UPCALL, &ep->com.history);
1667 	}
1668 	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
1669 }
1670 
1671 static void connect_reply_upcall(struct c4iw_ep *ep, int status)
1672 {
1673 	struct iw_cm_event event;
1674 
1675 	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
1676 	memset(&event, 0, sizeof(event));
1677 	event.event = IW_CM_EVENT_CONNECT_REPLY;
1678 	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
1679 					-ECONNRESET : status;
1680 	event.local_addr = ep->com.local_addr;
1681 	event.remote_addr = ep->com.remote_addr;
1682 
1683 	if ((status == 0) || (status == -ECONNREFUSED)) {
1684 
1685 		if (!ep->tried_with_mpa_v1) {
1686 
1687 			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
1688 			/* this means MPA_v2 is used */
1689 			event.ord = ep->ird;
1690 			event.ird = ep->ord;
1691 			event.private_data_len = ep->plen -
1692 				sizeof(struct mpa_v2_conn_params);
1693 			event.private_data = ep->mpa_pkt +
1694 				sizeof(struct mpa_message) +
1695 				sizeof(struct mpa_v2_conn_params);
1696 		} else {
1697 
1698 			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
1699 			/* this means MPA_v1 is used */
1700 			event.ord = c4iw_max_read_depth;
1701 			event.ird = c4iw_max_read_depth;
1702 			event.private_data_len = ep->plen;
1703 			event.private_data = ep->mpa_pkt +
1704 				sizeof(struct mpa_message);
1705 		}
1706 	}
1707 
1708 	if (ep->com.cm_id) {
1709 
1710 		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
1711 		set_bit(CONN_RPL_UPCALL, &ep->com.history);
1712 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1713 	}
1714 
 1715 	if (status == -ECONNABORTED) {
1716 
1717 		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
1718 		return;
1719 	}
1720 
1721 	if (status < 0) {
1722 
1723 		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
1724 		deref_cm_id(&ep->com);
1725 	}
1726 
1727 	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
1728 }
1729 
1730 static int connect_request_upcall(struct c4iw_ep *ep)
1731 {
1732 	struct iw_cm_event event;
1733 	int ret;
1734 
1735 	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
1736 	    ep->tried_with_mpa_v1);
1737 
1738 	memset(&event, 0, sizeof(event));
1739 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
1740 	event.local_addr = ep->com.local_addr;
1741 	event.remote_addr = ep->com.remote_addr;
1742 	event.provider_data = ep;
1743 
1744 	if (!ep->tried_with_mpa_v1) {
1745 		/* this means MPA_v2 is used */
1746 		event.ord = ep->ord;
1747 		event.ird = ep->ird;
1748 		event.private_data_len = ep->plen -
1749 			sizeof(struct mpa_v2_conn_params);
1750 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
1751 			sizeof(struct mpa_v2_conn_params);
1752 	} else {
1753 
1754 		/* this means MPA_v1 is used. Send max supported */
1755 		event.ord = c4iw_max_read_depth;
1756 		event.ird = c4iw_max_read_depth;
1757 		event.private_data_len = ep->plen;
1758 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
1759 	}
1760 
1761 	c4iw_get_ep(&ep->com);
1762 	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
1763 	    &event);
 1764 	if (ret) {
1765 		CTR3(KTR_IW_CXGBE, "%s: ep %p, Failure while notifying event to"
1766 			" IWCM, err:%d", __func__, ep, ret);
1767 		c4iw_put_ep(&ep->com);
1768 	} else
1769 		/* Dereference parent_ep only in success case.
1770 		 * In case of failure, parent_ep is dereferenced by the caller
1771 		 * of process_mpa_request().
1772 		 */
1773 		c4iw_put_ep(&ep->parent_ep->com);
1774 
1775 	set_bit(CONNREQ_UPCALL, &ep->com.history);
1776 	return ret;
1777 }
1778 
1779 static void established_upcall(struct c4iw_ep *ep)
1780 {
1781 	struct iw_cm_event event;
1782 
1783 	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
1784 	memset(&event, 0, sizeof(event));
1785 	event.event = IW_CM_EVENT_ESTABLISHED;
1786 	event.ird = ep->ord;
1787 	event.ord = ep->ird;
1788 
1789 	if (ep->com.cm_id) {
1790 
1791 		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
1792 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
1793 		set_bit(ESTAB_UPCALL, &ep->com.history);
1794 	}
1795 	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
1796 }
1797 
1798 
1799 #define RELAXED_IRD_NEGOTIATION 1
1800 
1801 /*
1802  * process_mpa_reply - process streaming mode MPA reply
1803  *
1804  * Returns:
1805  *
1806  * 0 upon success indicating a connect request was delivered to the ULP
1807  * or the mpa request is incomplete but valid so far.
1808  *
1809  * 1 if a failure requires the caller to close the connection.
1810  *
1811  * 2 if a failure requires the caller to abort the connection.
1812  */
1813 static int process_mpa_reply(struct c4iw_ep *ep)
1814 {
1815 	struct mpa_message *mpa;
1816 	struct mpa_v2_conn_params *mpa_v2_params;
1817 	u16 plen;
1818 	u16 resp_ird, resp_ord;
1819 	u8 rtr_mismatch = 0, insuff_ird = 0;
1820 	struct c4iw_qp_attributes attrs = {0};
1821 	enum c4iw_qp_attr_mask mask;
1822 	int err;
1823 	struct mbuf *top, *m;
1824 	int flags = MSG_DONTWAIT;
1825 	struct uio uio;
1826 	int disconnect = 0;
1827 
1828 	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);
1829 
1830 	/*
1831 	 * Stop mpa timer.  If it expired, then
1832 	 * we ignore the MPA reply.  process_timeout()
1833 	 * will abort the connection.
1834 	 */
1835 	if (STOP_EP_TIMER(ep))
1836 		return 0;
1837 
1838 	uio.uio_resid = 1000000;
1839 	uio.uio_td = ep->com.thread;
1840 	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);
1841 
1842 	if (err) {
1843 
1844 		if (err == EWOULDBLOCK) {
1845 
1846 			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
1847 			START_EP_TIMER(ep);
1848 			return 0;
1849 		}
1850 		err = -err;
1851 		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
1852 		goto err;
1853 	}
1854 
1855 	if (ep->com.so->so_rcv.sb_mb) {
1856 
1857 		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
1858 		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
1859 		       __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
1860 	}
1861 
1862 	m = top;
1863 
1864 	do {
1865 
1866 		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
1867 		/*
1868 		 * If we get more than the supported amount of private data
1869 		 * then we must fail this connection.
1870 		 */
1871 		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {
1872 
1873 			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
1874 			    ep->mpa_pkt_len + m->m_len);
1875 			err = -EINVAL;
1876 			goto err_stop_timer;
1877 		}
1878 
1879 		/*
1880 		 * copy the new data into our accumulation buffer.
1881 		 */
1882 		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
1883 		ep->mpa_pkt_len += m->m_len;
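		/*
		 * Walk the mbuf chain; when the current chain ends, continue
		 * with the next packet in the record, if any.
		 */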
1884 		if (!m->m_next)
1885 			m = m->m_nextpkt;
1886 		else
1887 			m = m->m_next;
1888 	} while (m);
1889 
1890 	m_freem(top);
1891 	/*
1892 	 * if we don't even have the mpa message, then bail.
1893 	 */
1894 	if (ep->mpa_pkt_len < sizeof(*mpa)) {
1895 		return 0;
1896 	}
1897 	mpa = (struct mpa_message *) ep->mpa_pkt;
1898 
1899 	/* Validate MPA header. */
1900 	if (mpa->revision > mpa_rev) {
1901 
1902 		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
1903 		    mpa->revision, mpa_rev);
1904 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
1905 				" Received = %d\n", __func__, mpa_rev, mpa->revision);
1906 		err = -EPROTO;
1907 		goto err_stop_timer;
1908 	}
1909 
1910 	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
1911 
1912 		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
1913 		err = -EPROTO;
1914 		goto err_stop_timer;
1915 	}
1916 
1917 	plen = ntohs(mpa->private_data_size);
1918 
1919 	/*
1920 	 * Fail if there's too much private data.
1921 	 */
1922 	if (plen > MPA_MAX_PRIVATE_DATA) {
1923 
1924 		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
1925 		err = -EPROTO;
1926 		goto err_stop_timer;
1927 	}
1928 
1929 	/*
1930 	 * If plen does not account for pkt size
1931 	 */
1932 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1933 
1934 		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
1936 		err = -EPROTO;
1937 		goto err_stop_timer;
1938 	}
1939 
1940 	ep->plen = (u8) plen;
1941 
1942 	/*
1943 	 * If we don't have all the pdata yet, then bail.
1944 	 * We'll continue process when more data arrives.
1945 	 */
1946 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {
1947 
1948 		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
1949 		return 0;
1950 	}
1951 
1952 	if (mpa->flags & MPA_REJECT) {
1953 
1954 		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
1955 		err = -ECONNREFUSED;
1956 		goto err_stop_timer;
1957 	}
1958 
1959 	/*
1960 	 * If we get here we have accumulated the entire mpa
1961 	 * start reply message including private data. And
1962 	 * the MPA header is valid.
1963 	 */
1964 	ep->com.state = FPDU_MODE;
1965 	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
1966 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
1967 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1968 	ep->mpa_attr.version = mpa->revision;
1969 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1970 
1971 	if (mpa->revision == 2) {
1972 
1973 		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
1974 		ep->mpa_attr.enhanced_rdma_conn =
1975 			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1976 
1977 		if (ep->mpa_attr.enhanced_rdma_conn) {
1978 
1979 			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
1980 			mpa_v2_params = (struct mpa_v2_conn_params *)
1981 				(ep->mpa_pkt + sizeof(*mpa));
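			/*
			 * The high bits of the wire ird/ord fields carry the
			 * MPA v2 peer2peer/RTR flags (tested further below);
			 * MPA_V2_IRD_ORD_MASK strips them to leave the plain
			 * read-depth values.
			 */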
1982 			resp_ird = ntohs(mpa_v2_params->ird) &
1983 				MPA_V2_IRD_ORD_MASK;
1984 			resp_ord = ntohs(mpa_v2_params->ord) &
1985 				MPA_V2_IRD_ORD_MASK;
1986 
1987 			/*
1988 			 * This is a double-check. Ideally, below checks are
1989 			 * not required since ird/ord stuff has been taken
1990 			 * care of in c4iw_accept_cr
1991 			 */
1992 			if (ep->ird < resp_ord) {
1993 				if (RELAXED_IRD_NEGOTIATION && resp_ord <=
1994 				   ep->com.dev->rdev.adap->params.max_ordird_qp)
1995 					ep->ird = resp_ord;
1996 				else
1997 					insuff_ird = 1;
1998 			} else if (ep->ird > resp_ord) {
1999 				ep->ird = resp_ord;
2000 			}
2001 			if (ep->ord > resp_ird) {
2002 				if (RELAXED_IRD_NEGOTIATION)
2003 					ep->ord = resp_ird;
2004 				else
2005 					insuff_ird = 1;
2006 			}
2007 			if (insuff_ird) {
2008 				err = -ENOMEM;
2009 				ep->ird = resp_ord;
2010 				ep->ord = resp_ird;
2011 			}
2012 
2013 			if (ntohs(mpa_v2_params->ird) &
2014 				MPA_V2_PEER2PEER_MODEL) {
2015 
2016 				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
2017 				if (ntohs(mpa_v2_params->ord) &
2018 					MPA_V2_RDMA_WRITE_RTR) {
2019 
2020 					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
2021 					ep->mpa_attr.p2p_type =
2022 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2023 				}
2024 				else if (ntohs(mpa_v2_params->ord) &
2025 					MPA_V2_RDMA_READ_RTR) {
2026 
2027 					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
2028 					ep->mpa_attr.p2p_type =
2029 						FW_RI_INIT_P2PTYPE_READ_REQ;
2030 				}
2031 			}
2032 		}
2033 	} else {
2034 
2035 		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);
2036 
2037 		if (mpa->revision == 1) {
2038 
2039 			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);
2040 
2041 			if (peer2peer) {
2042 
2043 				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
2044 				ep->mpa_attr.p2p_type = p2p_type;
2045 			}
2046 		}
2047 	}
2048 
2049 	if (set_tcpinfo(ep)) {
2050 
2051 		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
2052 		printf("%s set_tcpinfo error\n", __func__);
2053 		err = -ECONNRESET;
2054 		goto err;
2055 	}
2056 
2057 	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
2058 	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
2059 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2060 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
2061 	    ep->mpa_attr.p2p_type);
2062 
2063 	/*
2064 	 * If responder's RTR does not match with that of initiator, assign
2065 	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
2066 	 * generated when moving QP to RTS state.
2067 	 * A TERM message will be sent after QP has moved to RTS state
2068 	 */
2069 	if ((ep->mpa_attr.version == 2) && peer2peer &&
2070 		(ep->mpa_attr.p2p_type != p2p_type)) {
2071 
2072 		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
2073 		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2074 		rtr_mismatch = 1;
2075 	}
2076 
2077 
2078 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2079 	attrs.mpa_attr = ep->mpa_attr;
2080 	attrs.max_ird = ep->ird;
2081 	attrs.max_ord = ep->ord;
2082 	attrs.llp_stream_handle = ep;
2083 	attrs.next_state = C4IW_QP_STATE_RTS;
2084 
2085 	mask = C4IW_QP_ATTR_NEXT_STATE |
2086 		C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
2087 		C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
2088 
2089 	/* bind QP and TID with INIT_WR */
2090 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2091 
2092 	if (err) {
2093 
2094 		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
2095 		goto err;
2096 	}
2097 
2098 	/*
2099 	 * If responder's RTR requirement did not match with what initiator
2100 	 * supports, generate TERM message
2101 	 */
2102 	if (rtr_mismatch) {
2103 
2104 		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
2105 		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
2106 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2107 		attrs.ecode = MPA_NOMATCH_RTR;
2108 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2109 		attrs.send_term = 1;
2110 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2111 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2112 		err = -ENOMEM;
2113 		disconnect = 1;
2114 		goto out;
2115 	}
2116 
2117 	/*
2118 	 * Generate TERM if initiator IRD is not sufficient for responder
2119 	 * provided ORD. Currently, we do the same behaviour even when
2120 	 * responder provided IRD is also not sufficient as regards to
2121 	 * initiator ORD.
2122 	 */
2123 	if (insuff_ird) {
2124 
2125 		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
2126 		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
2127 				__func__);
2128 		attrs.layer_etype = LAYER_MPA | DDP_LLP;
2129 		attrs.ecode = MPA_INSUFF_IRD;
2130 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2131 		attrs.send_term = 1;
2132 		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
2133 			C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2134 		err = -ENOMEM;
2135 		disconnect = 1;
2136 		goto out;
2137 	}
2138 	goto out;
2139 err_stop_timer:
2140 	STOP_EP_TIMER(ep);
2141 err:
2142 	disconnect = 2;
2143 out:
2144 	connect_reply_upcall(ep, err);
2145 	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
2146 	return disconnect;
2147 }
2148 
2149 /*
2150  * process_mpa_request - process streaming mode MPA request
2151  *
2152  * Returns:
2153  *
2154  * 0 upon success indicating a connect request was delivered to the ULP
2155  * or the mpa request is incomplete but valid so far.
2156  *
2157  * 1 if a failure requires the caller to close the connection.
2158  *
2159  * 2 if a failure requires the caller to abort the connection.
2160  */
2161 static int
2162 process_mpa_request(struct c4iw_ep *ep)
2163 {
2164 	struct mpa_message *mpa;
2165 	struct mpa_v2_conn_params *mpa_v2_params;
2166 	u16 plen;
2167 	int flags = MSG_DONTWAIT;
2168 	int rc;
2169 	struct iovec iov;
2170 	struct uio uio;
2171 	enum c4iw_ep_state state = ep->com.state;
2172 
2173 	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);
2174 
2175 	if (state != MPA_REQ_WAIT)
2176 		return 0;
2177 
2178 	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
2179 	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2180 	uio.uio_iov = &iov;
2181 	uio.uio_iovcnt = 1;
2182 	uio.uio_offset = 0;
2183 	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
2184 	uio.uio_segflg = UIO_SYSSPACE;
2185 	uio.uio_rw = UIO_READ;
2186 	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */
2187 
2188 	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
2189 	if (rc == EAGAIN)
2190 		return 0;
2191 	else if (rc)
2192 		goto err_stop_timer;
2193 
2194 	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
2195 	    __func__, ep->com.so));
2196 	ep->mpa_pkt_len += uio.uio_offset;
2197 
2198 	/*
2199 	 * If we get more than the supported amount of private data then we must
2200 	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
2201 	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
2202 	 * byte is filled by the soreceive above.
2203 	 */
2204 
2205 	/* Don't even have the MPA message.  Wait for more data to arrive. */
2206 	if (ep->mpa_pkt_len < sizeof(*mpa))
2207 		return 0;
2208 	mpa = (struct mpa_message *) ep->mpa_pkt;
2209 
2210 	/*
2211 	 * Validate MPA Header.
2212 	 */
2213 	if (mpa->revision > mpa_rev) {
2214 		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
2215 		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
2216 		goto err_stop_timer;
2217 	}
2218 
2219 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
2220 		goto err_stop_timer;
2221 
2222 	/*
2223 	 * Fail if there's too much private data.
2224 	 */
2225 	plen = ntohs(mpa->private_data_size);
2226 	if (plen > MPA_MAX_PRIVATE_DATA)
2227 		goto err_stop_timer;
2228 
2229 	/*
2230 	 * If plen does not account for pkt size
2231 	 */
2232 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
2233 		goto err_stop_timer;
2234 
2235 	ep->plen = (u8) plen;
2236 
2237 	/*
2238 	 * If we don't have all the pdata yet, then bail.
2239 	 */
2240 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
2241 		return 0;
2242 
2243 	/*
2244 	 * If we get here we have accumulated the entire mpa
2245 	 * start reply message including private data.
2246 	 */
2247 	ep->mpa_attr.initiator = 0;
2248 	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
2249 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
2250 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
2251 	ep->mpa_attr.version = mpa->revision;
2252 	if (mpa->revision == 1)
2253 		ep->tried_with_mpa_v1 = 1;
2254 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
2255 
2256 	if (mpa->revision == 2) {
2257 		ep->mpa_attr.enhanced_rdma_conn =
2258 		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
2259 		if (ep->mpa_attr.enhanced_rdma_conn) {
2260 			mpa_v2_params = (struct mpa_v2_conn_params *)
2261 				(ep->mpa_pkt + sizeof(*mpa));
2262 			ep->ird = ntohs(mpa_v2_params->ird) &
2263 				MPA_V2_IRD_ORD_MASK;
2264 			ep->ird = min_t(u32, ep->ird,
2265 					cur_max_read_depth(ep->com.dev));
2266 			ep->ord = ntohs(mpa_v2_params->ord) &
2267 				MPA_V2_IRD_ORD_MASK;
2268 			ep->ord = min_t(u32, ep->ord,
2269 					cur_max_read_depth(ep->com.dev));
2270 			CTR3(KTR_IW_CXGBE, "%s initiator ird %u ord %u\n",
2271 				 __func__, ep->ird, ep->ord);
2272 			if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
2273 				if (peer2peer) {
2274 					if (ntohs(mpa_v2_params->ord) &
2275 							MPA_V2_RDMA_WRITE_RTR)
2276 						ep->mpa_attr.p2p_type =
2277 						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
2278 					else if (ntohs(mpa_v2_params->ord) &
2279 							MPA_V2_RDMA_READ_RTR)
2280 						ep->mpa_attr.p2p_type =
2281 						FW_RI_INIT_P2PTYPE_READ_REQ;
2282 				}
2283 		}
2284 	} else if (mpa->revision == 1 && peer2peer)
2285 		ep->mpa_attr.p2p_type = p2p_type;
2286 
2287 	if (set_tcpinfo(ep))
2288 		goto err_stop_timer;
2289 
2290 	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
2291 	    "xmit_marker_enabled = %d, version = %d", __func__,
2292 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
2293 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
2294 
2295 	ep->com.state = MPA_REQ_RCVD;
2296 	STOP_EP_TIMER(ep);
2297 
2298 	/* drive upcall */
2299 	if (ep->parent_ep->com.state != DEAD)
2300 		if (connect_request_upcall(ep))
2301 			goto err_out;
2302 	return 0;
2303 
2304 err_stop_timer:
2305 	STOP_EP_TIMER(ep);
2306 err_out:
2307 	return 2;
2308 }
2309 
2310 /*
2311  * iwcm->reject.  Reject a connect request that was delivered by
2312  * connect_request_upcall().  Send an MPA reject (or abort outright if
2313  * mpa_rev == 0), then disconnect and drop the endpoint reference.
2314  */
2315 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
2316 {
2317 	int err;
2318 	struct c4iw_ep *ep = to_ep(cm_id);
2319 	int abort = 0;
2320 
2321 	mutex_lock(&ep->com.mutex);
2322 	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
2323 
2324 	/* DEAD is covered by the state check below. */
2325 	if (ep->com.state != MPA_REQ_RCVD) {
2326 
2327 		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
2328 		mutex_unlock(&ep->com.mutex);
2329 		c4iw_put_ep(&ep->com);
2330 		return -ECONNRESET;
2331 	}
2332 	set_bit(ULP_REJECT, &ep->com.history);
2333 
2334 	if (mpa_rev == 0) {
2335 
2336 		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
2337 		abort = 1;
2338 	}
2339 	else {
2340 
2341 		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
2342 		abort = send_mpa_reject(ep, pdata, pdata_len);
2343 	}
2344 	STOP_EP_TIMER(ep);
2345 	err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
2346 	mutex_unlock(&ep->com.mutex);
2347 	c4iw_put_ep(&ep->com);
2348 	CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
2349 	return 0;
2350 }
2351 
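/*
 * iwcm->accept.  Complete a received MPA connect request: re-validate
 * the ULP-supplied ird/ord against what the peer offered, bind the QP
 * to the endpoint and move it to RTS, send the MPA reply carrying any
 * private data, and deliver the established upcall.
 */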
2352 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2353 {
2354 	int err;
2355 	struct c4iw_qp_attributes attrs = {0};
2356 	enum c4iw_qp_attr_mask mask;
2357 	struct c4iw_ep *ep = to_ep(cm_id);
2358 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
2359 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
2360 	int abort = 0;
2361 
2362 	mutex_lock(&ep->com.mutex);
2363 	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
2364 
2365 	/* DEAD is covered by the state check below. */
2366 	if (ep->com.state != MPA_REQ_RCVD) {
2367 
2368 		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
2369 		err = -ECONNRESET;
2370 		goto err_out;
2371 	}
2372 
2373 	BUG_ON(!qp);
2374 
2375 	set_bit(ULP_ACCEPT, &ep->com.history);
2376 
2377 	if ((conn_param->ord > c4iw_max_read_depth) ||
2378 		(conn_param->ird > c4iw_max_read_depth)) {
2379 
2380 		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
2381 		err = -EINVAL;
2382 		goto err_abort;
2383 	}
2384 
2385 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2386 
2387 		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
2388 
2389 		if (conn_param->ord > ep->ird) {
2390 			if (RELAXED_IRD_NEGOTIATION) {
2391 				conn_param->ord = ep->ird;
2392 			} else {
2393 				ep->ird = conn_param->ird;
2394 				ep->ord = conn_param->ord;
2395 				send_mpa_reject(ep, conn_param->private_data,
2396 						conn_param->private_data_len);
2397 				err = -ENOMEM;
2398 				goto err_abort;
2399 			}
2400 		}
2401 		if (conn_param->ird < ep->ord) {
2402 			if (RELAXED_IRD_NEGOTIATION &&
2403 			    ep->ord <= h->rdev.adap->params.max_ordird_qp) {
2404 				conn_param->ird = ep->ord;
2405 			} else {
2406 				err = -ENOMEM;
2407 				goto err_abort;
2408 			}
2409 		}
2410 	}
2411 	ep->ird = conn_param->ird;
2412 	ep->ord = conn_param->ord;
2413 
2414 	if (ep->mpa_attr.version == 1) {
2415 		if (peer2peer && ep->ird == 0)
2416 			ep->ird = 1;
2417 	} else {
2418 		if (peer2peer &&
2419 		    (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
2420 		    (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ird == 0)
2421 			ep->ird = 1;
2422 	}
2423 
2424 	CTR4(KTR_IW_CXGBE, "%s %d ird %d ord %d\n", __func__, __LINE__,
2425 			ep->ird, ep->ord);
2426 
2427 	ep->com.cm_id = cm_id;
2428 	ref_cm_id(&ep->com);
2429 	ep->com.qp = qp;
2430 	ref_qp(ep);
2431 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
2432 
2433 	/* bind QP to EP and move to RTS */
2434 	attrs.mpa_attr = ep->mpa_attr;
2435 	attrs.max_ird = ep->ird;
2436 	attrs.max_ord = ep->ord;
2437 	attrs.llp_stream_handle = ep;
2438 	attrs.next_state = C4IW_QP_STATE_RTS;
2439 
2440 	/* bind QP and TID with INIT_WR */
2441 	mask = C4IW_QP_ATTR_NEXT_STATE |
2442 		C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2443 		C4IW_QP_ATTR_MPA_ATTR |
2444 		C4IW_QP_ATTR_MAX_IRD |
2445 		C4IW_QP_ATTR_MAX_ORD;
2446 
2447 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2448 	if (err) {
2449 		CTR3(KTR_IW_CXGBE, "%s:caca %p, err: %d", __func__, ep, err);
2450 		goto err_deref_cm_id;
2451 	}
2452 
2453 	err = send_mpa_reply(ep, conn_param->private_data,
2454 			conn_param->private_data_len);
2455 	if (err) {
2456 		CTR3(KTR_IW_CXGBE, "%s:cacb %p, err: %d", __func__, ep, err);
2457 		goto err_deref_cm_id;
2458 	}
2459 
2460 	ep->com.state = FPDU_MODE;
2461 	established_upcall(ep);
2462 	mutex_unlock(&ep->com.mutex);
2463 	c4iw_put_ep(&ep->com);
2464 	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2465 	return 0;
2466 err_deref_cm_id:
2467 	deref_cm_id(&ep->com);
2468 err_abort:
2469 	abort = 1;
2470 err_out:
2471 	if (abort)
2472 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2473 	mutex_unlock(&ep->com.mutex);
2474 	c4iw_put_ep(&ep->com);
2475 	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2476 	return err;
2477 }
2478 
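/*
 * c4iw_sock_create - create a TCP socket and bind it to 'laddr'.  The
 * bound address (including any kernel-chosen port) is written back
 * through 'laddr'.  Returns 0 on success or an error from the socket
 * layer.
 */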
2479 static int
2480 c4iw_sock_create(struct sockaddr_storage *laddr, struct socket **so)
2481 {
2482 	int ret;
2483 	int size;
2484 	struct socket *sock = NULL;
2485 
2486 	ret = sock_create_kern(laddr->ss_family,
2487 			SOCK_STREAM, IPPROTO_TCP, &sock);
2488 	if (ret) {
2489 		CTR2(KTR_IW_CXGBE, "%s:Failed to create TCP socket. err %d",
2490 				__func__, ret);
2491 		return ret;
2492 	}
2493 
2494 	ret = sobind(sock, (struct sockaddr *)laddr, curthread);
2495 	if (ret) {
2496 		CTR2(KTR_IW_CXGBE, "%s:Failed to bind socket. err %d",
2497 				__func__, ret);
2498 		sock_release(sock);
2499 		return ret;
2500 	}
2501 
2502 	size = laddr->ss_family == AF_INET6 ?
2503 		sizeof(struct sockaddr_in6) : sizeof(struct sockaddr_in);
2504 	ret = sock_getname(sock, (struct sockaddr *)laddr, &size, 0);
2505 	if (ret) {
2506 		CTR2(KTR_IW_CXGBE, "%s:sock_getname failed. err %d",
2507 				__func__, ret);
2508 		sock_release(sock);
2509 		return ret;
2510 	}
2511 
2512 	*so = sock;
2513 	return 0;
2514 }
2515 
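/*
 * iwcm->connect.  Active-open path: allocate an endpoint, stash any
 * private data for the MPA request, resolve the egress interface and
 * check that it is TOE-capable, then create the socket and start a
 * non-blocking soconnect().  init_iwarp_socket() hooks the socket so
 * the MPA exchange proceeds from the socket upcalls once the TCP
 * connection completes.
 */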
2516 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2517 {
2518 	int err = 0;
2519 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2520 	struct c4iw_ep *ep = NULL;
2521 	struct ifnet    *nh_ifp;        /* Logical egress interface */
2522 
2523 	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2524 
2525 
2526 	if ((conn_param->ord > c4iw_max_read_depth) ||
2527 		(conn_param->ird > c4iw_max_read_depth)) {
2528 
2529 		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2530 		err = -EINVAL;
2531 		goto out;
2532 	}
2533 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2534 
2535 	init_timer(&ep->timer);
2536 	ep->plen = conn_param->private_data_len;
2537 
2538 	if (ep->plen) {
2539 
2540 		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2541 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2542 				conn_param->private_data, ep->plen);
2543 	}
2544 	ep->ird = conn_param->ird;
2545 	ep->ord = conn_param->ord;
2546 
2547 	if (peer2peer && ep->ord == 0) {
2548 
2549 		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2550 		ep->ord = 1;
2551 	}
2552 
2553 	ep->com.dev = dev;
2554 	ep->com.cm_id = cm_id;
2555 	ref_cm_id(&ep->com);
2556 	ep->com.qp = get_qhp(dev, conn_param->qpn);
2557 
2558 	if (!ep->com.qp) {
2559 
2560 		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2561 		err = -EINVAL;
2562 		goto fail;
2563 	}
2564 	ref_qp(ep);
2565 	ep->com.thread = curthread;
2566 
2567 	err = get_ifnet_from_raddr(&cm_id->remote_addr, &nh_ifp);
2568 	if (err) {
2569 
2570 		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2571 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2572 		err = -EHOSTUNREACH;
2573 		goto fail;
2574 	}
2575 
2576 	if (!(nh_ifp->if_capenable & IFCAP_TOE) ||
2577 	    TOEDEV(nh_ifp) == NULL) {
2578 		err = -ENOPROTOOPT;
2579 		goto fail;
2580 	}
2581 	ep->com.state = CONNECTING;
2582 	ep->tos = 0;
2583 	ep->com.local_addr = cm_id->local_addr;
2584 	ep->com.remote_addr = cm_id->remote_addr;
2585 
2586 	err = c4iw_sock_create(&cm_id->local_addr, &ep->com.so);
2587 	if (err)
2588 		goto fail;
2589 
2590 	setiwsockopt(ep->com.so);
2591 	err = -soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2592 		ep->com.thread);
2593 	if (err)
2594 		goto fail_free_so;
2595 	init_iwarp_socket(ep->com.so, &ep->com);
2596 	goto out;
2598 
2599 fail_free_so:
2600 	sock_release(ep->com.so);
2601 fail:
2602 	deref_cm_id(&ep->com);
2603 	c4iw_put_ep(&ep->com);
2604 	ep = NULL;
2605 out:
2606 	CTR2(KTR_IW_CXGBE, "%s:ccE ret:%d", __func__, err);
2607 	return err;
2608 }
2609 
2610 /*
2611  * iwcm->create_listen.  Returns -errno on failure.
2612  */
2613 int
2614 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2615 {
2616 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2617 	struct c4iw_listen_ep *lep = NULL;
2618 	struct listen_port_info *port_info = NULL;
2619 	int rc = 0;
2620 
2621 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, backlog %d", __func__, cm_id,
2622 			backlog);
2623 	lep = alloc_ep(sizeof(*lep), GFP_KERNEL);
2624 	lep->com.cm_id = cm_id;
2625 	ref_cm_id(&lep->com);
2626 	lep->com.dev = dev;
2627 	lep->backlog = backlog;
2628 	lep->com.local_addr = cm_id->local_addr;
2629 	lep->com.thread = curthread;
2630 	cm_id->provider_data = lep;
2631 	lep->com.state = LISTEN;
2632 
2633 	/* In the case of INADDR_ANY, ibcore creates a cm_id for each device
2634 	 * and invokes the iw_cxgbe listener callbacks assuming that iw_cxgbe
2635 	 * creates HW listeners for each device separately. But toecore expects
2636 	 * a single solisten() call with the INADDR_ANY address to create HW
2637 	 * listeners on all devices for a given port number. So the iw_cxgbe
2638 	 * driver calls solisten() only once for INADDR_ANY (usually on the
2639 	 * first listener callback from ibcore). All subsequent INADDR_ANY
2640 	 * listener callbacks from ibcore (for the same port address) skip
2641 	 * solisten(), as the first listener callback has already created
2642 	 * listeners for all other devices (via solisten).
2643 	 */
2644 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2645 		port_info = add_ep_to_listenlist(lep);
2646 		/* Skip solisten() if refcnt > 1, as the listeners were
2647 		 * already created by the 'Master lep'.
2648 		 */
2649 		if (port_info->refcnt > 1) {
2650 			/* As there will be only one listener socket for a TCP
2651 			 * port, copy Master lep's socket pointer to other lep's
2652 			 * that are belonging to same TCP port.
2653 			 */
2654 			struct c4iw_listen_ep *head_lep =
2655 					container_of(port_info->lep_list.next,
2656 					struct c4iw_listen_ep, listen_ep_list);
2657 			lep->com.so = head_lep->com.so;
2658 			goto out;
2659 		}
2660 	}
2661 	rc = c4iw_sock_create(&cm_id->local_addr, &lep->com.so);
2662 	if (rc) {
2663 		CTR2(KTR_IW_CXGBE, "%s:Failed to create socket. err %d",
2664 				__func__, rc);
2665 		goto fail;
2666 	}
2667 
2668 	rc = solisten(lep->com.so, backlog, curthread);
2669 	if (rc) {
2670 		CTR3(KTR_IW_CXGBE, "%s:Failed to listen on sock:%p. err %d",
2671 				__func__, lep->com.so, rc);
2672 		goto fail_free_so;
2673 	}
2674 	init_iwarp_socket(lep->com.so, &lep->com);
2675 out:
2676 	return 0;
2677 
2678 fail_free_so:
2679 	sock_release(lep->com.so);
2680 fail:
2681 	if (port_info)
2682 		rem_ep_from_listenlist(lep);
2683 	deref_cm_id(&lep->com);
2684 	c4iw_put_ep(&lep->com);
2685 	return rc;
2686 }
2687 
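/*
 * iwcm->destroy_listen.  Tear down a listening endpoint.  For a
 * wildcard (INADDR_ANY) listener the shared socket is closed only when
 * the last endpoint for that port is removed from the listen list.
 */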
2688 int
2689 c4iw_destroy_listen(struct iw_cm_id *cm_id)
2690 {
2691 	struct c4iw_listen_ep *lep = to_listen_ep(cm_id);
2692 
2693 	mutex_lock(&lep->com.mutex);
2694 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, state %s", __func__, cm_id,
2695 	    states[lep->com.state]);
2696 
2697 	lep->com.state = DEAD;
2698 	if (c4iw_any_addr((struct sockaddr *)&lep->com.local_addr)) {
2699 		/* if no refcount then close listen socket */
2700 		if (!rem_ep_from_listenlist(lep))
2701 			close_socket(lep->com.so);
2702 	} else
2703 		close_socket(lep->com.so);
2704 	deref_cm_id(&lep->com);
2705 	mutex_unlock(&lep->com.mutex);
2706 	c4iw_put_ep(&lep->com);
2707 	return 0;
2708 }
2709 
2710 int __c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2711 {
2712 	int ret;
2713 	mutex_lock(&ep->com.mutex);
2714 	ret = c4iw_ep_disconnect(ep, abrupt, gfp);
2715 	mutex_unlock(&ep->com.mutex);
2716 	return ret;
2717 }
2718 
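/*
 * c4iw_ep_disconnect - start a graceful close (sodisconnect) or an
 * abortive close (send_abort) depending on 'abrupt' and the current
 * endpoint state.  Called with ep->com.mutex held.  On a fatal adapter
 * error the QP is moved to ERROR and the endpoint's resources are
 * released immediately.
 */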
2719 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2720 {
2721 	int ret = 0;
2722 	int close = 0;
2723 	int fatal = 0;
2724 	struct c4iw_rdev *rdev;
2725 
2726 
2727 	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2728 
2729 	rdev = &ep->com.dev->rdev;
2730 
2731 	if (c4iw_fatal_error(rdev)) {
2732 
2733 		CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
2734 		fatal = 1;
2735 		close_complete_upcall(ep, -ECONNRESET);
2736 		send_abort(ep);
2737 		ep->com.state = DEAD;
2738 	}
2739 	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2740 	    states[ep->com.state]);
2741 
2742 	/*
2743 	 * Ref the ep here in case we have fatal errors causing the
2744 	 * ep to be released and freed.
2745 	 */
2746 	c4iw_get_ep(&ep->com);
2747 	switch (ep->com.state) {
2748 
2749 		case MPA_REQ_WAIT:
2750 		case MPA_REQ_SENT:
2751 		case MPA_REQ_RCVD:
2752 		case MPA_REP_SENT:
2753 		case FPDU_MODE:
2754 			close = 1;
2755 			if (abrupt)
2756 				ep->com.state = ABORTING;
2757 			else {
2758 				ep->com.state = CLOSING;
2759 				START_EP_TIMER(ep);
2760 			}
2761 			set_bit(CLOSE_SENT, &ep->com.flags);
2762 			break;
2763 
2764 		case CLOSING:
2765 
2766 			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2767 
2768 				close = 1;
2769 				if (abrupt) {
2770 					STOP_EP_TIMER(ep);
2771 					ep->com.state = ABORTING;
2772 				} else
2773 					ep->com.state = MORIBUND;
2774 			}
2775 			break;
2776 
2777 		case MORIBUND:
2778 		case ABORTING:
2779 		case DEAD:
2780 			CTR3(KTR_IW_CXGBE,
2781 			    "%s ignoring disconnect ep %p state %u", __func__,
2782 			    ep, ep->com.state);
2783 			break;
2784 
2785 		default:
2786 			BUG();
2787 			break;
2788 	}
2789 
2790 
2791 	if (close) {
2792 
2793 		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2794 
2795 		if (abrupt) {
2796 
2797 			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2798 			set_bit(EP_DISC_ABORT, &ep->com.history);
2799 			close_complete_upcall(ep, -ECONNRESET);
2800 			ret = send_abort(ep);
2801 			if (ret)
2802 				fatal = 1;
2803 		} else {
2804 
2805 			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2806 			set_bit(EP_DISC_CLOSE, &ep->com.history);
2807 
2808 			if (!ep->parent_ep)
2809 				ep->com.state = MORIBUND;
2810 			sodisconnect(ep->com.so);
2811 		}
2812 
2813 	}
2814 
2815 	if (fatal) {
2816 		set_bit(EP_DISC_FAIL, &ep->com.history);
2817 		if (!abrupt) {
2818 			STOP_EP_TIMER(ep);
2819 			close_complete_upcall(ep, -EIO);
2820 		}
2821 		if (ep->com.qp) {
2822 			struct c4iw_qp_attributes attrs = {0};
2823 
2824 			attrs.next_state = C4IW_QP_STATE_ERROR;
2825 			ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
2826 						C4IW_QP_ATTR_NEXT_STATE,
2827 						&attrs, 1);
2828 			if (ret) {
2829 				CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep);
2830 				printf("%s - qp <- error failed!\n", __func__);
2831 			}
2832 		}
2833 		release_ep_resources(ep);
2834 		ep->com.state = DEAD;
2835 		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2836 	}
2837 	c4iw_put_ep(&ep->com);
2838 	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2839 	return ret;
2840 }
2841 
2842 #ifdef C4IW_EP_REDIRECT
2843 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2844 		struct l2t_entry *l2t)
2845 {
2846 	struct c4iw_ep *ep = ctx;
2847 
2848 	if (ep->dst != old)
2849 		return 0;
2850 
2851 	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2852 			l2t);
2853 	dst_hold(new);
2854 	cxgb4_l2t_release(ep->l2t);
2855 	ep->l2t = l2t;
2856 	dst_release(old);
2857 	ep->dst = new;
2858 	return 1;
2859 }
2860 #endif
2861 
2862 
2863 
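/*
 * ep_timeout - MPA timer callback.  Runs in timer context, so it only
 * marks the endpoint as timed out and queues it for process_timeout()
 * on the taskqueue; the TIMEOUT flag and the event check guard against
 * queueing the same endpoint twice.
 */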
2864 static void ep_timeout(unsigned long arg)
2865 {
2866 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2867 
2868 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2869 
2870 		/*
2871 		 * Only insert if it is not already on the list.
2872 		 */
2873 		if (!(ep->com.ep_events & C4IW_EVENT_TIMEOUT)) {
2874 			CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2875 			add_ep_to_req_list(ep, C4IW_EVENT_TIMEOUT);
2876 		}
2877 	}
2878 }
2879 
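/*
 * fw6_wr_rpl - handle a FW6_TYPE_WR_RPL firmware message.  The second
 * 64-bit word carries the address of the c4iw_wr_wait the submitter is
 * sleeping on; wake it with the (negated) status taken from byte 1 of
 * the first word.
 */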
2880 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2881 {
2882 	uint64_t val = be64toh(*rpl);
2883 	int ret;
2884 	struct c4iw_wr_wait *wr_waitp;
2885 
2886 	ret = (int)((val >> 8) & 0xff);
2887 	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2888 	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2889 	if (wr_waitp)
2890 		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2891 
2892 	return (0);
2893 }
2894 
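/*
 * fw6_cqe_handler - handle a FW6_TYPE_CQE firmware message.  Runs in
 * the interrupt path, so the error CQE is copied into a list entry and
 * deferred to the taskqueue for process_err_cqes() to handle.
 */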
2895 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2896 {
2897 	struct cqe_list_entry *cle;
2898 	unsigned long flag;
2899 
2900 	cle = malloc(sizeof(*cle), M_CXGBE, M_NOWAIT);
	if (cle == NULL)
		return (0);	/* drop this error CQE on allocation failure */
2901 	cle->rhp = sc->iwarp_softc;
2902 	cle->err_cqe = *(const struct t4_cqe *)(&rpl[0]);
2903 
2904 	spin_lock_irqsave(&err_cqe_lock, flag);
2905 	list_add_tail(&cle->entry, &err_cqe_list);
2906 	queue_work(c4iw_taskq, &c4iw_task);
2907 	spin_unlock_irqrestore(&err_cqe_lock, flag);
2908 
2909 	return (0);
2910 }
2911 
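/*
 * process_terminate - handle a CPL_RDMA_TERMINATE indication from the
 * peer by moving the associated QP into the TERMINATE state.
 */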
2912 static int
2913 process_terminate(struct c4iw_ep *ep)
2914 {
2915 	struct c4iw_qp_attributes attrs = {0};
2916 
2917 	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);
2918 
2919 	if (ep && ep->com.qp) {
2920 
2921 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n",
2922 				ep->hwtid, ep->com.qp->wq.sq.qid);
2923 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2924 		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
2925 				1);
2926 	} else
2927 		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n",
2928 								ep ? ep->hwtid : 0);
2929 	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
2930 
2931 	return 0;
2932 }
2933 
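/*
 * c4iw_cm_init - register the CPL and firmware message handlers shared
 * by all adapters and create the single-threaded taskqueue that runs
 * the deferred endpoint work (process_req).
 */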
2934 int __init c4iw_cm_init(void)
2935 {
2936 
2937 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
2938 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
2939 	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
2940 	t4_register_an_handler(c4iw_ev_handler);
2941 
2942 	TAILQ_INIT(&req_list);
2943 	spin_lock_init(&req_lock);
2944 	INIT_LIST_HEAD(&err_cqe_list);
2945 	spin_lock_init(&err_cqe_lock);
2946 
2947 	INIT_WORK(&c4iw_task, process_req);
2948 
2949 	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
2950 	if (!c4iw_taskq)
2951 		return -ENOMEM;
2952 
2953 	return 0;
2954 }
2955 
2956 void __exit c4iw_cm_term(void)
2957 {
2958 	WARN_ON(!TAILQ_EMPTY(&req_list));
2959 	WARN_ON(!list_empty(&err_cqe_list));
2960 	flush_workqueue(c4iw_taskq);
2961 	destroy_workqueue(c4iw_taskq);
2962 
2963 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
2964 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
2965 	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
2966 	t4_register_an_handler(NULL);
2967 }
2968 #endif
2969