xref: /freebsd/sys/dev/cxgbe/iw_cxgbe/cm.c (revision 642870485c089b57000fe538d3485e272b038d59)
/*
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/in_fib.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
struct cpl_set_tcb_rpl;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))

#include "iw_cxgbe.h"
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>

static spinlock_t req_lock;
static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
static struct work_struct c4iw_task;
static struct workqueue_struct *c4iw_taskq;
static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void process_req(struct work_struct *ctx);
static void start_ep_timer(struct c4iw_ep *ep);
static int stop_ep_timer(struct c4iw_ep *ep);
static int set_tcpinfo(struct c4iw_ep *ep);
static void process_timeout(struct c4iw_ep *ep);
static void process_timedout_eps(void);
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc);
static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void *alloc_ep(int size, gfp_t flags);
void __free_ep(struct c4iw_ep_common *epc);
static int find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
		__be16 peer_port, u8 tos, struct nhop4_extended *pnh4);
static void close_socket(struct socket *so);
static int send_mpa_req(struct c4iw_ep *ep);
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
static void close_complete_upcall(struct c4iw_ep *ep, int status);
static int send_abort(struct c4iw_ep *ep);
static void peer_close_upcall(struct c4iw_ep *ep);
static void peer_abort_upcall(struct c4iw_ep *ep);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int connect_request_upcall(struct c4iw_ep *ep);
static void established_upcall(struct c4iw_ep *ep);
static int process_mpa_reply(struct c4iw_ep *ep);
static int process_mpa_request(struct c4iw_ep *ep);
static void process_peer_close(struct c4iw_ep *ep);
static void process_conn_error(struct c4iw_ep *ep);
static void process_close_complete(struct c4iw_ep *ep);
static void ep_timeout(unsigned long arg);
static void init_iwarp_socket(struct socket *so, void *arg);
static void uninit_iwarp_socket(struct socket *so);
static void process_data(struct c4iw_ep *ep);
static void process_connected(struct c4iw_ep *ep);
static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
static void process_socket_event(struct c4iw_ep *ep);
static void release_ep_resources(struct c4iw_ep *ep);

#define START_EP_TIMER(ep) \
    do { \
	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    start_ep_timer(ep); \
    } while (0)

#define STOP_EP_TIMER(ep) \
    ({ \
	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    stop_ep_timer(ep); \
    })
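
/*
 * Note: START_EP_TIMER() expands to a plain statement, while STOP_EP_TIMER()
 * is a GNU statement expression that also yields stop_ep_timer()'s return
 * value, so callers can log and test the result in one step, e.g.:
 *
 *	if (STOP_EP_TIMER(ep))
 *		return 0;	(timer had already fired; see stop_ep_timer())
 */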

#ifdef KTR
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
#endif

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}
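
/*
 * The {ref,deref}_cm_id() and {ref,deref}_qp() helpers exist largely for
 * debugging: besides taking or dropping the reference, each records a bit
 * in epc->history so the ref/deref sequence can be reconstructed from a
 * crash dump.
 */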

static void process_timeout(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int abort = 1;

	mutex_lock(&ep->com.mutex);
	CTR4(KTR_IW_CXGBE, "%s ep: %p, tid: %u, state %d", __func__,
			ep, ep->hwtid, ep->com.state);
	set_bit(TIMEDOUT, &ep->com.history);
	switch (ep->com.state) {
	case MPA_REQ_SENT:
		connect_reply_upcall(ep, -ETIMEDOUT);
		break;
	case MPA_REQ_WAIT:
	case MPA_REQ_RCVD:
	case MPA_REP_SENT:
	case FPDU_MODE:
		break;
	case CLOSING:
	case MORIBUND:
		if (ep->com.cm_id && ep->com.qp) {
			attrs.next_state = C4IW_QP_STATE_ERROR;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
		}
		close_complete_upcall(ep, -ETIMEDOUT);
		break;
	case ABORTING:
	case DEAD:
		/*
		 * These states are expected if the ep timed out at the same
		 * time as another thread was calling stop_ep_timer().
		 * So we silently do nothing for these states.
		 */
		abort = 0;
		break;
	default:
		CTR4(KTR_IW_CXGBE, "%s unexpected state ep %p tid %u state %u",
				__func__, ep, ep->hwtid, ep->com.state);
		abort = 0;
	}
	mutex_unlock(&ep->com.mutex);
	if (abort)
		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
	c4iw_put_ep(&ep->com);
	return;
}

static void process_timedout_eps(void)
{
	struct c4iw_ep *ep;

	spin_lock(&timeout_lock);
	while (!list_empty(&timeout_list)) {
		struct list_head *tmp;
		tmp = timeout_list.next;
		list_del(tmp);
		tmp->next = tmp->prev = NULL;
		spin_unlock(&timeout_lock);
		ep = list_entry(tmp, struct c4iw_ep, entry);
		process_timeout(ep);
		spin_lock(&timeout_lock);
	}
	spin_unlock(&timeout_lock);
	return;
}
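
/*
 * Both process_timedout_eps() and process_req() below use the same
 * pattern: unlink one element, clear its linkage so it can be re-queued
 * later, and drop the lock before calling the handler, because the
 * handler may sleep (it takes ep->com.mutex and calls into the ULP).
 */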

static void
process_req(struct work_struct *ctx)
{
	struct c4iw_ep_common *epc;

	process_timedout_eps();
	spin_lock(&req_lock);
	while (!TAILQ_EMPTY(&req_list)) {
		epc = TAILQ_FIRST(&req_list);
		TAILQ_REMOVE(&req_list, epc, entry);
		epc->entry.tqe_prev = NULL;
		spin_unlock(&req_lock);
		CTR3(KTR_IW_CXGBE, "%s so: %p, ep: %p", __func__,
				epc->so, epc);
		if (epc->so)
			process_socket_event((struct c4iw_ep *)epc);
		c4iw_put_ep(epc);
		process_timedout_eps();
		spin_lock(&req_lock);
	}
	spin_unlock(&req_lock);
}

/*
 * XXX: doesn't belong here in the iWARP driver.
 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
 *      set.  Is this a valid assumption for active open?
 */
static int
set_tcpinfo(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct toepcb *toep;
	int rc = 0;

	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if ((tp->t_flags & TF_TOE) == 0) {
		rc = EINVAL;
		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
		    __func__, so, ep);
		goto done;
	}
	toep = TOEPCB(so);

	ep->hwtid = toep->tid;
	ep->snd_seq = tp->snd_nxt;
	ep->rcv_seq = tp->rcv_nxt;
	ep->emss = max(tp->t_maxseg, 128);
done:
	INP_WUNLOCK(inp);
	return (rc);
}

static int
find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
		__be16 peer_port, u8 tos, struct nhop4_extended *pnh4)
{
	struct in_addr addr;
	int err;

	CTR5(KTR_IW_CXGBE, "%s:frtB %x, %x, %d, %d", __func__, local_ip,
	    peer_ip, ntohs(local_port), ntohs(peer_port));

	addr.s_addr = peer_ip;
	err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, addr, NHR_REF, 0, pnh4);

	CTR2(KTR_IW_CXGBE, "%s:frtE %d", __func__, err);
	return err;
}
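
/*
 * Note: NHR_REF asks the lookup to return a referenced copy of the next
 * hop; the caller is then responsible for releasing it (presumably via
 * fib4_free_nh_ext()) once it is done with pnh4.
 */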

static void
close_socket(struct socket *so)
{

	uninit_iwarp_socket(so);
	sodisconnect(so);
}

static void
process_peer_close(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {

		case MPA_REQ_WAIT:
			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT CLOSING",
			    __func__, ep);
			__state_set(&ep->com, CLOSING);
			break;

		case MPA_REQ_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
			    __func__, ep);
			__state_set(&ep->com, DEAD);
			connect_reply_upcall(ep, -ECONNABORTED);

			disconnect = 0;
			STOP_EP_TIMER(ep);
			close_socket(ep->com.so);
			deref_cm_id(&ep->com);
			release = 1;
			break;

		case MPA_REQ_RCVD:

			/*
			 * We're gonna mark this puppy DEAD, but keep
			 * the reference on it until the ULP accepts or
			 * rejects the CR.
			 */
			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
			    __func__, ep);
			__state_set(&ep->com, CLOSING);
			c4iw_get_ep(&ep->com);
			break;

		case MPA_REP_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
			    __func__, ep);
			__state_set(&ep->com, CLOSING);
			break;

		case FPDU_MODE:
			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
			    __func__, ep);
			START_EP_TIMER(ep);
			__state_set(&ep->com, CLOSING);
			attrs.next_state = C4IW_QP_STATE_CLOSING;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			peer_close_upcall(ep);
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
			    __func__, ep);
			__state_set(&ep->com, MORIBUND);
			disconnect = 0;
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);
			if (ep->com.cm_id && ep->com.qp) {
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
						C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			}
			close_socket(ep->com.so);
			close_complete_upcall(ep, 0);
			__state_set(&ep->com, DEAD);
			release = 1;
			disconnect = 0;
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		default:
			panic("%s: ep %p state %d", __func__, ep,
			    ep->com.state);
			break;
	}

	mutex_unlock(&ep->com.mutex);

	if (disconnect) {

		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
	}
	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
	return;
}

static void
process_conn_error(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int ret;
	int state;

	mutex_lock(&ep->com.mutex);
	state = ep->com.state;
	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
	    __func__, ep, ep->com.so, ep->com.so->so_error,
	    states[ep->com.state]);

	switch (state) {

		case MPA_REQ_WAIT:
			STOP_EP_TIMER(ep);
			break;

		case MPA_REQ_SENT:
			STOP_EP_TIMER(ep);
			connect_reply_upcall(ep, -ECONNRESET);
			break;

		case MPA_REP_SENT:
			ep->com.rpl_err = ECONNRESET;
			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
			break;

		case MPA_REQ_RCVD:

			/*
			 * We're gonna mark this puppy DEAD, but keep
			 * the reference on it until the ULP accepts or
			 * rejects the CR.
			 */
			c4iw_get_ep(&ep->com);
			break;

		case MORIBUND:
		case CLOSING:
			STOP_EP_TIMER(ep);
			/*FALLTHROUGH*/
		case FPDU_MODE:

			if (ep->com.cm_id && ep->com.qp) {

				attrs.next_state = C4IW_QP_STATE_ERROR;
				ret = c4iw_modify_qp(ep->com.qp->rhp,
					ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
					&attrs, 1);
				if (ret)
					log(LOG_ERR,
							"%s - qp <- error failed!\n",
							__func__);
			}
			peer_abort_upcall(ep);
			break;

		case ABORTING:
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
			    __func__, ep->com.so->so_error);
			mutex_unlock(&ep->com.mutex);
			return;

		default:
			panic("%s: ep %p state %d", __func__, ep, state);
			break;
	}

	if (state != ABORTING) {
		close_socket(ep->com.so);
		__state_set(&ep->com, DEAD);
		c4iw_put_ep(&ep->com);
	}
	mutex_unlock(&ep->com.mutex);
	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
	return;
}

static void
process_close_complete(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	set_bit(CLOSE_CON_RPL, &ep->com.history);

	switch (ep->com.state) {

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
			    __func__, ep);
			__state_set(&ep->com, MORIBUND);
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);

			if ((ep->com.cm_id) && (ep->com.qp)) {

				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
				    __func__, ep);
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.dev,
						ep->com.qp,
						C4IW_QP_ATTR_NEXT_STATE,
						&attrs, 1);
			}

			close_socket(ep->com.so);
			close_complete_upcall(ep, 0);
			__state_set(&ep->com, DEAD);
			release = 1;
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
			break;
		default:
			CTR2(KTR_IW_CXGBE, "%s:pcc7 %p unknown ep state",
					__func__, ep);
			panic("%s:pcc7 %p unknown ep state", __func__, ep);
			break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:pcc8 %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
	return;
}

static void
init_iwarp_socket(struct socket *so, void *arg)
{
	int rc;
	struct sockopt sopt;
	int on = 1;

	/* Note that SOCK_LOCK(so) is the same as SOCKBUF_LOCK(&so->so_rcv) */
	SOCK_LOCK(so);
	soupcall_set(so, SO_RCV, c4iw_so_upcall, arg);
	so->so_state |= SS_NBIO;
	SOCK_UNLOCK(so);
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;
	sopt.sopt_td = NULL;
	rc = sosetopt(so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
		    __func__, so, rc);
	}
}

static void
uninit_iwarp_socket(struct socket *so)
{

	SOCKBUF_LOCK(&so->so_rcv);
	soupcall_clear(so, SO_RCV);
	SOCKBUF_UNLOCK(&so->so_rcv);
}

static void
process_data(struct c4iw_ep *ep)
{
	struct sockaddr_in *local, *remote;
	int disconnect = 0;

	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		disconnect = process_mpa_reply(ep);
		break;
	case MPA_REQ_WAIT:
		in_getsockaddr(ep->com.so, (struct sockaddr **)&local);
		in_getpeeraddr(ep->com.so, (struct sockaddr **)&remote);
		ep->com.local_addr = *local;
		ep->com.remote_addr = *remote;
		free(local, M_SONAME);
		free(remote, M_SONAME);
		disconnect = process_mpa_request(ep);
		break;
	default:
		if (sbused(&ep->com.so->so_rcv))
			log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
			    "state %d, so %p, so_state 0x%x, sbused %u\n",
			    __func__, ep, state_read(&ep->com), ep->com.so,
			    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
		break;
	}
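
	/*
	 * process_mpa_reply()/process_mpa_request() return 0 (OK so far),
	 * 1 (close) or 2 (abort); 2 maps to an abortive disconnect here.
	 */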
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
}

static void
process_connected(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;

	if ((so->so_state & SS_ISCONNECTED) && !so->so_error) {
		if (send_mpa_req(ep))
			goto err;
	} else {
		connect_reply_upcall(ep, -so->so_error);
		goto err;
	}
	return;
err:
	close_socket(so);
	state_set(&ep->com, DEAD);
	c4iw_put_ep(&ep->com);
	return;
}

void
process_newconn(struct iw_cm_id *parent_cm_id, struct socket *child_so)
{
	struct c4iw_ep *child_ep;
	struct sockaddr_in *local;
	struct sockaddr_in *remote;
	struct c4iw_ep *parent_ep = parent_cm_id->provider_data;
	int ret = 0;

	MPASS(child_so != NULL);

	child_ep = alloc_ep(sizeof(*child_ep), M_WAITOK);

	CTR5(KTR_IW_CXGBE,
	    "%s: parent so %p, parent ep %p, child so %p, child ep %p",
	    __func__, parent_ep->com.so, parent_ep, child_so, child_ep);

	in_getsockaddr(child_so, (struct sockaddr **)&local);
	in_getpeeraddr(child_so, (struct sockaddr **)&remote);

	child_ep->com.local_addr = *local;
	child_ep->com.remote_addr = *remote;
	child_ep->com.dev = parent_ep->com.dev;
	child_ep->com.so = child_so;
	child_ep->com.cm_id = NULL;
	child_ep->com.thread = parent_ep->com.thread;
	child_ep->parent_ep = parent_ep;

	free(local, M_SONAME);
	free(remote, M_SONAME);

	init_iwarp_socket(child_so, &child_ep->com);
	c4iw_get_ep(&parent_ep->com);
	init_timer(&child_ep->timer);
	state_set(&child_ep->com, MPA_REQ_WAIT);
	START_EP_TIMER(child_ep);

	/* maybe the request has already been queued up on the socket... */
	ret = process_mpa_request(child_ep);
	if (ret == 2)
		/* ABORT */
		c4iw_ep_disconnect(child_ep, 1, GFP_KERNEL);
	else if (ret == 1)
		/* CLOSE */
		c4iw_ep_disconnect(child_ep, 0, GFP_KERNEL);

	return;
}

static int
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
	struct c4iw_ep *ep = arg;

	spin_lock(&req_lock);

	CTR6(KTR_IW_CXGBE,
	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
	    __func__, so, so->so_state, ep, states[ep->com.state],
	    ep->com.entry.tqe_prev);

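	/*
	 * com.entry.tqe_prev doubles as an "already queued" marker: it is
	 * non-NULL while the ep sits on req_list (process_req() clears it
	 * after TAILQ_REMOVE), so a second wakeup won't queue the ep twice.
	 */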
	if (ep && ep->com.so && !ep->com.entry.tqe_prev) {
		KASSERT(ep->com.so == so, ("%s: XXX review.", __func__));
		c4iw_get_ep(&ep->com);
		TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
		queue_work(c4iw_taskq, &c4iw_task);
	}

	spin_unlock(&req_lock);
	return (SU_OK);
}

static void
process_socket_event(struct c4iw_ep *ep)
{
	int state = state_read(&ep->com);
	struct socket *so = ep->com.so;

	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
	    so->so_error, so->so_rcv.sb_state, ep, states[state]);

	if (state == CONNECTING) {
		process_connected(ep);
		return;
	}

	if (state == LISTEN) {
		/* socket listening events are handled at IWCM */
		CTR3(KTR_IW_CXGBE, "%s Invalid ep state:%u, ep:%p", __func__,
			    ep->com.state, ep);
		BUG();
		return;
	}

	/* connection error */
	if (so->so_error) {
		process_conn_error(ep);
		return;
	}

	/* peer close */
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
		process_peer_close(ep);
		/*
		 * check whether socket disconnect event is pending before
		 * returning. Fallthrough if yes.
		 */
		if (!(so->so_state & SS_ISDISCONNECTED))
			return;
	}

	/* close complete */
	if (so->so_state & SS_ISDISCONNECTED) {
		process_close_complete(ep);
		return;
	}

	/* rx data */
	process_data(ep);
}

SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");

static int dack_mode = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
		"Delayed ack mode (default = 0)");

int c4iw_max_read_depth = 8;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
		"Per-connection max ORD/IRD (default = 8)");

static int enable_tcp_timestamps;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
		"Enable tcp timestamps (default = 0)");

static int enable_tcp_sack;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
		"Enable tcp SACK (default = 0)");

static int enable_tcp_window_scaling = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
		"Enable tcp window scaling (default = 1)");

int c4iw_debug = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
		"Enable debug logging (default = 1)");

static int peer2peer = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
		"Support peer2peer ULPs (default = 1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
		"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
		"CM Endpoint operation timeout in seconds (default = 60)");

static int mpa_rev = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
		"MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");

static int markers_enabled;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
		"Enable MPA MARKERS (default(0) = disabled)");

static int crc_enabled = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
		"Enable MPA CRC (default(1) = enabled)");

static int rcv_win = 256 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
		"TCP receive window in bytes (default = 256KB)");

static int snd_win = 128 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
		"TCP send window in bytes (default = 128KB)");
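
/*
 * All of the above are loader tunables as well as sysctls (CTLFLAG_RWTUN)
 * under the hw.iw_cxgbe node; for example (illustrative only):
 *
 *	sysctl hw.iw_cxgbe.c4iw_debug=1
 *	echo 'hw.iw_cxgbe.mpa_rev=2' >> /boot/loader.conf
 */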

static void
start_ep_timer(struct c4iw_ep *ep)
{

	if (timer_pending(&ep->timer)) {
		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
		    ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int
stop_ep_timer(struct c4iw_ep *ep)
{

	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}
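
/*
 * stop_ep_timer() returns 0 if this call stopped a pending timer (dropping
 * the reference taken in start_ep_timer()), and 1 if the TIMEOUT bit was
 * already set, in which case another path (the timeout handler, or an
 * earlier stop) is responsible for the final put.  STOP_EP_TIMER()
 * propagates this return value.
 */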

static enum c4iw_ep_state
state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);

	return (state);
}

static void
__state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{

	epc->state = new;
}

static void
state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{

	mutex_lock(&epc->mutex);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}

static void *
alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc == NULL)
		return (NULL);

	kref_init(&epc->kref);
	mutex_init(&epc->mutex);
	c4iw_init_wr_wait(&epc->wr_wait);

	return (epc);
}

void
__free_ep(struct c4iw_ep_common *epc)
{
	CTR2(KTR_IW_CXGBE, "%s:feB %p", __func__, epc);
	KASSERT(!epc->so, ("%s warning ep->so %p\n", __func__, epc->so));
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list!\n", __func__, epc));
	free(epc, M_DEVBUF);
	CTR2(KTR_IW_CXGBE, "%s:feE %p", __func__, epc);
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;
	struct c4iw_ep_common *epc;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	epc = &ep->com;
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
	    __func__, epc));
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
}

static int
send_mpa_req(struct c4iw_ep *ep)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	char mpa_rev_to_use = mpa_rev;
	int err = 0;

	if (ep->retry_with_mpa_v1)
		mpa_rev_to_use = 1;
	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);
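
	/*
	 * Wire layout being assembled below (a sketch; the struct definitions
	 * in the MPA headers are authoritative):
	 *
	 *	struct mpa_message		key, flags, revision, plen
	 *	[struct mpa_v2_conn_params	ird/ord, MPA v2 only]
	 *	[private data			ep->plen bytes]
	 */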

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL) {
		err = -ENOMEM;
		CTR3(KTR_IW_CXGBE, "%s:smr1 ep: %p, error: %d",
				__func__, ep, err);
		goto err;
	}

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		(markers_enabled ? MPA_MARKERS : 0) |
		(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;

	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size +=
			htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
			}
		}
		memcpy(mpa->private_data, &mpa_v2_params,
			sizeof(struct mpa_v2_conn_params));

		if (ep->plen) {

			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params),
				ep->mpa_pkt + sizeof(*mpa), ep->plen);
		}
	} else {

		if (ep->plen)
			memcpy(mpa->private_data,
					ep->mpa_pkt + sizeof(*mpa), ep->plen);
		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
	}

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		err = -ENOMEM;
		CTR3(KTR_IW_CXGBE, "%s:smr2 ep: %p, error: %d",
				__func__, ep, err);
		free(mpa, M_CXGBE);
		goto err;
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
			ep->com.thread);
	if (err) {
		CTR3(KTR_IW_CXGBE, "%s:smr3 ep: %p, error: %d",
				__func__, ep, err);
		goto err;
	}

	START_EP_TIMER(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
	return 0;
err:
	connect_reply_upcall(ep, err);
	CTR3(KTR_IW_CXGBE, "%s:smrE %p, error: %d", __func__, ep, err);
	return err;
}

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	int err;

	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
	    ep->plen);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpalen += sizeof(struct mpa_v2_conn_params);
		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
		    ep->mpa_attr.version, mpalen);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
			htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
				(peer2peer ? MPA_V2_PEER2PEER_MODEL :
				 0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					(p2p_type ==
					 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					 FW_RI_INIT_P2PTYPE_READ_REQ ?
					 MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
				sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
					sizeof(struct mpa_v2_conn_params), pdata, plen);
		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
	if (!err)
		ep->snd_seq += mpalen;
	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
	return err;
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mbuf *m;
	struct mpa_v2_conn_params mpa_v2_params;
	int err;

	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
		    ep->mpa_attr.version);
		mpalen += sizeof(struct mpa_v2_conn_params);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		(markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
			htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);

		if (peer2peer && (ep->mpa_attr.p2p_type !=
			FW_RI_INIT_P2PTYPE_DISABLED)) {

			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {

				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {

				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
		}

		memcpy(mpa->private_data, &mpa_v2_params,
			sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
			ep->com.thread);
	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
	return err;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
}

static int
send_abort(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct sockopt sopt;
	int rc;
	struct linger l;

	CTR5(KTR_IW_CXGBE, "%s ep %p so %p state %s tid %d", __func__, ep, so,
	    states[ep->com.state], ep->hwtid);

	l.l_onoff = 1;
	l.l_linger = 0;

	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	sopt.sopt_td = NULL;
	rc = sosetopt(so, &sopt);
	if (rc != 0) {
		log(LOG_ERR, "%s: sosetopt(%p, linger = 0) failed with %d.\n",
		    __func__, so, rc);
	}

	uninit_iwarp_socket(so);
	sodisconnect(so);
	set_bit(ABORT_CONN, &ep->com.history);

	/*
	 * TBD: iw_cxgbe driver should receive ABORT reply for every ABORT
	 * request it has sent. But the current TOE driver is not propagating
	 * this ABORT reply event (via do_abort_rpl) to iw_cxgbe. So as a
	 * workaround, de-reference 'ep' (which was referenced before sending
	 * the ABORT request) here instead of doing it in the abort_rpl()
	 * handler of the iw_cxgbe driver.
	 */
	c4iw_put_ep(&ep->com);

	return (0);
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s:cruB %p, status: %d", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = ((status == -ECONNABORTED) || (status == -EPIPE)) ?
					-ECONNRESET : status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {

		if (!ep->tried_with_mpa_v1) {

			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {

			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
		set_bit(CONN_RPL_UPCALL, &ep->com.history);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}

	if (status == -ECONNABORTED) {

		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
		return;
	}

	if (status < 0) {

		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
		deref_cm_id(&ep->com);
	}

	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
	    ep->tried_with_mpa_v1);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;
	event.so = ep->com.so;

	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {

		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}

	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
	    &event);
	if (ret)
		c4iw_put_ep(&ep->com);

	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
}

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect reply was delivered to the ULP
 * or the MPA reply is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	struct mbuf *top, *m;
	int flags = MSG_DONTWAIT;
	struct uio uio;
	int disconnect = 0;

	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (STOP_EP_TIMER(ep))
		return 0;

	uio.uio_resid = 1000000;
	uio.uio_td = ep->com.thread;
	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);

	if (err) {

		if (err == EWOULDBLOCK) {

			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
			START_EP_TIMER(ep);
			return 0;
		}
		err = -err;
		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
		goto err;
	}

	if (ep->com.so->so_rcv.sb_mb) {

		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
		       __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
	}

	m = top;

	do {

		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
		/*
		 * If we get more than the supported amount of private data
		 * then we must fail this connection.
		 */
		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {

			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
			    ep->mpa_pkt_len + m->m_len);
			err = (-EINVAL);
			goto err_stop_timer;
		}

		/*
		 * copy the new data into our accumulation buffer.
		 */
		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
		ep->mpa_pkt_len += m->m_len;
		if (!m->m_next)
			m = m->m_nextpkt;
		else
			m = m->m_next;
	} while (m);
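
	/*
	 * Note the walk above: within an mbuf chain follow m_next; when a
	 * chain ends, step to the next record via m_nextpkt.
	 */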

	m_freem(top);
	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa)) {
		return 0;
	}
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {

		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
		    mpa->revision, mpa_rev);
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
				" Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {

		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {

		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
		return 0;
	}

	if (mpa->flags & MPA_REJECT) {

		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = ((mpa->flags & MPA_CRC) | crc_enabled) ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {

		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;

		if (ep->mpa_attr.enhanced_rdma_conn) {

			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {

				CTR2(KTR_IW_CXGBE, "%s:pmre %p", __func__, ep);
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
				insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
				MPA_V2_PEER2PEER_MODEL) {

				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
				if (ntohs(mpa_v2_params->ord) &
					MPA_V2_RDMA_WRITE_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				}
				else if (ntohs(mpa_v2_params->ord) &
					MPA_V2_RDMA_READ_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
			}
		}
	} else {

		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);

		if (mpa->revision == 1) {

			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);

			if (peer2peer) {

				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
				ep->mpa_attr.p2p_type = p2p_type;
			}
		}
	}

	if (set_tcpinfo(ep)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
		printf("%s set_tcpinfo error\n", __func__);
		err = -ECONNRESET;
		goto err;
	}

	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	    ep->mpa_attr.p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
		(ep->mpa_attr.p2p_type != p2p_type)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
		C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
		C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);

	if (err) {

		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
		goto err;
	}

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {

		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {

		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
				__func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	STOP_EP_TIMER(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
	return disconnect;
}

/*
 * process_mpa_request - process streaming mode MPA request
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
1799 static int
1800 process_mpa_request(struct c4iw_ep *ep)
1801 {
1802 	struct mpa_message *mpa;
1803 	u16 plen;
1804 	int flags = MSG_DONTWAIT;
1805 	int rc;
1806 	struct iovec iov;
1807 	struct uio uio;
1808 	enum c4iw_ep_state state = state_read(&ep->com);
1809 
1810 	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);
1811 
1812 	if (state != MPA_REQ_WAIT)
1813 		return 0;
1814 
1815 	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
1816 	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
1817 	uio.uio_iov = &iov;
1818 	uio.uio_iovcnt = 1;
1819 	uio.uio_offset = 0;
1820 	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
1821 	uio.uio_segflg = UIO_SYSSPACE;
1822 	uio.uio_rw = UIO_READ;
1823 	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */
1824 
1825 	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
1826 	if (rc == EAGAIN)
1827 		return 0;
1828 	else if (rc)
1829 		goto err_stop_timer;
1830 
1831 	KASSERT(uio.uio_offset > 0, ("%s: sorecieve on so %p read no data",
1832 	    __func__, ep->com.so));
1833 	ep->mpa_pkt_len += uio.uio_offset;
1834 
1835 	/*
1836 	 * If we get more than the supported amount of private data then we must
1837 	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
1838 	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
1839 	 * byte is filled by the soreceive above.
1840 	 */
1841 
1842 	/* Don't even have the MPA message.  Wait for more data to arrive. */
1843 	if (ep->mpa_pkt_len < sizeof(*mpa))
1844 		return 0;
1845 	mpa = (struct mpa_message *) ep->mpa_pkt;
1846 
1847 	/*
1848 	 * Validate MPA Header.
1849 	 */
1850 	if (mpa->revision > mpa_rev) {
1851 		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
1852 		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
1853 		goto err_stop_timer;
1854 	}
1855 
1856 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
1857 		goto err_stop_timer;
1858 
1859 	/*
1860 	 * Fail if there's too much private data.
1861 	 */
1862 	plen = ntohs(mpa->private_data_size);
1863 	if (plen > MPA_MAX_PRIVATE_DATA)
1864 		goto err_stop_timer;
1865 
1866 	/*
1867 	 * If plen does not account for pkt size
1868 	 */
1869 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
1870 		goto err_stop_timer;
1871 
1872 	ep->plen = (u8) plen;
1873 
1874 	/*
1875 	 * If we don't have all the pdata yet, then bail.
1876 	 */
1877 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1878 		return 0;
1879 
1880 	/*
1881 	 * If we get here we have accumulated the entire mpa
1882 	 * start reply message including private data.
1883 	 */
1884 	ep->mpa_attr.initiator = 0;
1885 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1886 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
1887 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1888 	ep->mpa_attr.version = mpa->revision;
1889 	if (mpa->revision == 1)
1890 		ep->tried_with_mpa_v1 = 1;
1891 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1892 
1893 	if (mpa->revision == 2) {
1894 		ep->mpa_attr.enhanced_rdma_conn =
1895 		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1896 		if (ep->mpa_attr.enhanced_rdma_conn) {
1897 			struct mpa_v2_conn_params *mpa_v2_params;
1898 			u16 ird, ord;
1899 
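			/*
			 * The v2 connection parameters immediately follow
			 * the fixed MPA header.  The high-order bits of ird
			 * and ord carry the peer-to-peer model and RTR type;
			 * MPA_V2_IRD_ORD_MASK extracts the actual values.
			 */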
1900 			mpa_v2_params = (void *)&ep->mpa_pkt[sizeof(*mpa)];
1901 			ird = ntohs(mpa_v2_params->ird);
1902 			ord = ntohs(mpa_v2_params->ord);
1903 
1904 			ep->ird = ird & MPA_V2_IRD_ORD_MASK;
1905 			ep->ord = ord & MPA_V2_IRD_ORD_MASK;
1906 			if (ird & MPA_V2_PEER2PEER_MODEL && peer2peer) {
1907 				if (ord & MPA_V2_RDMA_WRITE_RTR) {
1908 					ep->mpa_attr.p2p_type =
1909 					    FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1910 				} else if (ord & MPA_V2_RDMA_READ_RTR) {
1911 					ep->mpa_attr.p2p_type =
1912 					    FW_RI_INIT_P2PTYPE_READ_REQ;
1913 				}
1914 			}
1915 		}
1916 	} else if (mpa->revision == 1 && peer2peer)
1917 		ep->mpa_attr.p2p_type = p2p_type;
1918 
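	/*
	 * Snapshot the offloaded connection's TCP state (tid, sequence
	 * numbers, emss) for QP initialization; this fails if the socket
	 * is no longer offloaded, in which case we must abort.
	 */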
1919 	if (set_tcpinfo(ep))
1920 		goto err_stop_timer;
1921 
1922 	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
1923 	    "xmit_marker_enabled = %d, version = %d", __func__,
1924 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1925 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
1926 
1927 	state_set(&ep->com, MPA_REQ_RCVD);
1928 	STOP_EP_TIMER(ep);
1929 
1930 	/* Deliver the connect request upcall under the parent ep's mutex. */
1931 	mutex_lock(&ep->parent_ep->com.mutex);
1932 	if (ep->parent_ep->com.state != DEAD) {
1933 		if (connect_request_upcall(ep))
1934 			goto err_unlock_parent;
1935 	} else
1936 		goto err_unlock_parent;
1937 	mutex_unlock(&ep->parent_ep->com.mutex);
1938 	return 0;
1939 
1940 err_unlock_parent:
1941 	mutex_unlock(&ep->parent_ep->com.mutex);
1942 	goto err_out;
1943 err_stop_timer:
1944 	STOP_EP_TIMER(ep);
1945 err_out:
1946 	return 2;
1947 }
1948 
1949 /*
1950  * c4iw_reject_cr - reject a pending MPA connect request on behalf of the
1951  * ULP: send an MPA reject carrying any supplied private data (or abort
1952  * outright when mpa_rev is 0), then tear the connection down.
1953  */
1954 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1955 {
1956 	int err;
1957 	struct c4iw_ep *ep = to_ep(cm_id);
1958 	int abort = 0;
1959 	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
1960 
1961 	/* DEAD is also != MPA_REQ_RCVD, so one check covers both cases. */
1962 	if (state_read(&ep->com) != MPA_REQ_RCVD) {
1963 
1964 		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
1965 		c4iw_put_ep(&ep->com);
1966 		return -ECONNRESET;
1967 	}
1968 	set_bit(ULP_REJECT, &ep->com.history);
1969 
1970 	if (mpa_rev == 0) {
1971 
1972 		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
1973 		abort = 1;
1974 	}
1975 	else {
1976 
1977 		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
1978 		abort = send_mpa_reject(ep, pdata, pdata_len);
1979 	}
1980 	stop_ep_timer(ep);
1981 	err = c4iw_ep_disconnect(ep, abort != 0, GFP_KERNEL);
1982 	c4iw_put_ep(&ep->com);
1983 	CTR3(KTR_IW_CXGBE, "%s:crc4 %p, err: %d", __func__, ep, err);
1984 	return 0;
1985 }
1986 
1987 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1988 {
1989 	int err;
1990 	struct c4iw_qp_attributes attrs;
1991 	enum c4iw_qp_attr_mask mask;
1992 	struct c4iw_ep *ep = to_ep(cm_id);
1993 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
1994 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
1995 	int abort = 0;
1996 
1997 	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
1998 
1999 	if (state_read(&ep->com) == DEAD) {
2000 
2001 		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
2002 		err = -ECONNRESET;
2003 		goto err_out;
2004 	}
2005 
2006 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
2007 	BUG_ON(!qp);
2008 
2009 	set_bit(ULP_ACCEPT, &ep->com.history);
2010 
2011 	if ((conn_param->ord > c4iw_max_read_depth) ||
2012 		(conn_param->ird > c4iw_max_read_depth)) {
2013 
2014 		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
2015 		err = -EINVAL;
2016 		goto err_abort;
2017 	}
2018 
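	/*
	 * MPA v2 enhanced mode: our ORD may not exceed the peer's
	 * advertised IRD, and our IRD may not exceed the peer's ORD,
	 * except that when the peer advertised an ORD of 0 our IRD is
	 * clamped to 1; any other violation rejects the accept.
	 */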
2019 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
2020 
2021 		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
2022 
2023 		if (conn_param->ord > ep->ird) {
2024 
2025 			CTR2(KTR_IW_CXGBE, "%s:cac4 %p", __func__, ep);
2026 			ep->ird = conn_param->ird;
2027 			ep->ord = conn_param->ord;
2028 			send_mpa_reject(ep, conn_param->private_data,
2029 					conn_param->private_data_len);
2030 			err = -ENOMEM;
2031 			goto err_abort;
2032 		}
2033 
2034 		if (conn_param->ird > ep->ord) {
2035 
2036 			CTR2(KTR_IW_CXGBE, "%s:cac5 %p", __func__, ep);
2037 
2038 			if (!ep->ord) {
2039 
2040 				CTR2(KTR_IW_CXGBE, "%s:cac6 %p", __func__, ep);
2041 				conn_param->ird = 1;
2042 			}
2043 			else {
2044 				CTR2(KTR_IW_CXGBE, "%s:cac7 %p", __func__, ep);
2045 				err = -ENOMEM;
2046 				goto err_abort;
2047 			}
2048 		}
2049 
2050 	}
2051 	ep->ird = conn_param->ird;
2052 	ep->ord = conn_param->ord;
2053 
2054 	if (ep->mpa_attr.version != 2) {
2055 
2056 		CTR2(KTR_IW_CXGBE, "%s:cac8 %p", __func__, ep);
2057 
2058 		if (peer2peer && ep->ird == 0) {
2059 
2060 			CTR2(KTR_IW_CXGBE, "%s:cac9 %p", __func__, ep);
2061 			ep->ird = 1;
2062 		}
2063 	}
2064 
2066 	ep->com.cm_id = cm_id;
2067 	ref_cm_id(&ep->com);
2068 	ep->com.qp = qp;
2069 	ref_qp(ep);
2070 	/* ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; */
2071 
2072 	/* bind QP to EP and move to RTS */
2073 	attrs.mpa_attr = ep->mpa_attr;
2074 	attrs.max_ird = ep->ird;
2075 	attrs.max_ord = ep->ord;
2076 	attrs.llp_stream_handle = ep;
2077 	attrs.next_state = C4IW_QP_STATE_RTS;
2078 
2079 	/* bind QP and TID with INIT_WR */
2080 	mask = C4IW_QP_ATTR_NEXT_STATE |
2081 		C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2082 		C4IW_QP_ATTR_MPA_ATTR |
2083 		C4IW_QP_ATTR_MAX_IRD |
2084 		C4IW_QP_ATTR_MAX_ORD;
2085 
2086 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2087 
2088 	if (err) {
2089 
2090 		CTR2(KTR_IW_CXGBE, "%s:caca %p", __func__, ep);
2091 		goto err_deref_cm_id;
2092 	}
2093 	err = send_mpa_reply(ep, conn_param->private_data,
2094 			conn_param->private_data_len);
2095 
2096 	if (err) {
2097 
2098 		CTR2(KTR_IW_CXGBE, "%s:cacb %p", __func__, ep);
2099 		goto err_deref_cm_id;
2100 	}
2101 
2102 	state_set(&ep->com, FPDU_MODE);
2103 	established_upcall(ep);
2104 	c4iw_put_ep(&ep->com);
2105 	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2106 	return 0;
2107 err_deref_cm_id:
2108 	deref_cm_id(&ep->com);
2109 err_abort:
2110 	abort = 1;
2111 err_out:
2112 	if (abort)
2113 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2114 	c4iw_put_ep(&ep->com);
2115 	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2116 	return err;
2117 }
2118 
2121 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2122 {
2123 	int err = 0;
2124 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2125 	struct c4iw_ep *ep = NULL;
2126 	struct nhop4_extended nh4;
2127 
2128 	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2129 
2130 	if ((conn_param->ord > c4iw_max_read_depth) ||
2131 		(conn_param->ird > c4iw_max_read_depth)) {
2132 
2133 		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2134 		err = -EINVAL;
2135 		goto out;
2136 	}
2137 	ep = alloc_ep(sizeof(*ep), M_NOWAIT);
2138 
2139 	if (!ep) {
2140 
2141 		CTR2(KTR_IW_CXGBE, "%s:cc2 %p", __func__, cm_id);
2142 		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2143 		err = -ENOMEM;
2144 		goto out;
2145 	}
2146 	init_timer(&ep->timer);
2147 	ep->plen = conn_param->private_data_len;
2148 
2149 	if (ep->plen) {
2150 
2151 		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2152 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2153 				conn_param->private_data, ep->plen);
2154 	}
2155 	ep->ird = conn_param->ird;
2156 	ep->ord = conn_param->ord;
2157 
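	/*
	 * Peer-to-peer mode exchanges an RTR message (a zero-length RDMA
	 * operation) at connection setup, which presumably needs at least
	 * one outstanding-read slot, so force a minimum ORD of 1.
	 */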
2158 	if (peer2peer && ep->ord == 0) {
2159 
2160 		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2161 		ep->ord = 1;
2162 	}
2163 
2164 	ep->com.dev = dev;
2165 	ep->com.cm_id = cm_id;
2166 	ref_cm_id(&ep->com);
2167 	ep->com.qp = get_qhp(dev, conn_param->qpn);
2168 
2169 	if (!ep->com.qp) {
2170 
2171 		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2172 		err = -EINVAL;
2173 		goto fail2;
2174 	}
2175 	ref_qp(ep);
2176 	ep->com.thread = curthread;
2177 	ep->com.so = cm_id->so;
2178 
2179 	/* find a route */
2180 	err = find_route(
2181 		cm_id->local_addr.sin_addr.s_addr,
2182 		cm_id->remote_addr.sin_addr.s_addr,
2183 		cm_id->local_addr.sin_port,
2184 		cm_id->remote_addr.sin_port, 0, &nh4);
2185 
2186 	if (err) {
2187 
2188 		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2189 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2190 		err = -EHOSTUNREACH;
2191 		goto fail2;
2192 	}
2193 
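	/*
	 * iWARP can only be offloaded if the egress interface has TOE
	 * enabled and a TOE device attached; otherwise refuse the connect.
	 */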
2194 	if (!(nh4.nh_ifp->if_capenable & IFCAP_TOE) ||
2195 	    TOEDEV(nh4.nh_ifp) == NULL) {
2196 		err = -ENOPROTOOPT;
2197 		goto fail3;
2198 	}
2199 	fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);
2200 
2201 	state_set(&ep->com, CONNECTING);
2202 	ep->tos = 0;
2203 	ep->com.local_addr = cm_id->local_addr;
2204 	ep->com.remote_addr = cm_id->remote_addr;
2205 	err = soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2206 		ep->com.thread);
2207 
2208 	if (!err) {
2209 		init_iwarp_socket(cm_id->so, &ep->com);
2210 		goto out;
2211 	} else {
2212 		goto fail2;
2213 	}
2214 
2215 fail3:
2216 	fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);
2217 fail2:
2218 	deref_cm_id(&ep->com);
2219 	c4iw_put_ep(&ep->com);
2220 	ep = NULL;	/* CTR shouldn't display already-freed ep. */
2221 out:
2222 	CTR2(KTR_IW_CXGBE, "%s:ccE %p", __func__, ep);
2223 	return err;
2224 }
2225 
2226 /*
2227  * iwcm->create_listen_ep.  Returns -errno on failure.
2228  */
2229 int
2230 c4iw_create_listen_ep(struct iw_cm_id *cm_id, int backlog)
2231 {
2232 	int rc;
2233 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2234 	struct c4iw_listen_ep *ep;
2235 	struct socket *so = cm_id->so;
2236 
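	/*
	 * The iw_cm layer hands us an already-created listening socket in
	 * cm_id->so; only the endpoint wrapper is allocated here.
	 */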
2237 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2238 	CTR5(KTR_IW_CXGBE, "%s: cm_id %p, lso %p, ep %p, inp %p", __func__,
2239 	    cm_id, so, ep, so->so_pcb);
2240 	if (ep == NULL) {
2241 		log(LOG_ERR, "%s: failed to alloc memory for endpoint\n",
2242 		    __func__);
2243 		rc = ENOMEM;
2244 		goto failed;
2245 	}
2246 
2247 	ep->com.cm_id = cm_id;
2248 	ref_cm_id(&ep->com);
2249 	ep->com.dev = dev;
2250 	ep->backlog = backlog;
2251 	ep->com.local_addr = cm_id->local_addr;
2252 	ep->com.thread = curthread;
2253 	state_set(&ep->com, LISTEN);
2254 	ep->com.so = so;
2255 
2256 	cm_id->provider_data = ep;
2257 	return (0);
2258 
2259 failed:
2260 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, FAILED (%d)", __func__, cm_id, rc);
2261 	return (-rc);
2262 }
2263 
2264 void
2265 c4iw_destroy_listen_ep(struct iw_cm_id *cm_id)
2266 {
2267 	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
2268 
2269 	CTR4(KTR_IW_CXGBE, "%s: cm_id %p, so %p, state %s", __func__, cm_id,
2270 	    cm_id->so, states[ep->com.state]);
2271 
2272 	state_set(&ep->com, DEAD);
2273 	deref_cm_id(&ep->com);
2274 	c4iw_put_ep(&ep->com);
2275 
2276 	return;
2277 }
2278 
2279 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2280 {
2281 	int ret = 0;
2282 	int close = 0;
2283 	int fatal = 0;
2284 	struct c4iw_rdev *rdev;
2285 
2286 	mutex_lock(&ep->com.mutex);
2287 
2288 	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2289 
2290 	rdev = &ep->com.dev->rdev;
2291 
2292 	if (c4iw_fatal_error(rdev)) {
2293 
2294 		CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
2295 		fatal = 1;
2296 		close_complete_upcall(ep, -ECONNRESET);
2297 		ep->com.state = DEAD;
2298 	}
2299 	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2300 	    states[ep->com.state]);
2301 
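	/*
	 * Decide from the current state whether anything must go out on the
	 * wire: abrupt moves the endpoint to ABORTING, while a graceful
	 * close moves it to CLOSING and starts the endpoint timer to bound
	 * the wait for the peer.
	 */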
2302 	switch (ep->com.state) {
2303 
2304 		case MPA_REQ_WAIT:
2305 		case MPA_REQ_SENT:
2306 		case MPA_REQ_RCVD:
2307 		case MPA_REP_SENT:
2308 		case FPDU_MODE:
2309 			close = 1;
2310 			if (abrupt)
2311 				ep->com.state = ABORTING;
2312 			else {
2313 				ep->com.state = CLOSING;
2314 				START_EP_TIMER(ep);
2315 			}
2316 			set_bit(CLOSE_SENT, &ep->com.flags);
2317 			break;
2318 
2319 		case CLOSING:
2320 
2321 			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2322 
2323 				close = 1;
2324 				if (abrupt) {
2325 					STOP_EP_TIMER(ep);
2326 					ep->com.state = ABORTING;
2327 				} else
2328 					ep->com.state = MORIBUND;
2329 			}
2330 			break;
2331 
2332 		case MORIBUND:
2333 		case ABORTING:
2334 		case DEAD:
2335 			CTR3(KTR_IW_CXGBE,
2336 			    "%s ignoring disconnect ep %p state %u", __func__,
2337 			    ep, ep->com.state);
2338 			break;
2339 
2340 		default:
2341 			BUG();
2342 			break;
2343 	}
2344 
2345 	mutex_unlock(&ep->com.mutex);
2346 
2347 	if (close) {
2348 
2349 		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2350 
2351 		if (abrupt) {
2352 
2353 			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2354 			set_bit(EP_DISC_ABORT, &ep->com.history);
2355 			close_complete_upcall(ep, -ECONNRESET);
2356 			ret = send_abort(ep);
2357 		} else {
2358 
2359 			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2360 			set_bit(EP_DISC_CLOSE, &ep->com.history);
2361 
2362 			if (!ep->parent_ep)
2363 				__state_set(&ep->com, MORIBUND);
2364 			ret = sodisconnect(ep->com.so);
2365 		}
2366 
2367 		if (ret) {
2368 
2369 			fatal = 1;
2370 		}
2371 	}
2372 
2373 	if (fatal) {
2374 		set_bit(EP_DISC_FAIL, &ep->com.history);
2375 		if (!abrupt) {
2376 			STOP_EP_TIMER(ep);
2377 			close_complete_upcall(ep, -EIO);
2378 		}
2379 		if (ep->com.qp) {
2380 			struct c4iw_qp_attributes attrs;
2381 
2382 			attrs.next_state = C4IW_QP_STATE_ERROR;
2383 			ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
2384 						C4IW_QP_ATTR_NEXT_STATE,
2385 						&attrs, 1);
2386 			if (ret) {
2387 				CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep);
2388 				printf("%s - qp <- error failed!\n", __func__);
2389 			}
2390 		}
2391 		ep->com.state = DEAD;	/* set before the last ref may drop */
2392 		release_ep_resources(ep);
2393 		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2394 	}
2395 	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2396 	return ret;
2397 }
2398 
2399 #ifdef C4IW_EP_REDIRECT
2400 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2401 		struct l2t_entry *l2t)
2402 {
2403 	struct c4iw_ep *ep = ctx;
2404 
2405 	if (ep->dst != old)
2406 		return 0;
2407 
2408 	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2409 			l2t);
2410 	dst_hold(new);
2411 	cxgb4_l2t_release(ep->l2t);
2412 	ep->l2t = l2t;
2413 	dst_release(old);
2414 	ep->dst = new;
2415 	return 1;
2416 }
2417 #endif
2418 
2421 static void ep_timeout(unsigned long arg)
2422 {
2423 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2424 	int kickit = 0;
2425 
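	/*
	 * Runs in timer (callout) context, so only queue the endpoint
	 * here; the real timeout work happens in the c4iw_taskq worker
	 * (process_req() -> process_timedout_eps()).
	 */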
2426 	CTR2(KTR_IW_CXGBE, "%s:etB %p", __func__, ep);
2427 	spin_lock(&timeout_lock);
2428 
2429 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2430 
2431 		/*
2432 		 * Only insert if it is not already on the list.
2433 		 */
2434 		if (!ep->entry.next) {
2435 			list_add_tail(&ep->entry, &timeout_list);
2436 			kickit = 1;
2437 		}
2438 	}
2439 	spin_unlock(&timeout_lock);
2440 
2441 	if (kickit) {
2442 
2443 		CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2444 		queue_work(c4iw_taskq, &c4iw_task);
2445 	}
2446 	CTR2(KTR_IW_CXGBE, "%s:etE %p", __func__, ep);
2447 }
2448 
2449 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2450 {
2451 	uint64_t val = be64toh(*rpl);
2452 	int ret;
2453 	struct c4iw_wr_wait *wr_waitp;
2454 
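	/*
	 * Bits 15:8 of the first flit carry the firmware return status;
	 * the second flit returns the c4iw_wr_wait cookie embedded in the
	 * work request, used to wake up the waiting submitter.
	 */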
2455 	ret = (int)((val >> 8) & 0xff);
2456 	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2457 	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2458 	if (wr_waitp)
2459 		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2460 
2461 	return (0);
2462 }
2463 
2464 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2465 {
2466 	struct t4_cqe cqe = *(const struct t4_cqe *)(&rpl[0]);
2467 
2468 	CTR2(KTR_IW_CXGBE, "%s rpl %p", __func__, rpl);
2469 	c4iw_ev_dispatch(sc->iwarp_softc, &cqe);
2470 
2471 	return (0);
2472 }
2473 
2474 static int terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
2475 {
2476 	struct adapter *sc = iq->adapter;
2477 	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
2478 	unsigned int tid = GET_TID(cpl);
2479 	struct c4iw_qp_attributes attrs;
2480 	struct toepcb *toep = lookup_tid(sc, tid);
2481 	struct socket *so;
2482 	struct c4iw_ep *ep;
2483 
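	/*
	 * Recover the endpoint from the hardware tid: tid -> toepcb ->
	 * inpcb -> socket, whose receive upcall argument is the c4iw_ep.
	 */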
2484 	INP_WLOCK(toep->inp);
2485 	so = inp_inpcbtosocket(toep->inp);
2486 	ep = so->so_rcv.sb_upcallarg;
2487 	INP_WUNLOCK(toep->inp);
2488 
2489 	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);
2490 
2491 	if (ep && ep->com.qp) {
2492 
2493 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
2494 				ep->com.qp->wq.sq.qid);
2495 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2496 		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
2497 				1);
2498 	} else
2499 		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
2500 	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
2501 
2502 	return 0;
2503 }
2504 
2505 int __init c4iw_cm_init(void)
2506 {
2507 
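	/*
	 * Hook into the TOE: CPL_RDMA_TERMINATE delivers peer TERMINATE
	 * messages, and the FW6 handlers deliver work request completions
	 * and CQ events from the firmware.
	 */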
2508 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
2509 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
2510 	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
2511 	t4_register_an_handler(c4iw_ev_handler);
2512 
2513 	TAILQ_INIT(&req_list);
2514 	spin_lock_init(&req_lock);
2515 	INIT_LIST_HEAD(&timeout_list);
2516 	spin_lock_init(&timeout_lock);
2517 
2518 	INIT_WORK(&c4iw_task, process_req);
2519 
2520 	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
2521 	if (!c4iw_taskq)
2522 		return -ENOMEM;
2523 
2524 	return 0;
2525 }
2526 
2527 void __exit c4iw_cm_term(void)
2528 {
2529 	WARN_ON(!TAILQ_EMPTY(&req_list));
2530 	WARN_ON(!list_empty(&timeout_list));
2531 	flush_workqueue(c4iw_taskq);
2532 	destroy_workqueue(c4iw_taskq);
2533 
2534 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
2535 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
2536 	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
2537 	t4_register_an_handler(NULL);
2538 }
2539 #endif
2540