/*
 * Copyright (c) 2009-2013 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))

#include "iw_cxgbe.h"
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>

static spinlock_t req_lock;
static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
static struct work_struct c4iw_task;
static struct workqueue_struct *c4iw_taskq;
static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void process_req(struct work_struct *ctx);
static void start_ep_timer(struct c4iw_ep *ep);
static void stop_ep_timer(struct c4iw_ep *ep);
static int set_tcpinfo(struct c4iw_ep *ep);
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc);
static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void *alloc_ep(int size, gfp_t flags);
void __free_ep(struct c4iw_ep_common *epc);
static struct rtentry * find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
		__be16 peer_port, u8 tos);
static int close_socket(struct c4iw_ep_common *epc, int close);
static int shutdown_socket(struct c4iw_ep_common *epc);
static void abort_socket(struct c4iw_ep *ep);
static void send_mpa_req(struct c4iw_ep *ep);
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
static void close_complete_upcall(struct c4iw_ep *ep, int status);
static int abort_connection(struct c4iw_ep *ep);
static void peer_close_upcall(struct c4iw_ep *ep);
static void peer_abort_upcall(struct c4iw_ep *ep);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int connect_request_upcall(struct c4iw_ep *ep);
static void established_upcall(struct c4iw_ep *ep);
static void process_mpa_reply(struct c4iw_ep *ep);
static void process_mpa_request(struct c4iw_ep *ep);
static void process_peer_close(struct c4iw_ep *ep);
static void process_conn_error(struct c4iw_ep *ep);
static void process_close_complete(struct c4iw_ep *ep);
static void ep_timeout(unsigned long arg);
static void init_sock(struct c4iw_ep_common *epc);
static void process_data(struct c4iw_ep *ep);
static void process_connected(struct c4iw_ep *ep);
static struct socket * dequeue_socket(struct socket *head,
		struct sockaddr_in **remote, struct c4iw_ep *child_ep);
static void process_newconn(struct c4iw_ep *parent_ep);
static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
static void process_socket_event(struct c4iw_ep *ep);
static void release_ep_resources(struct c4iw_ep *ep);

#define START_EP_TIMER(ep) \
    do { \
	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    start_ep_timer(ep); \
    } while (0)

#define STOP_EP_TIMER(ep) \
    do { \
	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    stop_ep_timer(ep); \
    } while (0)

#ifdef KTR
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
#endif

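/*
 * Work queue handler: drain req_list and dispatch the pending socket
 * event for each queued endpoint, dropping the reference that was taken
 * when the endpoint was queued by c4iw_so_upcall().
 */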
static void
process_req(struct work_struct *ctx)
{
	struct c4iw_ep_common *epc;

	spin_lock(&req_lock);
	while (!TAILQ_EMPTY(&req_list)) {
		epc = TAILQ_FIRST(&req_list);
		TAILQ_REMOVE(&req_list, epc, entry);
		epc->entry.tqe_prev = NULL;
		spin_unlock(&req_lock);
		if (epc->so)
			process_socket_event((struct c4iw_ep *)epc);
		c4iw_put_ep(epc);
		spin_lock(&req_lock);
	}
	spin_unlock(&req_lock);
}

/*
 * XXX: doesn't belong here in the iWARP driver.
 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
 *      set.  Is this a valid assumption for active open?
 */
static int
set_tcpinfo(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct toepcb *toep;
	int rc = 0;

	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if ((tp->t_flags & TF_TOE) == 0) {
		rc = EINVAL;
		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
		    __func__, so, ep);
		goto done;
	}
	toep = TOEPCB(so);

	ep->hwtid = toep->tid;
	ep->snd_seq = tp->snd_nxt;
	ep->rcv_seq = tp->rcv_nxt;
	ep->emss = max(tp->t_maxseg, 128);
done:
	INP_WUNLOCK(inp);
	return (rc);
}

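/*
 * Look up an IPv4 route to the peer.  Returns the rtentry (referenced by
 * rtalloc()) on success, or NULL if no route could be found.
 */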
static struct rtentry *
find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
		__be16 peer_port, u8 tos)
{
	struct route iproute;
	struct sockaddr_in *dst = (struct sockaddr_in *)&iproute.ro_dst;

	CTR5(KTR_IW_CXGBE, "%s:frtB %x, %x, %d, %d", __func__, local_ip,
	    peer_ip, ntohs(local_port), ntohs(peer_port));
	bzero(&iproute, sizeof iproute);
	dst->sin_family = AF_INET;
	dst->sin_len = sizeof *dst;
	dst->sin_addr.s_addr = peer_ip;

	rtalloc(&iproute);
	CTR2(KTR_IW_CXGBE, "%s:frtE %p", __func__, (uint64_t)iproute.ro_rt);
	return iproute.ro_rt;
}

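/*
 * Detach the receive upcall from the socket and then either close it
 * (close != 0) or shut it down in both directions.
 */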
static int
close_socket(struct c4iw_ep_common *epc, int close)
{
	struct socket *so = epc->so;
	int rc;

	CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, so, epc,
	    states[epc->state]);

	SOCK_LOCK(so);
	soupcall_clear(so, SO_RCV);
	SOCK_UNLOCK(so);

	if (close)
		rc = soclose(so);
	else
		rc = soshutdown(so, SHUT_RDWR);
	epc->so = NULL;

	return (rc);
}

static int
shutdown_socket(struct c4iw_ep_common *epc)
{

	CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, epc->so, epc,
	    states[epc->state]);

	return (soshutdown(epc->so, SHUT_WR));
}

static void
abort_socket(struct c4iw_ep *ep)
{
	struct sockopt sopt;
	int rc;
	struct linger l;

	CTR4(KTR_IW_CXGBE, "%s ep %p so %p state %s", __func__, ep, ep->com.so,
	    states[ep->com.state]);

	l.l_onoff = 1;
	l.l_linger = 0;

	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	sopt.sopt_td = NULL;
	rc = sosetopt(ep->com.so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set linger to 0, no RST! err %d\n",
		    __func__, rc);
	}
}

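/*
 * The peer sent a FIN.  Advance the endpoint state machine, move the QP
 * along where required, and deliver the matching upcalls to the CM.
 */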
static void
process_peer_close(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {

		case MPA_REQ_WAIT:
			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT CLOSING",
			    __func__, ep);
			__state_set(&ep->com, CLOSING);
			break;

		case MPA_REQ_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT CLOSING",
			    __func__, ep);
			__state_set(&ep->com, DEAD);
			connect_reply_upcall(ep, -ECONNABORTED);

			disconnect = 0;
			STOP_EP_TIMER(ep);
			close_socket(&ep->com, 0);
			ep->com.cm_id->rem_ref(ep->com.cm_id);
			ep->com.cm_id = NULL;
			ep->com.qp = NULL;
			release = 1;
			break;

		case MPA_REQ_RCVD:

			/*
			 * We're gonna mark this puppy DEAD, but keep
			 * the reference on it until the ULP accepts or
			 * rejects the CR.
			 */
			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
			    __func__, ep);
			__state_set(&ep->com, CLOSING);
			c4iw_get_ep(&ep->com);
			break;

		case MPA_REP_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
			    __func__, ep);
			__state_set(&ep->com, CLOSING);
			break;

		case FPDU_MODE:
			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
			    __func__, ep);
			START_EP_TIMER(ep);
			__state_set(&ep->com, CLOSING);
			attrs.next_state = C4IW_QP_STATE_CLOSING;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
					C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			peer_close_upcall(ep);
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
			    __func__, ep);
			__state_set(&ep->com, MORIBUND);
			disconnect = 0;
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);
			if (ep->com.cm_id && ep->com.qp) {
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
						C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			}
			close_socket(&ep->com, 0);
			close_complete_upcall(ep, 0);
			__state_set(&ep->com, DEAD);
			release = 1;
			disconnect = 0;
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		default:
			panic("%s: ep %p state %d", __func__, ep,
			    ep->com.state);
			break;
	}

	mutex_unlock(&ep->com.mutex);

	if (disconnect) {

		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
	}
	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
	return;
}

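/*
 * The socket reported an error (reset, timeout, etc.).  Fail whatever
 * stage the connection is in and release the endpoint unless an abort
 * is already in progress.
 */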
static void
process_conn_error(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int ret;
	int state;

	state = state_read(&ep->com);
	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
	    __func__, ep, ep->com.so, ep->com.so->so_error,
	    states[ep->com.state]);

	switch (state) {

		case MPA_REQ_WAIT:
			STOP_EP_TIMER(ep);
			break;

		case MPA_REQ_SENT:
			STOP_EP_TIMER(ep);
			connect_reply_upcall(ep, -ECONNRESET);
			break;

		case MPA_REP_SENT:
			ep->com.rpl_err = ECONNRESET;
			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
			break;

		case MPA_REQ_RCVD:

			/*
			 * We're gonna mark this puppy DEAD, but keep
			 * the reference on it until the ULP accepts or
			 * rejects the CR.
			 */
			c4iw_get_ep(&ep->com);
			break;

		case MORIBUND:
		case CLOSING:
			STOP_EP_TIMER(ep);
			/*FALLTHROUGH*/
		case FPDU_MODE:

			if (ep->com.cm_id && ep->com.qp) {

				attrs.next_state = C4IW_QP_STATE_ERROR;
				ret = c4iw_modify_qp(ep->com.qp->rhp,
					ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
					&attrs, 1);
				if (ret)
					log(LOG_ERR,
							"%s - qp <- error failed!\n",
							__func__);
			}
			peer_abort_upcall(ep);
			break;

		case ABORTING:
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
			    __func__, ep->com.so->so_error);
			return;

		default:
			panic("%s: ep %p state %d", __func__, ep, state);
			break;
	}

	if (state != ABORTING) {

		CTR2(KTR_IW_CXGBE, "%s:pce1 %p", __func__, ep);
		close_socket(&ep->com, 1);
		state_set(&ep->com, DEAD);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
	return;
}

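/*
 * Our shutdown has completed (SS_ISDISCONNECTED).  Finish the orderly
 * close: idle the QP, close the socket, and deliver the close-complete
 * upcall.
 */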
static void
process_close_complete(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);

	switch (ep->com.state) {

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
			    __func__, ep);
			__state_set(&ep->com, MORIBUND);
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);

			if ((ep->com.cm_id) && (ep->com.qp)) {

				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
				    __func__, ep);
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.dev,
						ep->com.qp,
						C4IW_QP_ATTR_NEXT_STATE,
						&attrs, 1);
			}

			if (ep->parent_ep) {

				CTR2(KTR_IW_CXGBE, "%s:pcc3 %p", __func__, ep);
				close_socket(&ep->com, 1);
			}
			else {

				CTR2(KTR_IW_CXGBE, "%s:pcc4 %p", __func__, ep);
				close_socket(&ep->com, 0);
			}
			close_complete_upcall(ep, 0);
			__state_set(&ep->com, DEAD);
			release = 1;
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
			break;

		case DEAD:
		default:
			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
			panic("%s:pcc6 %p DEAD", __func__, ep);
			break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:pcc7 %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
	return;
}

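/*
 * Prepare a socket for use by the driver: install the receive upcall,
 * mark it non-blocking, and disable Nagle (TCP_NODELAY) so small MPA
 * frames are sent immediately.
 */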
static void
init_sock(struct c4iw_ep_common *epc)
{
	int rc;
	struct sockopt sopt;
	struct socket *so = epc->so;
	int on = 1;

	SOCK_LOCK(so);
	soupcall_set(so, SO_RCV, c4iw_so_upcall, epc);
	so->so_state |= SS_NBIO;
	SOCK_UNLOCK(so);
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;
	sopt.sopt_td = NULL;
	rc = sosetopt(so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
		    __func__, so, rc);
	}
}

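/*
 * Receive upcall work: dispatch inbound bytes to the MPA reply or MPA
 * request parser according to the connection state.  Streaming data in
 * any other state is unexpected and is logged.
 */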
static void
process_data(struct c4iw_ep *ep)
{
	struct sockaddr_in *local, *remote;

	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		process_mpa_reply(ep);
		break;
	case MPA_REQ_WAIT:
		in_getsockaddr(ep->com.so, (struct sockaddr **)&local);
		in_getpeeraddr(ep->com.so, (struct sockaddr **)&remote);
		ep->com.local_addr = *local;
		ep->com.remote_addr = *remote;
		free(local, M_SONAME);
		free(remote, M_SONAME);
		process_mpa_request(ep);
		break;
	default:
		if (sbused(&ep->com.so->so_rcv))
			log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
			    "state %d, so %p, so_state 0x%x, sbused %u\n",
			    __func__, ep, state_read(&ep->com), ep->com.so,
			    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
		break;
	}
}

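/*
 * Active open completed.  If the connect succeeded, kick off the MPA
 * exchange; otherwise report the socket error upstream and tear down.
 */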
static void
process_connected(struct c4iw_ep *ep)
{

	if ((ep->com.so->so_state & SS_ISCONNECTED) && !ep->com.so->so_error)
		send_mpa_req(ep);
	else {
		connect_reply_upcall(ep, -ep->com.so->so_error);
		close_socket(&ep->com, 0);
		state_set(&ep->com, DEAD);
		c4iw_put_ep(&ep->com);
	}
}

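/*
 * Pull one completed connection off the listening socket's queue, attach
 * the child endpoint's upcall, and accept it.  This mirrors what accept(2)
 * does in the kernel, without going through a file descriptor.
 */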
static struct socket *
dequeue_socket(struct socket *head, struct sockaddr_in **remote,
    struct c4iw_ep *child_ep)
{
	struct socket *so;

	ACCEPT_LOCK();
	so = TAILQ_FIRST(&head->so_comp);
	if (!so) {
		ACCEPT_UNLOCK();
		return (NULL);
	}
	TAILQ_REMOVE(&head->so_comp, so, so_list);
	head->so_qlen--;
	SOCK_LOCK(so);
	so->so_qstate &= ~SQ_COMP;
	so->so_head = NULL;
	soref(so);
	soupcall_set(so, SO_RCV, c4iw_so_upcall, child_ep);
	so->so_state |= SS_NBIO;
	SOCK_UNLOCK(so);
	ACCEPT_UNLOCK();
	soaccept(so, (struct sockaddr **)remote);

	return (so);
}

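/*
 * A new connection arrived on a listening endpoint: allocate a child
 * endpoint, dequeue the child socket, and wait for the peer's MPA request
 * (which may already be queued on the socket).
 */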
static void
process_newconn(struct c4iw_ep *parent_ep)
{
	struct socket *child_so;
	struct c4iw_ep *child_ep;
	struct sockaddr_in *remote;

	child_ep = alloc_ep(sizeof(*child_ep), M_NOWAIT);
	if (!child_ep) {
		CTR3(KTR_IW_CXGBE, "%s: parent so %p, parent ep %p, ENOMEM",
		    __func__, parent_ep->com.so, parent_ep);
		log(LOG_ERR, "%s: failed to allocate ep entry\n", __func__);
		return;
	}

	child_so = dequeue_socket(parent_ep->com.so, &remote, child_ep);
	if (!child_so) {
		CTR4(KTR_IW_CXGBE,
		    "%s: parent so %p, parent ep %p, child ep %p, dequeue err",
		    __func__, parent_ep->com.so, parent_ep, child_ep);
		log(LOG_ERR, "%s: failed to dequeue child socket\n", __func__);
		__free_ep(&child_ep->com);
		return;
	}

	CTR5(KTR_IW_CXGBE,
	    "%s: parent so %p, parent ep %p, child so %p, child ep %p",
	     __func__, parent_ep->com.so, parent_ep, child_so, child_ep);

	child_ep->com.local_addr = parent_ep->com.local_addr;
	child_ep->com.remote_addr = *remote;
	child_ep->com.dev = parent_ep->com.dev;
	child_ep->com.so = child_so;
	child_ep->com.cm_id = NULL;
	child_ep->com.thread = parent_ep->com.thread;
	child_ep->parent_ep = parent_ep;

	free(remote, M_SONAME);
	c4iw_get_ep(&parent_ep->com);
	init_timer(&child_ep->timer);
	state_set(&child_ep->com, MPA_REQ_WAIT);
	START_EP_TIMER(child_ep);

	/* maybe the request has already been queued up on the socket... */
	process_mpa_request(child_ep);
}

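/*
 * Socket upcall, invoked by the socket layer whenever something of
 * interest happens on the socket.  Do no real work here: take a
 * reference, queue the endpoint on req_list, and let the taskqueue run
 * process_req().
 */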
static int
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
	struct c4iw_ep *ep = arg;

	spin_lock(&req_lock);

	CTR6(KTR_IW_CXGBE,
	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
	    __func__, so, so->so_state, ep, states[ep->com.state],
	    ep->com.entry.tqe_prev);

	if (ep && ep->com.so && !ep->com.entry.tqe_prev) {
		KASSERT(ep->com.so == so, ("%s: XXX review.", __func__));
		c4iw_get_ep(&ep->com);
		TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
		queue_work(c4iw_taskq, &c4iw_task);
	}

	spin_unlock(&req_lock);
	return (SU_OK);
}

static void
process_socket_event(struct c4iw_ep *ep)
{
	int state = state_read(&ep->com);
	struct socket *so = ep->com.so;

	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
	    so->so_error, so->so_rcv.sb_state, ep, states[state]);

	if (state == CONNECTING) {
		process_connected(ep);
		return;
	}

	if (state == LISTEN) {
		process_newconn(ep);
		return;
	}

	/* connection error */
	if (so->so_error) {
		process_conn_error(ep);
		return;
	}

	/* peer close */
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state < CLOSING) {
		process_peer_close(ep);
		return;
	}

	/* close complete */
	if (so->so_state & SS_ISDISCONNECTED) {
		process_close_complete(ep);
		return;
	}

	/* rx data */
	process_data(ep);
}

SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");

int db_delay_usecs = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_delay_usecs, CTLFLAG_RWTUN, &db_delay_usecs, 0,
		"Usecs to delay awaiting db fifo to drain");

static int dack_mode = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
		"Delayed ack mode (default = 1)");

int c4iw_max_read_depth = 8;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
		"Per-connection max ORD/IRD (default = 8)");

static int enable_tcp_timestamps;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
		"Enable tcp timestamps (default = 0)");

static int enable_tcp_sack;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
		"Enable tcp SACK (default = 0)");

static int enable_tcp_window_scaling = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
		"Enable tcp window scaling (default = 1)");

int c4iw_debug = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
		"Enable debug logging (default = 1)");

static int peer2peer;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
		"Support peer2peer ULPs (default = 0)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
		"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
		"CM Endpoint operation timeout in seconds (default = 60)");

static int mpa_rev = 1;
#ifdef IW_CM_MPAV2
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
		"MPA Revision, 0 supports amso1100, 1 is RFC 5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");
#else
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
		"MPA Revision, 0 supports amso1100, 1 is RFC 5044 spec compliant (default = 1)");
#endif

static int markers_enabled;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
		"Enable MPA MARKERS (default(0) = disabled)");

static int crc_enabled = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
		"Enable MPA CRC (default(1) = enabled)");

static int rcv_win = 256 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
		"TCP receive window in bytes (default = 256KB)");

static int snd_win = 128 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
		"TCP send window in bytes (default = 128KB)");

int db_fc_threshold = 2000;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_fc_threshold, CTLFLAG_RWTUN, &db_fc_threshold, 0,
		"QP count/threshold that triggers automatic db flow control mode (default = 2000)");

static void
start_ep_timer(struct c4iw_ep *ep)
{

	if (timer_pending(&ep->timer)) {
		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
		    ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static void
stop_ep_timer(struct c4iw_ep *ep)
{

	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
	}
}

static enum c4iw_ep_state
state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);

	return (state);
}

static void
__state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{

	epc->state = new;
}

static void
state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{

	mutex_lock(&epc->mutex);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}

static void *
alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc == NULL)
		return (NULL);

	kref_init(&epc->kref);
	mutex_init(&epc->mutex);
	c4iw_init_wr_wait(&epc->wr_wait);

	return (epc);
}

void
__free_ep(struct c4iw_ep_common *epc)
{
	CTR2(KTR_IW_CXGBE, "%s:feB %p", __func__, epc);
	KASSERT(!epc->so, ("%s warning ep->so %p\n", __func__, epc->so));
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list!\n", __func__, epc));
	free(epc, M_DEVBUF);
	CTR2(KTR_IW_CXGBE, "%s:feE %p", __func__, epc);
}

void
_c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;
	struct c4iw_ep_common *epc;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	epc = &ep->com;
	KASSERT(!epc->so, ("%s ep->so %p", __func__, epc->so));
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
	    __func__, epc));
	kfree(ep);
}

static void
release_ep_resources(struct c4iw_ep *ep)
{
	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
}

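/*
 * Build and send the MPA start request: key, flags (CRC/markers), and,
 * for MPA v2, the enhanced-connection parameters (IRD/ORD and the RTR
 * type), followed by any private data from the ULP.
 */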
static void
send_mpa_req(struct c4iw_ep *ep)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	char mpa_rev_to_use = mpa_rev;
	int err;

	if (ep->retry_with_mpa_v1)
		mpa_rev_to_use = 1;
	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL) {
failed:
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
		(markers_enabled ? MPA_MARKERS : 0) |
		(mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;

	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size +=
			htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
			}
		}
		memcpy(mpa->private_data, &mpa_v2_params,
			sizeof(struct mpa_v2_conn_params));

		if (ep->plen) {

			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params),
				ep->mpa_pkt + sizeof(*mpa), ep->plen);
		}
	} else {

		if (ep->plen)
			memcpy(mpa->private_data,
					ep->mpa_pkt + sizeof(*mpa), ep->plen);
		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
	}

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		goto failed;
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
	    ep->com.thread);
	if (err)
		goto failed;

	START_EP_TIMER(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
}

static int
send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	int err;

	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
	    ep->plen);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpalen += sizeof(struct mpa_v2_conn_params);
		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
		    ep->mpa_attr.version, mpalen);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
			htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
				(peer2peer ? MPA_V2_PEER2PEER_MODEL :
				 0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
					(p2p_type ==
					 FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
					 MPA_V2_RDMA_WRITE_RTR : p2p_type ==
					 FW_RI_INIT_P2PTYPE_READ_REQ ?
					 MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
				sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
					sizeof(struct mpa_v2_conn_params), pdata, plen);
		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
	if (!err)
		ep->snd_seq += mpalen;
	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
	return err;
}

static int
send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mbuf *m;
	struct mpa_v2_conn_params mpa_v2_params;
	int err;

	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
		    ep->mpa_attr.version);
		mpalen += sizeof(struct mpa_v2_conn_params);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
		(markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {

		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
			htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);

		if (peer2peer && (ep->mpa_attr.p2p_type !=
			FW_RI_INIT_P2PTYPE_DISABLED)) {

			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {

				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_WRITE_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
			else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {

				mpa_v2_params.ord |=
					htons(MPA_V2_RDMA_READ_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
		}

		memcpy(mpa->private_data, &mpa_v2_params,
			sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
				sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
			ep->com.thread);
	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
	return err;
}

static void
close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
}

static int
abort_connection(struct c4iw_ep *ep)
{
	int err;

	CTR2(KTR_IW_CXGBE, "%s:abB %p", __func__, ep);
	state_set(&ep->com, ABORTING);
	abort_socket(ep);
	err = close_socket(&ep->com, 0);
	set_bit(ABORT_CONN, &ep->com.history);
	CTR2(KTR_IW_CXGBE, "%s:abE %p", __func__, ep);
	return err;
}

static void
peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
}

static void
peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
}

static void
connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s:cruB %p %d", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = (status == -ECONNABORTED) ? -ECONNRESET : status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {

		if (!ep->tried_with_mpa_v1) {

			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
				sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message) +
				sizeof(struct mpa_v2_conn_params);
		} else {

			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
				sizeof(struct mpa_message);
		}
	}

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
		set_bit(CONN_RPL_UPCALL, &ep->com.history);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}

	if (status == -ECONNABORTED) {

		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
		return;
	}

	if (status < 0) {

		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
		ep->com.cm_id->rem_ref(ep->com.cm_id);
		ep->com.cm_id = NULL;
		ep->com.qp = NULL;
	}

	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
}

static int
connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
	    ep->tried_with_mpa_v1);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;
	event.so = ep->com.so;

	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
#ifdef IW_CM_MPAV2
		event.ord = ep->ord;
		event.ird = ep->ird;
#endif
		event.private_data_len = ep->plen -
			sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
			sizeof(struct mpa_v2_conn_params);
	} else {

		/* this means MPA_v1 is used. Send max supported */
#ifdef IW_CM_MPAV2
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
#endif
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}

	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
	    &event);
	if (ret)
		c4iw_put_ep(&ep->com);

	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void
established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
#ifdef IW_CM_MPAV2
	event.ird = ep->ird;
	event.ord = ep->ord;
#endif
	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
}

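/*
 * Read and parse the peer's MPA start reply.  Accumulates data in
 * ep->mpa_pkt until the full message (including private data) has
 * arrived, validates the header, negotiates MPA v2 IRD/ORD and RTR
 * settings, then binds the QP and moves it to RTS.
 */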
static void
process_mpa_reply(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	struct mbuf *top, *m;
	int flags = MSG_DONTWAIT;
	struct uio uio;

	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then the state has
	 * changed and we bail since ep_timeout already aborted
	 * the connection.
	 */
	STOP_EP_TIMER(ep);
	if (state_read(&ep->com) != MPA_REQ_SENT)
		return;

	uio.uio_resid = 1000000;
	uio.uio_td = ep->com.thread;
	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);

	if (err) {

		if (err == EWOULDBLOCK) {

			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
			START_EP_TIMER(ep);
			return;
		}
		err = -err;
		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
		goto err;
	}

	if (ep->com.so->so_rcv.sb_mb) {

		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
		       __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
	}

	m = top;

	do {

		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
		/*
		 * If we get more than the supported amount of private data
		 * then we must fail this connection.
		 */
		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {

			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
			    ep->mpa_pkt_len + m->m_len);
			err = (-EINVAL);
			goto err;
		}

		/*
		 * copy the new data into our accumulation buffer.
		 */
		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
		ep->mpa_pkt_len += m->m_len;
		if (!m->m_next)
			m = m->m_nextpkt;
		else
			m = m->m_next;
	} while (m);

	m_freem(top);
	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {

		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
		    mpa->revision, mpa_rev);
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
				" Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err;
	}

	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {

		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
		err = -EPROTO;
		goto err;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {

		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
		err = -EPROTO;
		goto err;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
		err = -EPROTO;
		goto err;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue process when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
		return;
	}

	if (mpa->flags & MPA_REJECT) {

		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
		err = -ECONNREFUSED;
		goto err;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {

		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
		ep->mpa_attr.enhanced_rdma_conn =
			mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;

		if (ep->mpa_attr.enhanced_rdma_conn) {

			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
			mpa_v2_params = (struct mpa_v2_conn_params *)
				(ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
				MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
				MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {

				CTR2(KTR_IW_CXGBE, "%s:pmre %p", __func__, ep);
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
				insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
				MPA_V2_PEER2PEER_MODEL) {

				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
				if (ntohs(mpa_v2_params->ord) &
					MPA_V2_RDMA_WRITE_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				}
				else if (ntohs(mpa_v2_params->ord) &
					MPA_V2_RDMA_READ_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
					ep->mpa_attr.p2p_type =
						FW_RI_INIT_P2PTYPE_READ_REQ;
				}
			}
		}
	} else {

		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);

		if (mpa->revision == 1) {

			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);

			if (peer2peer) {

				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
				ep->mpa_attr.p2p_type = p2p_type;
			}
		}
	}

	if (set_tcpinfo(ep)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
		printf("%s set_tcpinfo error\n", __func__);
		goto err;
	}

	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	    ep->mpa_attr.p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
		(ep->mpa_attr.p2p_type != p2p_type)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
		C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
		C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);

	if (err) {

		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
		goto err;
	}

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {

		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {

		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
				__func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
			C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		goto out;
	}
	goto out;
err:
	state_set(&ep->com, ABORTING);
	abort_connection(ep);
out:
	connect_reply_upcall(ep, err);
	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
	return;
}

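/*
 * Read and parse the peer's MPA start request on a child endpoint.
 * Accumulates data until the full message has arrived, validates the
 * header, records the negotiated attributes, and then drives the
 * connect-request upcall to the listening CM id.
 */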
static void
process_mpa_request(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	u16 plen;
	int flags = MSG_DONTWAIT;
	int rc;
	struct iovec iov;
	struct uio uio;
	enum c4iw_ep_state state = state_read(&ep->com);

	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);

	if (state != MPA_REQ_WAIT)
		return;

	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */

	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
	if (rc == EAGAIN)
		return;
	else if (rc) {
abort:
		STOP_EP_TIMER(ep);
		abort_connection(ep);
		return;
	}
	KASSERT(uio.uio_offset > 0, ("%s: soreceive on so %p read no data",
	    __func__, ep->com.so));
	ep->mpa_pkt_len += uio.uio_offset;

	/*
	 * If we get more than the supported amount of private data then we must
	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
	 * byte is filled by the soreceive above.
	 */

	/* Don't even have the MPA message.  Wait for more data to arrive. */
	if (ep->mpa_pkt_len < sizeof(*mpa))
		return;
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/*
	 * Validate MPA Header.
	 */
	if (mpa->revision > mpa_rev) {
		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
		goto abort;
	}

	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
		goto abort;

	/*
	 * Fail if there's too much private data.
	 */
	plen = ntohs(mpa->private_data_size);
	if (plen > MPA_MAX_PRIVATE_DATA)
		goto abort;

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
		goto abort;

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
		return;

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data.
	 */
	ep->mpa_attr.initiator = 0;
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	if (mpa->revision == 1)
		ep->tried_with_mpa_v1 = 1;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {
		ep->mpa_attr.enhanced_rdma_conn =
		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
		if (ep->mpa_attr.enhanced_rdma_conn) {
			struct mpa_v2_conn_params *mpa_v2_params;
			u16 ird, ord;

			mpa_v2_params = (void *)&ep->mpa_pkt[sizeof(*mpa)];
			ird = ntohs(mpa_v2_params->ird);
			ord = ntohs(mpa_v2_params->ord);

			ep->ird = ird & MPA_V2_IRD_ORD_MASK;
			ep->ord = ord & MPA_V2_IRD_ORD_MASK;
			if (ird & MPA_V2_PEER2PEER_MODEL && peer2peer) {
				if (ord & MPA_V2_RDMA_WRITE_RTR) {
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				} else if (ord & MPA_V2_RDMA_READ_RTR) {
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_READ_REQ;
				}
			}
		}
	} else if (mpa->revision == 1 && peer2peer)
		ep->mpa_attr.p2p_type = p2p_type;

	if (set_tcpinfo(ep))
		goto abort;

	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);

	state_set(&ep->com, MPA_REQ_RCVD);
	STOP_EP_TIMER(ep);

	/* drive upcall */
	mutex_lock(&ep->parent_ep->com.mutex);
	if (ep->parent_ep->com.state != DEAD) {
		if (connect_request_upcall(ep)) {
			abort_connection(ep);
		}
	} else
		abort_connection(ep);
	mutex_unlock(&ep->parent_ep->com.mutex);
}

/*
 * ULP rejected the connect request: send an MPA reject frame carrying
 * any private data (or abort outright when MPA negotiation is disabled)
 * and shut the socket down.
 */
1856 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1857 {
1858 	int err;
1859 	struct c4iw_ep *ep = to_ep(cm_id);
1860 	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
1861 
1862 	if (state_read(&ep->com) == DEAD) {
1863 
1864 		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
1865 		c4iw_put_ep(&ep->com);
1866 		return -ECONNRESET;
1867 	}
1868 	set_bit(ULP_REJECT, &ep->com.history);
1869 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1870 
1871 	if (mpa_rev == 0) {
1872 
1873 		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
1874 		abort_connection(ep);
1875 	}
1876 	else {
1877 
1878 		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
		/* Best effort; the socket is shut down regardless. */
		(void) send_mpa_reject(ep, pdata, pdata_len);
		err = soshutdown(ep->com.so, SHUT_RDWR);
1881 	}
1882 	c4iw_put_ep(&ep->com);
1883 	CTR2(KTR_IW_CXGBE, "%s:crc4 %p", __func__, ep);
1884 	return 0;
1885 }
1886 
1887 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1888 {
1889 	int err;
1890 	struct c4iw_qp_attributes attrs;
1891 	enum c4iw_qp_attr_mask mask;
1892 	struct c4iw_ep *ep = to_ep(cm_id);
1893 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
1894 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
1895 
1896 	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
1897 
1898 	if (state_read(&ep->com) == DEAD) {
1899 
1900 		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
1901 		err = -ECONNRESET;
1902 		goto err;
1903 	}
1904 
1905 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1906 	BUG_ON(!qp);
1907 
1908 	set_bit(ULP_ACCEPT, &ep->com.history);
1909 
1910 	if ((conn_param->ord > c4iw_max_read_depth) ||
1911 		(conn_param->ird > c4iw_max_read_depth)) {
1912 
1913 		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
1914 		abort_connection(ep);
1915 		err = -EINVAL;
1916 		goto err;
1917 	}
1918 
1919 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1920 
1921 		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
1922 
1923 		if (conn_param->ord > ep->ird) {
1924 
1925 			CTR2(KTR_IW_CXGBE, "%s:cac4 %p", __func__, ep);
1926 			ep->ird = conn_param->ird;
1927 			ep->ord = conn_param->ord;
1928 			send_mpa_reject(ep, conn_param->private_data,
1929 					conn_param->private_data_len);
1930 			abort_connection(ep);
1931 			err = -ENOMEM;
1932 			goto err;
1933 		}
1934 
1935 		if (conn_param->ird > ep->ord) {
1936 
1937 			CTR2(KTR_IW_CXGBE, "%s:cac5 %p", __func__, ep);
1938 
1939 			if (!ep->ord) {
1940 
1941 				CTR2(KTR_IW_CXGBE, "%s:cac6 %p", __func__, ep);
1942 				conn_param->ird = 1;
1943 			}
1944 			else {
1945 				CTR2(KTR_IW_CXGBE, "%s:cac7 %p", __func__, ep);
1946 				abort_connection(ep);
1947 				err = -ENOMEM;
1948 				goto err;
1949 			}
1950 		}
1951 
1952 	}
1953 	ep->ird = conn_param->ird;
1954 	ep->ord = conn_param->ord;
1955 
1956 	if (ep->mpa_attr.version != 2) {
1957 
1958 		CTR2(KTR_IW_CXGBE, "%s:cac8 %p", __func__, ep);
1959 
1960 		if (peer2peer && ep->ird == 0) {
1961 
1962 			CTR2(KTR_IW_CXGBE, "%s:cac9 %p", __func__, ep);
1963 			ep->ird = 1;
1964 		}
1965 	}
1966 
1968 	cm_id->add_ref(cm_id);
1969 	ep->com.cm_id = cm_id;
1970 	ep->com.qp = qp;
1971 	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
1972 
1973 	/* bind QP to EP and move to RTS */
1974 	attrs.mpa_attr = ep->mpa_attr;
1975 	attrs.max_ird = ep->ird;
1976 	attrs.max_ord = ep->ord;
1977 	attrs.llp_stream_handle = ep;
1978 	attrs.next_state = C4IW_QP_STATE_RTS;
1979 
1980 	/* bind QP and TID with INIT_WR */
1981 	mask = C4IW_QP_ATTR_NEXT_STATE |
1982 		C4IW_QP_ATTR_LLP_STREAM_HANDLE |
1983 		C4IW_QP_ATTR_MPA_ATTR |
1984 		C4IW_QP_ATTR_MAX_IRD |
1985 		C4IW_QP_ATTR_MAX_ORD;
1986 
1987 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
1988 
1989 	if (err) {
1990 
1991 		CTR2(KTR_IW_CXGBE, "%s:caca %p", __func__, ep);
1992 		goto err1;
1993 	}
1994 	err = send_mpa_reply(ep, conn_param->private_data,
1995 			conn_param->private_data_len);
1996 
1997 	if (err) {
1998 
		CTR2(KTR_IW_CXGBE, "%s:cacb %p", __func__, ep);
2000 		goto err1;
2001 	}
2002 
2003 	state_set(&ep->com, FPDU_MODE);
2004 	established_upcall(ep);
2005 	c4iw_put_ep(&ep->com);
2006 	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2007 	return 0;
2008 err1:
2009 	ep->com.cm_id = NULL;
2010 	ep->com.qp = NULL;
2011 	cm_id->rem_ref(cm_id);
2012 err:
2013 	c4iw_put_ep(&ep->com);
2014 	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2015 	return err;
2016 }
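/*
 * Illustrative only: roughly how a ULP reaches c4iw_accept_cr() above
 * through the iwcm, assuming the stock iw_cm_accept() entry point and
 * iw_cm_conn_param layout.  The field values are made up.
 */
#if 0
	struct iw_cm_conn_param cp = {
		.qpn = qpn,			/* QP the ULP created */
		.ird = 8,			/* <= c4iw_max_read_depth */
		.ord = 8,			/* <= c4iw_max_read_depth */
		.private_data = pdata,
		.private_data_len = plen,	/* <= MPA_MAX_PRIVATE_DATA */
	};
	err = iw_cm_accept(cm_id, &cp);
#endif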
2017 
2020 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2021 {
2022 	int err = 0;
2023 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2024 	struct c4iw_ep *ep = NULL;
2025 	struct rtentry *rt;
2026 	struct toedev *tdev;
2027 
2028 	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2029 
2030 	if ((conn_param->ord > c4iw_max_read_depth) ||
2031 		(conn_param->ird > c4iw_max_read_depth)) {
2032 
2033 		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2034 		err = -EINVAL;
2035 		goto out;
2036 	}
2037 	ep = alloc_ep(sizeof(*ep), M_NOWAIT);
2038 
2039 	if (!ep) {
2040 
2041 		CTR2(KTR_IW_CXGBE, "%s:cc2 %p", __func__, cm_id);
2042 		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2043 		err = -ENOMEM;
2044 		goto out;
2045 	}
2046 	init_timer(&ep->timer);
2047 	ep->plen = conn_param->private_data_len;
2048 
2049 	if (ep->plen) {
2050 
2051 		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2052 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2053 				conn_param->private_data, ep->plen);
2054 	}
2055 	ep->ird = conn_param->ird;
2056 	ep->ord = conn_param->ord;
2057 
2058 	if (peer2peer && ep->ord == 0) {
2059 
2060 		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2061 		ep->ord = 1;
2062 	}
2063 
2064 	cm_id->add_ref(cm_id);
2065 	ep->com.dev = dev;
2066 	ep->com.cm_id = cm_id;
2067 	ep->com.qp = get_qhp(dev, conn_param->qpn);
2068 
2069 	if (!ep->com.qp) {
2070 
2071 		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2072 		err = -EINVAL;
2073 		goto fail2;
2074 	}
2075 	ep->com.thread = curthread;
2076 	ep->com.so = cm_id->so;
2077 
2078 	init_sock(&ep->com);
2079 
2080 	/* find a route */
2081 	rt = find_route(
2082 		cm_id->local_addr.sin_addr.s_addr,
2083 		cm_id->remote_addr.sin_addr.s_addr,
2084 		cm_id->local_addr.sin_port,
2085 		cm_id->remote_addr.sin_port, 0);
2086 
2087 	if (!rt) {
2088 
2089 		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2090 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2091 		err = -EHOSTUNREACH;
2092 		goto fail2;
2093 	}
2094 
2095 	if (!(rt->rt_ifp->if_capenable & IFCAP_TOE)) {
2096 
2097 		CTR2(KTR_IW_CXGBE, "%s:cc8 %p", __func__, ep);
2098 		printf("%s - interface not TOE capable.\n", __func__);
2099 		close_socket(&ep->com, 0);
2100 		err = -ENOPROTOOPT;
2101 		goto fail3;
2102 	}
2103 	tdev = TOEDEV(rt->rt_ifp);
2104 
	if (tdev == NULL) {

		CTR2(KTR_IW_CXGBE, "%s:cc9 %p", __func__, ep);
		printf("%s - No toedev for interface.\n", __func__);
		close_socket(&ep->com, 0);
		err = -ENOPROTOOPT;
		goto fail3;
	}
2111 	RTFREE(rt);
2112 
2113 	state_set(&ep->com, CONNECTING);
2114 	ep->tos = 0;
2115 	ep->com.local_addr = cm_id->local_addr;
2116 	ep->com.remote_addr = cm_id->remote_addr;
2117 	err = soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2118 		ep->com.thread);
2119 
2120 	if (!err) {
2121 		CTR2(KTR_IW_CXGBE, "%s:cca %p", __func__, ep);
2122 		goto out;
2123 	} else {
2124 		close_socket(&ep->com, 0);
2125 		goto fail2;
2126 	}
2127 
2128 fail3:
2129 	CTR2(KTR_IW_CXGBE, "%s:ccb %p", __func__, ep);
2130 	RTFREE(rt);
2131 fail2:
2132 	cm_id->rem_ref(cm_id);
2133 	c4iw_put_ep(&ep->com);
2134 out:
2135 	CTR2(KTR_IW_CXGBE, "%s:ccE %p", __func__, ep);
2136 	return err;
2137 }
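/*
 * Active open in a nutshell (summary of the function above): allocate
 * and initialize an endpoint, resolve the QP from conn_param->qpn,
 * take over cm_id->so, verify that the route egresses via a
 * TOE-capable interface with an attached toedev, move the endpoint to
 * CONNECTING and fire off soconnect().  MPA negotiation then continues
 * asynchronously from the socket upcalls once the TCP connection is
 * established.
 */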
2138 
2139 /*
2140  * iwcm->create_listen.  Returns -errno on failure.
2141  */
2142 int
2143 c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2144 {
2145 	int rc;
2146 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2147 	struct c4iw_listen_ep *ep;
2148 	struct socket *so = cm_id->so;
2149 
2150 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2151 	CTR5(KTR_IW_CXGBE, "%s: cm_id %p, lso %p, ep %p, inp %p", __func__,
2152 	    cm_id, so, ep, so->so_pcb);
2153 	if (ep == NULL) {
2154 		log(LOG_ERR, "%s: failed to alloc memory for endpoint\n",
2155 		    __func__);
2156 		rc = ENOMEM;
2157 		goto failed;
2158 	}
2159 
2160 	cm_id->add_ref(cm_id);
2161 	ep->com.cm_id = cm_id;
2162 	ep->com.dev = dev;
2163 	ep->backlog = backlog;
2164 	ep->com.local_addr = cm_id->local_addr;
2165 	ep->com.thread = curthread;
2166 	state_set(&ep->com, LISTEN);
2167 	ep->com.so = so;
2168 	init_sock(&ep->com);
2169 
2170 	rc = solisten(so, ep->backlog, ep->com.thread);
2171 	if (rc != 0) {
2172 		log(LOG_ERR, "%s: failed to start listener: %d\n", __func__,
2173 		    rc);
2174 		close_socket(&ep->com, 0);
2175 		cm_id->rem_ref(cm_id);
2176 		c4iw_put_ep(&ep->com);
2177 		goto failed;
2178 	}
2179 
2180 	cm_id->provider_data = ep;
2181 	return (0);
2182 
2183 failed:
2184 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, FAILED (%d)", __func__, cm_id, rc);
2185 	return (-rc);
2186 }
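/*
 * Illustrative only: a ULP would reach c4iw_create_listen() above via
 * the iwcm, assuming the stock iw_cm_listen() entry point.
 */
#if 0
	rc = iw_cm_listen(cm_id, 10 /* backlog */);
#endif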
2187 
2188 int
2189 c4iw_destroy_listen(struct iw_cm_id *cm_id)
2190 {
2191 	int rc;
2192 	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
2193 
2194 	CTR4(KTR_IW_CXGBE, "%s: cm_id %p, so %p, inp %p", __func__, cm_id,
2195 	    cm_id->so, cm_id->so->so_pcb);
2196 
2197 	state_set(&ep->com, DEAD);
2198 	rc = close_socket(&ep->com, 0);
2199 	cm_id->rem_ref(cm_id);
2200 	c4iw_put_ep(&ep->com);
2201 
2202 	return (rc);
2203 }
2204 
2205 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2206 {
2207 	int ret = 0;
2208 	int close = 0;
2209 	int fatal = 0;
2210 	struct c4iw_rdev *rdev;
2211 
2212 	mutex_lock(&ep->com.mutex);
2213 
2214 	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2215 
2216 	rdev = &ep->com.dev->rdev;
2217 
2218 	if (c4iw_fatal_error(rdev)) {
2219 
2220 		CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
2221 		fatal = 1;
2222 		close_complete_upcall(ep, -ECONNRESET);
2223 		ep->com.state = DEAD;
2224 	}
2225 	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2226 	    states[ep->com.state]);
2227 
2228 	switch (ep->com.state) {
2229 
2230 		case MPA_REQ_WAIT:
2231 		case MPA_REQ_SENT:
2232 		case MPA_REQ_RCVD:
2233 		case MPA_REP_SENT:
2234 		case FPDU_MODE:
2235 			close = 1;
2236 			if (abrupt)
2237 				ep->com.state = ABORTING;
2238 			else {
2239 				ep->com.state = CLOSING;
2240 				START_EP_TIMER(ep);
2241 			}
2242 			set_bit(CLOSE_SENT, &ep->com.flags);
2243 			break;
2244 
2245 		case CLOSING:
2246 
2247 			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2248 
2249 				close = 1;
2250 				if (abrupt) {
2251 					STOP_EP_TIMER(ep);
2252 					ep->com.state = ABORTING;
2253 				} else
2254 					ep->com.state = MORIBUND;
2255 			}
2256 			break;
2257 
2258 		case MORIBUND:
2259 		case ABORTING:
2260 		case DEAD:
2261 			CTR3(KTR_IW_CXGBE,
2262 			    "%s ignoring disconnect ep %p state %u", __func__,
2263 			    ep, ep->com.state);
2264 			break;
2265 
2266 		default:
2267 			BUG();
2268 			break;
2269 	}
2270 
2271 	mutex_unlock(&ep->com.mutex);
2272 
2273 	if (close) {
2274 
2275 		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2276 
2277 		if (abrupt) {
2278 
2279 			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2280 			set_bit(EP_DISC_ABORT, &ep->com.history);
2281 			ret = abort_connection(ep);
2282 		} else {
2283 
2284 			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2285 			set_bit(EP_DISC_CLOSE, &ep->com.history);
2286 
2287 			if (!ep->parent_ep)
2288 				__state_set(&ep->com, MORIBUND);
2289 			ret = shutdown_socket(&ep->com);
2290 		}
2291 
2292 		if (ret) {
2293 
2294 			fatal = 1;
2295 		}
2296 	}
2297 
2298 	if (fatal) {
2299 
2300 		release_ep_resources(ep);
2301 		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2302 	}
2303 	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2304 	return ret;
2305 }
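/*
 * Disconnect state handling above, in summary:
 *
 *	MPA_REQ_WAIT/SENT/RCVD, MPA_REP_SENT, FPDU_MODE:
 *		abrupt   -> ABORTING
 *		graceful -> CLOSING, close timer armed
 *	CLOSING (first CLOSE_SENT only):
 *		abrupt   -> ABORTING, timer stopped
 *		graceful -> MORIBUND
 *	MORIBUND, ABORTING, DEAD: the disconnect is ignored.
 *
 * When a close is due, abrupt disconnects abort the connection while
 * graceful ones shut the socket down (non-child endpoints moving to
 * MORIBUND first).  Any failure there, like a fatal rdev error, ends
 * in release_ep_resources().
 */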
2306 
2307 #ifdef C4IW_EP_REDIRECT
2308 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2309 		struct l2t_entry *l2t)
2310 {
2311 	struct c4iw_ep *ep = ctx;
2312 
2313 	if (ep->dst != old)
2314 		return 0;
2315 
2316 	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2317 			l2t);
2318 	dst_hold(new);
2319 	cxgb4_l2t_release(ep->l2t);
2320 	ep->l2t = l2t;
2321 	dst_release(old);
2322 	ep->dst = new;
2323 	return 1;
2324 }
2325 #endif
2326 
2329 static void ep_timeout(unsigned long arg)
2330 {
2331 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2332 	int kickit = 0;
2333 
2334 	CTR2(KTR_IW_CXGBE, "%s:etB %p", __func__, ep);
2335 	spin_lock(&timeout_lock);
2336 
2337 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2338 
2339 		list_add_tail(&ep->entry, &timeout_list);
2340 		kickit = 1;
2341 	}
2342 	spin_unlock(&timeout_lock);
2343 
2344 	if (kickit) {
2345 
2346 		CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2347 		queue_work(c4iw_taskq, &c4iw_task);
2348 	}
2349 	CTR2(KTR_IW_CXGBE, "%s:etE %p", __func__, ep);
2350 }
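/*
 * ep_timeout() runs from timer (callout) context, so it never touches
 * the endpoint directly: the TIMEOUT flag guards against double
 * queueing, the endpoint is parked on timeout_list under timeout_lock,
 * and the heavy lifting is deferred to process_req() on c4iw_taskq.
 */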
2351 
2352 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2353 {
2354 	uint64_t val = be64toh(*rpl);
2355 	int ret;
2356 	struct c4iw_wr_wait *wr_waitp;
2357 
2358 	ret = (int)((val >> 8) & 0xff);
2359 	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2360 	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2361 	if (wr_waitp)
2362 		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2363 
2364 	return (0);
2365 }
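/*
 * Layout of the FW6 WR reply decoded above (illustrative):
 *
 *	rpl[0]	bits 8..15 carry the completion status (0 = success,
 *		turned into a negative errno for c4iw_wake_up())
 *	rpl[1]	opaque cookie: the address of the c4iw_wr_wait that
 *		was stashed in the work request when it was issued
 */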
2366 
2367 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2368 {
	struct t4_cqe cqe = *(const struct t4_cqe *)(&rpl[0]);
2370 
2371 	CTR2(KTR_IW_CXGBE, "%s rpl %p", __func__, rpl);
2372 	c4iw_ev_dispatch(sc->iwarp_softc, &cqe);
2373 
2374 	return (0);
2375 }
2376 
2377 static int terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
2378 {
2379 
2380 	struct adapter *sc = iq->adapter;
2381 
2382 	const struct cpl_rdma_terminate *rpl = (const void *)(rss + 1);
2383 	unsigned int tid = GET_TID(rpl);
2384 	struct c4iw_qp_attributes attrs;
2385 	struct toepcb *toep = lookup_tid(sc, tid);
2386 	struct socket *so = inp_inpcbtosocket(toep->inp);
2387 	struct c4iw_ep *ep = so->so_rcv.sb_upcallarg;
2388 
	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);
2390 
2391 	if (ep && ep->com.qp) {
2392 
2393 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
2394 				ep->com.qp->wq.sq.qid);
2395 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2396 		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
2397 				1);
2398 	} else
2399 		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
2401 
2402 	return 0;
2403 }
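/*
 * The lookup chain used by terminate() above: the CPL's hardware TID
 * resolves to a toepcb (lookup_tid()), the toepcb's inpcb to a socket,
 * and the socket's receive upcall argument back to the c4iw_ep that
 * owns the connection; if a QP is bound, it is moved to
 * C4IW_QP_STATE_TERMINATE.
 */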
2404 
void
2406 c4iw_cm_init_cpl(struct adapter *sc)
2407 {
2408 
2409 	t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, terminate);
2410 	t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, fw6_wr_rpl);
2411 	t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, fw6_cqe_handler);
2412 	t4_register_an_handler(sc, c4iw_ev_handler);
2413 }
2414 
void
2416 c4iw_cm_term_cpl(struct adapter *sc)
2417 {
2418 
2419 	t4_register_cpl_handler(sc, CPL_RDMA_TERMINATE, NULL);
2420 	t4_register_fw_msg_handler(sc, FW6_TYPE_WR_RPL, NULL);
2421 	t4_register_fw_msg_handler(sc, FW6_TYPE_CQE, NULL);
2422 }
2423 
2424 int __init c4iw_cm_init(void)
2425 {
2426 
2427 	TAILQ_INIT(&req_list);
2428 	spin_lock_init(&req_lock);
2429 	INIT_LIST_HEAD(&timeout_list);
2430 	spin_lock_init(&timeout_lock);
2431 
2432 	INIT_WORK(&c4iw_task, process_req);
2433 
2434 	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
2435 	if (!c4iw_taskq)
2436 		return -ENOMEM;
2437 
2439 	return 0;
2440 }
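/*
 * Illustrative pairing: c4iw_cm_init() is expected from the module's
 * load path and c4iw_cm_term() from unload, with c4iw_cm_init_cpl()/
 * c4iw_cm_term_cpl() bracketing each adapter attach/detach.
 */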
2441 
2442 void __exit c4iw_cm_term(void)
2443 {
2444 	WARN_ON(!TAILQ_EMPTY(&req_list));
2445 	WARN_ON(!list_empty(&timeout_list));
2446 	flush_workqueue(c4iw_taskq);
2447 	destroy_workqueue(c4iw_taskq);
2448 }
2449 #endif
2450