xref: /freebsd/sys/dev/cxgbe/iw_cxgbe/cm.c (revision eb9da1ada8b6b2c74378a5c17029ec5a7fb199e6)
/*
 * Copyright (c) 2009-2013, 2016 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_inet.h"

#ifdef TCP_OFFLOAD
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sockio.h>
#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <net/route.h>

#include <netinet/in_systm.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
#include <netinet/in_fib.h>
#include <netinet/ip_var.h>
#include <netinet/tcp_var.h>
#include <netinet/tcp.h>
#include <netinet/tcpip.h>

#include <netinet/toecore.h>

struct sge_iq;
struct rss_header;
struct cpl_set_tcb_rpl;
#include <linux/types.h>
#include "offload.h"
#include "tom/t4_tom.h"

#define TOEPCB(so)  ((struct toepcb *)(so_sototcpcb((so))->t_toe))

#include "iw_cxgbe.h"
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>

static spinlock_t req_lock;
static TAILQ_HEAD(c4iw_ep_list, c4iw_ep_common) req_list;
static struct work_struct c4iw_task;
static struct workqueue_struct *c4iw_taskq;
static LIST_HEAD(timeout_list);
static spinlock_t timeout_lock;

static void process_req(struct work_struct *ctx);
static void start_ep_timer(struct c4iw_ep *ep);
static int stop_ep_timer(struct c4iw_ep *ep);
static int set_tcpinfo(struct c4iw_ep *ep);
static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc);
static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state tostate);
static void *alloc_ep(int size, gfp_t flags);
void __free_ep(struct c4iw_ep_common *epc);
static int find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
		__be16 peer_port, u8 tos, struct nhop4_extended *pnh4);
static int close_socket(struct c4iw_ep_common *epc, int close);
static int shutdown_socket(struct c4iw_ep_common *epc);
static void abort_socket(struct c4iw_ep *ep);
static void send_mpa_req(struct c4iw_ep *ep);
static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen);
static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen);
static void close_complete_upcall(struct c4iw_ep *ep, int status);
static int send_abort(struct c4iw_ep *ep);
static void peer_close_upcall(struct c4iw_ep *ep);
static void peer_abort_upcall(struct c4iw_ep *ep);
static void connect_reply_upcall(struct c4iw_ep *ep, int status);
static int connect_request_upcall(struct c4iw_ep *ep);
static void established_upcall(struct c4iw_ep *ep);
static int process_mpa_reply(struct c4iw_ep *ep);
static int process_mpa_request(struct c4iw_ep *ep);
static void process_peer_close(struct c4iw_ep *ep);
static void process_conn_error(struct c4iw_ep *ep);
static void process_close_complete(struct c4iw_ep *ep);
static void ep_timeout(unsigned long arg);
static void init_sock(struct c4iw_ep_common *epc);
static void process_data(struct c4iw_ep *ep);
static void process_connected(struct c4iw_ep *ep);
static int c4iw_so_upcall(struct socket *so, void *arg, int waitflag);
static void process_socket_event(struct c4iw_ep *ep);
static void release_ep_resources(struct c4iw_ep *ep);

#define START_EP_TIMER(ep) \
    do { \
	    CTR3(KTR_IW_CXGBE, "start_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    start_ep_timer(ep); \
    } while (0)

#define STOP_EP_TIMER(ep) \
    ({ \
	    CTR3(KTR_IW_CXGBE, "stop_ep_timer (%s:%d) ep %p", \
		__func__, __LINE__, (ep)); \
	    stop_ep_timer(ep); \
    })
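
/*
 * Unlike START_EP_TIMER, STOP_EP_TIMER is a GCC statement expression, so it
 * evaluates to stop_ep_timer()'s return value while still logging the call
 * site.  A minimal sketch of the intended call pattern (hypothetical caller;
 * see process_mpa_reply() below for a real one):
 *
 *	if (STOP_EP_TIMER(ep))
 *		return 0;	(timer already fired; timeout path cleans up)
 */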

#ifdef KTR
static char *states[] = {
	"idle",
	"listen",
	"connecting",
	"mpa_wait_req",
	"mpa_req_sent",
	"mpa_req_rcvd",
	"mpa_rep_sent",
	"fpdu_mode",
	"aborting",
	"closing",
	"moribund",
	"dead",
	NULL,
};
#endif

static void deref_cm_id(struct c4iw_ep_common *epc)
{
	epc->cm_id->rem_ref(epc->cm_id);
	epc->cm_id = NULL;
	set_bit(CM_ID_DEREFED, &epc->history);
}

static void ref_cm_id(struct c4iw_ep_common *epc)
{
	set_bit(CM_ID_REFED, &epc->history);
	epc->cm_id->add_ref(epc->cm_id);
}

static void deref_qp(struct c4iw_ep *ep)
{
	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
	clear_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_DEREFED, &ep->com.history);
}

static void ref_qp(struct c4iw_ep *ep)
{
	set_bit(QP_REFERENCED, &ep->com.flags);
	set_bit(QP_REFED, &ep->com.history);
	c4iw_qp_add_ref(&ep->com.qp->ibqp);
}

static void
process_req(struct work_struct *ctx)
{
	struct c4iw_ep_common *epc;

	spin_lock(&req_lock);
	while (!TAILQ_EMPTY(&req_list)) {
		epc = TAILQ_FIRST(&req_list);
		TAILQ_REMOVE(&req_list, epc, entry);
		epc->entry.tqe_prev = NULL;
		spin_unlock(&req_lock);
		if (epc->so)
			process_socket_event((struct c4iw_ep *)epc);
		c4iw_put_ep(epc);
		spin_lock(&req_lock);
	}
	spin_unlock(&req_lock);
}
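
/*
 * Note that epc->entry.tqe_prev doubles as an "on req_list" marker: it is
 * cleared above after the dequeue, and c4iw_so_upcall() only enqueues an ep
 * whose tqe_prev is NULL, so an endpoint sits on req_list at most once.
 */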

/*
 * XXX: doesn't belong here in the iWARP driver.
 * XXX: assumes that the connection was offloaded by cxgbe/t4_tom if TF_TOE is
 *      set.  Is this a valid assumption for active open?
 */
static int
set_tcpinfo(struct c4iw_ep *ep)
{
	struct socket *so = ep->com.so;
	struct inpcb *inp = sotoinpcb(so);
	struct tcpcb *tp;
	struct toepcb *toep;
	int rc = 0;

	INP_WLOCK(inp);
	tp = intotcpcb(inp);
	if ((tp->t_flags & TF_TOE) == 0) {
		rc = EINVAL;
		log(LOG_ERR, "%s: connection not offloaded (so %p, ep %p)\n",
		    __func__, so, ep);
		goto done;
	}
	toep = TOEPCB(so);

	ep->hwtid = toep->tid;
	ep->snd_seq = tp->snd_nxt;
	ep->rcv_seq = tp->rcv_nxt;
	ep->emss = max(tp->t_maxseg, 128);
done:
	INP_WUNLOCK(inp);
	return (rc);
}

static int
find_route(__be32 local_ip, __be32 peer_ip, __be16 local_port,
		__be16 peer_port, u8 tos, struct nhop4_extended *pnh4)
{
	struct in_addr addr;
	int err;

	CTR5(KTR_IW_CXGBE, "%s:frtB %x, %x, %d, %d", __func__, local_ip,
	    peer_ip, ntohs(local_port), ntohs(peer_port));

	addr.s_addr = peer_ip;
	err = fib4_lookup_nh_ext(RT_DEFAULT_FIB, addr, NHR_REF, 0, pnh4);

	CTR2(KTR_IW_CXGBE, "%s:frtE %d", __func__, err);
	return err;
}
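
/*
 * find_route() is a thin wrapper around fib4_lookup_nh_ext().  Because it
 * passes NHR_REF, a successful lookup returns a referenced ifp in *pnh4
 * that the caller is expected to release.  A minimal sketch of the assumed
 * usage (hypothetical caller, error handling elided):
 *
 *	struct nhop4_extended nh4;
 *
 *	if (find_route(laddr, raddr, lport, rport, 0, &nh4) == 0) {
 *		... use nh4.nh_ifp ...
 *		fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);
 *	}
 */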

static int
close_socket(struct c4iw_ep_common *epc, int close)
{
	struct socket *so = epc->so;
	int rc;

	CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, so, epc,
	    states[epc->state]);

	SOCK_LOCK(so);
	soupcall_clear(so, SO_RCV);
	SOCK_UNLOCK(so);

	if (close)
		rc = soclose(so);
	else
		rc = soshutdown(so, SHUT_WR | SHUT_RD);
	epc->so = NULL;

	return (rc);
}

static int
shutdown_socket(struct c4iw_ep_common *epc)
{

	CTR4(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s", __func__, epc->so, epc,
	    states[epc->state]);

	return (soshutdown(epc->so, SHUT_WR));
}

static void
abort_socket(struct c4iw_ep *ep)
{
	struct sockopt sopt;
	int rc;
	struct linger l;

	CTR4(KTR_IW_CXGBE, "%s ep %p so %p state %s", __func__, ep, ep->com.so,
	    states[ep->com.state]);

	l.l_onoff = 1;
	l.l_linger = 0;

	/* linger_time of 0 forces RST to be sent */
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = SOL_SOCKET;
	sopt.sopt_name = SO_LINGER;
	sopt.sopt_val = (caddr_t)&l;
	sopt.sopt_valsize = sizeof l;
	sopt.sopt_td = NULL;
	rc = sosetopt(ep->com.so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set linger to 0, no RST! err %d\n",
		    __func__, rc);
	}
}

static void
process_peer_close(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int disconnect = 1;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:ppcB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	mutex_lock(&ep->com.mutex);
	switch (ep->com.state) {

		case MPA_REQ_WAIT:
			CTR2(KTR_IW_CXGBE, "%s:ppc1 %p MPA_REQ_WAIT CLOSING",
			    __func__, ep);
			__state_set(&ep->com, CLOSING);
			break;

		case MPA_REQ_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc2 %p MPA_REQ_SENT DEAD",
			    __func__, ep);
			__state_set(&ep->com, DEAD);
			connect_reply_upcall(ep, -ECONNABORTED);

			disconnect = 0;
			STOP_EP_TIMER(ep);
			close_socket(&ep->com, 0);
			deref_cm_id(&ep->com);
			release = 1;
			break;

		case MPA_REQ_RCVD:

			/*
			 * We're gonna mark this puppy DEAD, but keep
			 * the reference on it until the ULP accepts or
			 * rejects the CR.
			 */
			CTR2(KTR_IW_CXGBE, "%s:ppc3 %p MPA_REQ_RCVD CLOSING",
			    __func__, ep);
			__state_set(&ep->com, CLOSING);
			c4iw_get_ep(&ep->com);
			break;

		case MPA_REP_SENT:
			CTR2(KTR_IW_CXGBE, "%s:ppc4 %p MPA_REP_SENT CLOSING",
			    __func__, ep);
			__state_set(&ep->com, CLOSING);
			break;

		case FPDU_MODE:
			CTR2(KTR_IW_CXGBE, "%s:ppc5 %p FPDU_MODE CLOSING",
			    __func__, ep);
			START_EP_TIMER(ep);
			__state_set(&ep->com, CLOSING);
			attrs.next_state = C4IW_QP_STATE_CLOSING;
			c4iw_modify_qp(ep->com.dev, ep->com.qp,
			    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			peer_close_upcall(ep);
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:ppc6 %p ABORTING (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:ppc7 %p CLOSING MORIBUND",
			    __func__, ep);
			__state_set(&ep->com, MORIBUND);
			disconnect = 0;
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:ppc8 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);
			if (ep->com.cm_id && ep->com.qp) {
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
				    C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
			}
			close_socket(&ep->com, 0);
			close_complete_upcall(ep, 0);
			__state_set(&ep->com, DEAD);
			release = 1;
			disconnect = 0;
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s:ppc9 %p DEAD (disconn)",
			    __func__, ep);
			disconnect = 0;
			break;

		default:
			panic("%s: ep %p state %d", __func__, ep,
			    ep->com.state);
			break;
	}

	mutex_unlock(&ep->com.mutex);

	if (disconnect) {

		CTR2(KTR_IW_CXGBE, "%s:ppca %p", __func__, ep);
		c4iw_ep_disconnect(ep, 0, M_NOWAIT);
	}
	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:ppcb %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:ppcE %p", __func__, ep);
	return;
}

static void
process_conn_error(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int ret;
	int state;

	state = state_read(&ep->com);
	CTR5(KTR_IW_CXGBE, "%s:pceB ep %p so %p so->so_error %u state %s",
	    __func__, ep, ep->com.so, ep->com.so->so_error,
	    states[state]);

	switch (state) {

		case MPA_REQ_WAIT:
			STOP_EP_TIMER(ep);
			break;

		case MPA_REQ_SENT:
			STOP_EP_TIMER(ep);
			connect_reply_upcall(ep, -ECONNRESET);
			break;

		case MPA_REP_SENT:
			ep->com.rpl_err = ECONNRESET;
			CTR1(KTR_IW_CXGBE, "waking up ep %p", ep);
			break;

		case MPA_REQ_RCVD:

			/*
			 * We're gonna mark this puppy DEAD, but keep
			 * the reference on it until the ULP accepts or
			 * rejects the CR.
			 */
			c4iw_get_ep(&ep->com);
			break;

		case MORIBUND:
		case CLOSING:
			STOP_EP_TIMER(ep);
			/*FALLTHROUGH*/
		case FPDU_MODE:

			if (ep->com.cm_id && ep->com.qp) {

				attrs.next_state = C4IW_QP_STATE_ERROR;
				ret = c4iw_modify_qp(ep->com.qp->rhp,
				    ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
				    &attrs, 1);
				if (ret)
					log(LOG_ERR,
					    "%s - qp <- error failed!\n",
					    __func__);
			}
			peer_abort_upcall(ep);
			break;

		case ABORTING:
			break;

		case DEAD:
			CTR2(KTR_IW_CXGBE, "%s so_error %d IN DEAD STATE!!!!",
			    __func__, ep->com.so->so_error);
			return;

		default:
			panic("%s: ep %p state %d", __func__, ep, state);
			break;
	}

	if (state != ABORTING) {

		CTR2(KTR_IW_CXGBE, "%s:pce1 %p", __func__, ep);
		close_socket(&ep->com, 0);
		state_set(&ep->com, DEAD);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pceE %p", __func__, ep);
	return;
}

static void
process_close_complete(struct c4iw_ep *ep)
{
	struct c4iw_qp_attributes attrs;
	int release = 0;

	CTR4(KTR_IW_CXGBE, "%s:pccB ep %p so %p state %s", __func__, ep,
	    ep->com.so, states[ep->com.state]);

	/* The cm_id may be null if we failed to connect */
	mutex_lock(&ep->com.mutex);
	set_bit(CLOSE_CON_RPL, &ep->com.history);

	switch (ep->com.state) {

		case CLOSING:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p CLOSING MORIBUND",
			    __func__, ep);
			__state_set(&ep->com, MORIBUND);
			break;

		case MORIBUND:
			CTR2(KTR_IW_CXGBE, "%s:pcc1 %p MORIBUND DEAD", __func__,
			    ep);
			STOP_EP_TIMER(ep);

			if ((ep->com.cm_id) && (ep->com.qp)) {

				CTR2(KTR_IW_CXGBE, "%s:pcc2 %p QP_STATE_IDLE",
				    __func__, ep);
				attrs.next_state = C4IW_QP_STATE_IDLE;
				c4iw_modify_qp(ep->com.dev,
				    ep->com.qp,
				    C4IW_QP_ATTR_NEXT_STATE,
				    &attrs, 1);
			}

			if (ep->parent_ep) {

				CTR2(KTR_IW_CXGBE, "%s:pcc3 %p", __func__, ep);
				close_socket(&ep->com, 1);
			}
			else {

				CTR2(KTR_IW_CXGBE, "%s:pcc4 %p", __func__, ep);
				close_socket(&ep->com, 0);
			}
			close_complete_upcall(ep, 0);
			__state_set(&ep->com, DEAD);
			release = 1;
			break;

		case ABORTING:
			CTR2(KTR_IW_CXGBE, "%s:pcc5 %p ABORTING", __func__, ep);
			break;

		case DEAD:
		default:
			CTR2(KTR_IW_CXGBE, "%s:pcc6 %p DEAD", __func__, ep);
			panic("%s:pcc6 %p DEAD", __func__, ep);
			break;
	}
	mutex_unlock(&ep->com.mutex);

	if (release) {

		CTR2(KTR_IW_CXGBE, "%s:pcc7 %p", __func__, ep);
		c4iw_put_ep(&ep->com);
	}
	CTR2(KTR_IW_CXGBE, "%s:pccE %p", __func__, ep);
	return;
}

static void
init_sock(struct c4iw_ep_common *epc)
{
	int rc;
	struct sockopt sopt;
	struct socket *so = epc->so;
	int on = 1;

	SOCK_LOCK(so);
	soupcall_set(so, SO_RCV, c4iw_so_upcall, epc);
	so->so_state |= SS_NBIO;
	SOCK_UNLOCK(so);
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_level = IPPROTO_TCP;
	sopt.sopt_name = TCP_NODELAY;
	sopt.sopt_val = (caddr_t)&on;
	sopt.sopt_valsize = sizeof on;
	sopt.sopt_td = NULL;
	rc = sosetopt(so, &sopt);
	if (rc) {
		log(LOG_ERR, "%s: can't set TCP_NODELAY on so %p (%d)\n",
		    __func__, so, rc);
	}
}
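
/*
 * init_sock() puts the socket into non-blocking mode (SS_NBIO) and disables
 * Nagle via TCP_NODELAY so that the small MPA negotiation messages pushed
 * through sosend() are transmitted immediately instead of being coalesced.
 */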

static void
process_data(struct c4iw_ep *ep)
{
	struct sockaddr_in *local, *remote;
	int disconnect = 0;

	CTR5(KTR_IW_CXGBE, "%s: so %p, ep %p, state %s, sbused %d", __func__,
	    ep->com.so, ep, states[ep->com.state], sbused(&ep->com.so->so_rcv));

	switch (state_read(&ep->com)) {
	case MPA_REQ_SENT:
		disconnect = process_mpa_reply(ep);
		break;
	case MPA_REQ_WAIT:
		in_getsockaddr(ep->com.so, (struct sockaddr **)&local);
		in_getpeeraddr(ep->com.so, (struct sockaddr **)&remote);
		ep->com.local_addr = *local;
		ep->com.remote_addr = *remote;
		free(local, M_SONAME);
		free(remote, M_SONAME);
		disconnect = process_mpa_request(ep);
		break;
	default:
		if (sbused(&ep->com.so->so_rcv))
			log(LOG_ERR, "%s: Unexpected streaming data. ep %p, "
			    "state %d, so %p, so_state 0x%x, sbused %u\n",
			    __func__, ep, state_read(&ep->com), ep->com.so,
			    ep->com.so->so_state, sbused(&ep->com.so->so_rcv));
		break;
	}
	if (disconnect)
		c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
}

static void
process_connected(struct c4iw_ep *ep)
{

	if ((ep->com.so->so_state & SS_ISCONNECTED) && !ep->com.so->so_error)
		send_mpa_req(ep);
	else {
		connect_reply_upcall(ep, -ep->com.so->so_error);
		close_socket(&ep->com, 0);
		state_set(&ep->com, DEAD);
		c4iw_put_ep(&ep->com);
	}
}

void
process_newconn(struct iw_cm_id *parent_cm_id, struct socket *child_so)
{
	struct c4iw_ep *child_ep;
	struct sockaddr_in *local;
	struct sockaddr_in *remote;
	struct c4iw_ep *parent_ep = parent_cm_id->provider_data;

	if (!child_so) {
		CTR4(KTR_IW_CXGBE,
		    "%s: parent so %p, parent ep %p, child so %p, invalid so",
		    __func__, parent_ep->com.so, parent_ep, child_so);
		log(LOG_ERR, "%s: invalid child socket\n", __func__);
		return;
	}
	child_ep = alloc_ep(sizeof(*child_ep), M_NOWAIT);
	if (!child_ep) {
		CTR3(KTR_IW_CXGBE, "%s: parent so %p, parent ep %p, ENOMEM",
		    __func__, parent_ep->com.so, parent_ep);
		log(LOG_ERR, "%s: failed to allocate ep entry\n", __func__);
		return;
	}
	SOCKBUF_LOCK(&child_so->so_rcv);
	soupcall_set(child_so, SO_RCV, c4iw_so_upcall, child_ep);
	SOCKBUF_UNLOCK(&child_so->so_rcv);

	CTR5(KTR_IW_CXGBE,
	    "%s: parent so %p, parent ep %p, child so %p, child ep %p",
	    __func__, parent_ep->com.so, parent_ep, child_so, child_ep);

	in_getsockaddr(child_so, (struct sockaddr **)&local);
	in_getpeeraddr(child_so, (struct sockaddr **)&remote);

	child_ep->com.local_addr = *local;
	child_ep->com.remote_addr = *remote;
	child_ep->com.dev = parent_ep->com.dev;
	child_ep->com.so = child_so;
	child_ep->com.cm_id = NULL;
	child_ep->com.thread = parent_ep->com.thread;
	child_ep->parent_ep = parent_ep;

	free(local, M_SONAME);
	free(remote, M_SONAME);

	c4iw_get_ep(&parent_ep->com);
	init_timer(&child_ep->timer);
	state_set(&child_ep->com, MPA_REQ_WAIT);
	START_EP_TIMER(child_ep);

	/* maybe the request has already been queued up on the socket... */
	process_mpa_request(child_ep);
	return;
}

static int
c4iw_so_upcall(struct socket *so, void *arg, int waitflag)
{
	struct c4iw_ep *ep = arg;

	spin_lock(&req_lock);

	CTR6(KTR_IW_CXGBE,
	    "%s: so %p, so_state 0x%x, ep %p, ep_state %s, tqe_prev %p",
	    __func__, so, so->so_state, ep, states[ep->com.state],
	    ep->com.entry.tqe_prev);

	if (ep && ep->com.so && !ep->com.entry.tqe_prev) {
		KASSERT(ep->com.so == so, ("%s: XXX review.", __func__));
		c4iw_get_ep(&ep->com);
		TAILQ_INSERT_TAIL(&req_list, &ep->com, entry);
		queue_work(c4iw_taskq, &c4iw_task);
	}

	spin_unlock(&req_lock);
	return (SU_OK);
}
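
/*
 * c4iw_so_upcall() runs in the socket upcall path, so it does no real work
 * here: it takes a reference on the ep, parks it on req_list, and kicks
 * c4iw_taskq.  process_req() later calls process_socket_event() and drops
 * the reference in process context.
 */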

static void
process_socket_event(struct c4iw_ep *ep)
{
	int state = state_read(&ep->com);
	struct socket *so = ep->com.so;

	CTR6(KTR_IW_CXGBE, "process_socket_event: so %p, so_state 0x%x, "
	    "so_err %d, sb_state 0x%x, ep %p, ep_state %s", so, so->so_state,
	    so->so_error, so->so_rcv.sb_state, ep, states[state]);

	if (state == CONNECTING) {
		process_connected(ep);
		return;
	}

	if (state == LISTEN) {
		/* socket listening events are handled at IWCM */
		CTR3(KTR_IW_CXGBE, "%s Invalid ep state:%u, ep:%p", __func__,
		    ep->com.state, ep);
		BUG();
		return;
	}

	/* connection error */
	if (so->so_error) {
		process_conn_error(ep);
		return;
	}

	/* peer close */
	if ((so->so_rcv.sb_state & SBS_CANTRCVMORE) && state <= CLOSING) {
		process_peer_close(ep);
		return;
	}

	/* close complete */
	if (so->so_state & SS_ISDISCONNECTED) {
		process_close_complete(ep);
		return;
	}

	/* rx data */
	process_data(ep);
}
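
/*
 * The order of the checks above is significant: a pending socket error
 * always wins, a peer FIN (SBS_CANTRCVMORE) only counts as a peer close up
 * to the CLOSING state, a fully disconnected socket completes the close,
 * and only then is whatever remains treated as inbound MPA data.
 */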

SYSCTL_NODE(_hw, OID_AUTO, iw_cxgbe, CTLFLAG_RD, 0, "iw_cxgbe driver parameters");

int db_delay_usecs = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_delay_usecs, CTLFLAG_RWTUN, &db_delay_usecs, 0,
		"Usecs to delay awaiting db fifo to drain");

static int dack_mode = 0;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, dack_mode, CTLFLAG_RWTUN, &dack_mode, 0,
		"Delayed ack mode (default = 0)");

int c4iw_max_read_depth = 8;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_max_read_depth, CTLFLAG_RWTUN, &c4iw_max_read_depth, 0,
		"Per-connection max ORD/IRD (default = 8)");

static int enable_tcp_timestamps;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_timestamps, CTLFLAG_RWTUN, &enable_tcp_timestamps, 0,
		"Enable tcp timestamps (default = 0)");

static int enable_tcp_sack;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_sack, CTLFLAG_RWTUN, &enable_tcp_sack, 0,
		"Enable tcp SACK (default = 0)");

static int enable_tcp_window_scaling = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, enable_tcp_window_scaling, CTLFLAG_RWTUN, &enable_tcp_window_scaling, 0,
		"Enable tcp window scaling (default = 1)");

int c4iw_debug = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, c4iw_debug, CTLFLAG_RWTUN, &c4iw_debug, 0,
		"Enable debug logging (default = 1)");

static int peer2peer = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, peer2peer, CTLFLAG_RWTUN, &peer2peer, 0,
		"Support peer2peer ULPs (default = 1)");

static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, p2p_type, CTLFLAG_RWTUN, &p2p_type, 0,
		"RDMAP opcode to use for the RTR message: 1 = RDMA_READ 0 = RDMA_WRITE (default 1)");

static int ep_timeout_secs = 60;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, ep_timeout_secs, CTLFLAG_RWTUN, &ep_timeout_secs, 0,
		"CM Endpoint operation timeout in seconds (default = 60)");

static int mpa_rev = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, mpa_rev, CTLFLAG_RWTUN, &mpa_rev, 0,
		"MPA Revision, 0 supports amso1100, 1 is RFC5044 spec compliant, 2 is IETF MPA Peer Connect Draft compliant (default = 1)");

static int markers_enabled;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, markers_enabled, CTLFLAG_RWTUN, &markers_enabled, 0,
		"Enable MPA MARKERS (default(0) = disabled)");

static int crc_enabled = 1;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, crc_enabled, CTLFLAG_RWTUN, &crc_enabled, 0,
		"Enable MPA CRC (default(1) = enabled)");

static int rcv_win = 256 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, rcv_win, CTLFLAG_RWTUN, &rcv_win, 0,
		"TCP receive window in bytes (default = 256KB)");

static int snd_win = 128 * 1024;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, snd_win, CTLFLAG_RWTUN, &snd_win, 0,
		"TCP send window in bytes (default = 128KB)");

int db_fc_threshold = 2000;
SYSCTL_INT(_hw_iw_cxgbe, OID_AUTO, db_fc_threshold, CTLFLAG_RWTUN, &db_fc_threshold, 0,
		"QP count/threshold that triggers automatic db flow control (default = 2000)");

static void
start_ep_timer(struct c4iw_ep *ep)
{

	if (timer_pending(&ep->timer)) {
		CTR2(KTR_IW_CXGBE, "%s: ep %p, already started", __func__, ep);
		printk(KERN_ERR "%s timer already started! ep %p\n", __func__,
		    ep);
		return;
	}
	clear_bit(TIMEOUT, &ep->com.flags);
	c4iw_get_ep(&ep->com);
	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
	ep->timer.data = (unsigned long)ep;
	ep->timer.function = ep_timeout;
	add_timer(&ep->timer);
}

static int
stop_ep_timer(struct c4iw_ep *ep)
{

	del_timer_sync(&ep->timer);
	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
		c4iw_put_ep(&ep->com);
		return 0;
	}
	return 1;
}
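
/*
 * stop_ep_timer() returns 0 if the timer was stopped before firing (the
 * timer's reference on the ep is dropped here) and 1 if the TIMEOUT flag
 * was already set, meaning the timer has effectively fired and the timeout
 * path owns the remaining cleanup.
 */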

static enum c4iw_ep_state
state_read(struct c4iw_ep_common *epc)
{
	enum c4iw_ep_state state;

	mutex_lock(&epc->mutex);
	state = epc->state;
	mutex_unlock(&epc->mutex);

	return (state);
}

static void
__state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{

	epc->state = new;
}

static void
state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
{

	mutex_lock(&epc->mutex);
	__state_set(epc, new);
	mutex_unlock(&epc->mutex);
}

static void *
alloc_ep(int size, gfp_t gfp)
{
	struct c4iw_ep_common *epc;

	epc = kzalloc(size, gfp);
	if (epc == NULL)
		return (NULL);

	kref_init(&epc->kref);
	mutex_init(&epc->mutex);
	c4iw_init_wr_wait(&epc->wr_wait);

	return (epc);
}

void
__free_ep(struct c4iw_ep_common *epc)
{
	CTR2(KTR_IW_CXGBE, "%s:feB %p", __func__, epc);
	KASSERT(!epc->so, ("%s warning ep->so %p\n", __func__, epc->so));
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list!\n", __func__, epc));
	free(epc, M_DEVBUF);
	CTR2(KTR_IW_CXGBE, "%s:feE %p", __func__, epc);
}

void _c4iw_free_ep(struct kref *kref)
{
	struct c4iw_ep *ep;
	struct c4iw_ep_common *epc;

	ep = container_of(kref, struct c4iw_ep, com.kref);
	epc = &ep->com;
	KASSERT(!epc->entry.tqe_prev, ("%s epc %p still on req list",
	    __func__, epc));
	if (test_bit(QP_REFERENCED, &ep->com.flags))
		deref_qp(ep);
	kfree(ep);
}

static void release_ep_resources(struct c4iw_ep *ep)
{
	CTR2(KTR_IW_CXGBE, "%s:rerB %p", __func__, ep);
	set_bit(RELEASE_RESOURCES, &ep->com.flags);
	c4iw_put_ep(&ep->com);
	CTR2(KTR_IW_CXGBE, "%s:rerE %p", __func__, ep);
}

static void
send_mpa_req(struct c4iw_ep *ep)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	char mpa_rev_to_use = mpa_rev;
	int err;

	if (ep->retry_with_mpa_v1)
		mpa_rev_to_use = 1;
	mpalen = sizeof(*mpa) + ep->plen;
	if (mpa_rev_to_use == 2)
		mpalen += sizeof(struct mpa_v2_conn_params);

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL) {
failed:
		connect_reply_upcall(ep, -ENOMEM);
		return;
	}

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0) |
	    (mpa_rev_to_use == 2 ? MPA_ENHANCED_RDMA_CONN : 0);
	mpa->private_data_size = htons(ep->plen);
	mpa->revision = mpa_rev_to_use;

	if (mpa_rev_to_use == 1) {
		ep->tried_with_mpa_v1 = 1;
		ep->retry_with_mpa_v1 = 0;
	}

	if (mpa_rev_to_use == 2) {
		mpa->private_data_size +=
		    htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);

		if (peer2peer) {
			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {
				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_READ_RTR);
			}
		}
		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (ep->plen) {
			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params),
			    ep->mpa_pkt + sizeof(*mpa), ep->plen);
		}
	} else {
		if (ep->plen)
			memcpy(mpa->private_data,
			    ep->mpa_pkt + sizeof(*mpa), ep->plen);
		CTR2(KTR_IW_CXGBE, "%s:smr7 %p", __func__, ep);
	}

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		goto failed;
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
	    ep->com.thread);
	if (err)
		goto failed;

	START_EP_TIMER(ep);
	state_set(&ep->com, MPA_REQ_SENT);
	ep->mpa_attr.initiator = 1;
}
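
/*
 * Rough wire layout of the request built above for the MPA v2 case,
 * following struct mpa_message and RFC 5044 (16-byte key, 1-byte flags,
 * 1-byte revision, 16-bit private_data_size):
 *
 *	key[16] | flags | rev | private_data_size |
 *	    mpa_v2_conn_params { ird, ord } | private data (ep->plen bytes)
 *
 * private_data_size is inflated to cover mpa_v2_conn_params, so on the
 * wire the v2 parameters travel as the leading bytes of the private data.
 */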

static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mpa_v2_conn_params mpa_v2_params;
	struct mbuf *m;
	int err;

	CTR4(KTR_IW_CXGBE, "%s:smrejB %p %u %d", __func__, ep, ep->hwtid,
	    ep->plen);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpalen += sizeof(struct mpa_v2_conn_params);
		CTR4(KTR_IW_CXGBE, "%s:smrej1 %p %u %d", __func__, ep,
		    ep->mpa_attr.version, mpalen);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, mpalen);
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = MPA_REJECT;
	mpa->revision = mpa_rev;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
		    htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons(((u16)ep->ird) |
		    (peer2peer ? MPA_V2_PEER2PEER_MODEL : 0));
		mpa_v2_params.ord = htons(((u16)ep->ord) | (peer2peer ?
		    (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE ?
		    MPA_V2_RDMA_WRITE_RTR : p2p_type ==
		    FW_RI_INIT_P2PTYPE_READ_REQ ?
		    MPA_V2_RDMA_READ_RTR : 0) : 0));
		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params), pdata, plen);
		CTR5(KTR_IW_CXGBE, "%s:smrej3 %p %d %d %d", __func__, ep,
		    mpa_v2_params.ird, mpa_v2_params.ord, ep->plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT, ep->com.thread);
	if (!err)
		ep->snd_seq += mpalen;
	CTR4(KTR_IW_CXGBE, "%s:smrejE %p %u %d", __func__, ep, ep->hwtid, err);
	return err;
}

static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
{
	int mpalen;
	struct mpa_message *mpa;
	struct mbuf *m;
	struct mpa_v2_conn_params mpa_v2_params;
	int err;

	CTR2(KTR_IW_CXGBE, "%s:smrepB %p", __func__, ep);

	mpalen = sizeof(*mpa) + plen;

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		CTR3(KTR_IW_CXGBE, "%s:smrep1 %p %d", __func__, ep,
		    ep->mpa_attr.version);
		mpalen += sizeof(struct mpa_v2_conn_params);
	}

	mpa = malloc(mpalen, M_CXGBE, M_NOWAIT);
	if (mpa == NULL)
		return (-ENOMEM);

	memset(mpa, 0, sizeof(*mpa));
	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
	    (markers_enabled ? MPA_MARKERS : 0);
	mpa->revision = ep->mpa_attr.version;
	mpa->private_data_size = htons(plen);

	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
		mpa->flags |= MPA_ENHANCED_RDMA_CONN;
		mpa->private_data_size +=
		    htons(sizeof(struct mpa_v2_conn_params));
		mpa_v2_params.ird = htons((u16)ep->ird);
		mpa_v2_params.ord = htons((u16)ep->ord);
		CTR5(KTR_IW_CXGBE, "%s:smrep3 %p %d %d %d", __func__, ep,
		    ep->mpa_attr.version, mpa_v2_params.ird, mpa_v2_params.ord);

		if (peer2peer && (ep->mpa_attr.p2p_type !=
		    FW_RI_INIT_P2PTYPE_DISABLED)) {

			mpa_v2_params.ird |= htons(MPA_V2_PEER2PEER_MODEL);

			if (p2p_type == FW_RI_INIT_P2PTYPE_RDMA_WRITE) {

				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_WRITE_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep4 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			} else if (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) {

				mpa_v2_params.ord |=
				    htons(MPA_V2_RDMA_READ_RTR);
				CTR5(KTR_IW_CXGBE, "%s:smrep5 %p %d %d %d",
				    __func__, ep, p2p_type, mpa_v2_params.ird,
				    mpa_v2_params.ord);
			}
		}

		memcpy(mpa->private_data, &mpa_v2_params,
		    sizeof(struct mpa_v2_conn_params));

		if (ep->plen)
			memcpy(mpa->private_data +
			    sizeof(struct mpa_v2_conn_params), pdata, plen);
	} else
		if (plen)
			memcpy(mpa->private_data, pdata, plen);

	m = m_getm(NULL, mpalen, M_NOWAIT, MT_DATA);
	if (m == NULL) {
		free(mpa, M_CXGBE);
		return (-ENOMEM);
	}
	m_copyback(m, 0, mpalen, (void *)mpa);
	free(mpa, M_CXGBE);

	state_set(&ep->com, MPA_REP_SENT);
	ep->snd_seq += mpalen;
	err = -sosend(ep->com.so, NULL, NULL, m, NULL, MSG_DONTWAIT,
	    ep->com.thread);
	CTR3(KTR_IW_CXGBE, "%s:smrepE %p %d", __func__, ep, err);
	return err;
}

static void close_complete_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:ccuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = status;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:ccu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(CLOSE_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:ccuE %p", __func__, ep);
}

static int send_abort(struct c4iw_ep *ep)
{
	int err;

	CTR2(KTR_IW_CXGBE, "%s:abB %p", __func__, ep);
	abort_socket(ep);

	/*
	 * Since the socket options were set to l_onoff=1 and l_linger=0 in
	 * abort_socket, invoking soclose here sends a RST (reset) to the peer.
	 */
	err = close_socket(&ep->com, 1);
	set_bit(ABORT_CONN, &ep->com.history);
	CTR2(KTR_IW_CXGBE, "%s:abE %p", __func__, ep);

	/*
	 * TBD: the iw_cxgbe driver should receive an ABORT reply for every
	 * ABORT request it has sent.  But the current TOE driver does not
	 * propagate this ABORT reply event (via do_abort_rpl) to iw_cxgbe.
	 * So as a workaround, dereference 'ep' (which was referenced before
	 * sending the ABORT request) here instead of doing it in an
	 * abort_rpl() handler of the iw_cxgbe driver.
	 */
	c4iw_put_ep(&ep->com);
	return err;
}

static void peer_close_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pcuB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_DISCONNECT;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pcu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(DISCONN_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pcuE %p", __func__, ep);
}

static void peer_abort_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:pauB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CLOSE;
	event.status = -ECONNRESET;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:pau1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		deref_cm_id(&ep->com);
		set_bit(ABORT_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:pauE %p", __func__, ep);
}

static void connect_reply_upcall(struct c4iw_ep *ep, int status)
{
	struct iw_cm_event event;

	CTR3(KTR_IW_CXGBE, "%s:cruB %p %d", __func__, ep, status);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REPLY;
	event.status = (status == -ECONNABORTED) ? -ECONNRESET : status;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;

	if ((status == 0) || (status == -ECONNREFUSED)) {

		if (!ep->tried_with_mpa_v1) {

			CTR2(KTR_IW_CXGBE, "%s:cru1 %p", __func__, ep);
			/* this means MPA_v2 is used */
			event.private_data_len = ep->plen -
			    sizeof(struct mpa_v2_conn_params);
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message) +
			    sizeof(struct mpa_v2_conn_params);
		} else {

			CTR2(KTR_IW_CXGBE, "%s:cru2 %p", __func__, ep);
			/* this means MPA_v1 is used */
			event.private_data_len = ep->plen;
			event.private_data = ep->mpa_pkt +
			    sizeof(struct mpa_message);
		}
	}

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:cru3 %p", __func__, ep);
		set_bit(CONN_RPL_UPCALL, &ep->com.history);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
	}

	if (status == -ECONNABORTED) {

		CTR3(KTR_IW_CXGBE, "%s:cruE %p %d", __func__, ep, status);
		return;
	}

	if (status < 0) {

		CTR3(KTR_IW_CXGBE, "%s:cru4 %p %d", __func__, ep, status);
		deref_cm_id(&ep->com);
	}

	CTR2(KTR_IW_CXGBE, "%s:cruE %p", __func__, ep);
}

static int connect_request_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;
	int ret;

	CTR3(KTR_IW_CXGBE, "%s: ep %p, mpa_v1 %d", __func__, ep,
	    ep->tried_with_mpa_v1);

	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_CONNECT_REQUEST;
	event.local_addr = ep->com.local_addr;
	event.remote_addr = ep->com.remote_addr;
	event.provider_data = ep;
	event.so = ep->com.so;

	if (!ep->tried_with_mpa_v1) {
		/* this means MPA_v2 is used */
		event.ord = ep->ord;
		event.ird = ep->ird;
		event.private_data_len = ep->plen -
		    sizeof(struct mpa_v2_conn_params);
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message) +
		    sizeof(struct mpa_v2_conn_params);
	} else {

		/* this means MPA_v1 is used. Send max supported */
		event.ord = c4iw_max_read_depth;
		event.ird = c4iw_max_read_depth;
		event.private_data_len = ep->plen;
		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
	}

	c4iw_get_ep(&ep->com);
	ret = ep->parent_ep->com.cm_id->event_handler(ep->parent_ep->com.cm_id,
	    &event);
	if (ret)
		c4iw_put_ep(&ep->com);

	set_bit(CONNREQ_UPCALL, &ep->com.history);
	c4iw_put_ep(&ep->parent_ep->com);
	return ret;
}

static void established_upcall(struct c4iw_ep *ep)
{
	struct iw_cm_event event;

	CTR2(KTR_IW_CXGBE, "%s:euB %p", __func__, ep);
	memset(&event, 0, sizeof(event));
	event.event = IW_CM_EVENT_ESTABLISHED;
	event.ird = ep->ird;
	event.ord = ep->ord;

	if (ep->com.cm_id) {

		CTR2(KTR_IW_CXGBE, "%s:eu1 %p", __func__, ep);
		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
		set_bit(ESTAB_UPCALL, &ep->com.history);
	}
	CTR2(KTR_IW_CXGBE, "%s:euE %p", __func__, ep);
}

/*
 * process_mpa_reply - process streaming mode MPA reply
 *
 * Returns:
 *
 * 0 upon success indicating a connect request was delivered to the ULP
 * or the mpa request is incomplete but valid so far.
 *
 * 1 if a failure requires the caller to close the connection.
 *
 * 2 if a failure requires the caller to abort the connection.
 */
static int process_mpa_reply(struct c4iw_ep *ep)
{
	struct mpa_message *mpa;
	struct mpa_v2_conn_params *mpa_v2_params;
	u16 plen;
	u16 resp_ird, resp_ord;
	u8 rtr_mismatch = 0, insuff_ird = 0;
	struct c4iw_qp_attributes attrs;
	enum c4iw_qp_attr_mask mask;
	int err;
	struct mbuf *top, *m;
	int flags = MSG_DONTWAIT;
	struct uio uio;
	int disconnect = 0;

	CTR2(KTR_IW_CXGBE, "%s:pmrB %p", __func__, ep);

	/*
	 * Stop mpa timer.  If it expired, then
	 * we ignore the MPA reply.  process_timeout()
	 * will abort the connection.
	 */
	if (STOP_EP_TIMER(ep))
		return 0;

	uio.uio_resid = 1000000;
	uio.uio_td = ep->com.thread;
	err = soreceive(ep->com.so, NULL, &uio, &top, NULL, &flags);

	if (err) {

		if (err == EWOULDBLOCK) {

			CTR2(KTR_IW_CXGBE, "%s:pmr1 %p", __func__, ep);
			START_EP_TIMER(ep);
			return 0;
		}
		err = -err;
		CTR2(KTR_IW_CXGBE, "%s:pmr2 %p", __func__, ep);
		goto err;
	}

	if (ep->com.so->so_rcv.sb_mb) {

		CTR2(KTR_IW_CXGBE, "%s:pmr3 %p", __func__, ep);
		printf("%s data after soreceive called! so %p sb_mb %p top %p\n",
		    __func__, ep->com.so, ep->com.so->so_rcv.sb_mb, top);
	}

	m = top;

	do {

		CTR2(KTR_IW_CXGBE, "%s:pmr4 %p", __func__, ep);
		/*
		 * If we get more than the supported amount of private data
		 * then we must fail this connection.
		 */
		if (ep->mpa_pkt_len + m->m_len > sizeof(ep->mpa_pkt)) {

			CTR3(KTR_IW_CXGBE, "%s:pmr5 %p %d", __func__, ep,
			    ep->mpa_pkt_len + m->m_len);
			err = (-EINVAL);
			goto err_stop_timer;
		}

		/*
		 * copy the new data into our accumulation buffer.
		 */
		m_copydata(m, 0, m->m_len, &(ep->mpa_pkt[ep->mpa_pkt_len]));
		ep->mpa_pkt_len += m->m_len;
		if (!m->m_next)
			m = m->m_nextpkt;
		else
			m = m->m_next;
	} while (m);

	m_freem(top);
	/*
	 * if we don't even have the mpa message, then bail.
	 */
	if (ep->mpa_pkt_len < sizeof(*mpa)) {
		return 0;
	}
	mpa = (struct mpa_message *) ep->mpa_pkt;

	/* Validate MPA header. */
	if (mpa->revision > mpa_rev) {

		CTR4(KTR_IW_CXGBE, "%s:pmr6 %p %d %d", __func__, ep,
		    mpa->revision, mpa_rev);
		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d, "
		    "Received = %d\n", __func__, mpa_rev, mpa->revision);
		err = -EPROTO;
		goto err_stop_timer;
	}

	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {

		CTR2(KTR_IW_CXGBE, "%s:pmr7 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	plen = ntohs(mpa->private_data_size);

	/*
	 * Fail if there's too much private data.
	 */
	if (plen > MPA_MAX_PRIVATE_DATA) {

		CTR2(KTR_IW_CXGBE, "%s:pmr8 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	/*
	 * If plen does not account for pkt size
	 */
	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmr9 %p", __func__, ep);
		err = -EPROTO;
		goto err_stop_timer;
	}

	ep->plen = (u8) plen;

	/*
	 * If we don't have all the pdata yet, then bail.
	 * We'll continue processing when more data arrives.
	 */
	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen)) {

		CTR2(KTR_IW_CXGBE, "%s:pmra %p", __func__, ep);
		return 0;
	}

	if (mpa->flags & MPA_REJECT) {

		CTR2(KTR_IW_CXGBE, "%s:pmrb %p", __func__, ep);
		err = -ECONNREFUSED;
		goto err_stop_timer;
	}

	/*
	 * If we get here we have accumulated the entire mpa
	 * start reply message including private data. And
	 * the MPA header is valid.
	 */
	state_set(&ep->com, FPDU_MODE);
	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
	ep->mpa_attr.recv_marker_enabled = markers_enabled;
	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
	ep->mpa_attr.version = mpa->revision;
	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;

	if (mpa->revision == 2) {

		CTR2(KTR_IW_CXGBE, "%s:pmrc %p", __func__, ep);
		ep->mpa_attr.enhanced_rdma_conn =
		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;

		if (ep->mpa_attr.enhanced_rdma_conn) {

			CTR2(KTR_IW_CXGBE, "%s:pmrd %p", __func__, ep);
			mpa_v2_params = (struct mpa_v2_conn_params *)
			    (ep->mpa_pkt + sizeof(*mpa));
			resp_ird = ntohs(mpa_v2_params->ird) &
			    MPA_V2_IRD_ORD_MASK;
			resp_ord = ntohs(mpa_v2_params->ord) &
			    MPA_V2_IRD_ORD_MASK;

			/*
			 * This is a double-check. Ideally, below checks are
			 * not required since ird/ord stuff has been taken
			 * care of in c4iw_accept_cr
			 */
			if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {

				CTR2(KTR_IW_CXGBE, "%s:pmre %p", __func__, ep);
				err = -ENOMEM;
				ep->ird = resp_ord;
				ep->ord = resp_ird;
				insuff_ird = 1;
			}

			if (ntohs(mpa_v2_params->ird) &
			    MPA_V2_PEER2PEER_MODEL) {

				CTR2(KTR_IW_CXGBE, "%s:pmrf %p", __func__, ep);
				if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_WRITE_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrg %p", __func__, ep);
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_RDMA_WRITE;
				} else if (ntohs(mpa_v2_params->ord) &
				    MPA_V2_RDMA_READ_RTR) {

					CTR2(KTR_IW_CXGBE, "%s:pmrh %p", __func__, ep);
					ep->mpa_attr.p2p_type =
					    FW_RI_INIT_P2PTYPE_READ_REQ;
				}
			}
		}
	} else {

		CTR2(KTR_IW_CXGBE, "%s:pmri %p", __func__, ep);

		if (mpa->revision == 1) {

			CTR2(KTR_IW_CXGBE, "%s:pmrj %p", __func__, ep);

			if (peer2peer) {

				CTR2(KTR_IW_CXGBE, "%s:pmrk %p", __func__, ep);
				ep->mpa_attr.p2p_type = p2p_type;
			}
		}
	}

	if (set_tcpinfo(ep)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrl %p", __func__, ep);
		printf("%s set_tcpinfo error\n", __func__);
		goto err;
	}

	CTR6(KTR_IW_CXGBE, "%s - crc_enabled = %d, recv_marker_enabled = %d, "
	    "xmit_marker_enabled = %d, version = %d p2p_type = %d", __func__,
	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
	    ep->mpa_attr.p2p_type);

	/*
	 * If responder's RTR does not match with that of initiator, assign
	 * FW_RI_INIT_P2PTYPE_DISABLED in mpa attributes so that RTR is not
	 * generated when moving QP to RTS state.
	 * A TERM message will be sent after QP has moved to RTS state
	 */
	if ((ep->mpa_attr.version == 2) && peer2peer &&
	    (ep->mpa_attr.p2p_type != p2p_type)) {

		CTR2(KTR_IW_CXGBE, "%s:pmrm %p", __func__, ep);
		ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
		rtr_mismatch = 1;
	}

	//ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq;
	attrs.mpa_attr = ep->mpa_attr;
	attrs.max_ird = ep->ird;
	attrs.max_ord = ep->ord;
	attrs.llp_stream_handle = ep;
	attrs.next_state = C4IW_QP_STATE_RTS;

	mask = C4IW_QP_ATTR_NEXT_STATE |
	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;

	/* bind QP and TID with INIT_WR */
	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);

	if (err) {

		CTR2(KTR_IW_CXGBE, "%s:pmrn %p", __func__, ep);
		goto err;
	}

	/*
	 * If responder's RTR requirement did not match with what initiator
	 * supports, generate TERM message
	 */
	if (rtr_mismatch) {

		CTR2(KTR_IW_CXGBE, "%s:pmro %p", __func__, ep);
		printk(KERN_ERR "%s: RTR mismatch, sending TERM\n", __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_NOMATCH_RTR;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}

	/*
	 * Generate TERM if initiator IRD is not sufficient for responder
	 * provided ORD. Currently, we do the same behaviour even when
	 * responder provided IRD is also not sufficient as regards to
	 * initiator ORD.
	 */
	if (insuff_ird) {

		CTR2(KTR_IW_CXGBE, "%s:pmrp %p", __func__, ep);
		printk(KERN_ERR "%s: Insufficient IRD, sending TERM\n",
		    __func__);
		attrs.layer_etype = LAYER_MPA | DDP_LLP;
		attrs.ecode = MPA_INSUFF_IRD;
		attrs.next_state = C4IW_QP_STATE_TERMINATE;
		err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
		    C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
		err = -ENOMEM;
		disconnect = 1;
		goto out;
	}
	goto out;
err_stop_timer:
	STOP_EP_TIMER(ep);
err:
	disconnect = 2;
out:
	connect_reply_upcall(ep, err);
	CTR2(KTR_IW_CXGBE, "%s:pmrE %p", __func__, ep);
	return disconnect;
}
1731 
1732 /*
1733  * process_mpa_request - process streaming mode MPA request
1734  *
1735  * Returns:
1736  *
1737  * 0 upon success indicating a connect request was delivered to the ULP
1738  * or the mpa request is incomplete but valid so far.
1739  *
1740  * 1 if a failure requires the caller to close the connection.
1741  *
1742  * 2 if a failure requires the caller to abort the connection.
1743  */
1744 static int
1745 process_mpa_request(struct c4iw_ep *ep)
1746 {
1747 	struct mpa_message *mpa;
1748 	u16 plen;
1749 	int flags = MSG_DONTWAIT;
1750 	int rc;
1751 	struct iovec iov;
1752 	struct uio uio;
1753 	enum c4iw_ep_state state = state_read(&ep->com);
1754 
1755 	CTR3(KTR_IW_CXGBE, "%s: ep %p, state %s", __func__, ep, states[state]);
1756 
1757 	if (state != MPA_REQ_WAIT)
1758 		return 0;
1759 
1760 	iov.iov_base = &ep->mpa_pkt[ep->mpa_pkt_len];
1761 	iov.iov_len = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
1762 	uio.uio_iov = &iov;
1763 	uio.uio_iovcnt = 1;
1764 	uio.uio_offset = 0;
1765 	uio.uio_resid = sizeof(ep->mpa_pkt) - ep->mpa_pkt_len;
1766 	uio.uio_segflg = UIO_SYSSPACE;
1767 	uio.uio_rw = UIO_READ;
1768 	uio.uio_td = NULL; /* uio.uio_td = ep->com.thread; */
1769 
1770 	rc = soreceive(ep->com.so, NULL, &uio, NULL, NULL, &flags);
1771 	if (rc == EAGAIN)
1772 		return 0;
1773 	else if (rc)
1774 		goto err_stop_timer;
1775 
1776 	KASSERT(uio.uio_offset > 0, ("%s: sorecieve on so %p read no data",
1777 	    __func__, ep->com.so));
1778 	ep->mpa_pkt_len += uio.uio_offset;
1779 
1780 	/*
1781 	 * If we get more than the supported amount of private data then we must
1782 	 * fail this connection.  XXX: check so_rcv->sb_cc, or peek with another
1783 	 * soreceive, or increase the size of mpa_pkt by 1 and abort if the last
1784 	 * byte is filled by the soreceive above.
1785 	 */
1786 
1787 	/* Don't even have the MPA message.  Wait for more data to arrive. */
1788 	if (ep->mpa_pkt_len < sizeof(*mpa))
1789 		return 0;
1790 	mpa = (struct mpa_message *) ep->mpa_pkt;
1791 
1792 	/*
1793 	 * Validate MPA Header.
1794 	 */
1795 	if (mpa->revision > mpa_rev) {
1796 		log(LOG_ERR, "%s: MPA version mismatch. Local = %d,"
1797 		    " Received = %d\n", __func__, mpa_rev, mpa->revision);
1798 		goto err_stop_timer;
1799 	}
1800 
1801 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key)))
1802 		goto err_stop_timer;
1803 
1804 	/*
1805 	 * Fail if there's too much private data.
1806 	 */
1807 	plen = ntohs(mpa->private_data_size);
1808 	if (plen > MPA_MAX_PRIVATE_DATA)
1809 		goto err_stop_timer;
1810 
1811 	/*
1812 	 * If plen does not account for pkt size
1813 	 */
1814 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen))
1815 		goto err_stop_timer;
1816 
1817 	ep->plen = (u8) plen;
1818 
1819 	/*
1820 	 * If we don't have all the pdata yet, then bail.
1821 	 */
1822 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1823 		return 0;
1824 
1825 	/*
1826 	 * If we get here we have accumulated the entire mpa
1827 	 * start reply message including private data.
1828 	 */
1829 	ep->mpa_attr.initiator = 0;
1830 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1831 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
1832 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1833 	ep->mpa_attr.version = mpa->revision;
1834 	if (mpa->revision == 1)
1835 		ep->tried_with_mpa_v1 = 1;
1836 	ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
1837 
1838 	if (mpa->revision == 2) {
1839 		ep->mpa_attr.enhanced_rdma_conn =
1840 		    mpa->flags & MPA_ENHANCED_RDMA_CONN ? 1 : 0;
1841 		if (ep->mpa_attr.enhanced_rdma_conn) {
1842 			struct mpa_v2_conn_params *mpa_v2_params;
1843 			u16 ird, ord;
1844 
1845 			mpa_v2_params = (void *)&ep->mpa_pkt[sizeof(*mpa)];
1846 			ird = ntohs(mpa_v2_params->ird);
1847 			ord = ntohs(mpa_v2_params->ord);
1848 
1849 			ep->ird = ird & MPA_V2_IRD_ORD_MASK;
1850 			ep->ord = ord & MPA_V2_IRD_ORD_MASK;
1851 			if (ird & MPA_V2_PEER2PEER_MODEL && peer2peer) {
1852 				if (ord & MPA_V2_RDMA_WRITE_RTR) {
1853 					ep->mpa_attr.p2p_type =
1854 					    FW_RI_INIT_P2PTYPE_RDMA_WRITE;
1855 				} else if (ord & MPA_V2_RDMA_READ_RTR) {
1856 					ep->mpa_attr.p2p_type =
1857 					    FW_RI_INIT_P2PTYPE_READ_REQ;
1858 				}
1859 			}
1860 		}
1861 	} else if (mpa->revision == 1 && peer2peer)
1862 		ep->mpa_attr.p2p_type = p2p_type;
1863 
1864 	if (set_tcpinfo(ep))
1865 		goto err_stop_timer;
1866 
1867 	CTR5(KTR_IW_CXGBE, "%s: crc_enabled = %d, recv_marker_enabled = %d, "
1868 	    "xmit_marker_enabled = %d, version = %d", __func__,
1869 	    ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1870 	    ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
1871 
1872 	state_set(&ep->com, MPA_REQ_RCVD);
1873 	STOP_EP_TIMER(ep);
1874 
1875 	/* drive upcall */
1876 	mutex_lock(&ep->parent_ep->com.mutex);
1877 	if (ep->parent_ep->com.state != DEAD) {
1878 		if (connect_request_upcall(ep))
1879 			goto err_unlock_parent;
1880 	} else
1881 		goto err_unlock_parent;
1882 	mutex_unlock(&ep->parent_ep->com.mutex);
1883 	return 0;
1884 
1885 err_unlock_parent:
1886 	mutex_unlock(&ep->parent_ep->com.mutex);
1887 	goto err_out;
1888 err_stop_timer:
1889 	STOP_EP_TIMER(ep);
1890 err_out:
1891 	return 2;
1892 }
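
/*
 * A condensed sketch (for reference, not compiled) of the MPA v2
 * IRD/ORD negotiation decoded above.  The peer's 16-bit ird/ord fields
 * carry the read-queue depths in the bits covered by
 * MPA_V2_IRD_ORD_MASK and use the remaining high bits as control flags:
 *
 *	ird = ntohs(mpa_v2_params->ird);
 *	ord = ntohs(mpa_v2_params->ord);
 *	ep->ird = ird & MPA_V2_IRD_ORD_MASK;
 *	ep->ord = ord & MPA_V2_IRD_ORD_MASK;
 *	if ((ird & MPA_V2_PEER2PEER_MODEL) && peer2peer)
 *		p2p_type = (ord & MPA_V2_RDMA_WRITE_RTR) ?
 *		    RDMA_WRITE : READ_REQ;
 */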
1893 
1894 /*
1895  * iwcm->reject.  Reject an incoming MPA connection request, optionally
1896  * passing private data back to the peer, and shut the connection down.
1897  * Returns 0 or -errno.
1898  */
1899 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1900 {
1901 	int err;
1902 	int disconnect = 0;
1903 	struct c4iw_ep *ep = to_ep(cm_id);
1904 	CTR2(KTR_IW_CXGBE, "%s:crcB %p", __func__, ep);
1905 
1906 	if (state_read(&ep->com) == DEAD) {
1907 
1908 		CTR2(KTR_IW_CXGBE, "%s:crc1 %p", __func__, ep);
1909 		c4iw_put_ep(&ep->com);
1910 		return -ECONNRESET;
1911 	}
1912 	set_bit(ULP_REJECT, &ep->com.history);
1913 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1914 
1915 	if (mpa_rev == 0) {
1916 
1917 		CTR2(KTR_IW_CXGBE, "%s:crc2 %p", __func__, ep);
1918 		disconnect = 2;
1919 	}
1920 	else {
1921 
1922 		CTR2(KTR_IW_CXGBE, "%s:crc3 %p", __func__, ep);
1923 		err = send_mpa_reject(ep, pdata, pdata_len);
1924 		err = soshutdown(ep->com.so, SHUT_RDWR);
1925 	}
1926 	if (disconnect)
1927 		err = c4iw_ep_disconnect(ep, disconnect == 2, GFP_KERNEL);
1928 	c4iw_put_ep(&ep->com);
1929 	CTR2(KTR_IW_CXGBE, "%s:crc4 %p", __func__, ep);
1930 	return 0;
1931 }
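
/*
 * Usage sketch, assuming the standard OFED iw_cm entry points: a ULP
 * rejects an incoming connection from its connect-request event
 * handler, which reaches c4iw_reject_cr() through iwcm->reject.  The
 * handler name and private data below are illustrative only.
 *
 *	static int
 *	my_connreq_handler(struct iw_cm_id *cm_id, struct iw_cm_event *ev)
 *	{
 *		static const char pdata[] = "no resources";
 *
 *		return (iw_cm_reject(cm_id, pdata, sizeof(pdata)));
 *	}
 */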
1932 
1933 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1934 {
1935 	int err;
1936 	struct c4iw_qp_attributes attrs;
1937 	enum c4iw_qp_attr_mask mask;
1938 	struct c4iw_ep *ep = to_ep(cm_id);
1939 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
1940 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
1941 	int abort = 0;
1942 
1943 	CTR2(KTR_IW_CXGBE, "%s:cacB %p", __func__, ep);
1944 
1945 	if (state_read(&ep->com) == DEAD) {
1946 
1947 		CTR2(KTR_IW_CXGBE, "%s:cac1 %p", __func__, ep);
1948 		err = -ECONNRESET;
1949 		goto err_out;
1950 	}
1951 
1952 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1953 	BUG_ON(!qp);
1954 
1955 	set_bit(ULP_ACCEPT, &ep->com.history);
1956 
1957 	if ((conn_param->ord > c4iw_max_read_depth) ||
1958 		(conn_param->ird > c4iw_max_read_depth)) {
1959 
1960 		CTR2(KTR_IW_CXGBE, "%s:cac2 %p", __func__, ep);
1961 		err = -EINVAL;
1962 		goto err_abort;
1963 	}
1964 
1965 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
1966 
1967 		CTR2(KTR_IW_CXGBE, "%s:cac3 %p", __func__, ep);
1968 
1969 		if (conn_param->ord > ep->ird) {
1970 
1971 			CTR2(KTR_IW_CXGBE, "%s:cac4 %p", __func__, ep);
1972 			ep->ird = conn_param->ird;
1973 			ep->ord = conn_param->ord;
1974 			send_mpa_reject(ep, conn_param->private_data,
1975 					conn_param->private_data_len);
1976 			err = -ENOMEM;
1977 			goto err_abort;
1978 		}
1979 
1980 		if (conn_param->ird > ep->ord) {
1981 
1982 			CTR2(KTR_IW_CXGBE, "%s:cac5 %p", __func__, ep);
1983 
1984 			if (!ep->ord) {
1985 
1986 				CTR2(KTR_IW_CXGBE, "%s:cac6 %p", __func__, ep);
1987 				conn_param->ird = 1;
1988 			}
1989 			else {
1990 				CTR2(KTR_IW_CXGBE, "%s:cac7 %p", __func__, ep);
1991 				err = -ENOMEM;
1992 				goto err_abort;
1993 			}
1994 		}
1995 
1996 	}
1997 	ep->ird = conn_param->ird;
1998 	ep->ord = conn_param->ord;
1999 
2000 	if (ep->mpa_attr.version != 2) {
2001 
2002 		CTR2(KTR_IW_CXGBE, "%s:cac8 %p", __func__, ep);
2003 
2004 		if (peer2peer && ep->ird == 0) {
2005 
2006 			CTR2(KTR_IW_CXGBE, "%s:cac9 %p", __func__, ep);
2007 			ep->ird = 1;
2008 		}
2009 	}
2010 
2011 
2012 	ep->com.cm_id = cm_id;
2013 	ref_cm_id(&ep->com);
2014 	ep->com.qp = qp;
2015 	ref_qp(ep);
2016 	/* ep->ofld_txq = TOEPCB(ep->com.so)->ofld_txq; */
2017 
2018 	/* bind QP to EP and move to RTS */
2019 	attrs.mpa_attr = ep->mpa_attr;
2020 	attrs.max_ird = ep->ird;
2021 	attrs.max_ord = ep->ord;
2022 	attrs.llp_stream_handle = ep;
2023 	attrs.next_state = C4IW_QP_STATE_RTS;
2024 
2025 	/* bind QP and TID with INIT_WR */
2026 	mask = C4IW_QP_ATTR_NEXT_STATE |
2027 		C4IW_QP_ATTR_LLP_STREAM_HANDLE |
2028 		C4IW_QP_ATTR_MPA_ATTR |
2029 		C4IW_QP_ATTR_MAX_IRD |
2030 		C4IW_QP_ATTR_MAX_ORD;
2031 
2032 	err = c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp, mask, &attrs, 1);
2033 
2034 	if (err) {
2035 
2036 		CTR2(KTR_IW_CXGBE, "%s:caca %p", __func__, ep);
2037 		goto err_deref_cm_id;
2038 	}
2039 	err = send_mpa_reply(ep, conn_param->private_data,
2040 			conn_param->private_data_len);
2041 
2042 	if (err) {
2043 
2044 		CTR2(KTR_IW_CXGBE, "%s:cacb %p", __func__, ep);
2045 		goto err_deref_cm_id;
2046 	}
2047 
2048 	state_set(&ep->com, FPDU_MODE);
2049 	established_upcall(ep);
2050 	c4iw_put_ep(&ep->com);
2051 	CTR2(KTR_IW_CXGBE, "%s:cacE %p", __func__, ep);
2052 	return 0;
2053 err_deref_cm_id:
2054 	deref_cm_id(&ep->com);
2055 err_abort:
2056 	abort = 1;
2057 err_out:
2058 	if (abort)
2059 		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
2060 	c4iw_put_ep(&ep->com);
2061 	CTR2(KTR_IW_CXGBE, "%s:cacE err %p", __func__, ep);
2062 	return err;
2063 }
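
/*
 * Usage sketch for the accept path, again assuming the OFED iw_cm API:
 * the ULP fills in an iw_cm_conn_param and calls iw_cm_accept(), which
 * lands here through iwcm->accept.  Values are illustrative; note that
 * ord/ird beyond c4iw_max_read_depth fail above with -EINVAL.
 *
 *	struct iw_cm_conn_param p = {
 *		.qpn = qp->ibqp.qp_num,		(QP to bind to this EP)
 *		.ord = 1,
 *		.ird = 1,
 *		.private_data = NULL,
 *		.private_data_len = 0,
 *	};
 *	err = iw_cm_accept(cm_id, &p);
 */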
2064 
2065 
2066 
2067 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
2068 {
2069 	int err = 0;
2070 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2071 	struct c4iw_ep *ep = NULL;
2072 	struct nhop4_extended nh4;
2073 	struct toedev *tdev;
2074 
2075 	CTR2(KTR_IW_CXGBE, "%s:ccB %p", __func__, cm_id);
2076 
2077 	if ((conn_param->ord > c4iw_max_read_depth) ||
2078 		(conn_param->ird > c4iw_max_read_depth)) {
2079 
2080 		CTR2(KTR_IW_CXGBE, "%s:cc1 %p", __func__, cm_id);
2081 		err = -EINVAL;
2082 		goto out;
2083 	}
2084 	ep = alloc_ep(sizeof(*ep), M_NOWAIT);
2085 
2086 	if (!ep) {
2087 
2088 		CTR2(KTR_IW_CXGBE, "%s:cc2 %p", __func__, cm_id);
2089 		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2090 		err = -ENOMEM;
2091 		goto out;
2092 	}
2093 	init_timer(&ep->timer);
2094 	ep->plen = conn_param->private_data_len;
2095 
2096 	if (ep->plen) {
2097 
2098 		CTR2(KTR_IW_CXGBE, "%s:cc3 %p", __func__, ep);
2099 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
2100 				conn_param->private_data, ep->plen);
2101 	}
2102 	ep->ird = conn_param->ird;
2103 	ep->ord = conn_param->ord;
2104 
2105 	if (peer2peer && ep->ord == 0) {
2106 
2107 		CTR2(KTR_IW_CXGBE, "%s:cc4 %p", __func__, ep);
2108 		ep->ord = 1;
2109 	}
2110 
2111 	ep->com.dev = dev;
2112 	ep->com.cm_id = cm_id;
2113 	ref_cm_id(&ep->com);
2114 	ep->com.qp = get_qhp(dev, conn_param->qpn);
2115 
2116 	if (!ep->com.qp) {
2117 
2118 		CTR2(KTR_IW_CXGBE, "%s:cc5 %p", __func__, ep);
2119 		err = -EINVAL;
2120 		goto fail2;
2121 	}
2122 	ref_qp(ep);
2123 	ep->com.thread = curthread;
2124 	ep->com.so = cm_id->so;
2125 
2126 	init_sock(&ep->com);
2127 
2128 	/* find a route */
2129 	err = find_route(
2130 		cm_id->local_addr.sin_addr.s_addr,
2131 		cm_id->remote_addr.sin_addr.s_addr,
2132 		cm_id->local_addr.sin_port,
2133 		cm_id->remote_addr.sin_port, 0, &nh4);
2134 
2135 	if (err) {
2136 
2137 		CTR2(KTR_IW_CXGBE, "%s:cc7 %p", __func__, ep);
2138 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
2139 		err = -EHOSTUNREACH;
2140 		goto fail2;
2141 	}
2142 
2143 	if (!(nh4.nh_ifp->if_capenable & IFCAP_TOE)) {
2144 
2145 		CTR2(KTR_IW_CXGBE, "%s:cc8 %p", __func__, ep);
2146 		printf("%s - interface not TOE capable.\n", __func__);
2147 		close_socket(&ep->com, 0);
2148 		err = -ENOPROTOOPT;
2149 		goto fail3;
2150 	}
2151 	tdev = TOEDEV(nh4.nh_ifp);
2152 
2153 	if (tdev == NULL) {
2154 		CTR2(KTR_IW_CXGBE, "%s:cc9 %p", __func__, ep);
2155 		printf("%s - No toedev for interface.\n", __func__);
2156 		err = -ENOPROTOOPT;
2157 		goto fail3;
2158 	}
2159 	fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);
2160 
2161 	state_set(&ep->com, CONNECTING);
2162 	ep->tos = 0;
2163 	ep->com.local_addr = cm_id->local_addr;
2164 	ep->com.remote_addr = cm_id->remote_addr;
2165 	err = soconnect(ep->com.so, (struct sockaddr *)&ep->com.remote_addr,
2166 		ep->com.thread);
2167 
2168 	if (err) {
2169 		close_socket(&ep->com, 0);
2170 		err = -err;	/* soconnect() returns a positive errno */
2171 		goto fail2;
2172 	}
2173 	CTR2(KTR_IW_CXGBE, "%s:cca %p", __func__, ep);
2174 	goto out;
2175 
2176 fail3:
2177 	CTR2(KTR_IW_CXGBE, "%s:ccb %p", __func__, ep);
2178 	fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);
2179 fail2:
2180 	deref_cm_id(&ep->com);
2181 	c4iw_put_ep(&ep->com);
2182 out:
2183 	CTR2(KTR_IW_CXGBE, "%s:ccE %p", __func__, ep);
2184 	return err;
2185 }
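
/*
 * The active-open flow above, reduced to a sketch: resolve a route to
 * the peer, insist on a TOE-capable egress interface with an attached
 * toedev, then hand the handshake to the socket layer; MPA processing
 * resumes from the socket upcalls once TCP is established.
 *
 *	find_route(laddr, raddr, lport, rport, tos, &nh4);
 *	nh4.nh_ifp->if_capenable & IFCAP_TOE      (else fail)
 *	TOEDEV(nh4.nh_ifp) != NULL                (else fail)
 *	fib4_free_nh_ext(RT_DEFAULT_FIB, &nh4);   drop the nexthop ref
 *	soconnect(so, (struct sockaddr *)&remote_addr, td);
 */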
2186 
2187 /*
2188  * iwcm->create_listen_ep.  Returns -errno on failure.
2189  */
2190 int
2191 c4iw_create_listen_ep(struct iw_cm_id *cm_id, int backlog)
2192 {
2193 	int rc;
2194 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2195 	struct c4iw_listen_ep *ep;
2196 	struct socket *so = cm_id->so;
2197 
2198 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2199 	CTR5(KTR_IW_CXGBE, "%s: cm_id %p, lso %p, ep %p, inp %p", __func__,
2200 	    cm_id, so, ep, so->so_pcb);
2201 	if (ep == NULL) {
2202 		log(LOG_ERR, "%s: failed to alloc memory for endpoint\n",
2203 		    __func__);
2204 		rc = ENOMEM;
2205 		goto failed;
2206 	}
2207 
2208 	ep->com.cm_id = cm_id;
2209 	ref_cm_id(&ep->com);
2210 	ep->com.dev = dev;
2211 	ep->backlog = backlog;
2212 	ep->com.local_addr = cm_id->local_addr;
2213 	ep->com.thread = curthread;
2214 	state_set(&ep->com, LISTEN);
2215 	ep->com.so = so;
2216 
2217 	cm_id->provider_data = ep;
2218 	return (0);
2219 
2220 failed:
2221 	CTR3(KTR_IW_CXGBE, "%s: cm_id %p, FAILED (%d)", __func__, cm_id, rc);
2222 	return (-rc);
2223 }
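
/*
 * Usage sketch, assuming the OFED iw_cm API: a ULP binds its cm_id and
 * calls iw_cm_listen(), which reaches this function with the
 * already-created listening socket in cm_id->so.  Incoming connections
 * are then delivered through the connect-request upcall path.
 *
 *	err = iw_cm_listen(cm_id, backlog);
 */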
2224 
2225 void
2226 c4iw_destroy_listen_ep(struct iw_cm_id *cm_id)
2227 {
2228 	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
2229 
2230 	CTR4(KTR_IW_CXGBE, "%s: cm_id %p, so %p, state %s", __func__, cm_id,
2231 	    cm_id->so, states[ep->com.state]);
2232 
2233 	state_set(&ep->com, DEAD);
2234 	deref_cm_id(&ep->com);
2235 	c4iw_put_ep(&ep->com);
2236 
2237 	return;
2238 }
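
/*
 * Reference pairing used throughout this file; the listen teardown
 * above is the simplest instance:
 *
 *	alloc_ep(...)           returns with one reference held
 *	ref_cm_id(&ep->com)     +1 for the cm_id back-pointer
 *	...
 *	deref_cm_id(&ep->com)   -1 when the cm_id is detached
 *	c4iw_put_ep(&ep->com)   -1; the EP is freed when it hits zero
 */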
2239 
2240 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2241 {
2242 	int ret = 0;
2243 	int close = 0;
2244 	int fatal = 0;
2245 	struct c4iw_rdev *rdev;
2246 
2247 	mutex_lock(&ep->com.mutex);
2248 
2249 	CTR2(KTR_IW_CXGBE, "%s:cedB %p", __func__, ep);
2250 
2251 	rdev = &ep->com.dev->rdev;
2252 
2253 	if (c4iw_fatal_error(rdev)) {
2254 
2255 		CTR2(KTR_IW_CXGBE, "%s:ced1 %p", __func__, ep);
2256 		fatal = 1;
2257 		close_complete_upcall(ep, -ECONNRESET);
2258 		ep->com.state = DEAD;
2259 	}
2260 	CTR3(KTR_IW_CXGBE, "%s:ced2 %p %s", __func__, ep,
2261 	    states[ep->com.state]);
2262 
2263 	switch (ep->com.state) {
2264 
2265 		case MPA_REQ_WAIT:
2266 		case MPA_REQ_SENT:
2267 		case MPA_REQ_RCVD:
2268 		case MPA_REP_SENT:
2269 		case FPDU_MODE:
2270 			close = 1;
2271 			if (abrupt)
2272 				ep->com.state = ABORTING;
2273 			else {
2274 				ep->com.state = CLOSING;
2275 				START_EP_TIMER(ep);
2276 			}
2277 			set_bit(CLOSE_SENT, &ep->com.flags);
2278 			break;
2279 
2280 		case CLOSING:
2281 
2282 			if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2283 
2284 				close = 1;
2285 				if (abrupt) {
2286 					STOP_EP_TIMER(ep);
2287 					ep->com.state = ABORTING;
2288 				} else
2289 					ep->com.state = MORIBUND;
2290 			}
2291 			break;
2292 
2293 		case MORIBUND:
2294 		case ABORTING:
2295 		case DEAD:
2296 			CTR3(KTR_IW_CXGBE,
2297 			    "%s ignoring disconnect ep %p state %u", __func__,
2298 			    ep, ep->com.state);
2299 			break;
2300 
2301 		default:
2302 			BUG();
2303 			break;
2304 	}
2305 
2306 	mutex_unlock(&ep->com.mutex);
2307 
2308 	if (close) {
2309 
2310 		CTR2(KTR_IW_CXGBE, "%s:ced3 %p", __func__, ep);
2311 
2312 		if (abrupt) {
2313 
2314 			CTR2(KTR_IW_CXGBE, "%s:ced4 %p", __func__, ep);
2315 			set_bit(EP_DISC_ABORT, &ep->com.history);
2316 			close_complete_upcall(ep, -ECONNRESET);
2317 			ret = send_abort(ep);
2318 		} else {
2319 
2320 			CTR2(KTR_IW_CXGBE, "%s:ced5 %p", __func__, ep);
2321 			set_bit(EP_DISC_CLOSE, &ep->com.history);
2322 
2323 			if (!ep->parent_ep)
2324 				__state_set(&ep->com, MORIBUND);
2325 			ret = shutdown_socket(&ep->com);
2326 		}
2327 
2328 		if (ret) {
2329 
2330 			fatal = 1;
2331 		}
2332 	}
2333 
2334 	if (fatal) {
2335 		set_bit(EP_DISC_FAIL, &ep->com.history);
2336 		if (!abrupt) {
2337 			STOP_EP_TIMER(ep);
2338 			close_complete_upcall(ep, -EIO);
2339 		}
2340 		if (ep->com.qp) {
2341 			struct c4iw_qp_attributes attrs;
2342 
2343 			attrs.next_state = C4IW_QP_STATE_ERROR;
2344 			ret = c4iw_modify_qp(ep->com.dev, ep->com.qp,
2345 						C4IW_QP_ATTR_NEXT_STATE,
2346 						&attrs, 1);
2347 			if (ret) {
2348 				CTR2(KTR_IW_CXGBE, "%s:ced7 %p", __func__, ep);
2349 				printf("%s - qp <- error failed!\n", __func__);
2350 			}
2351 		}
2352 		ep->com.state = DEAD;
2353 		release_ep_resources(ep);
2354 		CTR2(KTR_IW_CXGBE, "%s:ced6 %p", __func__, ep);
2355 	}
2356 	CTR2(KTR_IW_CXGBE, "%s:cedE %p", __func__, ep);
2357 	return ret;
2358 }
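
/*
 * A sketch of the two disconnect flavors handled above:
 *
 *	c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
 *	    graceful: state -> CLOSING, EP timer armed, socket shut
 *	    down; MORIBUND/DEAD are reached via the close upcalls.
 *
 *	c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
 *	    abrupt: state -> ABORTING, close_complete_upcall() with
 *	    -ECONNRESET immediately, then send_abort() resets the TCP
 *	    connection.
 */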
2359 
2360 #ifdef C4IW_EP_REDIRECT
2361 int c4iw_ep_redirect(void *ctx, struct dst_entry *old, struct dst_entry *new,
2362 		struct l2t_entry *l2t)
2363 {
2364 	struct c4iw_ep *ep = ctx;
2365 
2366 	if (ep->dst != old)
2367 		return 0;
2368 
2369 	PDBG("%s ep %p redirect to dst %p l2t %p\n", __func__, ep, new,
2370 			l2t);
2371 	dst_hold(new);
2372 	cxgb4_l2t_release(ep->l2t);
2373 	ep->l2t = l2t;
2374 	dst_release(old);
2375 	ep->dst = new;
2376 	return 1;
2377 }
2378 #endif
2379 
2380 
2381 
2382 static void ep_timeout(unsigned long arg)
2383 {
2384 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2385 	int kickit = 0;
2386 
2387 	CTR2(KTR_IW_CXGBE, "%s:etB %p", __func__, ep);
2388 	spin_lock(&timeout_lock);
2389 
2390 	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
2391 
2392 		/*
2393 		 * Only insert if it is not already on the list.
2394 		 */
2395 		if (!ep->entry.next) {
2396 			list_add_tail(&ep->entry, &timeout_list);
2397 			kickit = 1;
2398 		}
2399 	}
2400 	spin_unlock(&timeout_lock);
2401 
2402 	if (kickit) {
2403 
2404 		CTR2(KTR_IW_CXGBE, "%s:et1 %p", __func__, ep);
2405 		queue_work(c4iw_taskq, &c4iw_task);
2406 	}
2407 	CTR2(KTR_IW_CXGBE, "%s:etE %p", __func__, ep);
2408 }
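
/*
 * ep_timeout() runs in timer context, where the EP mutex cannot be
 * taken and sleeping is not allowed, so the expiry is only recorded
 * here: the TIMEOUT flag is set and the EP is linked onto timeout_list
 * (ep->entry.next == NULL doubles as the "not already queued" test,
 * since EPs are zeroed at allocation); process_req() later handles the
 * expiry from the c4iw_taskq worker in a sleepable context.
 */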
2409 
2410 static int fw6_wr_rpl(struct adapter *sc, const __be64 *rpl)
2411 {
2412 	uint64_t val = be64toh(*rpl);
2413 	int ret;
2414 	struct c4iw_wr_wait *wr_waitp;
2415 
2416 	ret = (int)((val >> 8) & 0xff);
2417 	wr_waitp = (struct c4iw_wr_wait *)rpl[1];
2418 	CTR3(KTR_IW_CXGBE, "%s wr_waitp %p ret %u", __func__, wr_waitp, ret);
2419 	if (wr_waitp)
2420 		c4iw_wake_up(wr_waitp, ret ? -ret : 0);
2421 
2422 	return (0);
2423 }
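
/*
 * FW6 work-request reply layout assumed by the decode above, as two
 * big-endian 64-bit words:
 *
 *	rpl[0]	bits 15:8 hold the completion status extracted into ret
 *	rpl[1]	the opaque cookie stashed in the work request, here the
 *		struct c4iw_wr_wait * of the sleeper to wake
 */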
2424 
2425 static int fw6_cqe_handler(struct adapter *sc, const __be64 *rpl)
2426 {
2427 	struct t4_cqe cqe = *(const struct t4_cqe *)(&rpl[0]);
2428 
2429 	CTR2(KTR_IW_CXGBE, "%s rpl %p", __func__, rpl);
2430 	c4iw_ev_dispatch(sc->iwarp_softc, &cqe);
2431 
2432 	return (0);
2433 }
2434 
2435 static int terminate(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
2436 {
2437 	struct adapter *sc = iq->adapter;
2438 	const struct cpl_rdma_terminate *cpl = mtod(m, const void *);
2439 	unsigned int tid = GET_TID(cpl);
2440 	struct c4iw_qp_attributes attrs;
2441 	struct toepcb *toep = lookup_tid(sc, tid);
2442 	struct socket *so;
2443 	struct c4iw_ep *ep;
2444 
2445 	INP_WLOCK(toep->inp);
2446 	so = inp_inpcbtosocket(toep->inp);
2447 	ep = so->so_rcv.sb_upcallarg;
2448 	INP_WUNLOCK(toep->inp);
2449 
2450 	CTR2(KTR_IW_CXGBE, "%s:tB %p", __func__, ep);
2451 
2452 	if (ep && ep->com.qp) {
2453 
2454 		printk(KERN_WARNING MOD "TERM received tid %u qpid %u\n", tid,
2455 				ep->com.qp->wq.sq.qid);
2456 		attrs.next_state = C4IW_QP_STATE_TERMINATE;
2457 		c4iw_modify_qp(ep->com.dev, ep->com.qp, C4IW_QP_ATTR_NEXT_STATE, &attrs,
2458 				1);
2459 	} else
2460 		printk(KERN_WARNING MOD "TERM received tid %u no ep/qp\n", tid);
2461 	CTR2(KTR_IW_CXGBE, "%s:tE %p", __func__, ep);
2462 
2463 	return 0;
2464 }
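
/*
 * Lookup chain used by the TERMINATE handler above, from hardware tid
 * to iWARP endpoint:
 *
 *	toep = lookup_tid(sc, GET_TID(cpl));   TOE pcb for this tid
 *	so = inp_inpcbtosocket(toep->inp);     under INP_WLOCK
 *	ep = so->so_rcv.sb_upcallarg;          set when iWARP claimed
 *					       the socket's upcalls
 */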
2465 
2466 int __init c4iw_cm_init(void)
2467 {
2468 
2469 	TAILQ_INIT(&req_list);
2470 	spin_lock_init(&req_lock);
2471 	INIT_LIST_HEAD(&timeout_list);
2472 	spin_lock_init(&timeout_lock);
2473 
2474 	INIT_WORK(&c4iw_task, process_req);
2475 
2476 	c4iw_taskq = create_singlethread_workqueue("iw_cxgbe");
2477 	if (!c4iw_taskq)
2478 		return -ENOMEM;
2479 
2480 	/* Register handlers only once the workqueue they post to exists. */
2481 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, terminate);
2482 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, fw6_wr_rpl);
2483 	t4_register_fw_msg_handler(FW6_TYPE_CQE, fw6_cqe_handler);
2483 	t4_register_an_handler(c4iw_ev_handler);
2484 
2485 	return 0;
2486 }
2487 
2488 void __exit c4iw_cm_term(void)
2489 {
2490 	/* Unhook the handlers before tearing down what they depend on. */
2491 	t4_register_cpl_handler(CPL_RDMA_TERMINATE, NULL);
2492 	t4_register_fw_msg_handler(FW6_TYPE_WR_RPL, NULL);
2493 	t4_register_fw_msg_handler(FW6_TYPE_CQE, NULL);
2494 	t4_register_an_handler(NULL);
2495 
2496 	WARN_ON(!TAILQ_EMPTY(&req_list));
2497 	WARN_ON(!list_empty(&timeout_list));
2498 	flush_workqueue(c4iw_taskq);
2498 	destroy_workqueue(c4iw_taskq);
2499 }
2500 #endif
2501