1 /*
2  * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *	  copyright notice, this list of conditions and the following
16  *	  disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *	  copyright notice, this list of conditions and the following
20  *	  disclaimer in the documentation and/or other materials
21  *	  provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 #include <linux/module.h>
33 #include <linux/list.h>
34 #include <linux/workqueue.h>
35 #include <linux/skbuff.h>
36 #include <linux/timer.h>
37 #include <linux/notifier.h>
38 #include <linux/inetdevice.h>
39 #include <linux/ip.h>
40 #include <linux/tcp.h>
41 
42 #include <net/neighbour.h>
43 #include <net/netevent.h>
44 #include <net/route.h>
45 
46 #include "iw_cxgb4.h"
47 
48 static char *states[] = {
49 	"idle",
50 	"listen",
51 	"connecting",
52 	"mpa_wait_req",
53 	"mpa_req_sent",
54 	"mpa_req_rcvd",
55 	"mpa_rep_sent",
56 	"fpdu_mode",
57 	"aborting",
58 	"closing",
59 	"moribund",
60 	"dead",
61 	NULL,
62 };
63 
64 static int dack_mode;
65 module_param(dack_mode, int, 0644);
66 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=0)");
67 
68 int c4iw_max_read_depth = 8;
69 module_param(c4iw_max_read_depth, int, 0644);
70 MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
71 
72 static int enable_tcp_timestamps;
73 module_param(enable_tcp_timestamps, int, 0644);
74 MODULE_PARM_DESC(enable_tcp_timestamps, "Enable tcp timestamps (default=0)");
75 
76 static int enable_tcp_sack;
77 module_param(enable_tcp_sack, int, 0644);
78 MODULE_PARM_DESC(enable_tcp_sack, "Enable tcp SACK (default=0)");
79 
80 static int enable_tcp_window_scaling = 1;
81 module_param(enable_tcp_window_scaling, int, 0644);
82 MODULE_PARM_DESC(enable_tcp_window_scaling,
83 		 "Enable tcp window scaling (default=1)");
84 
85 int c4iw_debug;
86 module_param(c4iw_debug, int, 0644);
87 MODULE_PARM_DESC(c4iw_debug, "Enable debug logging (default=0)");
88 
89 static int peer2peer;
90 module_param(peer2peer, int, 0644);
91 MODULE_PARM_DESC(peer2peer, "Support peer2peer ULPs (default=0)");
92 
93 static int p2p_type = FW_RI_INIT_P2PTYPE_READ_REQ;
94 module_param(p2p_type, int, 0644);
95 MODULE_PARM_DESC(p2p_type, "RDMAP opcode to use for the RTR message: "
96 			   "1=RDMA_READ 0=RDMA_WRITE (default 1)");
97 
98 static int ep_timeout_secs = 60;
99 module_param(ep_timeout_secs, int, 0644);
100 MODULE_PARM_DESC(ep_timeout_secs, "CM Endpoint operation timeout "
101 				   "in seconds (default=60)");
102 
103 static int mpa_rev = 1;
104 module_param(mpa_rev, int, 0644);
105 MODULE_PARM_DESC(mpa_rev, "MPA Revision, 0 supports amso1100, "
106 		 "1 is spec compliant. (default=1)");
107 
108 static int markers_enabled;
109 module_param(markers_enabled, int, 0644);
110 MODULE_PARM_DESC(markers_enabled, "Enable MPA MARKERS (default(0)=disabled)");
111 
112 static int crc_enabled = 1;
113 module_param(crc_enabled, int, 0644);
114 MODULE_PARM_DESC(crc_enabled, "Enable MPA CRC (default(1)=enabled)");
115 
116 static int rcv_win = 256 * 1024;
117 module_param(rcv_win, int, 0644);
118 MODULE_PARM_DESC(rcv_win, "TCP receive window in bytes (default=256KB)");
119 
120 static int snd_win = 32 * 1024;
121 module_param(snd_win, int, 0644);
122 MODULE_PARM_DESC(snd_win, "TCP send window in bytes (default=32KB)");
123 
124 static struct workqueue_struct *workq;
125 
126 static struct sk_buff_head rxq;
127 
128 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp);
129 static void ep_timeout(unsigned long arg);
130 static void connect_reply_upcall(struct c4iw_ep *ep, int status);
131 
132 static LIST_HEAD(timeout_list);
133 static spinlock_t timeout_lock;
134 
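/*
 * Arm the per-endpoint timer.  If the timer is already pending it is
 * simply restarted; otherwise a reference on the endpoint is taken so
 * the ep cannot be freed while the timer is armed.  stop_ep_timer()
 * drops that reference.
 */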
135 static void start_ep_timer(struct c4iw_ep *ep)
136 {
137 	PDBG("%s ep %p\n", __func__, ep);
138 	if (timer_pending(&ep->timer)) {
139 		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
140 		del_timer_sync(&ep->timer);
141 	} else
142 		c4iw_get_ep(&ep->com);
143 	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
144 	ep->timer.data = (unsigned long)ep;
145 	ep->timer.function = ep_timeout;
146 	add_timer(&ep->timer);
147 }
148 
149 static void stop_ep_timer(struct c4iw_ep *ep)
150 {
151 	PDBG("%s ep %p\n", __func__, ep);
152 	if (!timer_pending(&ep->timer)) {
153 		printk(KERN_ERR "%s timer stopped when it's not running! "
154 		       "ep %p state %u\n", __func__, ep, ep->com.state);
155 		WARN_ON(1);
156 		return;
157 	}
158 	del_timer_sync(&ep->timer);
159 	c4iw_put_ep(&ep->com);
160 }
161 
162 static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
163 		  struct l2t_entry *l2e)
164 {
165 	int	error = 0;
166 
167 	if (c4iw_fatal_error(rdev)) {
168 		kfree_skb(skb);
169 		PDBG("%s - device in error state - dropping\n", __func__);
170 		return -EIO;
171 	}
172 	error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);
173 	if (error < 0)
174 		kfree_skb(skb);
175 	return error;
176 }
177 
178 int c4iw_ofld_send(struct c4iw_rdev *rdev, struct sk_buff *skb)
179 {
180 	int	error = 0;
181 
182 	if (c4iw_fatal_error(rdev)) {
183 		kfree_skb(skb);
184 		PDBG("%s - device in error state - dropping\n", __func__);
185 		return -EIO;
186 	}
187 	error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);
188 	if (error < 0)
189 		kfree_skb(skb);
190 	return error;
191 }
192 
193 static void release_tid(struct c4iw_rdev *rdev, u32 hwtid, struct sk_buff *skb)
194 {
195 	struct cpl_tid_release *req;
196 
197 	skb = get_skb(skb, sizeof *req, GFP_KERNEL);
198 	if (!skb)
199 		return;
200 	req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req));
201 	INIT_TP_WR(req, hwtid);
202 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid));
203 	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
204 	c4iw_ofld_send(rdev, skb);
205 	return;
206 }
207 
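/*
 * Derive the effective MSS from the negotiated TCP options: start from
 * the MTU table entry minus 40 bytes of IP/TCP headers, subtract 12
 * more if TCP timestamps are in use, and never go below 128 bytes.
 */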
208 static void set_emss(struct c4iw_ep *ep, u16 opt)
209 {
210 	ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;
211 	ep->mss = ep->emss;
212 	if (GET_TCPOPT_TSTAMP(opt))
213 		ep->emss -= 12;
214 	if (ep->emss < 128)
215 		ep->emss = 128;
216 	PDBG("%s mss_idx %u mss %u emss=%u\n", __func__, GET_TCPOPT_MSS(opt),
217 	     ep->mss, ep->emss);
218 }
219 
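/*
 * Endpoint state accessors.  epc->lock protects epc->state; callers
 * that already hold the lock use __state_set() directly.
 */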
220 static enum c4iw_ep_state state_read(struct c4iw_ep_common *epc)
221 {
222 	unsigned long flags;
223 	enum c4iw_ep_state state;
224 
225 	spin_lock_irqsave(&epc->lock, flags);
226 	state = epc->state;
227 	spin_unlock_irqrestore(&epc->lock, flags);
228 	return state;
229 }
230 
231 static void __state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
232 {
233 	epc->state = new;
234 }
235 
236 static void state_set(struct c4iw_ep_common *epc, enum c4iw_ep_state new)
237 {
238 	unsigned long flags;
239 
240 	spin_lock_irqsave(&epc->lock, flags);
241 	PDBG("%s - %s -> %s\n", __func__, states[epc->state], states[new]);
242 	__state_set(epc, new);
243 	spin_unlock_irqrestore(&epc->lock, flags);
244 	return;
245 }
246 
247 static void *alloc_ep(int size, gfp_t gfp)
248 {
249 	struct c4iw_ep_common *epc;
250 
251 	epc = kzalloc(size, gfp);
252 	if (epc) {
253 		kref_init(&epc->kref);
254 		spin_lock_init(&epc->lock);
255 		init_waitqueue_head(&epc->waitq);
256 	}
257 	PDBG("%s alloc ep %p\n", __func__, epc);
258 	return epc;
259 }
260 
261 void _c4iw_free_ep(struct kref *kref)
262 {
263 	struct c4iw_ep *ep;
264 
265 	ep = container_of(kref, struct c4iw_ep, com.kref);
266 	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
267 	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
268 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
269 		dst_release(ep->dst);
270 		cxgb4_l2t_release(ep->l2t);
271 	}
272 	kfree(ep);
273 }
274 
275 static void release_ep_resources(struct c4iw_ep *ep)
276 {
277 	set_bit(RELEASE_RESOURCES, &ep->com.flags);
278 	c4iw_put_ep(&ep->com);
279 }
280 
281 static int status2errno(int status)
282 {
283 	switch (status) {
284 	case CPL_ERR_NONE:
285 		return 0;
286 	case CPL_ERR_CONN_RESET:
287 		return -ECONNRESET;
288 	case CPL_ERR_ARP_MISS:
289 		return -EHOSTUNREACH;
290 	case CPL_ERR_CONN_TIMEDOUT:
291 		return -ETIMEDOUT;
292 	case CPL_ERR_TCAM_FULL:
293 		return -ENOMEM;
294 	case CPL_ERR_CONN_EXIST:
295 		return -EADDRINUSE;
296 	default:
297 		return -EIO;
298 	}
299 }
300 
301 /*
302  * Try to reuse already-allocated skbs when possible.
303  */
304 static struct sk_buff *get_skb(struct sk_buff *skb, int len, gfp_t gfp)
305 {
306 	if (skb && !skb_is_nonlinear(skb) && !skb_cloned(skb)) {
307 		skb_trim(skb, 0);
308 		skb_get(skb);
309 		skb_reset_transport_header(skb);
310 	} else {
311 		skb = alloc_skb(len, gfp);
312 	}
313 	return skb;
314 }
315 
316 static struct rtable *find_route(struct c4iw_dev *dev, __be32 local_ip,
317 				 __be32 peer_ip, __be16 local_port,
318 				 __be16 peer_port, u8 tos)
319 {
320 	struct rtable *rt;
321 	struct flowi fl = {
322 		.oif = 0,
323 		.nl_u = {
324 			 .ip4_u = {
325 				   .daddr = peer_ip,
326 				   .saddr = local_ip,
327 				   .tos = tos}
328 			 },
329 		.proto = IPPROTO_TCP,
330 		.uli_u = {
331 			  .ports = {
332 				    .sport = local_port,
333 				    .dport = peer_port}
334 			  }
335 	};
336 
337 	if (ip_route_output_flow(&init_net, &rt, &fl, NULL, 0))
338 		return NULL;
339 	return rt;
340 }
341 
342 static void arp_failure_discard(void *handle, struct sk_buff *skb)
343 {
344 	PDBG("%s c4iw_dev %p\n", __func__, handle);
345 	kfree_skb(skb);
346 }
347 
348 /*
349  * Handle an ARP failure for an active open.
350  */
351 static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
352 {
353 	printk(KERN_ERR MOD "ARP failure during connect\n");
354 	kfree_skb(skb);
355 }
356 
357 /*
358  * Handle an ARP failure for a CPL_ABORT_REQ.  Change it into a no RST variant
359  * and send it along.
360  */
361 static void abort_arp_failure(void *handle, struct sk_buff *skb)
362 {
363 	struct c4iw_rdev *rdev = handle;
364 	struct cpl_abort_req *req = cplhdr(skb);
365 
366 	PDBG("%s rdev %p\n", __func__, rdev);
367 	req->cmd = CPL_ABORT_NO_RST;
368 	c4iw_ofld_send(rdev, skb);
369 }
370 
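/*
 * Send a FW_FLOWC_WR describing this connection (tx channel, port,
 * ingress queue, initial sequence numbers, send window and MSS) to the
 * firmware.  It is issued once per connection before the first MPA
 * message goes out.
 */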
371 static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
372 {
373 	unsigned int flowclen = 80;
374 	struct fw_flowc_wr *flowc;
375 	int i;
376 
377 	skb = get_skb(skb, flowclen, GFP_KERNEL);
378 	flowc = (struct fw_flowc_wr *)__skb_put(skb, flowclen);
379 
380 	flowc->op_to_nparams = cpu_to_be32(FW_WR_OP(FW_FLOWC_WR) |
381 					   FW_FLOWC_WR_NPARAMS(8));
382 	flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(flowclen,
383 					  16)) | FW_WR_FLOWID(ep->hwtid));
384 
385 	flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
386 	flowc->mnemval[0].val = cpu_to_be32(0);
387 	flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
388 	flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
389 	flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
390 	flowc->mnemval[2].val = cpu_to_be32(ep->tx_chan);
391 	flowc->mnemval[3].mnemonic = FW_FLOWC_MNEM_IQID;
392 	flowc->mnemval[3].val = cpu_to_be32(ep->rss_qid);
393 	flowc->mnemval[4].mnemonic = FW_FLOWC_MNEM_SNDNXT;
394 	flowc->mnemval[4].val = cpu_to_be32(ep->snd_seq);
395 	flowc->mnemval[5].mnemonic = FW_FLOWC_MNEM_RCVNXT;
396 	flowc->mnemval[5].val = cpu_to_be32(ep->rcv_seq);
397 	flowc->mnemval[6].mnemonic = FW_FLOWC_MNEM_SNDBUF;
398 	flowc->mnemval[6].val = cpu_to_be32(snd_win);
399 	flowc->mnemval[7].mnemonic = FW_FLOWC_MNEM_MSS;
400 	flowc->mnemval[7].val = cpu_to_be32(ep->emss);
401 	/* Pad WR to 16 byte boundary */
402 	flowc->mnemval[8].mnemonic = 0;
403 	flowc->mnemval[8].val = 0;
404 	for (i = 0; i < 9; i++) {
405 		flowc->mnemval[i].r4[0] = 0;
406 		flowc->mnemval[i].r4[1] = 0;
407 		flowc->mnemval[i].r4[2] = 0;
408 	}
409 
410 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
411 	c4iw_ofld_send(&ep->com.dev->rdev, skb);
412 }
413 
414 static int send_halfclose(struct c4iw_ep *ep, gfp_t gfp)
415 {
416 	struct cpl_close_con_req *req;
417 	struct sk_buff *skb;
418 	int wrlen = roundup(sizeof *req, 16);
419 
420 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
421 	skb = get_skb(NULL, wrlen, gfp);
422 	if (!skb) {
423 		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
424 		return -ENOMEM;
425 	}
426 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
427 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
428 	req = (struct cpl_close_con_req *) skb_put(skb, wrlen);
429 	memset(req, 0, wrlen);
430 	INIT_TP_WR(req, ep->hwtid);
431 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_CON_REQ,
432 						    ep->hwtid));
433 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
434 }
435 
436 static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
437 {
438 	struct cpl_abort_req *req;
439 	int wrlen = roundup(sizeof *req, 16);
440 
441 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
442 	skb = get_skb(skb, wrlen, gfp);
443 	if (!skb) {
444 		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
445 		       __func__);
446 		return -ENOMEM;
447 	}
448 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
449 	t4_set_arp_err_handler(skb, &ep->com.dev->rdev, abort_arp_failure);
450 	req = (struct cpl_abort_req *) skb_put(skb, wrlen);
451 	memset(req, 0, wrlen);
452 	INIT_TP_WR(req, ep->hwtid);
453 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid));
454 	req->cmd = CPL_ABORT_SEND_RST;
455 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
456 }
457 
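/*
 * Build and send the CPL_ACT_OPEN_REQ that starts an active TCP open.
 * opt0/opt2 carry the negotiated options (MSS index, window scale,
 * timestamps, SACK, tx channel and RSS queue).
 */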
458 static int send_connect(struct c4iw_ep *ep)
459 {
460 	struct cpl_act_open_req *req;
461 	struct sk_buff *skb;
462 	u64 opt0;
463 	u32 opt2;
464 	unsigned int mtu_idx;
465 	int wscale;
466 	int wrlen = roundup(sizeof *req, 16);
467 
468 	PDBG("%s ep %p atid %u\n", __func__, ep, ep->atid);
469 
470 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
471 	if (!skb) {
472 		printk(KERN_ERR MOD "%s - failed to alloc skb.\n",
473 		       __func__);
474 		return -ENOMEM;
475 	}
476 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
477 
478 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
479 	wscale = compute_wscale(rcv_win);
480 	opt0 = KEEP_ALIVE(1) |
481 	       DELACK(1) |
482 	       WND_SCALE(wscale) |
483 	       MSS_IDX(mtu_idx) |
484 	       L2T_IDX(ep->l2t->idx) |
485 	       TX_CHAN(ep->tx_chan) |
486 	       SMAC_SEL(ep->smac_idx) |
487 	       DSCP(ep->tos) |
488 	       RCV_BUFSIZ(rcv_win>>10);
489 	opt2 = RX_CHANNEL(0) |
490 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
491 	if (enable_tcp_timestamps)
492 		opt2 |= TSTAMPS_EN(1);
493 	if (enable_tcp_sack)
494 		opt2 |= SACK_EN(1);
495 	if (wscale && enable_tcp_window_scaling)
496 		opt2 |= WND_SCALE_EN(1);
497 	t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
498 
499 	req = (struct cpl_act_open_req *) skb_put(skb, wrlen);
500 	INIT_TP_WR(req, 0);
501 	OPCODE_TID(req) = cpu_to_be32(
502 		MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ((ep->rss_qid<<14)|ep->atid)));
503 	req->local_port = ep->com.local_addr.sin_port;
504 	req->peer_port = ep->com.remote_addr.sin_port;
505 	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
506 	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
507 	req->opt0 = cpu_to_be64(opt0);
508 	req->params = 0;
509 	req->opt2 = cpu_to_be32(opt2);
510 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
511 }
512 
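/*
 * Transmit the MPA start request as immediate data in a
 * FW_OFLD_TX_DATA_WR.  The skb is referenced and stashed in
 * ep->mpa_skb so the payload stays in memory until the hardware acks
 * it (see fw4_ack()); then the MPA timer is started and the endpoint
 * moves to MPA_REQ_SENT.
 */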
513 static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb)
514 {
515 	int mpalen, wrlen;
516 	struct fw_ofld_tx_data_wr *req;
517 	struct mpa_message *mpa;
518 
519 	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
520 
521 	BUG_ON(skb_cloned(skb));
522 
523 	mpalen = sizeof(*mpa) + ep->plen;
524 	wrlen = roundup(mpalen + sizeof *req, 16);
525 	skb = get_skb(skb, wrlen, GFP_KERNEL);
526 	if (!skb) {
527 		connect_reply_upcall(ep, -ENOMEM);
528 		return;
529 	}
530 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
531 
532 	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
533 	memset(req, 0, wrlen);
534 	req->op_to_immdlen = cpu_to_be32(
535 		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
536 		FW_WR_COMPL(1) |
537 		FW_WR_IMMDLEN(mpalen));
538 	req->flowid_len16 = cpu_to_be32(
539 		FW_WR_FLOWID(ep->hwtid) |
540 		FW_WR_LEN16(wrlen >> 4));
541 	req->plen = cpu_to_be32(mpalen);
542 	req->tunnel_to_proxy = cpu_to_be32(
543 		FW_OFLD_TX_DATA_WR_FLUSH(1) |
544 		FW_OFLD_TX_DATA_WR_SHOVE(1));
545 
546 	mpa = (struct mpa_message *)(req + 1);
547 	memcpy(mpa->key, MPA_KEY_REQ, sizeof(mpa->key));
548 	mpa->flags = (crc_enabled ? MPA_CRC : 0) |
549 		     (markers_enabled ? MPA_MARKERS : 0);
550 	mpa->private_data_size = htons(ep->plen);
551 	mpa->revision = mpa_rev;
552 
553 	if (ep->plen)
554 		memcpy(mpa->private_data, ep->mpa_pkt + sizeof(*mpa), ep->plen);
555 
556 	/*
557 	 * Reference the mpa skb.  This ensures the data area
558 	 * will remain in memory until the hw acks the tx.
559 	 * Function fw4_ack() will deref it.
560 	 */
561 	skb_get(skb);
562 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
563 	BUG_ON(ep->mpa_skb);
564 	ep->mpa_skb = skb;
565 	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
566 	start_ep_timer(ep);
567 	state_set(&ep->com, MPA_REQ_SENT);
568 	ep->mpa_attr.initiator = 1;
569 	return;
570 }
571 
572 static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
573 {
574 	int mpalen, wrlen;
575 	struct fw_ofld_tx_data_wr *req;
576 	struct mpa_message *mpa;
577 	struct sk_buff *skb;
578 
579 	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
580 
581 	mpalen = sizeof(*mpa) + plen;
582 	wrlen = roundup(mpalen + sizeof *req, 16);
583 
584 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
585 	if (!skb) {
586 		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
587 		return -ENOMEM;
588 	}
589 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
590 
591 	req = (struct fw_ofld_tx_data_wr *)skb_put(skb, wrlen);
592 	memset(req, 0, wrlen);
593 	req->op_to_immdlen = cpu_to_be32(
594 		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
595 		FW_WR_COMPL(1) |
596 		FW_WR_IMMDLEN(mpalen));
597 	req->flowid_len16 = cpu_to_be32(
598 		FW_WR_FLOWID(ep->hwtid) |
599 		FW_WR_LEN16(wrlen >> 4));
600 	req->plen = cpu_to_be32(mpalen);
601 	req->tunnel_to_proxy = cpu_to_be32(
602 		FW_OFLD_TX_DATA_WR_FLUSH(1) |
603 		FW_OFLD_TX_DATA_WR_SHOVE(1));
604 
605 	mpa = (struct mpa_message *)(req + 1);
606 	memset(mpa, 0, sizeof(*mpa));
607 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
608 	mpa->flags = MPA_REJECT;
609 	mpa->revision = mpa_rev;
610 	mpa->private_data_size = htons(plen);
611 	if (plen)
612 		memcpy(mpa->private_data, pdata, plen);
613 
614 	/*
615 	 * Reference the mpa skb again.  This ensures the data area
616 	 * will remain in memory until the hw acks the tx.
617 	 * Function fw4_ack() will deref it.
618 	 */
619 	skb_get(skb);
620 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
621 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
622 	BUG_ON(ep->mpa_skb);
623 	ep->mpa_skb = skb;
624 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
625 }
626 
627 static int send_mpa_reply(struct c4iw_ep *ep, const void *pdata, u8 plen)
628 {
629 	int mpalen, wrlen;
630 	struct fw_ofld_tx_data_wr *req;
631 	struct mpa_message *mpa;
632 	struct sk_buff *skb;
633 
634 	PDBG("%s ep %p tid %u pd_len %d\n", __func__, ep, ep->hwtid, ep->plen);
635 
636 	mpalen = sizeof(*mpa) + plen;
637 	wrlen = roundup(mpalen + sizeof *req, 16);
638 
639 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
640 	if (!skb) {
641 		printk(KERN_ERR MOD "%s - cannot alloc skb!\n", __func__);
642 		return -ENOMEM;
643 	}
644 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
645 
646 	req = (struct fw_ofld_tx_data_wr *) skb_put(skb, wrlen);
647 	memset(req, 0, wrlen);
648 	req->op_to_immdlen = cpu_to_be32(
649 		FW_WR_OP(FW_OFLD_TX_DATA_WR) |
650 		FW_WR_COMPL(1) |
651 		FW_WR_IMMDLEN(mpalen));
652 	req->flowid_len16 = cpu_to_be32(
653 		FW_WR_FLOWID(ep->hwtid) |
654 		FW_WR_LEN16(wrlen >> 4));
655 	req->plen = cpu_to_be32(mpalen);
656 	req->tunnel_to_proxy = cpu_to_be32(
657 		FW_OFLD_TX_DATA_WR_FLUSH(1) |
658 		FW_OFLD_TX_DATA_WR_SHOVE(1));
659 
660 	mpa = (struct mpa_message *)(req + 1);
661 	memset(mpa, 0, sizeof(*mpa));
662 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
663 	mpa->flags = (ep->mpa_attr.crc_enabled ? MPA_CRC : 0) |
664 		     (markers_enabled ? MPA_MARKERS : 0);
665 	mpa->revision = mpa_rev;
666 	mpa->private_data_size = htons(plen);
667 	if (plen)
668 		memcpy(mpa->private_data, pdata, plen);
669 
670 	/*
671 	 * Reference the mpa skb.  This ensures the data area
672 	 * will remain in memory until the hw acks the tx.
673 	 * Function fw4_ack() will deref it.
674 	 */
675 	skb_get(skb);
676 	t4_set_arp_err_handler(skb, NULL, arp_failure_discard);
677 	ep->mpa_skb = skb;
678 	state_set(&ep->com, MPA_REP_SENT);
679 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
680 }
681 
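/*
 * CPL_ACT_ESTABLISH: the active open completed.  Swap the atid for the
 * hardware tid, record the initial sequence numbers, send the FLOWC
 * parameters and kick off MPA negotiation.
 */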
682 static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
683 {
684 	struct c4iw_ep *ep;
685 	struct cpl_act_establish *req = cplhdr(skb);
686 	unsigned int tid = GET_TID(req);
687 	unsigned int atid = GET_TID_TID(ntohl(req->tos_atid));
688 	struct tid_info *t = dev->rdev.lldi.tids;
689 
690 	ep = lookup_atid(t, atid);
691 
692 	PDBG("%s ep %p tid %u snd_isn %u rcv_isn %u\n", __func__, ep, tid,
693 	     be32_to_cpu(req->snd_isn), be32_to_cpu(req->rcv_isn));
694 
695 	dst_confirm(ep->dst);
696 
697 	/* setup the hwtid for this connection */
698 	ep->hwtid = tid;
699 	cxgb4_insert_tid(t, ep, tid);
700 
701 	ep->snd_seq = be32_to_cpu(req->snd_isn);
702 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
703 
704 	set_emss(ep, ntohs(req->tcp_opt));
705 
706 	/* dealloc the atid */
707 	cxgb4_free_atid(t, atid);
708 
709 	/* start MPA negotiation */
710 	send_flowc(ep, NULL);
711 	send_mpa_req(ep, skb);
712 
713 	return 0;
714 }
715 
716 static void close_complete_upcall(struct c4iw_ep *ep)
717 {
718 	struct iw_cm_event event;
719 
720 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
721 	memset(&event, 0, sizeof(event));
722 	event.event = IW_CM_EVENT_CLOSE;
723 	if (ep->com.cm_id) {
724 		PDBG("close complete delivered ep %p cm_id %p tid %u\n",
725 		     ep, ep->com.cm_id, ep->hwtid);
726 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
727 		ep->com.cm_id->rem_ref(ep->com.cm_id);
728 		ep->com.cm_id = NULL;
729 		ep->com.qp = NULL;
730 	}
731 }
732 
733 static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
734 {
735 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
736 	close_complete_upcall(ep);
737 	state_set(&ep->com, ABORTING);
738 	return send_abort(ep, skb, gfp);
739 }
740 
741 static void peer_close_upcall(struct c4iw_ep *ep)
742 {
743 	struct iw_cm_event event;
744 
745 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
746 	memset(&event, 0, sizeof(event));
747 	event.event = IW_CM_EVENT_DISCONNECT;
748 	if (ep->com.cm_id) {
749 		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
750 		     ep, ep->com.cm_id, ep->hwtid);
751 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
752 	}
753 }
754 
755 static void peer_abort_upcall(struct c4iw_ep *ep)
756 {
757 	struct iw_cm_event event;
758 
759 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
760 	memset(&event, 0, sizeof(event));
761 	event.event = IW_CM_EVENT_CLOSE;
762 	event.status = -ECONNRESET;
763 	if (ep->com.cm_id) {
764 		PDBG("abort delivered ep %p cm_id %p tid %u\n", ep,
765 		     ep->com.cm_id, ep->hwtid);
766 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
767 		ep->com.cm_id->rem_ref(ep->com.cm_id);
768 		ep->com.cm_id = NULL;
769 		ep->com.qp = NULL;
770 	}
771 }
772 
773 static void connect_reply_upcall(struct c4iw_ep *ep, int status)
774 {
775 	struct iw_cm_event event;
776 
777 	PDBG("%s ep %p tid %u status %d\n", __func__, ep, ep->hwtid, status);
778 	memset(&event, 0, sizeof(event));
779 	event.event = IW_CM_EVENT_CONNECT_REPLY;
780 	event.status = status;
781 	event.local_addr = ep->com.local_addr;
782 	event.remote_addr = ep->com.remote_addr;
783 
784 	if ((status == 0) || (status == -ECONNREFUSED)) {
785 		event.private_data_len = ep->plen;
786 		event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
787 	}
788 
789 	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
790 	     ep->hwtid, status);
791 	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
792 
793 	if (status < 0) {
794 		ep->com.cm_id->rem_ref(ep->com.cm_id);
795 		ep->com.cm_id = NULL;
796 		ep->com.qp = NULL;
797 	}
798 }
799 
800 static void connect_request_upcall(struct c4iw_ep *ep)
801 {
802 	struct iw_cm_event event;
803 
804 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
805 	memset(&event, 0, sizeof(event));
806 	event.event = IW_CM_EVENT_CONNECT_REQUEST;
807 	event.local_addr = ep->com.local_addr;
808 	event.remote_addr = ep->com.remote_addr;
809 	event.private_data_len = ep->plen;
810 	event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
811 	event.provider_data = ep;
812 	if (state_read(&ep->parent_ep->com) != DEAD) {
813 		c4iw_get_ep(&ep->com);
814 		ep->parent_ep->com.cm_id->event_handler(
815 						ep->parent_ep->com.cm_id,
816 						&event);
817 	}
818 	c4iw_put_ep(&ep->parent_ep->com);
819 	ep->parent_ep = NULL;
820 }
821 
822 static void established_upcall(struct c4iw_ep *ep)
823 {
824 	struct iw_cm_event event;
825 
826 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
827 	memset(&event, 0, sizeof(event));
828 	event.event = IW_CM_EVENT_ESTABLISHED;
829 	if (ep->com.cm_id) {
830 		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
831 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
832 	}
833 }
834 
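/*
 * Return RX credits to the hardware with a CPL_RX_DATA_ACK so the TCP
 * receive window reopens for the streaming-mode bytes just consumed.
 */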
835 static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
836 {
837 	struct cpl_rx_data_ack *req;
838 	struct sk_buff *skb;
839 	int wrlen = roundup(sizeof *req, 16);
840 
841 	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
842 	skb = get_skb(NULL, wrlen, GFP_KERNEL);
843 	if (!skb) {
844 		printk(KERN_ERR MOD "update_rx_credits - cannot alloc skb!\n");
845 		return 0;
846 	}
847 
848 	req = (struct cpl_rx_data_ack *) skb_put(skb, wrlen);
849 	memset(req, 0, wrlen);
850 	INIT_TP_WR(req, ep->hwtid);
851 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
852 						    ep->hwtid));
853 	req->credit_dack = cpu_to_be32(credits | RX_FORCE_ACK(1) |
854 				       F_RX_DACK_CHANGE |
855 				       V_RX_DACK_MODE(dack_mode));
856 	set_wr_txq(skb, CPL_PRIORITY_ACK, ep->ctrlq_idx);
857 	c4iw_ofld_send(&ep->com.dev->rdev, skb);
858 	return credits;
859 }
860 
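/*
 * Accumulate and validate the peer's MPA start reply.  On success the
 * negotiated attributes are applied, the QP is moved to RTS and the
 * connect reply upcall is delivered; any error aborts the connection.
 */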
861 static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
862 {
863 	struct mpa_message *mpa;
864 	u16 plen;
865 	struct c4iw_qp_attributes attrs;
866 	enum c4iw_qp_attr_mask mask;
867 	int err;
868 
869 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
870 
871 	/*
872 	 * Stop mpa timer.  If it expired, then the state has
873 	 * changed and we bail since ep_timeout already aborted
874 	 * the connection.
875 	 */
876 	stop_ep_timer(ep);
877 	if (state_read(&ep->com) != MPA_REQ_SENT)
878 		return;
879 
880 	/*
881 	 * If we get more than the supported amount of private data
882 	 * then we must fail this connection.
883 	 */
884 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
885 		err = -EINVAL;
886 		goto err;
887 	}
888 
889 	/*
890 	 * copy the new data into our accumulation buffer.
891 	 */
892 	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
893 				  skb->len);
894 	ep->mpa_pkt_len += skb->len;
895 
896 	/*
897 	 * if we don't even have the mpa message, then bail.
898 	 */
899 	if (ep->mpa_pkt_len < sizeof(*mpa))
900 		return;
901 	mpa = (struct mpa_message *) ep->mpa_pkt;
902 
903 	/* Validate MPA header. */
904 	if (mpa->revision != mpa_rev) {
905 		err = -EPROTO;
906 		goto err;
907 	}
908 	if (memcmp(mpa->key, MPA_KEY_REP, sizeof(mpa->key))) {
909 		err = -EPROTO;
910 		goto err;
911 	}
912 
913 	plen = ntohs(mpa->private_data_size);
914 
915 	/*
916 	 * Fail if there's too much private data.
917 	 */
918 	if (plen > MPA_MAX_PRIVATE_DATA) {
919 		err = -EPROTO;
920 		goto err;
921 	}
922 
923 	/*
924 	 * Fail if plen does not account for the packet size.
925 	 */
926 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
927 		err = -EPROTO;
928 		goto err;
929 	}
930 
931 	ep->plen = (u8) plen;
932 
933 	/*
934 	 * If we don't have all the pdata yet, then bail.
935 	 * We'll continue processing when more data arrives.
936 	 */
937 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
938 		return;
939 
940 	if (mpa->flags & MPA_REJECT) {
941 		err = -ECONNREFUSED;
942 		goto err;
943 	}
944 
945 	/*
946 	 * If we get here we have accumulated the entire mpa
947 	 * start reply message including private data. And
948 	 * the MPA header is valid.
949 	 */
950 	state_set(&ep->com, FPDU_MODE);
951 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
952 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
953 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
954 	ep->mpa_attr.version = mpa_rev;
955 	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
956 					    FW_RI_INIT_P2PTYPE_DISABLED;
957 	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
958 	     "xmit_marker_enabled=%d, version=%d\n", __func__,
959 	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
960 	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version);
961 
962 	attrs.mpa_attr = ep->mpa_attr;
963 	attrs.max_ird = ep->ird;
964 	attrs.max_ord = ep->ord;
965 	attrs.llp_stream_handle = ep;
966 	attrs.next_state = C4IW_QP_STATE_RTS;
967 
968 	mask = C4IW_QP_ATTR_NEXT_STATE |
969 	    C4IW_QP_ATTR_LLP_STREAM_HANDLE | C4IW_QP_ATTR_MPA_ATTR |
970 	    C4IW_QP_ATTR_MAX_IRD | C4IW_QP_ATTR_MAX_ORD;
971 
972 	/* bind QP and TID with INIT_WR */
973 	err = c4iw_modify_qp(ep->com.qp->rhp,
974 			     ep->com.qp, mask, &attrs, 1);
975 	if (err)
976 		goto err;
977 	goto out;
978 err:
979 	state_set(&ep->com, ABORTING);
980 	send_abort(ep, skb, GFP_KERNEL);
981 out:
982 	connect_reply_upcall(ep, err);
983 	return;
984 }
985 
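/*
 * Accumulate and validate the peer's MPA start request on a passive
 * connection, then deliver the connect request upcall to the listening
 * cm_id.  Malformed requests abort the connection.
 */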
986 static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
987 {
988 	struct mpa_message *mpa;
989 	u16 plen;
990 
991 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
992 
993 	if (state_read(&ep->com) != MPA_REQ_WAIT)
994 		return;
995 
996 	/*
997 	 * If we get more than the supported amount of private data
998 	 * then we must fail this connection.
999 	 */
1000 	if (ep->mpa_pkt_len + skb->len > sizeof(ep->mpa_pkt)) {
1001 		stop_ep_timer(ep);
1002 		abort_connection(ep, skb, GFP_KERNEL);
1003 		return;
1004 	}
1005 
1006 	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1007 
1008 	/*
1009 	 * Copy the new data into our accumulation buffer.
1010 	 */
1011 	skb_copy_from_linear_data(skb, &(ep->mpa_pkt[ep->mpa_pkt_len]),
1012 				  skb->len);
1013 	ep->mpa_pkt_len += skb->len;
1014 
1015 	/*
1016 	 * If we don't even have the mpa message, then bail.
1017 	 * We'll continue processing when more data arrives.
1018 	 */
1019 	if (ep->mpa_pkt_len < sizeof(*mpa))
1020 		return;
1021 
1022 	PDBG("%s enter (%s line %u)\n", __func__, __FILE__, __LINE__);
1023 	stop_ep_timer(ep);
1024 	mpa = (struct mpa_message *) ep->mpa_pkt;
1025 
1026 	/*
1027 	 * Validate MPA Header.
1028 	 */
1029 	if (mpa->revision != mpa_rev) {
1030 		abort_connection(ep, skb, GFP_KERNEL);
1031 		return;
1032 	}
1033 
1034 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
1035 		abort_connection(ep, skb, GFP_KERNEL);
1036 		return;
1037 	}
1038 
1039 	plen = ntohs(mpa->private_data_size);
1040 
1041 	/*
1042 	 * Fail if there's too much private data.
1043 	 */
1044 	if (plen > MPA_MAX_PRIVATE_DATA) {
1045 		abort_connection(ep, skb, GFP_KERNEL);
1046 		return;
1047 	}
1048 
1049 	/*
1050 	 * Fail if plen does not account for the packet size.
1051 	 */
1052 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
1053 		abort_connection(ep, skb, GFP_KERNEL);
1054 		return;
1055 	}
1056 	ep->plen = (u8) plen;
1057 
1058 	/*
1059 	 * If we don't have all the pdata yet, then bail.
1060 	 */
1061 	if (ep->mpa_pkt_len < (sizeof(*mpa) + plen))
1062 		return;
1063 
1064 	/*
1065 	 * If we get here we have accumulated the entire mpa
1066 	 * start request message including private data.
1067 	 */
1068 	ep->mpa_attr.initiator = 0;
1069 	ep->mpa_attr.crc_enabled = (mpa->flags & MPA_CRC) | crc_enabled ? 1 : 0;
1070 	ep->mpa_attr.recv_marker_enabled = markers_enabled;
1071 	ep->mpa_attr.xmit_marker_enabled = mpa->flags & MPA_MARKERS ? 1 : 0;
1072 	ep->mpa_attr.version = mpa_rev;
1073 	ep->mpa_attr.p2p_type = peer2peer ? p2p_type :
1074 					    FW_RI_INIT_P2PTYPE_DISABLED;
1075 	PDBG("%s - crc_enabled=%d, recv_marker_enabled=%d, "
1076 	     "xmit_marker_enabled=%d, version=%d p2p_type=%d\n", __func__,
1077 	     ep->mpa_attr.crc_enabled, ep->mpa_attr.recv_marker_enabled,
1078 	     ep->mpa_attr.xmit_marker_enabled, ep->mpa_attr.version,
1079 	     ep->mpa_attr.p2p_type);
1080 
1081 	state_set(&ep->com, MPA_REQ_RCVD);
1082 
1083 	/* drive upcall */
1084 	connect_request_upcall(ep);
1085 	return;
1086 }
1087 
1088 static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
1089 {
1090 	struct c4iw_ep *ep;
1091 	struct cpl_rx_data *hdr = cplhdr(skb);
1092 	unsigned int dlen = ntohs(hdr->len);
1093 	unsigned int tid = GET_TID(hdr);
1094 	struct tid_info *t = dev->rdev.lldi.tids;
1095 
1096 	ep = lookup_tid(t, tid);
1097 	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
1098 	skb_pull(skb, sizeof(*hdr));
1099 	skb_trim(skb, dlen);
1100 
1101 	ep->rcv_seq += dlen;
1102 	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
1103 
1104 	/* update RX credits */
1105 	update_rx_credits(ep, dlen);
1106 
1107 	switch (state_read(&ep->com)) {
1108 	case MPA_REQ_SENT:
1109 		process_mpa_reply(ep, skb);
1110 		break;
1111 	case MPA_REQ_WAIT:
1112 		process_mpa_request(ep, skb);
1113 		break;
1114 	case MPA_REP_SENT:
1115 		break;
1116 	default:
1117 		printk(KERN_ERR MOD "%s Unexpected streaming data."
1118 		       " ep %p state %d tid %u\n",
1119 		       __func__, ep, state_read(&ep->com), ep->hwtid);
1120 
1121 		/*
1122 		 * The ep will timeout and inform the ULP of the failure.
1123 		 * See ep_timeout().
1124 		 */
1125 		break;
1126 	}
1127 	return 0;
1128 }
1129 
1130 static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1131 {
1132 	struct c4iw_ep *ep;
1133 	struct cpl_abort_rpl_rss *rpl = cplhdr(skb);
1134 	unsigned long flags;
1135 	int release = 0;
1136 	unsigned int tid = GET_TID(rpl);
1137 	struct tid_info *t = dev->rdev.lldi.tids;
1138 
1139 	ep = lookup_tid(t, tid);
1140 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1141 	BUG_ON(!ep);
1142 	spin_lock_irqsave(&ep->com.lock, flags);
1143 	switch (ep->com.state) {
1144 	case ABORTING:
1145 		__state_set(&ep->com, DEAD);
1146 		release = 1;
1147 		break;
1148 	default:
1149 		printk(KERN_ERR "%s ep %p state %d\n",
1150 		     __func__, ep, ep->com.state);
1151 		break;
1152 	}
1153 	spin_unlock_irqrestore(&ep->com.lock, flags);
1154 
1155 	if (release)
1156 		release_ep_resources(ep);
1157 	return 0;
1158 }
1159 
1160 /*
1161  * Return whether a failed active open has allocated a TID
1162  */
1163 static inline int act_open_has_tid(int status)
1164 {
1165 	return status != CPL_ERR_TCAM_FULL && status != CPL_ERR_CONN_EXIST &&
1166 	       status != CPL_ERR_ARP_MISS;
1167 }
1168 
1169 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1170 {
1171 	struct c4iw_ep *ep;
1172 	struct cpl_act_open_rpl *rpl = cplhdr(skb);
1173 	unsigned int atid = GET_TID_TID(GET_AOPEN_ATID(
1174 					ntohl(rpl->atid_status)));
1175 	struct tid_info *t = dev->rdev.lldi.tids;
1176 	int status = GET_AOPEN_STATUS(ntohl(rpl->atid_status));
1177 
1178 	ep = lookup_atid(t, atid);
1179 
1180 	PDBG("%s ep %p atid %u status %u errno %d\n", __func__, ep, atid,
1181 	     status, status2errno(status));
1182 
1183 	if (status == CPL_ERR_RTX_NEG_ADVICE) {
1184 		printk(KERN_WARNING MOD "Connection problems for atid %u\n",
1185 			atid);
1186 		return 0;
1187 	}
1188 
1189 	connect_reply_upcall(ep, status2errno(status));
1190 	state_set(&ep->com, DEAD);
1191 
1192 	if (status && act_open_has_tid(status))
1193 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
1194 
1195 	cxgb4_free_atid(t, atid);
1196 	dst_release(ep->dst);
1197 	cxgb4_l2t_release(ep->l2t);
1198 	c4iw_put_ep(&ep->com);
1199 
1200 	return 0;
1201 }
1202 
1203 static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1204 {
1205 	struct cpl_pass_open_rpl *rpl = cplhdr(skb);
1206 	struct tid_info *t = dev->rdev.lldi.tids;
1207 	unsigned int stid = GET_TID(rpl);
1208 	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1209 
1210 	if (!ep) {
1211 		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
1212 		return 0;
1213 	}
1214 	PDBG("%s ep %p status %d error %d\n", __func__, ep,
1215 	     rpl->status, status2errno(rpl->status));
1216 	ep->com.rpl_err = status2errno(rpl->status);
1217 	ep->com.rpl_done = 1;
1218 	wake_up(&ep->com.waitq);
1219 
1220 	return 0;
1221 }
1222 
1223 static int listen_stop(struct c4iw_listen_ep *ep)
1224 {
1225 	struct sk_buff *skb;
1226 	struct cpl_close_listsvr_req *req;
1227 
1228 	PDBG("%s ep %p\n", __func__, ep);
1229 	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
1230 	if (!skb) {
1231 		printk(KERN_ERR MOD "%s - failed to alloc skb\n", __func__);
1232 		return -ENOMEM;
1233 	}
1234 	req = (struct cpl_close_listsvr_req *) skb_put(skb, sizeof(*req));
1235 	INIT_TP_WR(req, 0);
1236 	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ,
1237 						    ep->stid));
1238 	req->reply_ctrl = cpu_to_be16(
1239 			  QUEUENO(ep->com.dev->rdev.lldi.rxq_ids[0]));
1240 	set_wr_txq(skb, CPL_PRIORITY_SETUP, 0);
1241 	return c4iw_ofld_send(&ep->com.dev->rdev, skb);
1242 }
1243 
1244 static int close_listsrv_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1245 {
1246 	struct cpl_close_listsvr_rpl *rpl = cplhdr(skb);
1247 	struct tid_info *t = dev->rdev.lldi.tids;
1248 	unsigned int stid = GET_TID(rpl);
1249 	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
1250 
1251 	PDBG("%s ep %p\n", __func__, ep);
1252 	ep->com.rpl_err = status2errno(rpl->status);
1253 	ep->com.rpl_done = 1;
1254 	wake_up(&ep->com.waitq);
1255 	return 0;
1256 }
1257 
1258 static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
1259 		      struct cpl_pass_accept_req *req)
1260 {
1261 	struct cpl_pass_accept_rpl *rpl;
1262 	unsigned int mtu_idx;
1263 	u64 opt0;
1264 	u32 opt2;
1265 	int wscale;
1266 
1267 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1268 	BUG_ON(skb_cloned(skb));
1269 	skb_trim(skb, sizeof(*rpl));
1270 	skb_get(skb);
1271 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
1272 	wscale = compute_wscale(rcv_win);
1273 	opt0 = KEEP_ALIVE(1) |
1274 	       DELACK(1) |
1275 	       WND_SCALE(wscale) |
1276 	       MSS_IDX(mtu_idx) |
1277 	       L2T_IDX(ep->l2t->idx) |
1278 	       TX_CHAN(ep->tx_chan) |
1279 	       SMAC_SEL(ep->smac_idx) |
1280 	       DSCP(ep->tos) |
1281 	       RCV_BUFSIZ(rcv_win>>10);
1282 	opt2 = RX_CHANNEL(0) |
1283 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
1284 
1285 	if (enable_tcp_timestamps && req->tcpopt.tstamp)
1286 		opt2 |= TSTAMPS_EN(1);
1287 	if (enable_tcp_sack && req->tcpopt.sack)
1288 		opt2 |= SACK_EN(1);
1289 	if (wscale && enable_tcp_window_scaling)
1290 		opt2 |= WND_SCALE_EN(1);
1291 
1292 	rpl = cplhdr(skb);
1293 	INIT_TP_WR(rpl, ep->hwtid);
1294 	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_PASS_ACCEPT_RPL,
1295 				      ep->hwtid));
1296 	rpl->opt0 = cpu_to_be64(opt0);
1297 	rpl->opt2 = cpu_to_be32(opt2);
1298 	set_wr_txq(skb, CPL_PRIORITY_SETUP, ep->ctrlq_idx);
1299 	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
1300 
1301 	return;
1302 }
1303 
1304 static void reject_cr(struct c4iw_dev *dev, u32 hwtid, __be32 peer_ip,
1305 		      struct sk_buff *skb)
1306 {
1307 	PDBG("%s c4iw_dev %p tid %u peer_ip %x\n", __func__, dev, hwtid,
1308 	     peer_ip);
1309 	BUG_ON(skb_cloned(skb));
1310 	skb_trim(skb, sizeof(struct cpl_tid_release));
1311 	skb_get(skb);
1312 	release_tid(&dev->rdev, hwtid, skb);
1313 	return;
1314 }
1315 
1316 static void get_4tuple(struct cpl_pass_accept_req *req,
1317 		       __be32 *local_ip, __be32 *peer_ip,
1318 		       __be16 *local_port, __be16 *peer_port)
1319 {
1320 	int eth_len = G_ETH_HDR_LEN(be32_to_cpu(req->hdr_len));
1321 	int ip_len = G_IP_HDR_LEN(be32_to_cpu(req->hdr_len));
1322 	struct iphdr *ip = (struct iphdr *)((u8 *)(req + 1) + eth_len);
1323 	struct tcphdr *tcp = (struct tcphdr *)
1324 			     ((u8 *)(req + 1) + eth_len + ip_len);
1325 
1326 	PDBG("%s saddr 0x%x daddr 0x%x sport %u dport %u\n", __func__,
1327 	     ntohl(ip->saddr), ntohl(ip->daddr), ntohs(tcp->source),
1328 	     ntohs(tcp->dest));
1329 
1330 	*peer_ip = ip->saddr;
1331 	*local_ip = ip->daddr;
1332 	*peer_port = tcp->source;
1333 	*local_port = tcp->dest;
1334 
1335 	return;
1336 }
1337 
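/*
 * CPL_PASS_ACCEPT_REQ: an incoming SYN matched a listening server.
 * Look up the route and L2T entry, allocate and initialize a child
 * endpoint, and send the CPL_PASS_ACCEPT_RPL; on any failure release
 * the hardware tid via reject_cr().
 */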
1338 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
1339 {
1340 	struct c4iw_ep *child_ep, *parent_ep;
1341 	struct cpl_pass_accept_req *req = cplhdr(skb);
1342 	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
1343 	struct tid_info *t = dev->rdev.lldi.tids;
1344 	unsigned int hwtid = GET_TID(req);
1345 	struct dst_entry *dst;
1346 	struct l2t_entry *l2t;
1347 	struct rtable *rt;
1348 	__be32 local_ip, peer_ip;
1349 	__be16 local_port, peer_port;
1350 	struct net_device *pdev;
1351 	u32 tx_chan, smac_idx;
1352 	u16 rss_qid;
1353 	u32 mtu;
1354 	int step;
1355 	int txq_idx, ctrlq_idx;
1356 
1357 	parent_ep = lookup_stid(t, stid);
1358 	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
1359 
1360 	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
1361 
1362 	if (state_read(&parent_ep->com) != LISTEN) {
1363 		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
1364 		       __func__);
1365 		goto reject;
1366 	}
1367 
1368 	/* Find output route */
1369 	rt = find_route(dev, local_ip, peer_ip, local_port, peer_port,
1370 			GET_POPEN_TOS(ntohl(req->tos_stid)));
1371 	if (!rt) {
1372 		printk(KERN_ERR MOD "%s - failed to find dst entry!\n",
1373 		       __func__);
1374 		goto reject;
1375 	}
1376 	dst = &rt->dst;
1377 	if (dst->neighbour->dev->flags & IFF_LOOPBACK) {
1378 		pdev = ip_dev_find(&init_net, peer_ip);
1379 		BUG_ON(!pdev);
1380 		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1381 				    pdev, 0);
1382 		mtu = pdev->mtu;
1383 		tx_chan = cxgb4_port_chan(pdev);
1384 		smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
1385 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1386 		txq_idx = cxgb4_port_idx(pdev) * step;
1387 		ctrlq_idx = cxgb4_port_idx(pdev);
1388 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1389 		rss_qid = dev->rdev.lldi.rxq_ids[cxgb4_port_idx(pdev) * step];
1390 		dev_put(pdev);
1391 	} else {
1392 		l2t = cxgb4_l2t_get(dev->rdev.lldi.l2t, dst->neighbour,
1393 					dst->neighbour->dev, 0);
1394 		mtu = dst_mtu(dst);
1395 		tx_chan = cxgb4_port_chan(dst->neighbour->dev);
1396 		smac_idx = (cxgb4_port_viid(dst->neighbour->dev) & 0x7F) << 1;
1397 		step = dev->rdev.lldi.ntxq / dev->rdev.lldi.nchan;
1398 		txq_idx = cxgb4_port_idx(dst->neighbour->dev) * step;
1399 		ctrlq_idx = cxgb4_port_idx(dst->neighbour->dev);
1400 		step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
1401 		rss_qid = dev->rdev.lldi.rxq_ids[
1402 			  cxgb4_port_idx(dst->neighbour->dev) * step];
1403 	}
1404 	if (!l2t) {
1405 		printk(KERN_ERR MOD "%s - failed to allocate l2t entry!\n",
1406 		       __func__);
1407 		dst_release(dst);
1408 		goto reject;
1409 	}
1410 
1411 	child_ep = alloc_ep(sizeof(*child_ep), GFP_KERNEL);
1412 	if (!child_ep) {
1413 		printk(KERN_ERR MOD "%s - failed to allocate ep entry!\n",
1414 		       __func__);
1415 		cxgb4_l2t_release(l2t);
1416 		dst_release(dst);
1417 		goto reject;
1418 	}
1419 	state_set(&child_ep->com, CONNECTING);
1420 	child_ep->com.dev = dev;
1421 	child_ep->com.cm_id = NULL;
1422 	child_ep->com.local_addr.sin_family = PF_INET;
1423 	child_ep->com.local_addr.sin_port = local_port;
1424 	child_ep->com.local_addr.sin_addr.s_addr = local_ip;
1425 	child_ep->com.remote_addr.sin_family = PF_INET;
1426 	child_ep->com.remote_addr.sin_port = peer_port;
1427 	child_ep->com.remote_addr.sin_addr.s_addr = peer_ip;
1428 	c4iw_get_ep(&parent_ep->com);
1429 	child_ep->parent_ep = parent_ep;
1430 	child_ep->tos = GET_POPEN_TOS(ntohl(req->tos_stid));
1431 	child_ep->l2t = l2t;
1432 	child_ep->dst = dst;
1433 	child_ep->hwtid = hwtid;
1434 	child_ep->tx_chan = tx_chan;
1435 	child_ep->smac_idx = smac_idx;
1436 	child_ep->rss_qid = rss_qid;
1437 	child_ep->mtu = mtu;
1438 	child_ep->txq_idx = txq_idx;
1439 	child_ep->ctrlq_idx = ctrlq_idx;
1440 
1441 	PDBG("%s tx_chan %u smac_idx %u rss_qid %u\n", __func__,
1442 	     tx_chan, smac_idx, rss_qid);
1443 
1444 	init_timer(&child_ep->timer);
1445 	cxgb4_insert_tid(t, child_ep, hwtid);
1446 	accept_cr(child_ep, peer_ip, skb, req);
1447 	goto out;
1448 reject:
1449 	reject_cr(dev, hwtid, peer_ip, skb);
1450 out:
1451 	return 0;
1452 }
1453 
1454 static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
1455 {
1456 	struct c4iw_ep *ep;
1457 	struct cpl_pass_establish *req = cplhdr(skb);
1458 	struct tid_info *t = dev->rdev.lldi.tids;
1459 	unsigned int tid = GET_TID(req);
1460 
1461 	ep = lookup_tid(t, tid);
1462 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1463 	ep->snd_seq = be32_to_cpu(req->snd_isn);
1464 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
1465 
1466 	set_emss(ep, ntohs(req->tcp_opt));
1467 
1468 	dst_confirm(ep->dst);
1469 	state_set(&ep->com, MPA_REQ_WAIT);
1470 	start_ep_timer(ep);
1471 	send_flowc(ep, skb);
1472 
1473 	return 0;
1474 }
1475 
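/*
 * CPL_PEER_CLOSE: the peer sent a FIN.  Drive the endpoint state
 * machine towards CLOSING/MORIBUND/DEAD, informing the ULP and the QP
 * as appropriate for the current state.
 */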
1476 static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
1477 {
1478 	struct cpl_peer_close *hdr = cplhdr(skb);
1479 	struct c4iw_ep *ep;
1480 	struct c4iw_qp_attributes attrs;
1481 	unsigned long flags;
1482 	int disconnect = 1;
1483 	int release = 0;
1484 	int closing = 0;
1485 	struct tid_info *t = dev->rdev.lldi.tids;
1486 	unsigned int tid = GET_TID(hdr);
1487 
1488 	ep = lookup_tid(t, tid);
1489 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1490 	dst_confirm(ep->dst);
1491 
1492 	spin_lock_irqsave(&ep->com.lock, flags);
1493 	switch (ep->com.state) {
1494 	case MPA_REQ_WAIT:
1495 		__state_set(&ep->com, CLOSING);
1496 		break;
1497 	case MPA_REQ_SENT:
1498 		__state_set(&ep->com, CLOSING);
1499 		connect_reply_upcall(ep, -ECONNRESET);
1500 		break;
1501 	case MPA_REQ_RCVD:
1502 
1503 		/*
1504 		 * We're gonna mark this puppy DEAD, but keep
1505 		 * the reference on it until the ULP accepts or
1506 		 * rejects the CR. Also wake up anyone waiting
1507 		 * in rdma connection migration (see c4iw_accept_cr()).
1508 		 */
1509 		__state_set(&ep->com, CLOSING);
1510 		ep->com.rpl_done = 1;
1511 		ep->com.rpl_err = -ECONNRESET;
1512 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1513 		wake_up(&ep->com.waitq);
1514 		break;
1515 	case MPA_REP_SENT:
1516 		__state_set(&ep->com, CLOSING);
1517 		ep->com.rpl_done = 1;
1518 		ep->com.rpl_err = -ECONNRESET;
1519 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1520 		wake_up(&ep->com.waitq);
1521 		break;
1522 	case FPDU_MODE:
1523 		start_ep_timer(ep);
1524 		__state_set(&ep->com, CLOSING);
1525 		closing = 1;
1526 		peer_close_upcall(ep);
1527 		break;
1528 	case ABORTING:
1529 		disconnect = 0;
1530 		break;
1531 	case CLOSING:
1532 		__state_set(&ep->com, MORIBUND);
1533 		disconnect = 0;
1534 		break;
1535 	case MORIBUND:
1536 		stop_ep_timer(ep);
1537 		if (ep->com.cm_id && ep->com.qp) {
1538 			attrs.next_state = C4IW_QP_STATE_IDLE;
1539 			c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1540 				       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1541 		}
1542 		close_complete_upcall(ep);
1543 		__state_set(&ep->com, DEAD);
1544 		release = 1;
1545 		disconnect = 0;
1546 		break;
1547 	case DEAD:
1548 		disconnect = 0;
1549 		break;
1550 	default:
1551 		BUG_ON(1);
1552 	}
1553 	spin_unlock_irqrestore(&ep->com.lock, flags);
1554 	if (closing) {
1555 		attrs.next_state = C4IW_QP_STATE_CLOSING;
1556 		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
1557 			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
1558 	}
1559 	if (disconnect)
1560 		c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1561 	if (release)
1562 		release_ep_resources(ep);
1563 	return 0;
1564 }
1565 
1566 /*
1567  * Returns whether an ABORT_REQ_RSS message is a negative advice.
1568  */
1569 static int is_neg_adv_abort(unsigned int status)
1570 {
1571 	return status == CPL_ERR_RTX_NEG_ADVICE ||
1572 	       status == CPL_ERR_PERSIST_NEG_ADVICE;
1573 }
1574 
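/*
 * CPL_ABORT_REQ_RSS: the connection was reset.  Negative advice is
 * ignored; otherwise move the QP to ERROR where needed, notify the
 * ULP and answer with a no-RST CPL_ABORT_RPL.
 */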
1575 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
1576 {
1577 	struct cpl_abort_req_rss *req = cplhdr(skb);
1578 	struct c4iw_ep *ep;
1579 	struct cpl_abort_rpl *rpl;
1580 	struct sk_buff *rpl_skb;
1581 	struct c4iw_qp_attributes attrs;
1582 	int ret;
1583 	int release = 0;
1584 	unsigned long flags;
1585 	struct tid_info *t = dev->rdev.lldi.tids;
1586 	unsigned int tid = GET_TID(req);
1587 
1588 	ep = lookup_tid(t, tid);
1589 	if (is_neg_adv_abort(req->status)) {
1590 		PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
1591 		     ep->hwtid);
1592 		return 0;
1593 	}
1594 	spin_lock_irqsave(&ep->com.lock, flags);
1595 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
1596 	     ep->com.state);
1597 	switch (ep->com.state) {
1598 	case CONNECTING:
1599 		break;
1600 	case MPA_REQ_WAIT:
1601 		stop_ep_timer(ep);
1602 		break;
1603 	case MPA_REQ_SENT:
1604 		stop_ep_timer(ep);
1605 		connect_reply_upcall(ep, -ECONNRESET);
1606 		break;
1607 	case MPA_REP_SENT:
1608 		ep->com.rpl_done = 1;
1609 		ep->com.rpl_err = -ECONNRESET;
1610 		PDBG("waking up ep %p\n", ep);
1611 		wake_up(&ep->com.waitq);
1612 		break;
1613 	case MPA_REQ_RCVD:
1614 
1615 		/*
1616 		 * We're gonna mark this puppy DEAD, but keep
1617 		 * the reference on it until the ULP accepts or
1618 		 * rejects the CR. Also wake up anyone waiting
1619 		 * in rdma connection migration (see c4iw_accept_cr()).
1620 		 */
1621 		ep->com.rpl_done = 1;
1622 		ep->com.rpl_err = -ECONNRESET;
1623 		PDBG("waking up ep %p tid %u\n", ep, ep->hwtid);
1624 		wake_up(&ep->com.waitq);
1625 		break;
1626 	case MORIBUND:
1627 	case CLOSING:
1628 		stop_ep_timer(ep);
1629 		/*FALLTHROUGH*/
1630 	case FPDU_MODE:
1631 		if (ep->com.cm_id && ep->com.qp) {
1632 			attrs.next_state = C4IW_QP_STATE_ERROR;
1633 			ret = c4iw_modify_qp(ep->com.qp->rhp,
1634 				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
1635 				     &attrs, 1);
1636 			if (ret)
1637 				printk(KERN_ERR MOD
1638 				       "%s - qp <- error failed!\n",
1639 				       __func__);
1640 		}
1641 		peer_abort_upcall(ep);
1642 		break;
1643 	case ABORTING:
1644 		break;
1645 	case DEAD:
1646 		PDBG("%s PEER_ABORT IN DEAD STATE!!!!\n", __func__);
1647 		spin_unlock_irqrestore(&ep->com.lock, flags);
1648 		return 0;
1649 	default:
1650 		BUG_ON(1);
1651 		break;
1652 	}
1653 	dst_confirm(ep->dst);
1654 	if (ep->com.state != ABORTING) {
1655 		__state_set(&ep->com, DEAD);
1656 		release = 1;
1657 	}
1658 	spin_unlock_irqrestore(&ep->com.lock, flags);
1659 
1660 	rpl_skb = get_skb(skb, sizeof(*rpl), GFP_KERNEL);
1661 	if (!rpl_skb) {
1662 		printk(KERN_ERR MOD "%s - cannot allocate skb!\n",
1663 		       __func__);
1664 		release = 1;
1665 		goto out;
1666 	}
1667 	set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
1668 	rpl = (struct cpl_abort_rpl *) skb_put(rpl_skb, sizeof(*rpl));
1669 	INIT_TP_WR(rpl, ep->hwtid);
1670 	OPCODE_TID(rpl) = cpu_to_be32(MK_OPCODE_TID(CPL_ABORT_RPL, ep->hwtid));
1671 	rpl->cmd = CPL_ABORT_NO_RST;
1672 	c4iw_ofld_send(&ep->com.dev->rdev, rpl_skb);
1673 out:
1674 	if (release)
1675 		release_ep_resources(ep);
1676 	return 0;
1677 }
1678 
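/*
 * CPL_CLOSE_CON_RPL: our half-close completed.  Advance CLOSING to
 * MORIBUND, or finish the close from MORIBUND by idling the QP and
 * delivering the close-complete upcall.
 */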
1679 static int close_con_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
1680 {
1681 	struct c4iw_ep *ep;
1682 	struct c4iw_qp_attributes attrs;
1683 	struct cpl_close_con_rpl *rpl = cplhdr(skb);
1684 	unsigned long flags;
1685 	int release = 0;
1686 	struct tid_info *t = dev->rdev.lldi.tids;
1687 	unsigned int tid = GET_TID(rpl);
1688 
1689 	ep = lookup_tid(t, tid);
1690 
1691 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1692 	BUG_ON(!ep);
1693 
1694 	/* The cm_id may be null if we failed to connect */
1695 	spin_lock_irqsave(&ep->com.lock, flags);
1696 	switch (ep->com.state) {
1697 	case CLOSING:
1698 		__state_set(&ep->com, MORIBUND);
1699 		break;
1700 	case MORIBUND:
1701 		stop_ep_timer(ep);
1702 		if ((ep->com.cm_id) && (ep->com.qp)) {
1703 			attrs.next_state = C4IW_QP_STATE_IDLE;
1704 			c4iw_modify_qp(ep->com.qp->rhp,
1705 					     ep->com.qp,
1706 					     C4IW_QP_ATTR_NEXT_STATE,
1707 					     &attrs, 1);
1708 		}
1709 		close_complete_upcall(ep);
1710 		__state_set(&ep->com, DEAD);
1711 		release = 1;
1712 		break;
1713 	case ABORTING:
1714 	case DEAD:
1715 		break;
1716 	default:
1717 		BUG_ON(1);
1718 		break;
1719 	}
1720 	spin_unlock_irqrestore(&ep->com.lock, flags);
1721 	if (release)
1722 		release_ep_resources(ep);
1723 	return 0;
1724 }
1725 
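/*
 * CPL_RDMA_TERMINATE: save the peer's TERMINATE payload in the QP
 * attributes (only acted on while the endpoint is in FPDU_MODE).
 */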
1726 static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
1727 {
1728 	struct c4iw_ep *ep;
1729 	struct cpl_rdma_terminate *term = cplhdr(skb);
1730 	struct tid_info *t = dev->rdev.lldi.tids;
1731 	unsigned int tid = GET_TID(term);
1732 
1733 	ep = lookup_tid(t, tid);
1734 
1735 	if (state_read(&ep->com) != FPDU_MODE)
1736 		return 0;
1737 
1738 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1739 	skb_pull(skb, sizeof *term);
1740 	PDBG("%s saving %d bytes of term msg\n", __func__, skb->len);
1741 	skb_copy_from_linear_data(skb, ep->com.qp->attr.terminate_buffer,
1742 				  skb->len);
1743 	ep->com.qp->attr.terminate_msg_len = skb->len;
1744 	ep->com.qp->attr.is_terminate_local = 0;
1745 	return 0;
1746 }
1747 
1748 /*
1749  * Upcall from the adapter indicating data has been transmitted.
1750  * For us it's just the single MPA request or reply.  We can now free
1751  * the skb holding the mpa message.
1752  */
1753 static int fw4_ack(struct c4iw_dev *dev, struct sk_buff *skb)
1754 {
1755 	struct c4iw_ep *ep;
1756 	struct cpl_fw4_ack *hdr = cplhdr(skb);
1757 	u8 credits = hdr->credits;
1758 	unsigned int tid = GET_TID(hdr);
1759 	struct tid_info *t = dev->rdev.lldi.tids;
1760 
1761 
1762 	ep = lookup_tid(t, tid);
1763 	PDBG("%s ep %p tid %u credits %u\n", __func__, ep, ep->hwtid, credits);
1764 	if (credits == 0) {
1765 		PDBG(KERN_ERR "%s 0 credit ack ep %p tid %u state %u\n",
1766 			__func__, ep, ep->hwtid, state_read(&ep->com));
1767 		return 0;
1768 	}
1769 
1770 	dst_confirm(ep->dst);
1771 	if (ep->mpa_skb) {
1772 		PDBG("%s last streaming msg ack ep %p tid %u state %u "
1773 		     "initiator %u freeing skb\n", __func__, ep, ep->hwtid,
1774 		     state_read(&ep->com), ep->mpa_attr.initiator ? 1 : 0);
1775 		kfree_skb(ep->mpa_skb);
1776 		ep->mpa_skb = NULL;
1777 	}
1778 	return 0;
1779 }
1780 
1781 int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
1782 {
1783 	int err;
1784 	struct c4iw_ep *ep = to_ep(cm_id);
1785 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1786 
1787 	if (state_read(&ep->com) == DEAD) {
1788 		c4iw_put_ep(&ep->com);
1789 		return -ECONNRESET;
1790 	}
1791 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1792 	if (mpa_rev == 0)
1793 		abort_connection(ep, NULL, GFP_KERNEL);
1794 	else {
1795 		err = send_mpa_reject(ep, pdata, pdata_len);
1796 		err = c4iw_ep_disconnect(ep, 0, GFP_KERNEL);
1797 	}
1798 	c4iw_put_ep(&ep->com);
1799 	return 0;
1800 }
1801 
1802 int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1803 {
1804 	int err;
1805 	struct c4iw_qp_attributes attrs;
1806 	enum c4iw_qp_attr_mask mask;
1807 	struct c4iw_ep *ep = to_ep(cm_id);
1808 	struct c4iw_dev *h = to_c4iw_dev(cm_id->device);
1809 	struct c4iw_qp *qp = get_qhp(h, conn_param->qpn);
1810 
1811 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
1812 	if (state_read(&ep->com) == DEAD) {
1813 		err = -ECONNRESET;
1814 		goto err;
1815 	}
1816 
1817 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
1818 	BUG_ON(!qp);
1819 
1820 	if ((conn_param->ord > c4iw_max_read_depth) ||
1821 	    (conn_param->ird > c4iw_max_read_depth)) {
1822 		abort_connection(ep, NULL, GFP_KERNEL);
1823 		err = -EINVAL;
1824 		goto err;
1825 	}
1826 
1827 	cm_id->add_ref(cm_id);
1828 	ep->com.cm_id = cm_id;
1829 	ep->com.qp = qp;
1830 
1831 	ep->ird = conn_param->ird;
1832 	ep->ord = conn_param->ord;
1833 
1834 	if (peer2peer && ep->ird == 0)
1835 		ep->ird = 1;
1836 
1837 	PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
1838 
1839 	/* bind QP to EP and move to RTS */
1840 	attrs.mpa_attr = ep->mpa_attr;
1841 	attrs.max_ird = ep->ird;
1842 	attrs.max_ord = ep->ord;
1843 	attrs.llp_stream_handle = ep;
1844 	attrs.next_state = C4IW_QP_STATE_RTS;
1845 
1846 	/* bind QP and TID with INIT_WR */
1847 	mask = C4IW_QP_ATTR_NEXT_STATE |
1848 			     C4IW_QP_ATTR_LLP_STREAM_HANDLE |
1849 			     C4IW_QP_ATTR_MPA_ATTR |
1850 			     C4IW_QP_ATTR_MAX_IRD |
1851 			     C4IW_QP_ATTR_MAX_ORD;
1852 
1853 	err = c4iw_modify_qp(ep->com.qp->rhp,
1854 			     ep->com.qp, mask, &attrs, 1);
1855 	if (err)
1856 		goto err1;
1857 	err = send_mpa_reply(ep, conn_param->private_data,
1858 			     conn_param->private_data_len);
1859 	if (err)
1860 		goto err1;
1861 
1862 	state_set(&ep->com, FPDU_MODE);
1863 	established_upcall(ep);
1864 	c4iw_put_ep(&ep->com);
1865 	return 0;
1866 err1:
1867 	ep->com.cm_id = NULL;
1868 	ep->com.qp = NULL;
1869 	cm_id->rem_ref(cm_id);
1870 err:
1871 	c4iw_put_ep(&ep->com);
1872 	return err;
1873 }
1874 
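/*
 * iw_cm active connect: allocate an endpoint and an active TID, resolve
 * a route and L2T entry to the peer, then send the connect request to
 * the hardware.
 */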
1875 int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
1876 {
1877 	int err = 0;
1878 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
1879 	struct c4iw_ep *ep;
1880 	struct rtable *rt;
1881 	struct net_device *pdev;
1882 	int step;
1883 
1884 	if ((conn_param->ord > c4iw_max_read_depth) ||
1885 	    (conn_param->ird > c4iw_max_read_depth)) {
1886 		err = -EINVAL;
1887 		goto out;
1888 	}
1889 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
1890 	if (!ep) {
1891 		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
1892 		err = -ENOMEM;
1893 		goto out;
1894 	}
1895 	init_timer(&ep->timer);
1896 	ep->plen = conn_param->private_data_len;
1897 	if (ep->plen)
1898 		memcpy(ep->mpa_pkt + sizeof(struct mpa_message),
1899 		       conn_param->private_data, ep->plen);
1900 	ep->ird = conn_param->ird;
1901 	ep->ord = conn_param->ord;
1902 
1903 	if (peer2peer && ep->ord == 0)
1904 		ep->ord = 1;
1905 
1906 	cm_id->add_ref(cm_id);
1907 	ep->com.dev = dev;
1908 	ep->com.cm_id = cm_id;
1909 	ep->com.qp = get_qhp(dev, conn_param->qpn);
1910 	BUG_ON(!ep->com.qp);
1911 	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
1912 	     ep->com.qp, cm_id);
1913 
1914 	/*
1915 	 * Allocate an active TID to initiate a TCP connection.
1916 	 */
1917 	ep->atid = cxgb4_alloc_atid(dev->rdev.lldi.tids, ep);
1918 	if (ep->atid == -1) {
1919 		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
1920 		err = -ENOMEM;
1921 		goto fail2;
1922 	}
1923 
1924 	PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
1925 	     ntohl(cm_id->local_addr.sin_addr.s_addr),
1926 	     ntohs(cm_id->local_addr.sin_port),
1927 	     ntohl(cm_id->remote_addr.sin_addr.s_addr),
1928 	     ntohs(cm_id->remote_addr.sin_port));
1929 
1930 	/* find a route */
1931 	rt = find_route(dev,
1932 			cm_id->local_addr.sin_addr.s_addr,
1933 			cm_id->remote_addr.sin_addr.s_addr,
1934 			cm_id->local_addr.sin_port,
1935 			cm_id->remote_addr.sin_port, 0);
1936 	if (!rt) {
1937 		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
1938 		err = -EHOSTUNREACH;
1939 		goto fail3;
1940 	}
1941 	ep->dst = &rt->dst;
1942 
1943 	/* get a l2t entry */
1944 	if (ep->dst->neighbour->dev->flags & IFF_LOOPBACK) {
1945 		PDBG("%s LOOPBACK\n", __func__);
1946 		pdev = ip_dev_find(&init_net,
1947 				   cm_id->remote_addr.sin_addr.s_addr);
1948 		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1949 					ep->dst->neighbour,
1950 					pdev, 0);
1951 		ep->mtu = pdev->mtu;
1952 		ep->tx_chan = cxgb4_port_chan(pdev);
1953 		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
1954 		step = ep->com.dev->rdev.lldi.ntxq /
1955 		       ep->com.dev->rdev.lldi.nchan;
1956 		ep->txq_idx = cxgb4_port_idx(pdev) * step;
1957 		step = ep->com.dev->rdev.lldi.nrxq /
1958 		       ep->com.dev->rdev.lldi.nchan;
1959 		ep->ctrlq_idx = cxgb4_port_idx(pdev);
1960 		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1961 			      cxgb4_port_idx(pdev) * step];
1962 		dev_put(pdev);
1963 	} else {
1964 		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
1965 					ep->dst->neighbour,
1966 					ep->dst->neighbour->dev, 0);
1967 		ep->mtu = dst_mtu(ep->dst);
1968 		ep->tx_chan = cxgb4_port_chan(ep->dst->neighbour->dev);
1969 		ep->smac_idx = (cxgb4_port_viid(ep->dst->neighbour->dev) &
1970 				0x7F) << 1;
1971 		step = ep->com.dev->rdev.lldi.ntxq /
1972 		       ep->com.dev->rdev.lldi.nchan;
1973 		ep->txq_idx = cxgb4_port_idx(ep->dst->neighbour->dev) * step;
1974 		ep->ctrlq_idx = cxgb4_port_idx(ep->dst->neighbour->dev);
1975 		step = ep->com.dev->rdev.lldi.nrxq /
1976 		       ep->com.dev->rdev.lldi.nchan;
1977 		ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[
1978 			      cxgb4_port_idx(ep->dst->neighbour->dev) * step];
1979 	}
1980 	if (!ep->l2t) {
1981 		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
1982 		err = -ENOMEM;
1983 		goto fail4;
1984 	}
1985 
1986 	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
1987 		__func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
1988 		ep->l2t->idx);
1989 
1990 	state_set(&ep->com, CONNECTING);
1991 	ep->tos = 0;
1992 	ep->com.local_addr = cm_id->local_addr;
1993 	ep->com.remote_addr = cm_id->remote_addr;
1994 
1995 	/* send connect request to rnic */
1996 	err = send_connect(ep);
1997 	if (!err)
1998 		goto out;
1999 
2000 	cxgb4_l2t_release(ep->l2t);
2001 fail4:
2002 	dst_release(ep->dst);
2003 fail3:
2004 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
2005 fail2:
2006 	cm_id->rem_ref(cm_id);
2007 	c4iw_put_ep(&ep->com);
2008 out:
2009 	return err;
2010 }
2011 
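/*
 * iw_cm listen: allocate a listening endpoint and a server TID, ask the
 * hardware to create the server, then wait for the PASS_OPEN_RPL that
 * reports the result.
 */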
2012 int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
2013 {
2014 	int err = 0;
2015 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
2016 	struct c4iw_listen_ep *ep;
2017 
2019 	might_sleep();
2020 
2021 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
2022 	if (!ep) {
2023 		printk(KERN_ERR MOD "%s - cannot alloc ep.\n", __func__);
2024 		err = -ENOMEM;
2025 		goto fail1;
2026 	}
2027 	PDBG("%s ep %p\n", __func__, ep);
2028 	cm_id->add_ref(cm_id);
2029 	ep->com.cm_id = cm_id;
2030 	ep->com.dev = dev;
2031 	ep->backlog = backlog;
2032 	ep->com.local_addr = cm_id->local_addr;
2033 
2034 	/*
2035 	 * Allocate a server TID.
2036 	 */
2037 	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
2038 	if (ep->stid == -1) {
2039 		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
2040 		err = -ENOMEM;
2041 		goto fail2;
2042 	}
2043 
2044 	state_set(&ep->com, LISTEN);
2045 	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
2046 				  ep->com.local_addr.sin_addr.s_addr,
2047 				  ep->com.local_addr.sin_port,
2048 				  ep->com.dev->rdev.lldi.rxq_ids[0]);
2049 	if (err)
2050 		goto fail3;
2051 
2052 	/* wait for pass_open_rpl */
2053 	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
2054 	if (ep->com.rpl_done)
2055 		err = ep->com.rpl_err;
2056 	else {
2057 		printk(KERN_ERR MOD "Device %s not responding!\n",
2058 		       pci_name(ep->com.dev->rdev.lldi.pdev));
2059 		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
2060 		err = -EIO;
2061 	}
2062 	if (!err) {
2063 		cm_id->provider_data = ep;
2064 		goto out;
2065 	}
2066 fail3:
2067 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2068 fail2:
2069 	cm_id->rem_ref(cm_id);
2070 	c4iw_put_ep(&ep->com);
2071 fail1:
2072 out:
2073 	return err;
2074 }
2075 
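/*
 * iw_cm listen teardown: stop the hardware listener, wait for the
 * CLOSE_LISTSRV_RPL, then free the server TID and drop the endpoint
 * references.
 */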
2076 int c4iw_destroy_listen(struct iw_cm_id *cm_id)
2077 {
2078 	int err;
2079 	struct c4iw_listen_ep *ep = to_listen_ep(cm_id);
2080 
2081 	PDBG("%s ep %p\n", __func__, ep);
2082 
2083 	might_sleep();
2084 	state_set(&ep->com, DEAD);
2085 	ep->com.rpl_done = 0;
2086 	ep->com.rpl_err = 0;
2087 	err = listen_stop(ep);
2088 	if (err)
2089 		goto done;
2090 	wait_event_timeout(ep->com.waitq, ep->com.rpl_done, C4IW_WR_TO);
2091 	if (ep->com.rpl_done)
2092 		err = ep->com.rpl_err;
2093 	else {
2094 		printk(KERN_ERR MOD "Device %s not responding!\n",
2095 		       pci_name(ep->com.dev->rdev.lldi.pdev));
2096 		ep->com.dev->rdev.flags = T4_FATAL_ERROR;
2097 		err = -EIO;
2098 	}
2099 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
2100 done:
2101 	cm_id->rem_ref(cm_id);
2102 	c4iw_put_ep(&ep->com);
2103 	return err;
2104 }
2105 
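/*
 * Begin closing a connection.  Depending on 'abrupt' and the current
 * endpoint state this either aborts the connection or sends a half-close;
 * on a fatal adapter error the endpoint resources are released directly.
 */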
2106 int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
2107 {
2108 	int ret = 0;
2109 	unsigned long flags;
2110 	int close = 0;
2111 	int fatal = 0;
2112 	struct c4iw_rdev *rdev;
2113 
2114 	spin_lock_irqsave(&ep->com.lock, flags);
2115 
2116 	PDBG("%s ep %p state %s, abrupt %d\n", __func__, ep,
2117 	     states[ep->com.state], abrupt);
2118 
2119 	rdev = &ep->com.dev->rdev;
2120 	if (c4iw_fatal_error(rdev)) {
2121 		fatal = 1;
2122 		close_complete_upcall(ep);
2123 		ep->com.state = DEAD;
2124 	}
2125 	switch (ep->com.state) {
2126 	case MPA_REQ_WAIT:
2127 	case MPA_REQ_SENT:
2128 	case MPA_REQ_RCVD:
2129 	case MPA_REP_SENT:
2130 	case FPDU_MODE:
2131 		close = 1;
2132 		if (abrupt)
2133 			ep->com.state = ABORTING;
2134 		else {
2135 			ep->com.state = CLOSING;
2136 			start_ep_timer(ep);
2137 		}
2138 		set_bit(CLOSE_SENT, &ep->com.flags);
2139 		break;
2140 	case CLOSING:
2141 		if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
2142 			close = 1;
2143 			if (abrupt) {
2144 				stop_ep_timer(ep);
2145 				ep->com.state = ABORTING;
2146 			} else
2147 				ep->com.state = MORIBUND;
2148 		}
2149 		break;
2150 	case MORIBUND:
2151 	case ABORTING:
2152 	case DEAD:
2153 		PDBG("%s ignoring disconnect ep %p state %u\n",
2154 		     __func__, ep, ep->com.state);
2155 		break;
2156 	default:
2157 		BUG();
2158 		break;
2159 	}
2160 
2161 	spin_unlock_irqrestore(&ep->com.lock, flags);
2162 	if (close) {
2163 		if (abrupt)
2164 			ret = abort_connection(ep, NULL, gfp);
2165 		else
2166 			ret = send_halfclose(ep, gfp);
2167 		if (ret)
2168 			fatal = 1;
2169 	}
2170 	if (fatal)
2171 		release_ep_resources(ep);
2172 	return ret;
2173 }
2174 
2175 /*
2176  * These are the real handlers that are called from a
2177  * work queue.
2178  */
2179 static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
2180 	[CPL_ACT_ESTABLISH] = act_establish,
2181 	[CPL_ACT_OPEN_RPL] = act_open_rpl,
2182 	[CPL_RX_DATA] = rx_data,
2183 	[CPL_ABORT_RPL_RSS] = abort_rpl,
2184 	[CPL_ABORT_RPL] = abort_rpl,
2185 	[CPL_PASS_OPEN_RPL] = pass_open_rpl,
2186 	[CPL_CLOSE_LISTSRV_RPL] = close_listsrv_rpl,
2187 	[CPL_PASS_ACCEPT_REQ] = pass_accept_req,
2188 	[CPL_PASS_ESTABLISH] = pass_establish,
2189 	[CPL_PEER_CLOSE] = peer_close,
2190 	[CPL_ABORT_REQ_RSS] = peer_abort,
2191 	[CPL_CLOSE_CON_RPL] = close_con_rpl,
2192 	[CPL_RDMA_TERMINATE] = terminate,
2193 	[CPL_FW4_ACK] = fw4_ack
2194 };
2195 
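/*
 * Handle an endpoint whose timer has expired: depending on its state,
 * fail a pending connect and/or move the bound QP to ERROR, then abort
 * the connection.
 */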
2196 static void process_timeout(struct c4iw_ep *ep)
2197 {
2198 	struct c4iw_qp_attributes attrs;
2199 	int abort = 1;
2200 
2201 	spin_lock_irq(&ep->com.lock);
2202 	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
2203 	     ep->com.state);
2204 	switch (ep->com.state) {
2205 	case MPA_REQ_SENT:
2206 		__state_set(&ep->com, ABORTING);
2207 		connect_reply_upcall(ep, -ETIMEDOUT);
2208 		break;
2209 	case MPA_REQ_WAIT:
2210 		__state_set(&ep->com, ABORTING);
2211 		break;
2212 	case CLOSING:
2213 	case MORIBUND:
2214 		if (ep->com.cm_id && ep->com.qp) {
2215 			attrs.next_state = C4IW_QP_STATE_ERROR;
2216 			c4iw_modify_qp(ep->com.qp->rhp,
2217 				     ep->com.qp, C4IW_QP_ATTR_NEXT_STATE,
2218 				     &attrs, 1);
2219 		}
2220 		__state_set(&ep->com, ABORTING);
2221 		break;
2222 	default:
2223 		printk(KERN_ERR MOD "%s unexpected state ep %p tid %u state %u\n",
2224 			__func__, ep, ep->hwtid, ep->com.state);
2225 		WARN_ON(1);
2226 		abort = 0;
2227 	}
2228 	spin_unlock_irq(&ep->com.lock);
2229 	if (abort)
2230 		abort_connection(ep, NULL, GFP_KERNEL);
2231 	c4iw_put_ep(&ep->com);
2232 }
2233 
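/*
 * Drain the global timeout list, dropping timeout_lock around each call
 * into process_timeout().
 */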
2234 static void process_timedout_eps(void)
2235 {
2236 	struct c4iw_ep *ep;
2237 
2238 	spin_lock_irq(&timeout_lock);
2239 	while (!list_empty(&timeout_list)) {
2240 		struct list_head *tmp;
2241 
2242 		tmp = timeout_list.next;
2243 		list_del(tmp);
2244 		spin_unlock_irq(&timeout_lock);
2245 		ep = list_entry(tmp, struct c4iw_ep, entry);
2246 		process_timeout(ep);
2247 		spin_lock_irq(&timeout_lock);
2248 	}
2249 	spin_unlock_irq(&timeout_lock);
2250 }
2251 
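/*
 * Work handler: dispatch each deferred CPL message to its entry in
 * work_handlers[], freeing the skb when the handler returns 0, then
 * process any timed-out endpoints.
 */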
2252 static void process_work(struct work_struct *work)
2253 {
2254 	struct sk_buff *skb = NULL;
2255 	struct c4iw_dev *dev;
2256 	struct cpl_act_establish *rpl;
2257 	unsigned int opcode;
2258 	int ret;
2259 
2260 	while ((skb = skb_dequeue(&rxq))) {
2261 		rpl = cplhdr(skb);
2262 		dev = *((struct c4iw_dev **) (skb->cb + sizeof(void *)));
2263 		opcode = rpl->ot.opcode;
2264 
2265 		BUG_ON(!work_handlers[opcode]);
2266 		ret = work_handlers[opcode](dev, skb);
2267 		if (!ret)
2268 			kfree_skb(skb);
2269 	}
2270 	process_timedout_eps();
2271 }
2272 
2273 static DECLARE_WORK(skb_work, process_work);
2274 
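/*
 * Endpoint timer callback: put the endpoint on the timeout list and kick
 * the work queue so the timeout is handled in process context.
 */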
2275 static void ep_timeout(unsigned long arg)
2276 {
2277 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
2278 
2279 	spin_lock(&timeout_lock);
2280 	list_add_tail(&ep->entry, &timeout_list);
2281 	spin_unlock(&timeout_lock);
2282 	queue_work(workq, &skb_work);
2283 }
2284 
2285 /*
2286  * All the CM events are handled on a work queue to get a sleepable context.
2287  */
2288 static int sched(struct c4iw_dev *dev, struct sk_buff *skb)
2289 {
2291 	/*
2292 	 * Save dev in the skb->cb area.
2293 	 */
2294 	*((struct c4iw_dev **) (skb->cb + sizeof(void *))) = dev;
2295 
2296 	/*
2297 	 * Queue the skb and schedule the worker thread.
2298 	 */
2299 	skb_queue_tail(&rxq, skb);
2300 	queue_work(workq, &skb_work);
2301 	return 0;
2302 }
2303 
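/*
 * CPL_SET_TCB_RPL is handled here rather than via sched(); only an
 * unexpected status is worth logging.
 */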
2304 static int set_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
2305 {
2306 	struct cpl_set_tcb_rpl *rpl = cplhdr(skb);
2307 
2308 	if (rpl->status != CPL_ERR_NONE) {
2309 		printk(KERN_ERR MOD "Unexpected SET_TCB_RPL status %u "
2310 		       "for tid %u\n", rpl->status, GET_TID(rpl));
2311 	}
2312 	return 0;
2313 }
2314 
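/*
 * CPL_FW6_MSG carries firmware notifications: type 1 completes a pending
 * c4iw_wr_wait, type 2 carries an async CQE which is passed to
 * c4iw_ev_dispatch().
 */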
2315 static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
2316 {
2317 	struct cpl_fw6_msg *rpl = cplhdr(skb);
2318 	struct c4iw_wr_wait *wr_waitp;
2319 	int ret;
2320 
2321 	PDBG("%s type %u\n", __func__, rpl->type);
2322 
2323 	switch (rpl->type) {
2324 	case 1:
2325 		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
2326 		wr_waitp = (__force struct c4iw_wr_wait *)rpl->data[1];
2327 		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
2328 		if (wr_waitp) {
2329 			wr_waitp->ret = ret;
2330 			wr_waitp->done = 1;
2331 			wake_up(&wr_waitp->wait);
2332 		}
2333 		break;
2334 	case 2:
2335 		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
2336 		break;
2337 	default:
2338 		printk(KERN_ERR MOD "%s unexpected fw6 msg type %u\n", __func__,
2339 		       rpl->type);
2340 		break;
2341 	}
2342 	return 0;
2343 }
2344 
2345 /*
2346  * Most upcalls from the T4 Core go to sched() to
2347  * schedule the processing on a work queue.
2348  */
2349 c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
2350 	[CPL_ACT_ESTABLISH] = sched,
2351 	[CPL_ACT_OPEN_RPL] = sched,
2352 	[CPL_RX_DATA] = sched,
2353 	[CPL_ABORT_RPL_RSS] = sched,
2354 	[CPL_ABORT_RPL] = sched,
2355 	[CPL_PASS_OPEN_RPL] = sched,
2356 	[CPL_CLOSE_LISTSRV_RPL] = sched,
2357 	[CPL_PASS_ACCEPT_REQ] = sched,
2358 	[CPL_PASS_ESTABLISH] = sched,
2359 	[CPL_PEER_CLOSE] = sched,
2360 	[CPL_CLOSE_CON_RPL] = sched,
2361 	[CPL_ABORT_REQ_RSS] = sched,
2362 	[CPL_RDMA_TERMINATE] = sched,
2363 	[CPL_FW4_ACK] = sched,
2364 	[CPL_SET_TCB_RPL] = set_tcb_rpl,
2365 	[CPL_FW6_MSG] = fw6_msg
2366 };
2367 
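/*
 * CM module init: set up the deferred-receive queue and the
 * single-threaded workqueue used by sched() and the endpoint timers.
 */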
2368 int __init c4iw_cm_init(void)
2369 {
2370 	spin_lock_init(&timeout_lock);
2371 	skb_queue_head_init(&rxq);
2372 
2373 	workq = create_singlethread_workqueue("iw_cxgb4");
2374 	if (!workq)
2375 		return -ENOMEM;
2376 
2377 	return 0;
2378 }
2379 
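/*
 * CM module teardown: flush and destroy the workqueue.  Any endpoint
 * still on the timeout list at this point indicates a bug.
 */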
2380 void __exit c4iw_cm_term(void)
2381 {
2382 	WARN_ON(!list_empty(&timeout_list));
2383 	flush_workqueue(workq);
2384 	destroy_workqueue(workq);
2385 }
2386