1 /* cnic.c: Broadcom CNIC core network driver.
2  *
3  * Copyright (c) 2006-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
10  * Modified and maintained by: Michael Chan <mchan@broadcom.com>
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/module.h>
16 
17 #include <linux/kernel.h>
18 #include <linux/errno.h>
19 #include <linux/list.h>
20 #include <linux/slab.h>
21 #include <linux/pci.h>
22 #include <linux/init.h>
23 #include <linux/netdevice.h>
24 #include <linux/uio_driver.h>
25 #include <linux/in.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/delay.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_vlan.h>
30 #include <linux/prefetch.h>
31 #include <linux/random.h>
32 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
33 #define BCM_VLAN 1
34 #endif
35 #include <net/ip.h>
36 #include <net/tcp.h>
37 #include <net/route.h>
38 #include <net/ipv6.h>
39 #include <net/ip6_route.h>
40 #include <net/ip6_checksum.h>
41 #include <scsi/iscsi_if.h>
42 
43 #include "cnic_if.h"
44 #include "bnx2.h"
45 #include "bnx2x/bnx2x_reg.h"
46 #include "bnx2x/bnx2x_fw_defs.h"
47 #include "bnx2x/bnx2x_hsi.h"
48 #include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
49 #include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
50 #include "cnic.h"
51 #include "cnic_defs.h"
52 
53 #define DRV_MODULE_NAME		"cnic"
54 
55 static char version[] __devinitdata =
56 	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
57 
58 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
59 	      "Chen (zongxi@broadcom.com)");
60 MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
61 MODULE_LICENSE("GPL");
62 MODULE_VERSION(CNIC_MODULE_VERSION);
63 
64 /* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
65 static LIST_HEAD(cnic_dev_list);
66 static LIST_HEAD(cnic_udev_list);
67 static DEFINE_RWLOCK(cnic_dev_lock);
68 static DEFINE_MUTEX(cnic_lock);
69 
70 static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
71 
72 /* helper function, assuming cnic_lock is held */
73 static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
74 {
75 	return rcu_dereference_protected(cnic_ulp_tbl[type],
76 					 lockdep_is_held(&cnic_lock));
77 }
78 
79 static int cnic_service_bnx2(void *, void *);
80 static int cnic_service_bnx2x(void *, void *);
81 static int cnic_ctl(void *, struct cnic_ctl_info *);
82 
83 static struct cnic_ops cnic_bnx2_ops = {
84 	.cnic_owner	= THIS_MODULE,
85 	.cnic_handler	= cnic_service_bnx2,
86 	.cnic_ctl	= cnic_ctl,
87 };
88 
89 static struct cnic_ops cnic_bnx2x_ops = {
90 	.cnic_owner	= THIS_MODULE,
91 	.cnic_handler	= cnic_service_bnx2x,
92 	.cnic_ctl	= cnic_ctl,
93 };
94 
95 static struct workqueue_struct *cnic_wq;
96 
97 static void cnic_shutdown_rings(struct cnic_dev *);
98 static void cnic_init_rings(struct cnic_dev *);
99 static int cnic_cm_set_pg(struct cnic_sock *);
100 
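/*
 * UIO file operations for the userspace component (e.g. the iscsiuio
 * daemon).  Only one opener is allowed at a time (udev->uio_dev tracks
 * the owning minor, -1 when free), the caller must have CAP_NET_ADMIN,
 * and each open resets the L2 rings to a clean state.
 */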
101 static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
102 {
103 	struct cnic_uio_dev *udev = uinfo->priv;
104 	struct cnic_dev *dev;
105 
106 	if (!capable(CAP_NET_ADMIN))
107 		return -EPERM;
108 
109 	if (udev->uio_dev != -1)
110 		return -EBUSY;
111 
112 	rtnl_lock();
113 	dev = udev->dev;
114 
115 	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
116 		rtnl_unlock();
117 		return -ENODEV;
118 	}
119 
120 	udev->uio_dev = iminor(inode);
121 
122 	cnic_shutdown_rings(dev);
123 	cnic_init_rings(dev);
124 	rtnl_unlock();
125 
126 	return 0;
127 }
128 
129 static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
130 {
131 	struct cnic_uio_dev *udev = uinfo->priv;
132 
133 	udev->uio_dev = -1;
134 	return 0;
135 }
136 
137 static inline void cnic_hold(struct cnic_dev *dev)
138 {
139 	atomic_inc(&dev->ref_count);
140 }
141 
142 static inline void cnic_put(struct cnic_dev *dev)
143 {
144 	atomic_dec(&dev->ref_count);
145 }
146 
147 static inline void csk_hold(struct cnic_sock *csk)
148 {
149 	atomic_inc(&csk->ref_count);
150 }
151 
152 static inline void csk_put(struct cnic_sock *csk)
153 {
154 	atomic_dec(&csk->ref_count);
155 }
156 
157 static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
158 {
159 	struct cnic_dev *cdev;
160 
161 	read_lock(&cnic_dev_lock);
162 	list_for_each_entry(cdev, &cnic_dev_list, list) {
163 		if (netdev == cdev->netdev) {
164 			cnic_hold(cdev);
165 			read_unlock(&cnic_dev_lock);
166 			return cdev;
167 		}
168 	}
169 	read_unlock(&cnic_dev_lock);
170 	return NULL;
171 }
172 
173 static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
174 {
175 	atomic_inc(&ulp_ops->ref_count);
176 }
177 
178 static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
179 {
180 	atomic_dec(&ulp_ops->ref_count);
181 }
182 
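/*
 * The wrappers below do not touch hardware directly; they funnel
 * register, context and ring operations through the parent bnx2/bnx2x
 * netdriver via ethdev->drv_ctl(), with each request encoded in a
 * struct drv_ctl_info command block.
 */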
183 static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
184 {
185 	struct cnic_local *cp = dev->cnic_priv;
186 	struct cnic_eth_dev *ethdev = cp->ethdev;
187 	struct drv_ctl_info info;
188 	struct drv_ctl_io *io = &info.data.io;
189 
190 	info.cmd = DRV_CTL_CTX_WR_CMD;
191 	io->cid_addr = cid_addr;
192 	io->offset = off;
193 	io->data = val;
194 	ethdev->drv_ctl(dev->netdev, &info);
195 }
196 
197 static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
198 {
199 	struct cnic_local *cp = dev->cnic_priv;
200 	struct cnic_eth_dev *ethdev = cp->ethdev;
201 	struct drv_ctl_info info;
202 	struct drv_ctl_io *io = &info.data.io;
203 
204 	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
205 	io->offset = off;
206 	io->dma_addr = addr;
207 	ethdev->drv_ctl(dev->netdev, &info);
208 }
209 
210 static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
211 {
212 	struct cnic_local *cp = dev->cnic_priv;
213 	struct cnic_eth_dev *ethdev = cp->ethdev;
214 	struct drv_ctl_info info;
215 	struct drv_ctl_l2_ring *ring = &info.data.ring;
216 
217 	if (start)
218 		info.cmd = DRV_CTL_START_L2_CMD;
219 	else
220 		info.cmd = DRV_CTL_STOP_L2_CMD;
221 
222 	ring->cid = cid;
223 	ring->client_id = cl_id;
224 	ethdev->drv_ctl(dev->netdev, &info);
225 }
226 
227 static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
228 {
229 	struct cnic_local *cp = dev->cnic_priv;
230 	struct cnic_eth_dev *ethdev = cp->ethdev;
231 	struct drv_ctl_info info;
232 	struct drv_ctl_io *io = &info.data.io;
233 
234 	info.cmd = DRV_CTL_IO_WR_CMD;
235 	io->offset = off;
236 	io->data = val;
237 	ethdev->drv_ctl(dev->netdev, &info);
238 }
239 
240 static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
241 {
242 	struct cnic_local *cp = dev->cnic_priv;
243 	struct cnic_eth_dev *ethdev = cp->ethdev;
244 	struct drv_ctl_info info;
245 	struct drv_ctl_io *io = &info.data.io;
246 
247 	info.cmd = DRV_CTL_IO_RD_CMD;
248 	io->offset = off;
249 	ethdev->drv_ctl(dev->netdev, &info);
250 	return io->data;
251 }
252 
253 static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
254 {
255 	struct cnic_local *cp = dev->cnic_priv;
256 	struct cnic_eth_dev *ethdev = cp->ethdev;
257 	struct drv_ctl_info info;
258 
259 	if (reg)
260 		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
261 	else
262 		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
263 
264 	info.data.ulp_type = ulp_type;
265 	ethdev->drv_ctl(dev->netdev, &info);
266 }
267 
268 static int cnic_in_use(struct cnic_sock *csk)
269 {
270 	return test_bit(SK_F_INUSE, &csk->flags);
271 }
272 
273 static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
274 {
275 	struct cnic_local *cp = dev->cnic_priv;
276 	struct cnic_eth_dev *ethdev = cp->ethdev;
277 	struct drv_ctl_info info;
278 
279 	info.cmd = cmd;
280 	info.data.credit.credit_count = count;
281 	ethdev->drv_ctl(dev->netdev, &info);
282 }
283 
284 static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
285 {
286 	u32 i;
287 
288 	for (i = 0; i < cp->max_cid_space; i++) {
289 		if (cp->ctx_tbl[i].cid == cid) {
290 			*l5_cid = i;
291 			return 0;
292 		}
293 	}
294 	return -EINVAL;
295 }
296 
297 static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
298 			   struct cnic_sock *csk)
299 {
300 	struct iscsi_path path_req;
301 	char *buf = NULL;
302 	u16 len = 0;
303 	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
304 	struct cnic_ulp_ops *ulp_ops;
305 	struct cnic_uio_dev *udev = cp->udev;
306 	int rc = 0, retry = 0;
307 
308 	if (!udev || udev->uio_dev == -1)
309 		return -ENODEV;
310 
311 	if (csk) {
312 		len = sizeof(path_req);
313 		buf = (char *) &path_req;
314 		memset(&path_req, 0, len);
315 
316 		msg_type = ISCSI_KEVENT_PATH_REQ;
317 		path_req.handle = (u64) csk->l5_cid;
318 		if (test_bit(SK_F_IPV6, &csk->flags)) {
319 			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
320 			       sizeof(struct in6_addr));
321 			path_req.ip_addr_len = 16;
322 		} else {
323 			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
324 			       sizeof(struct in_addr));
325 			path_req.ip_addr_len = 4;
326 		}
327 		path_req.vlan_id = csk->vlan_id;
328 		path_req.pmtu = csk->mtu;
329 	}
330 
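	/*
	 * A PATH_REQ that does not reach the userspace receiver is retried
	 * up to three times with a 100 ms delay, since the listening
	 * daemon may not be ready yet; IF_DOWN failures are not retried.
	 */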
331 	while (retry < 3) {
332 		rc = 0;
333 		rcu_read_lock();
334 		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
335 		if (ulp_ops)
336 			rc = ulp_ops->iscsi_nl_send_msg(
337 				cp->ulp_handle[CNIC_ULP_ISCSI],
338 				msg_type, buf, len);
339 		rcu_read_unlock();
340 		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
341 			break;
342 
343 		msleep(100);
344 		retry++;
345 	}
346 	return rc;
347 }
348 
349 static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
350 
351 static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
352 				  char *buf, u16 len)
353 {
354 	int rc = -EINVAL;
355 
356 	switch (msg_type) {
357 	case ISCSI_UEVENT_PATH_UPDATE: {
358 		struct cnic_local *cp;
359 		u32 l5_cid;
360 		struct cnic_sock *csk;
361 		struct iscsi_path *path_resp;
362 
363 		if (len < sizeof(*path_resp))
364 			break;
365 
366 		path_resp = (struct iscsi_path *) buf;
367 		cp = dev->cnic_priv;
368 		l5_cid = (u32) path_resp->handle;
369 		if (l5_cid >= MAX_CM_SK_TBL_SZ)
370 			break;
371 
372 		rcu_read_lock();
373 		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
374 			rc = -ENODEV;
375 			rcu_read_unlock();
376 			break;
377 		}
378 		csk = &cp->csk_tbl[l5_cid];
379 		csk_hold(csk);
380 		if (cnic_in_use(csk) &&
381 		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
382 
383 			memcpy(csk->ha, path_resp->mac_addr, 6);
384 			if (test_bit(SK_F_IPV6, &csk->flags))
385 				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
386 				       sizeof(struct in6_addr));
387 			else
388 				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
389 				       sizeof(struct in_addr));
390 
391 			if (is_valid_ether_addr(csk->ha)) {
392 				cnic_cm_set_pg(csk);
393 			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
394 				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
395 
396 				cnic_cm_upcall(cp, csk,
397 					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
398 				clear_bit(SK_F_CONNECT_START, &csk->flags);
399 			}
400 		}
401 		csk_put(csk);
402 		rcu_read_unlock();
403 		rc = 0;
404 	}
405 	}
406 
407 	return rc;
408 }
409 
410 static int cnic_offld_prep(struct cnic_sock *csk)
411 {
412 	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
413 		return 0;
414 
415 	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
416 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
417 		return 0;
418 	}
419 
420 	return 1;
421 }
422 
423 static int cnic_close_prep(struct cnic_sock *csk)
424 {
425 	clear_bit(SK_F_CONNECT_START, &csk->flags);
426 	smp_mb__after_clear_bit();
427 
428 	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
429 		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
430 			msleep(1);
431 
432 		return 1;
433 	}
434 	return 0;
435 }
436 
437 static int cnic_abort_prep(struct cnic_sock *csk)
438 {
439 	clear_bit(SK_F_CONNECT_START, &csk->flags);
440 	smp_mb__after_clear_bit();
441 
442 	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
443 		msleep(1);
444 
445 	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
446 		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
447 		return 1;
448 	}
449 
450 	return 0;
451 }
452 
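/*
 * ULP (upper layer protocol, e.g. bnx2i/bnx2fc) registration.  A ULP
 * first registers its ops globally here and then attaches to individual
 * devices with cnic_register_device().  cnic_lock protects the ops
 * table; rtnl_lock serializes the cnic_init() upcalls against netdev
 * events.
 */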
453 int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
454 {
455 	struct cnic_dev *dev;
456 
457 	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
458 		pr_err("%s: Bad type %d\n", __func__, ulp_type);
459 		return -EINVAL;
460 	}
461 	mutex_lock(&cnic_lock);
462 	if (cnic_ulp_tbl_prot(ulp_type)) {
463 		pr_err("%s: Type %d has already been registered\n",
464 		       __func__, ulp_type);
465 		mutex_unlock(&cnic_lock);
466 		return -EBUSY;
467 	}
468 
469 	read_lock(&cnic_dev_lock);
470 	list_for_each_entry(dev, &cnic_dev_list, list) {
471 		struct cnic_local *cp = dev->cnic_priv;
472 
473 		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
474 	}
475 	read_unlock(&cnic_dev_lock);
476 
477 	atomic_set(&ulp_ops->ref_count, 0);
478 	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
479 	mutex_unlock(&cnic_lock);
480 
481 	/* Prevent race conditions with netdev_event */
482 	rtnl_lock();
483 	list_for_each_entry(dev, &cnic_dev_list, list) {
484 		struct cnic_local *cp = dev->cnic_priv;
485 
486 		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
487 			ulp_ops->cnic_init(dev);
488 	}
489 	rtnl_unlock();
490 
491 	return 0;
492 }
493 
494 int cnic_unregister_driver(int ulp_type)
495 {
496 	struct cnic_dev *dev;
497 	struct cnic_ulp_ops *ulp_ops;
498 	int i = 0;
499 
500 	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
501 		pr_err("%s: Bad type %d\n", __func__, ulp_type);
502 		return -EINVAL;
503 	}
504 	mutex_lock(&cnic_lock);
505 	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
506 	if (!ulp_ops) {
507 		pr_err("%s: Type %d has not been registered\n",
508 		       __func__, ulp_type);
509 		goto out_unlock;
510 	}
511 	read_lock(&cnic_dev_lock);
512 	list_for_each_entry(dev, &cnic_dev_list, list) {
513 		struct cnic_local *cp = dev->cnic_priv;
514 
515 		if (rcu_dereference(cp->ulp_ops[ulp_type])) {
516 			pr_err("%s: Type %d still has devices registered\n",
517 			       __func__, ulp_type);
518 			read_unlock(&cnic_dev_lock);
519 			goto out_unlock;
520 		}
521 	}
522 	read_unlock(&cnic_dev_lock);
523 
524 	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
525 
526 	mutex_unlock(&cnic_lock);
527 	synchronize_rcu();
528 	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
529 		msleep(100);
530 		i++;
531 	}
532 
533 	if (atomic_read(&ulp_ops->ref_count) != 0)
534 		netdev_warn(dev->netdev, "Failed waiting for ref count to go to zero\n");
535 	return 0;
536 
537 out_unlock:
538 	mutex_unlock(&cnic_lock);
539 	return -EINVAL;
540 }
541 
542 static int cnic_start_hw(struct cnic_dev *);
543 static void cnic_stop_hw(struct cnic_dev *);
544 
545 static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
546 				void *ulp_ctx)
547 {
548 	struct cnic_local *cp = dev->cnic_priv;
549 	struct cnic_ulp_ops *ulp_ops;
550 
551 	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
552 		pr_err("%s: Bad type %d\n", __func__, ulp_type);
553 		return -EINVAL;
554 	}
555 	mutex_lock(&cnic_lock);
556 	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
557 		pr_err("%s: Driver with type %d has not been registered\n",
558 		       __func__, ulp_type);
559 		mutex_unlock(&cnic_lock);
560 		return -EAGAIN;
561 	}
562 	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
563 		pr_err("%s: Type %d has already been registered to this device\n",
564 		       __func__, ulp_type);
565 		mutex_unlock(&cnic_lock);
566 		return -EBUSY;
567 	}
568 
569 	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
570 	cp->ulp_handle[ulp_type] = ulp_ctx;
571 	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
572 	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
573 	cnic_hold(dev);
574 
575 	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
576 		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
577 			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
578 
579 	mutex_unlock(&cnic_lock);
580 
581 	cnic_ulp_ctl(dev, ulp_type, true);
582 
583 	return 0;
584 
585 }
586 EXPORT_SYMBOL(cnic_register_driver);
587 
588 static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
589 {
590 	struct cnic_local *cp = dev->cnic_priv;
591 	int i = 0;
592 
593 	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
594 		pr_err("%s: Bad type %d\n", __func__, ulp_type);
595 		return -EINVAL;
596 	}
597 	mutex_lock(&cnic_lock);
598 	if (rcu_dereference(cp->ulp_ops[ulp_type])) {
599 		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
600 		cnic_put(dev);
601 	} else {
602 		pr_err("%s: device not registered to this ulp type %d\n",
603 		       __func__, ulp_type);
604 		mutex_unlock(&cnic_lock);
605 		return -EINVAL;
606 	}
607 	mutex_unlock(&cnic_lock);
608 
609 	if (ulp_type == CNIC_ULP_ISCSI)
610 		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
611 
612 	synchronize_rcu();
613 
614 	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
615 	       i < 20) {
616 		msleep(100);
617 		i++;
618 	}
619 	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
620 		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
621 
622 	cnic_ulp_ctl(dev, ulp_type, false);
623 
624 	return 0;
625 }
626 EXPORT_SYMBOL(cnic_unregister_driver);
627 
628 static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
629 			    u32 next)
630 {
631 	id_tbl->start = start_id;
632 	id_tbl->max = size;
633 	id_tbl->next = next;
634 	spin_lock_init(&id_tbl->lock);
635 	id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
636 	if (!id_tbl->table)
637 		return -ENOMEM;
638 
639 	return 0;
640 }
641 
642 static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
643 {
644 	kfree(id_tbl->table);
645 	id_tbl->table = NULL;
646 }
647 
648 static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
649 {
650 	int ret = -1;
651 
652 	id -= id_tbl->start;
653 	if (id >= id_tbl->max)
654 		return ret;
655 
656 	spin_lock(&id_tbl->lock);
657 	if (!test_bit(id, id_tbl->table)) {
658 		set_bit(id, id_tbl->table);
659 		ret = 0;
660 	}
661 	spin_unlock(&id_tbl->lock);
662 	return ret;
663 }
664 
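/*
 * cnic_alloc_new_id() below scans the bitmap round-robin starting at
 * ->next and wraps once to the beginning.  The wrap arithmetic
 * "(id + 1) & (max - 1)" assumes the table size is a power of 2.
 */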
665 /* Returns -1 if not successful */
666 static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
667 {
668 	u32 id;
669 
670 	spin_lock(&id_tbl->lock);
671 	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
672 	if (id >= id_tbl->max) {
673 		id = -1;
674 		if (id_tbl->next != 0) {
675 			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
676 			if (id >= id_tbl->next)
677 				id = -1;
678 		}
679 	}
680 
681 	if (id < id_tbl->max) {
682 		set_bit(id, id_tbl->table);
683 		id_tbl->next = (id + 1) & (id_tbl->max - 1);
684 		id += id_tbl->start;
685 	}
686 
687 	spin_unlock(&id_tbl->lock);
688 
689 	return id;
690 }
691 
692 static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
693 {
694 	if (id == -1)
695 		return;
696 
697 	id -= id_tbl->start;
698 	if (id >= id_tbl->max)
699 		return;
700 
701 	clear_bit(id, id_tbl->table);
702 }
703 
704 static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
705 {
706 	int i;
707 
708 	if (!dma->pg_arr)
709 		return;
710 
711 	for (i = 0; i < dma->num_pages; i++) {
712 		if (dma->pg_arr[i]) {
713 			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
714 					  dma->pg_arr[i], dma->pg_map_arr[i]);
715 			dma->pg_arr[i] = NULL;
716 		}
717 	}
718 	if (dma->pgtbl) {
719 		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
720 				  dma->pgtbl, dma->pgtbl_map);
721 		dma->pgtbl = NULL;
722 	}
723 	kfree(dma->pg_arr);
724 	dma->pg_arr = NULL;
725 	dma->num_pages = 0;
726 }
727 
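/*
 * Two page-table layouts are supported: the first variant writes each
 * 64-bit DMA address with the high 32 bits first (big-endian word
 * order), while the "_le" variant (apparently for the bnx2x family)
 * writes the low word first.  The choice is dispatched through
 * cp->setup_pgtbl, called from cnic_alloc_dma().
 */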
728 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
729 {
730 	int i;
731 	__le32 *page_table = (__le32 *) dma->pgtbl;
732 
733 	for (i = 0; i < dma->num_pages; i++) {
734 		/* Each entry needs to be in big endian format. */
735 		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
736 		page_table++;
737 		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
738 		page_table++;
739 	}
740 }
741 
742 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
743 {
744 	int i;
745 	__le32 *page_table = (__le32 *) dma->pgtbl;
746 
747 	for (i = 0; i < dma->num_pages; i++) {
748 		/* Each entry needs to be in little endian format. */
749 		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
750 		page_table++;
751 		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
752 		page_table++;
753 	}
754 }
755 
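/*
 * A single allocation backs both arrays here: pg_arr holds 'pages'
 * virtual addresses, and the dma_addr_t map array lives immediately
 * after it in the same buffer (see the pg_map_arr assignment below).
 * When use_pg_tbl is set, a hardware-visible page table describing the
 * pages is also built via cp->setup_pgtbl.
 */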
756 static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
757 			  int pages, int use_pg_tbl)
758 {
759 	int i, size;
760 	struct cnic_local *cp = dev->cnic_priv;
761 
762 	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
763 	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
764 	if (dma->pg_arr == NULL)
765 		return -ENOMEM;
766 
767 	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
768 	dma->num_pages = pages;
769 
770 	for (i = 0; i < pages; i++) {
771 		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
772 						    BCM_PAGE_SIZE,
773 						    &dma->pg_map_arr[i],
774 						    GFP_ATOMIC);
775 		if (dma->pg_arr[i] == NULL)
776 			goto error;
777 	}
778 	if (!use_pg_tbl)
779 		return 0;
780 
781 	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
782 			  ~(BCM_PAGE_SIZE - 1);
783 	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
784 					&dma->pgtbl_map, GFP_ATOMIC);
785 	if (dma->pgtbl == NULL)
786 		goto error;
787 
788 	cp->setup_pgtbl(dev, dma);
789 
790 	return 0;
791 
792 error:
793 	cnic_free_dma(dev, dma);
794 	return -ENOMEM;
795 }
796 
797 static void cnic_free_context(struct cnic_dev *dev)
798 {
799 	struct cnic_local *cp = dev->cnic_priv;
800 	int i;
801 
802 	for (i = 0; i < cp->ctx_blks; i++) {
803 		if (cp->ctx_arr[i].ctx) {
804 			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
805 					  cp->ctx_arr[i].ctx,
806 					  cp->ctx_arr[i].mapping);
807 			cp->ctx_arr[i].ctx = NULL;
808 		}
809 	}
810 }
811 
812 static void __cnic_free_uio(struct cnic_uio_dev *udev)
813 {
814 	uio_unregister_device(&udev->cnic_uinfo);
815 
816 	if (udev->l2_buf) {
817 		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
818 				  udev->l2_buf, udev->l2_buf_map);
819 		udev->l2_buf = NULL;
820 	}
821 
822 	if (udev->l2_ring) {
823 		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
824 				  udev->l2_ring, udev->l2_ring_map);
825 		udev->l2_ring = NULL;
826 	}
827 
828 	pci_dev_put(udev->pdev);
829 	kfree(udev);
830 }
831 
832 static void cnic_free_uio(struct cnic_uio_dev *udev)
833 {
834 	if (!udev)
835 		return;
836 
837 	write_lock(&cnic_dev_lock);
838 	list_del_init(&udev->list);
839 	write_unlock(&cnic_dev_lock);
840 	__cnic_free_uio(udev);
841 }
842 
843 static void cnic_free_resc(struct cnic_dev *dev)
844 {
845 	struct cnic_local *cp = dev->cnic_priv;
846 	struct cnic_uio_dev *udev = cp->udev;
847 
848 	if (udev) {
849 		udev->dev = NULL;
850 		cp->udev = NULL;
851 	}
852 
853 	cnic_free_context(dev);
854 	kfree(cp->ctx_arr);
855 	cp->ctx_arr = NULL;
856 	cp->ctx_blks = 0;
857 
858 	cnic_free_dma(dev, &cp->gbl_buf_info);
859 	cnic_free_dma(dev, &cp->kwq_info);
860 	cnic_free_dma(dev, &cp->kwq_16_data_info);
861 	cnic_free_dma(dev, &cp->kcq2.dma);
862 	cnic_free_dma(dev, &cp->kcq1.dma);
863 	kfree(cp->iscsi_tbl);
864 	cp->iscsi_tbl = NULL;
865 	kfree(cp->ctx_tbl);
866 	cp->ctx_tbl = NULL;
867 
868 	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
869 	cnic_free_id_tbl(&cp->cid_tbl);
870 }
871 
872 static int cnic_alloc_context(struct cnic_dev *dev)
873 {
874 	struct cnic_local *cp = dev->cnic_priv;
875 
876 	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
877 		int i, k, arr_size;
878 
879 		cp->ctx_blk_size = BCM_PAGE_SIZE;
880 		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
881 		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
882 			   sizeof(struct cnic_ctx);
883 		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
884 		if (cp->ctx_arr == NULL)
885 			return -ENOMEM;
886 
887 		k = 0;
888 		for (i = 0; i < 2; i++) {
889 			u32 j, reg, off, lo, hi;
890 
891 			if (i == 0)
892 				off = BNX2_PG_CTX_MAP;
893 			else
894 				off = BNX2_ISCSI_CTX_MAP;
895 
896 			reg = cnic_reg_rd_ind(dev, off);
897 			lo = reg >> 16;
898 			hi = reg & 0xffff;
899 			for (j = lo; j < hi; j += cp->cids_per_blk, k++)
900 				cp->ctx_arr[k].cid = j;
901 		}
902 
903 		cp->ctx_blks = k;
904 		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
905 			cp->ctx_blks = 0;
906 			return -ENOMEM;
907 		}
908 
909 		for (i = 0; i < cp->ctx_blks; i++) {
910 			cp->ctx_arr[i].ctx =
911 				dma_alloc_coherent(&dev->pcidev->dev,
912 						   BCM_PAGE_SIZE,
913 						   &cp->ctx_arr[i].mapping,
914 						   GFP_KERNEL);
915 			if (cp->ctx_arr[i].ctx == NULL)
916 				return -ENOMEM;
917 		}
918 	}
919 	return 0;
920 }
921 
922 static u16 cnic_bnx2_next_idx(u16 idx)
923 {
924 	return idx + 1;
925 }
926 
927 static u16 cnic_bnx2_hw_idx(u16 idx)
928 {
929 	return idx;
930 }
931 
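/*
 * On the bnx2x-style KCQ, the last slot of every queue page holds the
 * next-page pointer (see cnic_alloc_kcq()), so these index helpers
 * skip it.  For illustration, assuming 128 slots per page
 * (MAX_KCQE_CNT == 127): next_idx(126) skips the reserved slot 127 and
 * returns 128.
 */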
932 static u16 cnic_bnx2x_next_idx(u16 idx)
933 {
934 	idx++;
935 	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
936 		idx++;
937 
938 	return idx;
939 }
940 
941 static u16 cnic_bnx2x_hw_idx(u16 idx)
942 {
943 	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
944 		idx++;
945 	return idx;
946 }
947 
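/*
 * Allocates a kernel completion queue.  With use_pg_tbl the queue is
 * described to the chip by a separate page table and uses the simple
 * contiguous bnx2-style index helpers; without it, the pages are
 * chained into a ring by writing a bnx2x_bd_chain_next pointer into
 * the reserved last slot of each page, and the skipping index helpers
 * above apply.
 */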
948 static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
949 			  bool use_pg_tbl)
950 {
951 	int err, i, use_page_tbl = 0;
952 	struct kcqe **kcq;
953 
954 	if (use_pg_tbl)
955 		use_page_tbl = 1;
956 
957 	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
958 	if (err)
959 		return err;
960 
961 	kcq = (struct kcqe **) info->dma.pg_arr;
962 	info->kcq = kcq;
963 
964 	info->next_idx = cnic_bnx2_next_idx;
965 	info->hw_idx = cnic_bnx2_hw_idx;
966 	if (use_pg_tbl)
967 		return 0;
968 
969 	info->next_idx = cnic_bnx2x_next_idx;
970 	info->hw_idx = cnic_bnx2x_hw_idx;
971 
972 	for (i = 0; i < KCQ_PAGE_CNT; i++) {
973 		struct bnx2x_bd_chain_next *next =
974 			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
975 		int j = i + 1;
976 
977 		if (j >= KCQ_PAGE_CNT)
978 			j = 0;
979 		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
980 		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
981 	}
982 	return 0;
983 }
984 
985 static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
986 {
987 	struct cnic_local *cp = dev->cnic_priv;
988 	struct cnic_uio_dev *udev;
989 
990 	read_lock(&cnic_dev_lock);
991 	list_for_each_entry(udev, &cnic_udev_list, list) {
992 		if (udev->pdev == dev->pcidev) {
993 			udev->dev = dev;
994 			cp->udev = udev;
995 			read_unlock(&cnic_dev_lock);
996 			return 0;
997 		}
998 	}
999 	read_unlock(&cnic_dev_lock);
1000 
1001 	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1002 	if (!udev)
1003 		return -ENOMEM;
1004 
1005 	udev->uio_dev = -1;
1006 
1007 	udev->dev = dev;
1008 	udev->pdev = dev->pcidev;
1009 	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
1010 	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1011 					   &udev->l2_ring_map,
1012 					   GFP_KERNEL | __GFP_COMP);
1013 	if (!udev->l2_ring)
1014 		goto err_udev;
1015 
1016 	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1017 	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
1018 	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1019 					  &udev->l2_buf_map,
1020 					  GFP_KERNEL | __GFP_COMP);
1021 	if (!udev->l2_buf)
1022 		goto err_dma;
1023 
1024 	write_lock(&cnic_dev_lock);
1025 	list_add(&udev->list, &cnic_udev_list);
1026 	write_unlock(&cnic_dev_lock);
1027 
1028 	pci_dev_get(udev->pdev);
1029 
1030 	cp->udev = udev;
1031 
1032 	return 0;
1033  err_dma:
1034 	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
1035 			  udev->l2_ring, udev->l2_ring_map);
1036  err_udev:
1037 	kfree(udev);
1038 	return -ENOMEM;
1039 }
1040 
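/*
 * The UIO device exposes four mappings to userspace: mem[0] is the
 * device's register BAR (physical), mem[1] the status block, mem[2]
 * the L2 ring and mem[3] the L2 buffer (all logical).  Interrupt
 * delivery uses UIO_IRQ_CUSTOM, i.e. cnic raises events to userspace
 * itself instead of binding a kernel IRQ to the UIO device.
 */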
1041 static int cnic_init_uio(struct cnic_dev *dev)
1042 {
1043 	struct cnic_local *cp = dev->cnic_priv;
1044 	struct cnic_uio_dev *udev = cp->udev;
1045 	struct uio_info *uinfo;
1046 	int ret = 0;
1047 
1048 	if (!udev)
1049 		return -ENOMEM;
1050 
1051 	uinfo = &udev->cnic_uinfo;
1052 
1053 	uinfo->mem[0].addr = dev->netdev->base_addr;
1054 	uinfo->mem[0].internal_addr = dev->regview;
1055 	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
1056 	uinfo->mem[0].memtype = UIO_MEM_PHYS;
1057 
1058 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1059 		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1060 					PAGE_MASK;
1061 		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
1062 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
1063 		else
1064 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
1065 
1066 		uinfo->name = "bnx2_cnic";
1067 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1068 		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1069 			PAGE_MASK;
1070 		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
1071 
1072 		uinfo->name = "bnx2x_cnic";
1073 	}
1074 
1075 	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
1076 
1077 	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
1078 	uinfo->mem[2].size = udev->l2_ring_size;
1079 	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
1080 
1081 	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
1082 	uinfo->mem[3].size = udev->l2_buf_size;
1083 	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
1084 
1085 	uinfo->version = CNIC_MODULE_VERSION;
1086 	uinfo->irq = UIO_IRQ_CUSTOM;
1087 
1088 	uinfo->open = cnic_uio_open;
1089 	uinfo->release = cnic_uio_close;
1090 
1091 	if (udev->uio_dev == -1) {
1092 		if (!uinfo->priv) {
1093 			uinfo->priv = udev;
1094 
1095 			ret = uio_register_device(&udev->pdev->dev, uinfo);
1096 		}
1097 	} else {
1098 		cnic_init_rings(dev);
1099 	}
1100 
1101 	return ret;
1102 }
1103 
1104 static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1105 {
1106 	struct cnic_local *cp = dev->cnic_priv;
1107 	int ret;
1108 
1109 	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1110 	if (ret)
1111 		goto error;
1112 	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
1113 
1114 	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1115 	if (ret)
1116 		goto error;
1117 
1118 	ret = cnic_alloc_context(dev);
1119 	if (ret)
1120 		goto error;
1121 
1122 	ret = cnic_alloc_uio_rings(dev, 2);
1123 	if (ret)
1124 		goto error;
1125 
1126 	ret = cnic_init_uio(dev);
1127 	if (ret)
1128 		goto error;
1129 
1130 	return 0;
1131 
1132 error:
1133 	cnic_free_resc(dev);
1134 	return ret;
1135 }
1136 
1137 static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1138 {
1139 	struct cnic_local *cp = dev->cnic_priv;
1140 	int ctx_blk_size = cp->ethdev->ctx_blk_size;
1141 	int total_mem, blks, i;
1142 
1143 	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
1144 	blks = total_mem / ctx_blk_size;
1145 	if (total_mem % ctx_blk_size)
1146 		blks++;
1147 
1148 	if (blks > cp->ethdev->ctx_tbl_len)
1149 		return -ENOMEM;
1150 
1151 	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1152 	if (cp->ctx_arr == NULL)
1153 		return -ENOMEM;
1154 
1155 	cp->ctx_blks = blks;
1156 	cp->ctx_blk_size = ctx_blk_size;
1157 	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
1158 		cp->ctx_align = 0;
1159 	else
1160 		cp->ctx_align = ctx_blk_size;
1161 
1162 	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
1163 
1164 	for (i = 0; i < blks; i++) {
1165 		cp->ctx_arr[i].ctx =
1166 			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1167 					   &cp->ctx_arr[i].mapping,
1168 					   GFP_KERNEL);
1169 		if (cp->ctx_arr[i].ctx == NULL)
1170 			return -ENOMEM;
1171 
1172 		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
1173 			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
1174 				cnic_free_context(dev);
1175 				cp->ctx_blk_size += cp->ctx_align;
1176 				i = -1;
1177 				continue;
1178 			}
1179 		}
1180 	}
1181 	return 0;
1182 }
1183 
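/*
 * CID space layout on bnx2x: the first MAX_ISCSI_TBL_SZ entries of
 * ctx_tbl are reserved for iSCSI, and on E2 and later chips
 * max_fcoe_conn additional entries follow for FCoE.  Each context also
 * gets a per-connection kwqe data slot (CNIC_KWQ16_DATA_SIZE bytes)
 * carved out of the kwq_16 DMA pages.
 */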
1184 static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1185 {
1186 	struct cnic_local *cp = dev->cnic_priv;
1187 	struct cnic_eth_dev *ethdev = cp->ethdev;
1188 	u32 start_cid = ethdev->starting_cid;
1189 	int i, j, n, ret, pages;
1190 	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1191 
1192 	cp->iro_arr = ethdev->iro_arr;
1193 
1194 	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1195 	cp->iscsi_start_cid = start_cid;
1196 	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1197 
1198 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
1199 		cp->max_cid_space += dev->max_fcoe_conn;
1200 		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1201 		if (!cp->fcoe_init_cid)
1202 			cp->fcoe_init_cid = 0x10;
1203 	}
1204 
1205 	cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
1206 				GFP_KERNEL);
1207 	if (!cp->iscsi_tbl)
1208 		goto error;
1209 
1210 	cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
1211 				cp->max_cid_space, GFP_KERNEL);
1212 	if (!cp->ctx_tbl)
1213 		goto error;
1214 
1215 	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1216 		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1217 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1218 	}
1219 
1220 	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1221 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1222 
1223 	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1224 		PAGE_SIZE;
1225 
1226 	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1227 	if (ret)
1228 		return -ENOMEM;
1229 
1230 	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1231 	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1232 		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1233 
1234 		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1235 		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1236 						   off;
1237 
1238 		if ((i % n) == (n - 1))
1239 			j++;
1240 	}
1241 
1242 	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1243 	if (ret)
1244 		goto error;
1245 
1246 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
1247 		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1248 		if (ret)
1249 			goto error;
1250 	}
1251 
1252 	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
1253 	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1254 	if (ret)
1255 		goto error;
1256 
1257 	ret = cnic_alloc_bnx2x_context(dev);
1258 	if (ret)
1259 		goto error;
1260 
1261 	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1262 
1263 	cp->l2_rx_ring_size = 15;
1264 
1265 	ret = cnic_alloc_uio_rings(dev, 4);
1266 	if (ret)
1267 		goto error;
1268 
1269 	ret = cnic_init_uio(dev);
1270 	if (ret)
1271 		goto error;
1272 
1273 	return 0;
1274 
1275 error:
1276 	cnic_free_resc(dev);
1277 	return -ENOMEM;
1278 }
1279 
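/*
 * Free space in the KWQ: max_kwq_idx doubles as the index mask, so
 * (prod - con) & mask counts the occupied slots.  For illustration,
 * assuming a mask of 127 (a 128-entry ring): prod == 130 and con == 10
 * give 120 slots in flight and 127 - 120 == 7 free; one slot always
 * stays unused so a full ring is distinguishable from an empty one.
 */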
1280 static inline u32 cnic_kwq_avail(struct cnic_local *cp)
1281 {
1282 	return cp->max_kwq_idx -
1283 		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
1284 }
1285 
1286 static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1287 				  u32 num_wqes)
1288 {
1289 	struct cnic_local *cp = dev->cnic_priv;
1290 	struct kwqe *prod_qe;
1291 	u16 prod, sw_prod, i;
1292 
1293 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1294 		return -EAGAIN;		/* bnx2 is down */
1295 
1296 	spin_lock_bh(&cp->cnic_ulp_lock);
1297 	if (num_wqes > cnic_kwq_avail(cp) &&
1298 	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
1299 		spin_unlock_bh(&cp->cnic_ulp_lock);
1300 		return -EAGAIN;
1301 	}
1302 
1303 	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
1304 
1305 	prod = cp->kwq_prod_idx;
1306 	sw_prod = prod & MAX_KWQ_IDX;
1307 	for (i = 0; i < num_wqes; i++) {
1308 		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
1309 		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
1310 		prod++;
1311 		sw_prod = prod & MAX_KWQ_IDX;
1312 	}
1313 	cp->kwq_prod_idx = prod;
1314 
1315 	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1316 
1317 	spin_unlock_bh(&cp->cnic_ulp_lock);
1318 	return 0;
1319 }
1320 
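/*
 * bnx2x work requests are submitted as 16-byte "kwqe_16" slow path
 * entries.  Commands that need a larger payload (e.g. an iSCSI
 * connection update) copy it into the connection's preallocated
 * kwqe_data buffer and pass only its DMA address in the
 * l5cm_specific_data union.
 */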
1321 static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1322 				   union l5cm_specific_data *l5_data)
1323 {
1324 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1325 	dma_addr_t map;
1326 
1327 	map = ctx->kwqe_data_mapping;
1328 	l5_data->phy_address.lo = (u64) map & 0xffffffff;
1329 	l5_data->phy_address.hi = (u64) map >> 32;
1330 	return ctx->kwqe_data;
1331 }
1332 
1333 static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1334 				u32 type, union l5cm_specific_data *l5_data)
1335 {
1336 	struct cnic_local *cp = dev->cnic_priv;
1337 	struct l5cm_spe kwqe;
1338 	struct kwqe_16 *kwq[1];
1339 	u16 type_16;
1340 	int ret;
1341 
1342 	kwqe.hdr.conn_and_cmd_data =
1343 		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1344 			     BNX2X_HW_CID(cp, cid)));
1345 
1346 	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1347 	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1348 		   SPE_HDR_FUNCTION_ID;
1349 
1350 	kwqe.hdr.type = cpu_to_le16(type_16);
1351 	kwqe.hdr.reserved1 = 0;
1352 	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1353 	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1354 
1355 	kwq[0] = (struct kwqe_16 *) &kwqe;
1356 
1357 	spin_lock_bh(&cp->cnic_ulp_lock);
1358 	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1359 	spin_unlock_bh(&cp->cnic_ulp_lock);
1360 
1361 	if (ret == 1)
1362 		return 0;
1363 
1364 	return -EBUSY;
1365 }
1366 
1367 static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1368 				   struct kcqe *cqes[], u32 num_cqes)
1369 {
1370 	struct cnic_local *cp = dev->cnic_priv;
1371 	struct cnic_ulp_ops *ulp_ops;
1372 
1373 	rcu_read_lock();
1374 	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1375 	if (likely(ulp_ops)) {
1376 		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1377 					  cqes, num_cqes);
1378 	}
1379 	rcu_read_unlock();
1380 }
1381 
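/*
 * ISCSI_KWQE_OPCODE_INIT1 handling: derive the per-connection queue
 * sizes from the requested task/ccell counts, then program the per-PF
 * iSCSI parameters into the T/U/X/C storm internal RAM.  Nothing is
 * written if the device has no iSCSI connections provisioned.
 */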
1382 static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1383 {
1384 	struct cnic_local *cp = dev->cnic_priv;
1385 	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1386 	int hq_bds, pages;
1387 	u32 pfid = cp->pfid;
1388 
1389 	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1390 	cp->num_ccells = req1->num_ccells_per_conn;
1391 	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1392 			      cp->num_iscsi_tasks;
1393 	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1394 			BNX2X_ISCSI_R2TQE_SIZE;
1395 	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1396 	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1397 	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1398 	cp->num_cqs = req1->num_cqs;
1399 
1400 	if (!dev->max_iscsi_conn)
1401 		return 0;
1402 
1403 	/* init Tstorm RAM */
1404 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1405 		  req1->rq_num_wqes);
1406 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1407 		  PAGE_SIZE);
1408 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1409 		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1410 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1411 		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1412 		  req1->num_tasks_per_conn);
1413 
1414 	/* init Ustorm RAM */
1415 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1416 		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1417 		  req1->rq_buffer_size);
1418 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1419 		  PAGE_SIZE);
1420 	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1421 		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1422 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1423 		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1424 		  req1->num_tasks_per_conn);
1425 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1426 		  req1->rq_num_wqes);
1427 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1428 		  req1->cq_num_wqes);
1429 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1430 		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1431 
1432 	/* init Xstorm RAM */
1433 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1434 		  PAGE_SIZE);
1435 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1436 		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1437 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1438 		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1439 		  req1->num_tasks_per_conn);
1440 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1441 		  hq_bds);
1442 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1443 		  req1->num_tasks_per_conn);
1444 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1445 		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1446 
1447 	/* init Cstorm RAM */
1448 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1449 		  PAGE_SIZE);
1450 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1451 		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1452 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1453 		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1454 		  req1->num_tasks_per_conn);
1455 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1456 		  req1->cq_num_wqes);
1457 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1458 		  hq_bds);
1459 
1460 	return 0;
1461 }
1462 
1463 static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1464 {
1465 	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1466 	struct cnic_local *cp = dev->cnic_priv;
1467 	u32 pfid = cp->pfid;
1468 	struct iscsi_kcqe kcqe;
1469 	struct kcqe *cqes[1];
1470 
1471 	memset(&kcqe, 0, sizeof(kcqe));
1472 	if (!dev->max_iscsi_conn) {
1473 		kcqe.completion_status =
1474 			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1475 		goto done;
1476 	}
1477 
1478 	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1479 		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1480 	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1481 		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1482 		req2->error_bit_map[1]);
1483 
1484 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1485 		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1486 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1487 		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1488 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1489 		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1490 		req2->error_bit_map[1]);
1491 
1492 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1493 		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1494 
1495 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1496 
1497 done:
1498 	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1499 	cqes[0] = (struct kcqe *) &kcqe;
1500 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1501 
1502 	return 0;
1503 }
1504 
1505 static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1506 {
1507 	struct cnic_local *cp = dev->cnic_priv;
1508 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1509 
1510 	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1511 		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1512 
1513 		cnic_free_dma(dev, &iscsi->hq_info);
1514 		cnic_free_dma(dev, &iscsi->r2tq_info);
1515 		cnic_free_dma(dev, &iscsi->task_array_info);
1516 		cnic_free_id(&cp->cid_tbl, ctx->cid);
1517 	} else {
1518 		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
1519 	}
1520 
1521 	ctx->cid = 0;
1522 }
1523 
1524 static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1525 {
1526 	u32 cid;
1527 	int ret, pages;
1528 	struct cnic_local *cp = dev->cnic_priv;
1529 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1530 	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1531 
1532 	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1533 		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1534 		if (cid == -1) {
1535 			ret = -ENOMEM;
1536 			goto error;
1537 		}
1538 		ctx->cid = cid;
1539 		return 0;
1540 	}
1541 
1542 	cid = cnic_alloc_new_id(&cp->cid_tbl);
1543 	if (cid == -1) {
1544 		ret = -ENOMEM;
1545 		goto error;
1546 	}
1547 
1548 	ctx->cid = cid;
1549 	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
1550 
1551 	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1552 	if (ret)
1553 		goto error;
1554 
1555 	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
1556 	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1557 	if (ret)
1558 		goto error;
1559 
1560 	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1561 	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1562 	if (ret)
1563 		goto error;
1564 
1565 	return 0;
1566 
1567 error:
1568 	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1569 	return ret;
1570 }
1571 
1572 static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1573 				struct regpair *ctx_addr)
1574 {
1575 	struct cnic_local *cp = dev->cnic_priv;
1576 	struct cnic_eth_dev *ethdev = cp->ethdev;
1577 	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1578 	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1579 	unsigned long align_off = 0;
1580 	dma_addr_t ctx_map;
1581 	void *ctx;
1582 
1583 	if (cp->ctx_align) {
1584 		unsigned long mask = cp->ctx_align - 1;
1585 
1586 		if (cp->ctx_arr[blk].mapping & mask)
1587 			align_off = cp->ctx_align -
1588 				    (cp->ctx_arr[blk].mapping & mask);
1589 	}
1590 	ctx_map = cp->ctx_arr[blk].mapping + align_off +
1591 		(off * BNX2X_CONTEXT_MEM_SIZE);
1592 	ctx = cp->ctx_arr[blk].ctx + align_off +
1593 	      (off * BNX2X_CONTEXT_MEM_SIZE);
1594 	if (init)
1595 		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1596 
1597 	ctx_addr->lo = ctx_map & 0xffffffff;
1598 	ctx_addr->hi = (u64) ctx_map >> 32;
1599 	return ctx;
1600 }
1601 
1602 static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1603 				u32 num)
1604 {
1605 	struct cnic_local *cp = dev->cnic_priv;
1606 	struct iscsi_kwqe_conn_offload1 *req1 =
1607 			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
1608 	struct iscsi_kwqe_conn_offload2 *req2 =
1609 			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
1610 	struct iscsi_kwqe_conn_offload3 *req3;
1611 	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1612 	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1613 	u32 cid = ctx->cid;
1614 	u32 hw_cid = BNX2X_HW_CID(cp, cid);
1615 	struct iscsi_context *ictx;
1616 	struct regpair context_addr;
1617 	int i, j, n = 2, n_max;
1618 	u8 port = CNIC_PORT(cp);
1619 
1620 	ctx->ctx_flags = 0;
1621 	if (!req2->num_additional_wqes)
1622 		return -EINVAL;
1623 
1624 	n_max = req2->num_additional_wqes + 2;
1625 
1626 	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1627 	if (ictx == NULL)
1628 		return -ENOMEM;
1629 
1630 	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1631 
1632 	ictx->xstorm_ag_context.hq_prod = 1;
1633 
1634 	ictx->xstorm_st_context.iscsi.first_burst_length =
1635 		ISCSI_DEF_FIRST_BURST_LEN;
1636 	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1637 		ISCSI_DEF_MAX_RECV_SEG_LEN;
1638 	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1639 		req1->sq_page_table_addr_lo;
1640 	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1641 		req1->sq_page_table_addr_hi;
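	/*
	 * Note the .lo = <...>.hi / .hi = <...>.lo cross-assignments on the
	 * first PTE below (repeated later for the RQ and CQ first PTEs):
	 * the kwqe appears to carry the first PTE with its words in the
	 * opposite order, so the swap looks intentional rather than a typo.
	 */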
1642 	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1643 	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1644 	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1645 		iscsi->hq_info.pgtbl_map & 0xffffffff;
1646 	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1647 		(u64) iscsi->hq_info.pgtbl_map >> 32;
1648 	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1649 		iscsi->hq_info.pgtbl[0];
1650 	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1651 		iscsi->hq_info.pgtbl[1];
1652 	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1653 		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1654 	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1655 		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1656 	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1657 		iscsi->r2tq_info.pgtbl[0];
1658 	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1659 		iscsi->r2tq_info.pgtbl[1];
1660 	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1661 		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1662 	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1663 		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1664 	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1665 		BNX2X_ISCSI_PBL_NOT_CACHED;
1666 	ictx->xstorm_st_context.iscsi.flags.flags |=
1667 		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1668 	ictx->xstorm_st_context.iscsi.flags.flags |=
1669 		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1670 	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1671 		ETH_P_8021Q;
1672 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
1673 		cp->port_mode == CHIP_2_PORT_MODE) {
1674 
1675 		port = 0;
1676 	}
1677 	ictx->xstorm_st_context.common.flags =
1678 		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1679 	ictx->xstorm_st_context.common.flags =
1680 		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
1681 
1682 	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1683 	/* TSTORM requires the RQ doorbell base address, not the PTE address */
1684 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1685 		req2->rq_page_table_addr_lo & PAGE_MASK;
1686 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1687 		req2->rq_page_table_addr_hi;
1688 	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1689 	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1690 	ictx->tstorm_st_context.tcp.flags2 |=
1691 		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1692 	ictx->tstorm_st_context.tcp.ooo_support_mode =
1693 		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1694 
1695 	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1696 
1697 	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1698 		req2->rq_page_table_addr_lo;
1699 	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
1700 		req2->rq_page_table_addr_hi;
1701 	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1702 	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1703 	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1704 		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1705 	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1706 		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1707 	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1708 		iscsi->r2tq_info.pgtbl[0];
1709 	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1710 		iscsi->r2tq_info.pgtbl[1];
1711 	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1712 		req1->cq_page_table_addr_lo;
1713 	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1714 		req1->cq_page_table_addr_hi;
1715 	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1716 	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1717 	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1718 	ictx->ustorm_st_context.task_pbe_cache_index =
1719 		BNX2X_ISCSI_PBL_NOT_CACHED;
1720 	ictx->ustorm_st_context.task_pdu_cache_index =
1721 		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1722 
1723 	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1724 		if (j == 3) {
1725 			if (n >= n_max)
1726 				break;
1727 			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1728 			j = 0;
1729 		}
1730 		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1731 		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1732 			req3->qp_first_pte[j].hi;
1733 		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1734 			req3->qp_first_pte[j].lo;
1735 	}
1736 
1737 	ictx->ustorm_st_context.task_pbl_base.lo =
1738 		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1739 	ictx->ustorm_st_context.task_pbl_base.hi =
1740 		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1741 	ictx->ustorm_st_context.tce_phy_addr.lo =
1742 		iscsi->task_array_info.pgtbl[0];
1743 	ictx->ustorm_st_context.tce_phy_addr.hi =
1744 		iscsi->task_array_info.pgtbl[1];
1745 	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1746 	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1747 	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1748 	ictx->ustorm_st_context.negotiated_rx_and_flags |=
1749 		ISCSI_DEF_MAX_BURST_LEN;
1750 	ictx->ustorm_st_context.negotiated_rx |=
1751 		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1752 		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1753 
1754 	ictx->cstorm_st_context.hq_pbl_base.lo =
1755 		iscsi->hq_info.pgtbl_map & 0xffffffff;
1756 	ictx->cstorm_st_context.hq_pbl_base.hi =
1757 		(u64) iscsi->hq_info.pgtbl_map >> 32;
1758 	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1759 	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1760 	ictx->cstorm_st_context.task_pbl_base.lo =
1761 		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1762 	ictx->cstorm_st_context.task_pbl_base.hi =
1763 		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1764 	/* CSTORM initialization differs from USTORM: CSTORM requires the
1765 	 * CQ DB base address, not the PTE address */
1766 	ictx->cstorm_st_context.cq_db_base.lo =
1767 		req1->cq_page_table_addr_lo & PAGE_MASK;
1768 	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1769 	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1770 	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1771 	for (i = 0; i < cp->num_cqs; i++) {
1772 		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1773 			ISCSI_INITIAL_SN;
1774 		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1775 			ISCSI_INITIAL_SN;
1776 	}
1777 
1778 	ictx->xstorm_ag_context.cdu_reserved =
1779 		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1780 				       ISCSI_CONNECTION_TYPE);
1781 	ictx->ustorm_ag_context.cdu_usage =
1782 		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1783 				       ISCSI_CONNECTION_TYPE);
1784 	return 0;
1785 
1786 }
1787 
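/*
 * Offload request: consumes 2 + num_additional_wqes kwqes (offload1,
 * offload2 and the offload3 continuation entries).  *work tells the
 * caller how many kwqes were consumed even on failure; once the
 * request parses, the outcome is reported back through an
 * OFFLOAD_CONN kcqe whether or not resources could be allocated.
 */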
1788 static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1789 				   u32 num, int *work)
1790 {
1791 	struct iscsi_kwqe_conn_offload1 *req1;
1792 	struct iscsi_kwqe_conn_offload2 *req2;
1793 	struct cnic_local *cp = dev->cnic_priv;
1794 	struct cnic_context *ctx;
1795 	struct iscsi_kcqe kcqe;
1796 	struct kcqe *cqes[1];
1797 	u32 l5_cid;
1798 	int ret = 0;
1799 
1800 	if (num < 2) {
1801 		*work = num;
1802 		return -EINVAL;
1803 	}
1804 
1805 	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1806 	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1807 	if ((num - 2) < req2->num_additional_wqes) {
1808 		*work = num;
1809 		return -EINVAL;
1810 	}
1811 	*work = 2 + req2->num_additional_wqes;
1812 
1813 	l5_cid = req1->iscsi_conn_id;
1814 	if (l5_cid >= MAX_ISCSI_TBL_SZ)
1815 		return -EINVAL;
1816 
1817 	memset(&kcqe, 0, sizeof(kcqe));
1818 	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1819 	kcqe.iscsi_conn_id = l5_cid;
1820 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1821 
1822 	ctx = &cp->ctx_tbl[l5_cid];
1823 	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1824 		kcqe.completion_status =
1825 			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1826 		goto done;
1827 	}
1828 
1829 	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1830 		atomic_dec(&cp->iscsi_conn);
1831 		goto done;
1832 	}
1833 	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1834 	if (ret) {
1835 		atomic_dec(&cp->iscsi_conn);
1836 		ret = 0;
1837 		goto done;
1838 	}
1839 	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1840 	if (ret < 0) {
1841 		cnic_free_bnx2x_conn_resc(dev, l5_cid);
1842 		atomic_dec(&cp->iscsi_conn);
1843 		goto done;
1844 	}
1845 
1846 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1847 	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
1848 
1849 done:
1850 	cqes[0] = (struct kcqe *) &kcqe;
1851 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1852 	return ret;
1853 }
1854 
1855 
1856 static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1857 {
1858 	struct cnic_local *cp = dev->cnic_priv;
1859 	struct iscsi_kwqe_conn_update *req =
1860 		(struct iscsi_kwqe_conn_update *) kwqe;
1861 	void *data;
1862 	union l5cm_specific_data l5_data;
1863 	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1864 	int ret;
1865 
1866 	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1867 		return -EINVAL;
1868 
1869 	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1870 	if (!data)
1871 		return -ENOMEM;
1872 
1873 	memcpy(data, kwqe, sizeof(struct kwqe));
1874 
1875 	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1876 			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1877 	return ret;
1878 }
1879 
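/*
 * Tears down the hardware connection context by issuing a CFC delete
 * ramrod and sleeping on ctx->waitq until the completion handler sets
 * wait_cond (bounded by CNIC_RAMROD_TMO).  If CTX_FL_CID_ERROR was set
 * while waiting, -EBUSY is returned.
 */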
1880 static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1881 {
1882 	struct cnic_local *cp = dev->cnic_priv;
1883 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1884 	union l5cm_specific_data l5_data;
1885 	int ret;
1886 	u32 hw_cid;
1887 
1888 	init_waitqueue_head(&ctx->waitq);
1889 	ctx->wait_cond = 0;
1890 	memset(&l5_data, 0, sizeof(l5_data));
1891 	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
1892 
1893 	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1894 				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
1895 
1896 	if (ret == 0) {
1897 		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
1898 		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1899 			return -EBUSY;
1900 	}
1901 
1902 	return 0;
1903 }
1904 
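/* Tear down an offloaded iSCSI connection.  If the context was in use
 * within the last two seconds (ctx->timestamp), the CFC delete is
 * deferred to the delete_task worker instead of blocking here; otherwise
 * the delete ramrod runs synchronously and the resources are freed.
 */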
1905 static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1906 {
1907 	struct cnic_local *cp = dev->cnic_priv;
1908 	struct iscsi_kwqe_conn_destroy *req =
1909 		(struct iscsi_kwqe_conn_destroy *) kwqe;
1910 	u32 l5_cid = req->reserved0;
1911 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1912 	int ret = 0;
1913 	struct iscsi_kcqe kcqe;
1914 	struct kcqe *cqes[1];
1915 
1916 	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
1917 		goto skip_cfc_delete;
1918 
1919 	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
1920 		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
1921 
1922 		if (delta > (2 * HZ))
1923 			delta = 0;
1924 
1925 		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
1926 		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
1927 		goto destroy_reply;
1928 	}
1929 
1930 	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
1931 
1932 skip_cfc_delete:
1933 	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1934 
1935 	if (!ret) {
1936 		atomic_dec(&cp->iscsi_conn);
1937 		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
1938 	}
1939 
1940 destroy_reply:
1941 	memset(&kcqe, 0, sizeof(kcqe));
1942 	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
1943 	kcqe.iscsi_conn_id = l5_cid;
1944 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1945 	kcqe.iscsi_conn_context_id = req->context_id;
1946 
1947 	cqes[0] = (struct kcqe *) &kcqe;
1948 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1949 
1950 	return ret;
1951 }
1952 
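/* Fill the per-connection xstorm/tstorm buffers for a TCP connect.  The
 * IP addresses are byte-swapped into in6_addr form solely to compute the
 * TCP pseudo-header checksum that seeds the xstorm offload engine; the
 * keep-alive parameters are copied through only when a timeout is set.
 */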
1953 static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
1954 				      struct l4_kwq_connect_req1 *kwqe1,
1955 				      struct l4_kwq_connect_req3 *kwqe3,
1956 				      struct l5cm_active_conn_buffer *conn_buf)
1957 {
1958 	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
1959 	struct l5cm_xstorm_conn_buffer *xstorm_buf =
1960 		&conn_buf->xstorm_conn_buffer;
1961 	struct l5cm_tstorm_conn_buffer *tstorm_buf =
1962 		&conn_buf->tstorm_conn_buffer;
1963 	struct regpair context_addr;
1964 	u32 cid = BNX2X_SW_CID(kwqe1->cid);
1965 	struct in6_addr src_ip, dst_ip;
1966 	int i;
1967 	u32 *addrp;
1968 
1969 	addrp = (u32 *) &conn_addr->local_ip_addr;
1970 	for (i = 0; i < 4; i++, addrp++)
1971 		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
1972 
1973 	addrp = (u32 *) &conn_addr->remote_ip_addr;
1974 	for (i = 0; i < 4; i++, addrp++)
1975 		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
1976 
1977 	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
1978 
1979 	xstorm_buf->context_addr.hi = context_addr.hi;
1980 	xstorm_buf->context_addr.lo = context_addr.lo;
1981 	xstorm_buf->mss = 0xffff;
1982 	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
1983 	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
1984 		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
1985 	xstorm_buf->pseudo_header_checksum =
1986 		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
1987 
1988 	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
1989 		tstorm_buf->params |=
1990 			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
1991 	if (kwqe3->ka_timeout) {
1992 		tstorm_buf->ka_enable = 1;
1993 		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
1994 		tstorm_buf->ka_interval = kwqe3->ka_interval;
1995 		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
1996 	}
1997 	tstorm_buf->max_rt_time = 0xffffffff;
1998 }
1999 
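/* Program the iSCSI source MAC address into the storm memories.  The
 * xstorm copy is written in ascending byte order (ADDR0..ADDR5 = mac[0..5])
 * while the tstorm copy is laid out in reverse across the LSB/MID/MSB
 * word offsets.
 */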
2000 static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2001 {
2002 	struct cnic_local *cp = dev->cnic_priv;
2003 	u32 pfid = cp->pfid;
2004 	u8 *mac = dev->mac_addr;
2005 
2006 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2007 		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
2008 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2009 		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
2010 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2011 		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
2012 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2013 		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
2014 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2015 		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
2016 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2017 		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
2018 
2019 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2020 		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
2021 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2022 		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2023 		 mac[4]);
2024 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2025 		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
2026 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2027 		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2028 		 mac[2]);
2029 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2030 		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
2031 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2032 		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2033 		 mac[0]);
2034 }
2035 
2036 static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
2037 {
2038 	struct cnic_local *cp = dev->cnic_priv;
2039 	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
2040 	u16 tstorm_flags = 0;
2041 
2042 	if (tcp_ts) {
2043 		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
2044 		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
2045 	}
2046 
2047 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2048 		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
2049 
2050 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
2051 		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
2052 }
2053 
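/* Build and submit an L4 TCP connect.  The request takes two KWQEs for
 * IPv4 (CONNECT1 + CONNECT3) or three for IPv6 (CONNECT1 + CONNECT2 +
 * CONNECT3); *work reports how many were consumed.  The connection
 * buffers, VLAN and TCP timestamp settings are programmed before the
 * TCP_CONNECT ramrod is fired.
 */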
2054 static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2055 			      u32 num, int *work)
2056 {
2057 	struct cnic_local *cp = dev->cnic_priv;
2058 	struct l4_kwq_connect_req1 *kwqe1 =
2059 		(struct l4_kwq_connect_req1 *) wqes[0];
2060 	struct l4_kwq_connect_req3 *kwqe3;
2061 	struct l5cm_active_conn_buffer *conn_buf;
2062 	struct l5cm_conn_addr_params *conn_addr;
2063 	union l5cm_specific_data l5_data;
2064 	u32 l5_cid = kwqe1->pg_cid;
2065 	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2066 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2067 	int ret;
2068 
2069 	if (num < 2) {
2070 		*work = num;
2071 		return -EINVAL;
2072 	}
2073 
2074 	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2075 		*work = 3;
2076 	else
2077 		*work = 2;
2078 
2079 	if (num < *work) {
2080 		*work = num;
2081 		return -EINVAL;
2082 	}
2083 
2084 	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
2085 		netdev_err(dev->netdev, "conn_buf size too big\n");
2086 		return -ENOMEM;
2087 	}
2088 	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2089 	if (!conn_buf)
2090 		return -ENOMEM;
2091 
2092 	memset(conn_buf, 0, sizeof(*conn_buf));
2093 
2094 	conn_addr = &conn_buf->conn_addr_buf;
2095 	conn_addr->remote_addr_0 = csk->ha[0];
2096 	conn_addr->remote_addr_1 = csk->ha[1];
2097 	conn_addr->remote_addr_2 = csk->ha[2];
2098 	conn_addr->remote_addr_3 = csk->ha[3];
2099 	conn_addr->remote_addr_4 = csk->ha[4];
2100 	conn_addr->remote_addr_5 = csk->ha[5];
2101 
2102 	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2103 		struct l4_kwq_connect_req2 *kwqe2 =
2104 			(struct l4_kwq_connect_req2 *) wqes[1];
2105 
2106 		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2107 		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2108 		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2109 
2110 		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2111 		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2112 		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2113 		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2114 	}
2115 	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2116 
2117 	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2118 	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2119 	conn_addr->local_tcp_port = kwqe1->src_port;
2120 	conn_addr->remote_tcp_port = kwqe1->dst_port;
2121 
2122 	conn_addr->pmtu = kwqe3->pmtu;
2123 	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2124 
2125 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2126 		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
2127 
2128 	cnic_bnx2x_set_tcp_timestamp(dev,
2129 		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
2130 
2131 	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2132 			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2133 	if (!ret)
2134 		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2135 
2136 	return ret;
2137 }
2138 
2139 static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2140 {
2141 	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2142 	union l5cm_specific_data l5_data;
2143 	int ret;
2144 
2145 	memset(&l5_data, 0, sizeof(l5_data));
2146 	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2147 			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2148 	return ret;
2149 }
2150 
2151 static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2152 {
2153 	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2154 	union l5cm_specific_data l5_data;
2155 	int ret;
2156 
2157 	memset(&l5_data, 0, sizeof(l5_data));
2158 	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2159 			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2160 	return ret;
2161 }
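
/* bnx2x completes an OFFLOAD_PG request entirely in the driver, without
 * going to hardware: the caller's host_opaque value is echoed back as
 * the pg_cid in a synthesized KCQE.
 */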
2162 static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2163 {
2164 	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2165 	struct l4_kcq kcqe;
2166 	struct kcqe *cqes[1];
2167 
2168 	memset(&kcqe, 0, sizeof(kcqe));
2169 	kcqe.pg_host_opaque = req->host_opaque;
2170 	kcqe.pg_cid = req->host_opaque;
2171 	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2172 	cqes[0] = (struct kcqe *) &kcqe;
2173 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2174 	return 0;
2175 }
2176 
2177 static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2178 {
2179 	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2180 	struct l4_kcq kcqe;
2181 	struct kcqe *cqes[1];
2182 
2183 	memset(&kcqe, 0, sizeof(kcqe));
2184 	kcqe.pg_host_opaque = req->pg_host_opaque;
2185 	kcqe.pg_cid = req->pg_cid;
2186 	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2187 	cqes[0] = (struct kcqe *) &kcqe;
2188 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2189 	return 0;
2190 }
2191 
2192 static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2193 {
2194 	struct fcoe_kwqe_stat *req;
2195 	struct fcoe_stat_ramrod_params *fcoe_stat;
2196 	union l5cm_specific_data l5_data;
2197 	struct cnic_local *cp = dev->cnic_priv;
2198 	int ret;
2199 	u32 cid;
2200 
2201 	req = (struct fcoe_kwqe_stat *) kwqe;
2202 	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2203 
2204 	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2205 	if (!fcoe_stat)
2206 		return -ENOMEM;
2207 
2208 	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2209 	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2210 
2211 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
2212 				  FCOE_CONNECTION_TYPE, &l5_data);
2213 	return ret;
2214 }
2215 
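/* FCoE function init spans exactly three consecutive KWQEs (INIT1, INIT2,
 * INIT3).  The opcodes of the second and third entries are validated, the
 * combined ramrod parameters are built around the kcq2 event queue, and
 * *work is set to 3 so the dispatcher skips all three WQEs.
 */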
2216 static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2217 				 u32 num, int *work)
2218 {
2219 	int ret;
2220 	struct cnic_local *cp = dev->cnic_priv;
2221 	u32 cid;
2222 	struct fcoe_init_ramrod_params *fcoe_init;
2223 	struct fcoe_kwqe_init1 *req1;
2224 	struct fcoe_kwqe_init2 *req2;
2225 	struct fcoe_kwqe_init3 *req3;
2226 	union l5cm_specific_data l5_data;
2227 
2228 	if (num < 3) {
2229 		*work = num;
2230 		return -EINVAL;
2231 	}
2232 	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2233 	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2234 	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2235 	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2236 		*work = 1;
2237 		return -EINVAL;
2238 	}
2239 	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2240 		*work = 2;
2241 		return -EINVAL;
2242 	}
2243 
2244 	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2245 		netdev_err(dev->netdev, "fcoe_init size too big\n");
2246 		return -ENOMEM;
2247 	}
2248 	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2249 	if (!fcoe_init)
2250 		return -ENOMEM;
2251 
2252 	memset(fcoe_init, 0, sizeof(*fcoe_init));
2253 	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2254 	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2255 	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2256 	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2257 	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2258 	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
2259 
2260 	fcoe_init->sb_num = cp->status_blk_num;
2261 	fcoe_init->eq_prod = MAX_KCQ_IDX;
2262 	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2263 	cp->kcq2.sw_prod_idx = 0;
2264 
2265 	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2266 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2267 				  FCOE_CONNECTION_TYPE, &l5_data);
2268 	*work = 3;
2269 	return ret;
2270 }
2271 
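/* FCoE connection offload consumes four consecutive KWQEs.  If anything
 * fails after connection resources were reserved (cid != -1), they are
 * released and an error KCQE with CTX_ALLOC_FAILURE status is returned
 * so the FCoE ULP can unwind.
 */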
2272 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2273 				 u32 num, int *work)
2274 {
2275 	int ret = 0;
2276 	u32 cid = -1, l5_cid;
2277 	struct cnic_local *cp = dev->cnic_priv;
2278 	struct fcoe_kwqe_conn_offload1 *req1;
2279 	struct fcoe_kwqe_conn_offload2 *req2;
2280 	struct fcoe_kwqe_conn_offload3 *req3;
2281 	struct fcoe_kwqe_conn_offload4 *req4;
2282 	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2283 	struct cnic_context *ctx;
2284 	struct fcoe_context *fctx;
2285 	struct regpair ctx_addr;
2286 	union l5cm_specific_data l5_data;
2287 	struct fcoe_kcqe kcqe;
2288 	struct kcqe *cqes[1];
2289 
2290 	if (num < 4) {
2291 		*work = num;
2292 		return -EINVAL;
2293 	}
2294 	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2295 	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2296 	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2297 	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2298 
2299 	*work = 4;
2300 
2301 	l5_cid = req1->fcoe_conn_id;
2302 	if (l5_cid >= dev->max_fcoe_conn)
2303 		goto err_reply;
2304 
2305 	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2306 
2307 	ctx = &cp->ctx_tbl[l5_cid];
2308 	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2309 		goto err_reply;
2310 
2311 	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2312 	if (ret) {
2313 		ret = 0;
2314 		goto err_reply;
2315 	}
2316 	cid = ctx->cid;
2317 
2318 	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2319 	if (fctx) {
2320 		u32 hw_cid = BNX2X_HW_CID(cp, cid);
2321 		u32 val;
2322 
2323 		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2324 					     FCOE_CONNECTION_TYPE);
2325 		fctx->xstorm_ag_context.cdu_reserved = val;
2326 		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2327 					     FCOE_CONNECTION_TYPE);
2328 		fctx->ustorm_ag_context.cdu_usage = val;
2329 	}
2330 	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2331 		netdev_err(dev->netdev, "fcoe_offload size too big\n");
2332 		goto err_reply;
2333 	}
2334 	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2335 	if (!fcoe_offload)
2336 		goto err_reply;
2337 
2338 	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2339 	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2340 	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2341 	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2342 	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2343 
2344 	cid = BNX2X_HW_CID(cp, cid);
2345 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2346 				  FCOE_CONNECTION_TYPE, &l5_data);
2347 	if (!ret)
2348 		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2349 
2350 	return ret;
2351 
2352 err_reply:
2353 	if (cid != -1)
2354 		cnic_free_bnx2x_conn_resc(dev, l5_cid);
2355 
2356 	memset(&kcqe, 0, sizeof(kcqe));
2357 	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2358 	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2359 	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2360 
2361 	cqes[0] = (struct kcqe *) &kcqe;
2362 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2363 	return ret;
2364 }
2365 
2366 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2367 {
2368 	struct fcoe_kwqe_conn_enable_disable *req;
2369 	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2370 	union l5cm_specific_data l5_data;
2371 	int ret;
2372 	u32 cid, l5_cid;
2373 	struct cnic_local *cp = dev->cnic_priv;
2374 
2375 	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2376 	cid = req->context_id;
2377 	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2378 
2379 	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2380 		netdev_err(dev->netdev, "fcoe_enable size too big\n");
2381 		return -ENOMEM;
2382 	}
2383 	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2384 	if (!fcoe_enable)
2385 		return -ENOMEM;
2386 
2387 	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2388 	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2389 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2390 				  FCOE_CONNECTION_TYPE, &l5_data);
2391 	return ret;
2392 }
2393 
2394 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2395 {
2396 	struct fcoe_kwqe_conn_enable_disable *req;
2397 	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2398 	union l5cm_specific_data l5_data;
2399 	int ret;
2400 	u32 cid, l5_cid;
2401 	struct cnic_local *cp = dev->cnic_priv;
2402 
2403 	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2404 	cid = req->context_id;
2405 	l5_cid = req->conn_id;
2406 	if (l5_cid >= dev->max_fcoe_conn)
2407 		return -EINVAL;
2408 
2409 	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2410 
2411 	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2412 		netdev_err(dev->netdev, "fcoe_disable size too big\n");
2413 		return -ENOMEM;
2414 	}
2415 	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2416 	if (!fcoe_disable)
2417 		return -ENOMEM;
2418 
2419 	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2420 	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2421 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2422 				  FCOE_CONNECTION_TYPE, &l5_data);
2423 	return ret;
2424 }
2425 
2426 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2427 {
2428 	struct fcoe_kwqe_conn_destroy *req;
2429 	union l5cm_specific_data l5_data;
2430 	int ret;
2431 	u32 cid, l5_cid;
2432 	struct cnic_local *cp = dev->cnic_priv;
2433 	struct cnic_context *ctx;
2434 	struct fcoe_kcqe kcqe;
2435 	struct kcqe *cqes[1];
2436 
2437 	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2438 	cid = req->context_id;
2439 	l5_cid = req->conn_id;
2440 	if (l5_cid >= dev->max_fcoe_conn)
2441 		return -EINVAL;
2442 
2443 	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2444 
2445 	ctx = &cp->ctx_tbl[l5_cid];
2446 
2447 	init_waitqueue_head(&ctx->waitq);
2448 	ctx->wait_cond = 0;
2449 
2450 	memset(&kcqe, 0, sizeof(kcqe));
2451 	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2452 	memset(&l5_data, 0, sizeof(l5_data));
2453 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2454 				  FCOE_CONNECTION_TYPE, &l5_data);
2455 	if (ret == 0) {
2456 		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2457 		if (ctx->wait_cond)
2458 			kcqe.completion_status = 0;
2459 	}
2460 
2461 	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2462 	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2463 
2464 	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2465 	kcqe.fcoe_conn_id = req->conn_id;
2466 	kcqe.fcoe_conn_context_id = cid;
2467 
2468 	cqes[0] = (struct kcqe *) &kcqe;
2469 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2470 	return ret;
2471 }
2472 
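/* Wait for contexts from start_cid upward to finish tearing down: any
 * deferred deletes are allowed to drain first, then each context's
 * OFFLD_START flag is polled for up to roughly 100 ms before a warning
 * is logged.
 */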
2473 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2474 {
2475 	struct cnic_local *cp = dev->cnic_priv;
2476 	u32 i;
2477 
2478 	for (i = start_cid; i < cp->max_cid_space; i++) {
2479 		struct cnic_context *ctx = &cp->ctx_tbl[i];
2480 		int j;
2481 
2482 		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2483 			msleep(10);
2484 
2485 		for (j = 0; j < 5; j++) {
2486 			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2487 				break;
2488 			msleep(20);
2489 		}
2490 
2491 		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2492 			netdev_warn(dev->netdev, "CID %x not deleted\n",
2493 				   ctx->cid);
2494 	}
2495 }
2496 
2497 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2498 {
2499 	struct fcoe_kwqe_destroy *req;
2500 	union l5cm_specific_data l5_data;
2501 	struct cnic_local *cp = dev->cnic_priv;
2502 	int ret;
2503 	u32 cid;
2504 
2505 	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2506 
2507 	req = (struct fcoe_kwqe_destroy *) kwqe;
2508 	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2509 
2510 	memset(&l5_data, 0, sizeof(l5_data));
2511 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2512 				  FCOE_CONNECTION_TYPE, &l5_data);
2513 	return ret;
2514 }
2515 
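/* Dispatch a batch of iSCSI/L4 KWQEs.  Multi-WQE requests (e.g.
 * OFFLOAD_CONN1, CONNECT1) report through 'work' how many entries they
 * consumed and the loop advances by that amount; per-WQE failures are
 * logged but do not abort the rest of the batch.
 *
 * A minimal caller-side sketch (hypothetical ULP code, 'ofld1'/'ofld2'
 * being prebuilt offload KWQEs):
 *
 *	struct kwqe *wqes[2] = { (struct kwqe *)&ofld1, (struct kwqe *)&ofld2 };
 *	cdev->submit_kwqes(cdev, wqes, 2);
 */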
2516 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2517 					 struct kwqe *wqes[], u32 num_wqes)
2518 {
2519 	int i, work, ret;
2520 	u32 opcode;
2521 	struct kwqe *kwqe;
2522 
2523 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2524 		return -EAGAIN;		/* bnx2x is down */
2525 
2526 	for (i = 0; i < num_wqes; ) {
2527 		kwqe = wqes[i];
2528 		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2529 		work = 1;
2530 
2531 		switch (opcode) {
2532 		case ISCSI_KWQE_OPCODE_INIT1:
2533 			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2534 			break;
2535 		case ISCSI_KWQE_OPCODE_INIT2:
2536 			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2537 			break;
2538 		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2539 			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2540 						     num_wqes - i, &work);
2541 			break;
2542 		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2543 			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2544 			break;
2545 		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2546 			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2547 			break;
2548 		case L4_KWQE_OPCODE_VALUE_CONNECT1:
2549 			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2550 						 &work);
2551 			break;
2552 		case L4_KWQE_OPCODE_VALUE_CLOSE:
2553 			ret = cnic_bnx2x_close(dev, kwqe);
2554 			break;
2555 		case L4_KWQE_OPCODE_VALUE_RESET:
2556 			ret = cnic_bnx2x_reset(dev, kwqe);
2557 			break;
2558 		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2559 			ret = cnic_bnx2x_offload_pg(dev, kwqe);
2560 			break;
2561 		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2562 			ret = cnic_bnx2x_update_pg(dev, kwqe);
2563 			break;
2564 		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2565 			ret = 0;
2566 			break;
2567 		default:
2568 			ret = 0;
2569 			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2570 				   opcode);
2571 			break;
2572 		}
2573 		if (ret < 0)
2574 			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2575 				   opcode);
2576 		i += work;
2577 	}
2578 	return 0;
2579 }
2580 
2581 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2582 					struct kwqe *wqes[], u32 num_wqes)
2583 {
2584 	struct cnic_local *cp = dev->cnic_priv;
2585 	int i, work, ret;
2586 	u32 opcode;
2587 	struct kwqe *kwqe;
2588 
2589 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2590 		return -EAGAIN;		/* bnx2x is down */
2591 
2592 	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
2593 		return -EINVAL;
2594 
2595 	for (i = 0; i < num_wqes; ) {
2596 		kwqe = wqes[i];
2597 		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2598 		work = 1;
2599 
2600 		switch (opcode) {
2601 		case FCOE_KWQE_OPCODE_INIT1:
2602 			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2603 						    num_wqes - i, &work);
2604 			break;
2605 		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2606 			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2607 						    num_wqes - i, &work);
2608 			break;
2609 		case FCOE_KWQE_OPCODE_ENABLE_CONN:
2610 			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2611 			break;
2612 		case FCOE_KWQE_OPCODE_DISABLE_CONN:
2613 			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2614 			break;
2615 		case FCOE_KWQE_OPCODE_DESTROY_CONN:
2616 			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2617 			break;
2618 		case FCOE_KWQE_OPCODE_DESTROY:
2619 			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2620 			break;
2621 		case FCOE_KWQE_OPCODE_STAT:
2622 			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2623 			break;
2624 		default:
2625 			ret = 0;
2626 			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2627 				   opcode);
2628 			break;
2629 		}
2630 		if (ret < 0)
2631 			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2632 				   opcode);
2633 		i += work;
2634 	}
2635 	return 0;
2636 }
2637 
2638 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2639 				   u32 num_wqes)
2640 {
2641 	int ret = -EINVAL;
2642 	u32 layer_code;
2643 
2644 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2645 		return -EAGAIN;		/* bnx2x is down */
2646 
2647 	if (!num_wqes)
2648 		return 0;
2649 
2650 	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2651 	switch (layer_code) {
2652 	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2653 	case KWQE_FLAGS_LAYER_MASK_L4:
2654 	case KWQE_FLAGS_LAYER_MASK_L2:
2655 		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2656 		break;
2657 
2658 	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2659 		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2660 		break;
2661 	}
2662 	return ret;
2663 }
2664 
2665 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2666 {
2667 	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2668 		return KCQE_FLAGS_LAYER_MASK_L4;
2669 
2670 	return opflag & KCQE_FLAGS_LAYER_MASK;
2671 }
2672 
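/* Deliver completed KCQEs to the upper-layer drivers.  Consecutive KCQEs
 * belonging to the same protocol layer are batched into a single
 * indicate_kcqes() call under rcu_read_lock(), and any ramrod completions
 * seen along the way are returned afterwards as slow-path queue credits.
 */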
2673 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2674 {
2675 	struct cnic_local *cp = dev->cnic_priv;
2676 	int i, j, comp = 0;
2677 
2678 	i = 0;
2679 	j = 1;
2680 	while (num_cqes) {
2681 		struct cnic_ulp_ops *ulp_ops;
2682 		int ulp_type;
2683 		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2684 		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2685 
2686 		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2687 			comp++;
2688 
2689 		while (j < num_cqes) {
2690 			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2691 
2692 			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2693 				break;
2694 
2695 			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2696 				comp++;
2697 			j++;
2698 		}
2699 
2700 		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2701 			ulp_type = CNIC_ULP_RDMA;
2702 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2703 			ulp_type = CNIC_ULP_ISCSI;
2704 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2705 			ulp_type = CNIC_ULP_FCOE;
2706 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2707 			ulp_type = CNIC_ULP_L4;
2708 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2709 			goto end;
2710 		else {
2711 			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2712 				   kcqe_op_flag);
2713 			goto end;
2714 		}
2715 
2716 		rcu_read_lock();
2717 		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2718 		if (likely(ulp_ops)) {
2719 			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2720 						  cp->completed_kcq + i, j);
2721 		}
2722 		rcu_read_unlock();
2723 end:
2724 		num_cqes -= j;
2725 		i += j;
2726 		j = 1;
2727 	}
2728 	if (unlikely(comp))
2729 		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2730 }
2731 
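/* Collect newly completed KCQEs from the ring into cp->completed_kcq[].
 * Entries flagged KCQE_FLAGS_NEXT continue a multi-KCQE group, so the
 * software producer index only advances past the last complete group;
 * the return value is the number of KCQEs up to that point.
 */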
2732 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2733 {
2734 	struct cnic_local *cp = dev->cnic_priv;
2735 	u16 i, ri, hw_prod, last;
2736 	struct kcqe *kcqe;
2737 	int kcqe_cnt = 0, last_cnt = 0;
2738 
2739 	i = ri = last = info->sw_prod_idx;
2740 	ri &= MAX_KCQ_IDX;
2741 	hw_prod = *info->hw_prod_idx_ptr;
2742 	hw_prod = info->hw_idx(hw_prod);
2743 
2744 	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2745 		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2746 		cp->completed_kcq[kcqe_cnt++] = kcqe;
2747 		i = info->next_idx(i);
2748 		ri = i & MAX_KCQ_IDX;
2749 		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2750 			last_cnt = kcqe_cnt;
2751 			last = i;
2752 		}
2753 	}
2754 
2755 	info->sw_prod_idx = last;
2756 	return last_cnt;
2757 }
2758 
2759 static int cnic_l2_completion(struct cnic_local *cp)
2760 {
2761 	u16 hw_cons, sw_cons;
2762 	struct cnic_uio_dev *udev = cp->udev;
2763 	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2764 					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
2765 	u32 cmd;
2766 	int comp = 0;
2767 
2768 	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2769 		return 0;
2770 
2771 	hw_cons = *cp->rx_cons_ptr;
2772 	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2773 		hw_cons++;
2774 
2775 	sw_cons = cp->rx_cons;
2776 	while (sw_cons != hw_cons) {
2777 		u8 cqe_fp_flags;
2778 
2779 		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2780 		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2781 		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2782 			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2783 			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2784 			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2785 			    cmd == RAMROD_CMD_ID_ETH_HALT)
2786 				comp++;
2787 		}
2788 		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2789 	}
2790 	return comp;
2791 }
2792 
2793 static void cnic_chk_pkt_rings(struct cnic_local *cp)
2794 {
2795 	u16 rx_cons, tx_cons;
2796 	int comp = 0;
2797 
2798 	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2799 		return;
2800 
2801 	rx_cons = *cp->rx_cons_ptr;
2802 	tx_cons = *cp->tx_cons_ptr;
2803 	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2804 		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2805 			comp = cnic_l2_completion(cp);
2806 
2807 		cp->tx_cons = tx_cons;
2808 		cp->rx_cons = rx_cons;
2809 
2810 		if (cp->udev)
2811 			uio_event_notify(&cp->udev->cnic_uinfo);
2812 	}
2813 	if (comp)
2814 		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2815 }
2816 
2817 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2818 {
2819 	struct cnic_local *cp = dev->cnic_priv;
2820 	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2821 	int kcqe_cnt;
2822 
2823 	/* status block index must be read before reading other fields */
2824 	rmb();
2825 	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2826 
2827 	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2828 
2829 		service_kcqes(dev, kcqe_cnt);
2830 
2831 		/* Tell compiler that status_blk fields can change. */
2832 		barrier();
2833 		status_idx = (u16) *cp->kcq1.status_idx_ptr;
2834 		/* status block index must be read first */
2835 		rmb();
2836 		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2837 	}
2838 
2839 	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2840 
2841 	cnic_chk_pkt_rings(cp);
2842 
2843 	return status_idx;
2844 }
2845 
2846 static int cnic_service_bnx2(void *data, void *status_blk)
2847 {
2848 	struct cnic_dev *dev = data;
2849 
2850 	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2851 		struct status_block *sblk = status_blk;
2852 
2853 		return sblk->status_idx;
2854 	}
2855 
2856 	return cnic_service_bnx2_queues(dev);
2857 }
2858 
2859 static void cnic_service_bnx2_msix(unsigned long data)
2860 {
2861 	struct cnic_dev *dev = (struct cnic_dev *) data;
2862 	struct cnic_local *cp = dev->cnic_priv;
2863 
2864 	cp->last_status_idx = cnic_service_bnx2_queues(dev);
2865 
2866 	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2867 		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2868 }
2869 
2870 static void cnic_doirq(struct cnic_dev *dev)
2871 {
2872 	struct cnic_local *cp = dev->cnic_priv;
2873 
2874 	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2875 		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2876 
2877 		prefetch(cp->status_blk.gen);
2878 		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2879 
2880 		tasklet_schedule(&cp->cnic_irq_task);
2881 	}
2882 }
2883 
2884 static irqreturn_t cnic_irq(int irq, void *dev_instance)
2885 {
2886 	struct cnic_dev *dev = dev_instance;
2887 	struct cnic_local *cp = dev->cnic_priv;
2888 
2889 	if (cp->ack_int)
2890 		cp->ack_int(dev);
2891 
2892 	cnic_doirq(dev);
2893 
2894 	return IRQ_HANDLED;
2895 }
2896 
2897 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
2898 				      u16 index, u8 op, u8 update)
2899 {
2900 	struct cnic_local *cp = dev->cnic_priv;
2901 	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
2902 		       COMMAND_REG_INT_ACK);
2903 	struct igu_ack_register igu_ack;
2904 
2905 	igu_ack.status_block_index = index;
2906 	igu_ack.sb_id_and_flags =
2907 			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
2908 			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
2909 			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
2910 			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
2911 
2912 	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
2913 }
2914 
2915 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
2916 			    u16 index, u8 op, u8 update)
2917 {
2918 	struct igu_regular cmd_data;
2919 	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
2920 
2921 	cmd_data.sb_id_and_flags =
2922 		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
2923 		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
2924 		(update << IGU_REGULAR_BUPDATE_SHIFT) |
2925 		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
2926 
2928 	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
2929 }
2930 
2931 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2932 {
2933 	struct cnic_local *cp = dev->cnic_priv;
2934 
2935 	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
2936 			   IGU_INT_DISABLE, 0);
2937 }
2938 
2939 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
2940 {
2941 	struct cnic_local *cp = dev->cnic_priv;
2942 
2943 	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
2944 			IGU_INT_DISABLE, 0);
2945 }
2946 
2947 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
2948 {
2949 	u32 last_status = *info->status_idx_ptr;
2950 	int kcqe_cnt;
2951 
2952 	/* status block index must be read before reading the KCQ */
2953 	rmb();
2954 	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
2955 
2956 		service_kcqes(dev, kcqe_cnt);
2957 
2958 		/* Tell compiler that sblk fields can change. */
2959 		barrier();
2960 
2961 		last_status = *info->status_idx_ptr;
2962 		/* status block index must be read before reading the KCQ */
2963 		rmb();
2964 	}
2965 	return last_status;
2966 }
2967 
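/* Bottom-half KCQ service for bnx2x.  On E2 and later chips both kcq1
 * and kcq2 are drained, and the loop repeats if the status index moved
 * while kcq2 was being processed, so the IGU is only re-armed once the
 * index is stable.
 */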
2968 static void cnic_service_bnx2x_bh(unsigned long data)
2969 {
2970 	struct cnic_dev *dev = (struct cnic_dev *) data;
2971 	struct cnic_local *cp = dev->cnic_priv;
2972 	u32 status_idx, new_status_idx;
2973 
2974 	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
2975 		return;
2976 
2977 	while (1) {
2978 		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2979 
2980 		CNIC_WR16(dev, cp->kcq1.io_addr,
2981 			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2982 
2983 		if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
2984 			cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2985 					   status_idx, IGU_INT_ENABLE, 1);
2986 			break;
2987 		}
2988 
2989 		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
2990 
2991 		if (new_status_idx != status_idx)
2992 			continue;
2993 
2994 		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
2995 			  MAX_KCQ_IDX);
2996 
2997 		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2998 				status_idx, IGU_INT_ENABLE, 1);
2999 
3000 		break;
3001 	}
3002 }
3003 
3004 static int cnic_service_bnx2x(void *data, void *status_blk)
3005 {
3006 	struct cnic_dev *dev = data;
3007 	struct cnic_local *cp = dev->cnic_priv;
3008 
3009 	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3010 		cnic_doirq(dev);
3011 
3012 	cnic_chk_pkt_rings(cp);
3013 
3014 	return 0;
3015 }
3016 
3017 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3018 {
3019 	struct cnic_ulp_ops *ulp_ops;
3020 
3021 	if (if_type == CNIC_ULP_ISCSI)
3022 		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3023 
3024 	mutex_lock(&cnic_lock);
3025 	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3026 					    lockdep_is_held(&cnic_lock));
3027 	if (!ulp_ops) {
3028 		mutex_unlock(&cnic_lock);
3029 		return;
3030 	}
3031 	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3032 	mutex_unlock(&cnic_lock);
3033 
3034 	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3035 		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3036 
3037 	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3038 }
3039 
3040 static void cnic_ulp_stop(struct cnic_dev *dev)
3041 {
3042 	struct cnic_local *cp = dev->cnic_priv;
3043 	int if_type;
3044 
3045 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3046 		cnic_ulp_stop_one(cp, if_type);
3047 }
3048 
3049 static void cnic_ulp_start(struct cnic_dev *dev)
3050 {
3051 	struct cnic_local *cp = dev->cnic_priv;
3052 	int if_type;
3053 
3054 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3055 		struct cnic_ulp_ops *ulp_ops;
3056 
3057 		mutex_lock(&cnic_lock);
3058 		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3059 						    lockdep_is_held(&cnic_lock));
3060 		if (!ulp_ops || !ulp_ops->cnic_start) {
3061 			mutex_unlock(&cnic_lock);
3062 			continue;
3063 		}
3064 		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3065 		mutex_unlock(&cnic_lock);
3066 
3067 		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3068 			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3069 
3070 		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3071 	}
3072 }
3073 
3074 static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3075 {
3076 	struct cnic_local *cp = dev->cnic_priv;
3077 	struct cnic_ulp_ops *ulp_ops;
3078 	int rc;
3079 
3080 	mutex_lock(&cnic_lock);
3081 	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
3082 	if (ulp_ops && ulp_ops->cnic_get_stats)
3083 		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3084 	else
3085 		rc = -ENODEV;
3086 	mutex_unlock(&cnic_lock);
3087 	return rc;
3088 }
3089 
3090 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3091 {
3092 	struct cnic_dev *dev = data;
3093 	int ulp_type = CNIC_ULP_ISCSI;
3094 
3095 	switch (info->cmd) {
3096 	case CNIC_CTL_STOP_CMD:
3097 		cnic_hold(dev);
3098 
3099 		cnic_ulp_stop(dev);
3100 		cnic_stop_hw(dev);
3101 
3102 		cnic_put(dev);
3103 		break;
3104 	case CNIC_CTL_START_CMD:
3105 		cnic_hold(dev);
3106 
3107 		if (!cnic_start_hw(dev))
3108 			cnic_ulp_start(dev);
3109 
3110 		cnic_put(dev);
3111 		break;
3112 	case CNIC_CTL_STOP_ISCSI_CMD: {
3113 		struct cnic_local *cp = dev->cnic_priv;
3114 		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3115 		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3116 		break;
3117 	}
3118 	case CNIC_CTL_COMPLETION_CMD: {
3119 		struct cnic_ctl_completion *comp = &info->data.comp;
3120 		u32 cid = BNX2X_SW_CID(comp->cid);
3121 		u32 l5_cid;
3122 		struct cnic_local *cp = dev->cnic_priv;
3123 
3124 		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3125 			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3126 
3127 			if (unlikely(comp->error)) {
3128 				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3129 				netdev_err(dev->netdev,
3130 					   "CID %x CFC delete comp error %x\n",
3131 					   cid, comp->error);
3132 			}
3133 
3134 			ctx->wait_cond = 1;
3135 			wake_up(&ctx->waitq);
3136 		}
3137 		break;
3138 	}
3139 	case CNIC_CTL_FCOE_STATS_GET_CMD:
3140 		ulp_type = CNIC_ULP_FCOE;
3141 		/* fall through */
3142 	case CNIC_CTL_ISCSI_STATS_GET_CMD:
3143 		cnic_hold(dev);
3144 		cnic_copy_ulp_stats(dev, ulp_type);
3145 		cnic_put(dev);
3146 		break;
3147 
3148 	default:
3149 		return -EINVAL;
3150 	}
3151 	return 0;
3152 }
3153 
3154 static void cnic_ulp_init(struct cnic_dev *dev)
3155 {
3156 	int i;
3157 	struct cnic_local *cp = dev->cnic_priv;
3158 
3159 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3160 		struct cnic_ulp_ops *ulp_ops;
3161 
3162 		mutex_lock(&cnic_lock);
3163 		ulp_ops = cnic_ulp_tbl_prot(i);
3164 		if (!ulp_ops || !ulp_ops->cnic_init) {
3165 			mutex_unlock(&cnic_lock);
3166 			continue;
3167 		}
3168 		ulp_get(ulp_ops);
3169 		mutex_unlock(&cnic_lock);
3170 
3171 		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3172 			ulp_ops->cnic_init(dev);
3173 
3174 		ulp_put(ulp_ops);
3175 	}
3176 }
3177 
3178 static void cnic_ulp_exit(struct cnic_dev *dev)
3179 {
3180 	int i;
3181 	struct cnic_local *cp = dev->cnic_priv;
3182 
3183 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3184 		struct cnic_ulp_ops *ulp_ops;
3185 
3186 		mutex_lock(&cnic_lock);
3187 		ulp_ops = cnic_ulp_tbl_prot(i);
3188 		if (!ulp_ops || !ulp_ops->cnic_exit) {
3189 			mutex_unlock(&cnic_lock);
3190 			continue;
3191 		}
3192 		ulp_get(ulp_ops);
3193 		mutex_unlock(&cnic_lock);
3194 
3195 		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3196 			ulp_ops->cnic_exit(dev);
3197 
3198 		ulp_put(ulp_ops);
3199 	}
3200 }
3201 
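/* Build an OFFLOAD_PG KWQE describing the L2 path for a new connection:
 * destination and source MACs, EtherType, and an optional VLAN tag that
 * also grows the L2 header length by 4 bytes.  The l5_cid rides in
 * host_opaque so the completion handler can find the socket again.
 */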
3202 static int cnic_cm_offload_pg(struct cnic_sock *csk)
3203 {
3204 	struct cnic_dev *dev = csk->dev;
3205 	struct l4_kwq_offload_pg *l4kwqe;
3206 	struct kwqe *wqes[1];
3207 
3208 	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3209 	memset(l4kwqe, 0, sizeof(*l4kwqe));
3210 	wqes[0] = (struct kwqe *) l4kwqe;
3211 
3212 	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3213 	l4kwqe->flags =
3214 		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3215 	l4kwqe->l2hdr_nbytes = ETH_HLEN;
3216 
3217 	l4kwqe->da0 = csk->ha[0];
3218 	l4kwqe->da1 = csk->ha[1];
3219 	l4kwqe->da2 = csk->ha[2];
3220 	l4kwqe->da3 = csk->ha[3];
3221 	l4kwqe->da4 = csk->ha[4];
3222 	l4kwqe->da5 = csk->ha[5];
3223 
3224 	l4kwqe->sa0 = dev->mac_addr[0];
3225 	l4kwqe->sa1 = dev->mac_addr[1];
3226 	l4kwqe->sa2 = dev->mac_addr[2];
3227 	l4kwqe->sa3 = dev->mac_addr[3];
3228 	l4kwqe->sa4 = dev->mac_addr[4];
3229 	l4kwqe->sa5 = dev->mac_addr[5];
3230 
3231 	l4kwqe->etype = ETH_P_IP;
3232 	l4kwqe->ipid_start = DEF_IPID_START;
3233 	l4kwqe->host_opaque = csk->l5_cid;
3234 
3235 	if (csk->vlan_id) {
3236 		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3237 		l4kwqe->vlan_tag = csk->vlan_id;
3238 		l4kwqe->l2hdr_nbytes += 4;
3239 	}
3240 
3241 	return dev->submit_kwqes(dev, wqes, 1);
3242 }
3243 
3244 static int cnic_cm_update_pg(struct cnic_sock *csk)
3245 {
3246 	struct cnic_dev *dev = csk->dev;
3247 	struct l4_kwq_update_pg *l4kwqe;
3248 	struct kwqe *wqes[1];
3249 
3250 	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3251 	memset(l4kwqe, 0, sizeof(*l4kwqe));
3252 	wqes[0] = (struct kwqe *) l4kwqe;
3253 
3254 	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3255 	l4kwqe->flags =
3256 		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3257 	l4kwqe->pg_cid = csk->pg_cid;
3258 
3259 	l4kwqe->da0 = csk->ha[0];
3260 	l4kwqe->da1 = csk->ha[1];
3261 	l4kwqe->da2 = csk->ha[2];
3262 	l4kwqe->da3 = csk->ha[3];
3263 	l4kwqe->da4 = csk->ha[4];
3264 	l4kwqe->da5 = csk->ha[5];
3265 
3266 	l4kwqe->pg_host_opaque = csk->l5_cid;
3267 	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3268 
3269 	return dev->submit_kwqes(dev, wqes, 1);
3270 }
3271 
3272 static int cnic_cm_upload_pg(struct cnic_sock *csk)
3273 {
3274 	struct cnic_dev *dev = csk->dev;
3275 	struct l4_kwq_upload *l4kwqe;
3276 	struct kwqe *wqes[1];
3277 
3278 	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3279 	memset(l4kwqe, 0, sizeof(*l4kwqe));
3280 	wqes[0] = (struct kwqe *) l4kwqe;
3281 
3282 	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3283 	l4kwqe->flags =
3284 		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3285 	l4kwqe->cid = csk->pg_cid;
3286 
3287 	return dev->submit_kwqes(dev, wqes, 1);
3288 }
3289 
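/* Translate cnic_sock state into CONNECT KWQEs.  IPv6 connects add a
 * CONNECT2 WQE carrying the upper address words, and the advertised MSS
 * is derived from the path MTU minus the fixed IPv4/IPv6 and TCP header
 * sizes (TCP options are not subtracted here).
 */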
3290 static int cnic_cm_conn_req(struct cnic_sock *csk)
3291 {
3292 	struct cnic_dev *dev = csk->dev;
3293 	struct l4_kwq_connect_req1 *l4kwqe1;
3294 	struct l4_kwq_connect_req2 *l4kwqe2;
3295 	struct l4_kwq_connect_req3 *l4kwqe3;
3296 	struct kwqe *wqes[3];
3297 	u8 tcp_flags = 0;
3298 	int num_wqes = 2;
3299 
3300 	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3301 	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3302 	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3303 	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3304 	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3305 	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3306 
3307 	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3308 	l4kwqe3->flags =
3309 		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3310 	l4kwqe3->ka_timeout = csk->ka_timeout;
3311 	l4kwqe3->ka_interval = csk->ka_interval;
3312 	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3313 	l4kwqe3->tos = csk->tos;
3314 	l4kwqe3->ttl = csk->ttl;
3315 	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3316 	l4kwqe3->pmtu = csk->mtu;
3317 	l4kwqe3->rcv_buf = csk->rcv_buf;
3318 	l4kwqe3->snd_buf = csk->snd_buf;
3319 	l4kwqe3->seed = csk->seed;
3320 
3321 	wqes[0] = (struct kwqe *) l4kwqe1;
3322 	if (test_bit(SK_F_IPV6, &csk->flags)) {
3323 		wqes[1] = (struct kwqe *) l4kwqe2;
3324 		wqes[2] = (struct kwqe *) l4kwqe3;
3325 		num_wqes = 3;
3326 
3327 		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3328 		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3329 		l4kwqe2->flags =
3330 			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3331 			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3332 		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3333 		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3334 		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3335 		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3336 		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3337 		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3338 		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3339 			       sizeof(struct tcphdr);
3340 	} else {
3341 		wqes[1] = (struct kwqe *) l4kwqe3;
3342 		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3343 			       sizeof(struct tcphdr);
3344 	}
3345 
3346 	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3347 	l4kwqe1->flags =
3348 		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3349 		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3350 	l4kwqe1->cid = csk->cid;
3351 	l4kwqe1->pg_cid = csk->pg_cid;
3352 	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3353 	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3354 	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3355 	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3356 	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3357 		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3358 	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3359 		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3360 	if (csk->tcp_flags & SK_TCP_NAGLE)
3361 		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3362 	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3363 		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3364 	if (csk->tcp_flags & SK_TCP_SACK)
3365 		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3366 	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3367 		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3368 
3369 	l4kwqe1->tcp_flags = tcp_flags;
3370 
3371 	return dev->submit_kwqes(dev, wqes, num_wqes);
3372 }
3373 
3374 static int cnic_cm_close_req(struct cnic_sock *csk)
3375 {
3376 	struct cnic_dev *dev = csk->dev;
3377 	struct l4_kwq_close_req *l4kwqe;
3378 	struct kwqe *wqes[1];
3379 
3380 	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3381 	memset(l4kwqe, 0, sizeof(*l4kwqe));
3382 	wqes[0] = (struct kwqe *) l4kwqe;
3383 
3384 	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3385 	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3386 	l4kwqe->cid = csk->cid;
3387 
3388 	return dev->submit_kwqes(dev, wqes, 1);
3389 }
3390 
3391 static int cnic_cm_abort_req(struct cnic_sock *csk)
3392 {
3393 	struct cnic_dev *dev = csk->dev;
3394 	struct l4_kwq_reset_req *l4kwqe;
3395 	struct kwqe *wqes[1];
3396 
3397 	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3398 	memset(l4kwqe, 0, sizeof(*l4kwqe));
3399 	wqes[0] = (struct kwqe *) l4kwqe;
3400 
3401 	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3402 	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3403 	l4kwqe->cid = csk->cid;
3404 
3405 	return dev->submit_kwqes(dev, wqes, 1);
3406 }
3407 
3408 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3409 			  u32 l5_cid, struct cnic_sock **csk, void *context)
3410 {
3411 	struct cnic_local *cp = dev->cnic_priv;
3412 	struct cnic_sock *csk1;
3413 
3414 	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3415 		return -EINVAL;
3416 
3417 	if (cp->ctx_tbl) {
3418 		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3419 
3420 		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3421 			return -EAGAIN;
3422 	}
3423 
3424 	csk1 = &cp->csk_tbl[l5_cid];
3425 	if (atomic_read(&csk1->ref_count))
3426 		return -EAGAIN;
3427 
3428 	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3429 		return -EBUSY;
3430 
3431 	csk1->dev = dev;
3432 	csk1->cid = cid;
3433 	csk1->l5_cid = l5_cid;
3434 	csk1->ulp_type = ulp_type;
3435 	csk1->context = context;
3436 
3437 	csk1->ka_timeout = DEF_KA_TIMEOUT;
3438 	csk1->ka_interval = DEF_KA_INTERVAL;
3439 	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3440 	csk1->tos = DEF_TOS;
3441 	csk1->ttl = DEF_TTL;
3442 	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3443 	csk1->rcv_buf = DEF_RCV_BUF;
3444 	csk1->snd_buf = DEF_SND_BUF;
3445 	csk1->seed = DEF_SEED;
3446 
3447 	*csk = csk1;
3448 	return 0;
3449 }
3450 
3451 static void cnic_cm_cleanup(struct cnic_sock *csk)
3452 {
3453 	if (csk->src_port) {
3454 		struct cnic_dev *dev = csk->dev;
3455 		struct cnic_local *cp = dev->cnic_priv;
3456 
3457 		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3458 		csk->src_port = 0;
3459 	}
3460 }
3461 
3462 static void cnic_close_conn(struct cnic_sock *csk)
3463 {
3464 	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3465 		cnic_cm_upload_pg(csk);
3466 		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3467 	}
3468 	cnic_cm_cleanup(csk);
3469 }
3470 
3471 static int cnic_cm_destroy(struct cnic_sock *csk)
3472 {
3473 	if (!cnic_in_use(csk))
3474 		return -EINVAL;
3475 
3476 	csk_hold(csk);
3477 	clear_bit(SK_F_INUSE, &csk->flags);
3478 	smp_mb__after_clear_bit();
3479 	while (atomic_read(&csk->ref_count) != 1)
3480 		msleep(1);
3481 	cnic_cm_cleanup(csk);
3482 
3483 	csk->flags = 0;
3484 	csk_put(csk);
3485 	return 0;
3486 }
3487 
3488 static inline u16 cnic_get_vlan(struct net_device *dev,
3489 				struct net_device **vlan_dev)
3490 {
3491 	if (dev->priv_flags & IFF_802_1Q_VLAN) {
3492 		*vlan_dev = vlan_dev_real_dev(dev);
3493 		return vlan_dev_vlan_id(dev);
3494 	}
3495 	*vlan_dev = dev;
3496 	return 0;
3497 }
3498 
3499 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3500 			     struct dst_entry **dst)
3501 {
3502 #if defined(CONFIG_INET)
3503 	struct rtable *rt;
3504 
3505 	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3506 	if (!IS_ERR(rt)) {
3507 		*dst = &rt->dst;
3508 		return 0;
3509 	}
3510 	return PTR_ERR(rt);
3511 #else
3512 	return -ENETUNREACH;
3513 #endif
3514 }
3515 
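/* Resolve an IPv6 route to the destination.  Note: ip6_route_output()
 * can return a valid-looking dst even when the lookup fails (with
 * dst->error set), so callers additionally check dst->dev before
 * trusting the result.
 */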
3516 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3517 			     struct dst_entry **dst)
3518 {
3519 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3520 	struct flowi6 fl6;
3521 
3522 	memset(&fl6, 0, sizeof(fl6));
3523 	fl6.daddr = dst_addr->sin6_addr;
3524 	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3525 		fl6.flowi6_oif = dst_addr->sin6_scope_id;
3526 
3527 	*dst = ip6_route_output(&init_net, NULL, &fl6);
3528 	if (*dst)
3529 		return 0;
3530 #endif
3531 
3532 	return -ENETUNREACH;
3533 }
3534 
3535 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3536 					   int ulp_type)
3537 {
3538 	struct cnic_dev *dev = NULL;
3539 	struct dst_entry *dst;
3540 	struct net_device *netdev = NULL;
3541 	int err = -ENETUNREACH;
3542 
3543 	if (dst_addr->sin_family == AF_INET)
3544 		err = cnic_get_v4_route(dst_addr, &dst);
3545 	else if (dst_addr->sin_family == AF_INET6) {
3546 		struct sockaddr_in6 *dst_addr6 =
3547 			(struct sockaddr_in6 *) dst_addr;
3548 
3549 		err = cnic_get_v6_route(dst_addr6, &dst);
3550 	} else
3551 		return NULL;
3552 
3553 	if (err)
3554 		return NULL;
3555 
3556 	if (!dst->dev)
3557 		goto done;
3558 
3559 	cnic_get_vlan(dst->dev, &netdev);
3560 
3561 	dev = cnic_from_netdev(netdev);
3562 
3563 done:
3564 	dst_release(dst);
3565 	if (dev)
3566 		cnic_put(dev);
3567 	return dev;
3568 }
3569 
3570 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3571 {
3572 	struct cnic_dev *dev = csk->dev;
3573 	struct cnic_local *cp = dev->cnic_priv;
3574 
3575 	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3576 }
3577 
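/* Resolve the route and local port for a connect request.  A
 * caller-supplied port in [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX) is
 * claimed from the port table if free; otherwise a fresh ID is
 * allocated.  When the route egresses through our netdev, the VLAN ID
 * and path MTU are taken from it as well.
 */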
3578 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3579 {
3580 	struct cnic_dev *dev = csk->dev;
3581 	struct cnic_local *cp = dev->cnic_priv;
3582 	int is_v6, rc = 0;
3583 	struct dst_entry *dst = NULL;
3584 	struct net_device *realdev;
3585 	__be16 local_port;
3586 	u32 port_id;
3587 
3588 	if (saddr->local.v6.sin6_family == AF_INET6 &&
3589 	    saddr->remote.v6.sin6_family == AF_INET6)
3590 		is_v6 = 1;
3591 	else if (saddr->local.v4.sin_family == AF_INET &&
3592 		 saddr->remote.v4.sin_family == AF_INET)
3593 		is_v6 = 0;
3594 	else
3595 		return -EINVAL;
3596 
3597 	clear_bit(SK_F_IPV6, &csk->flags);
3598 
3599 	if (is_v6) {
3600 		set_bit(SK_F_IPV6, &csk->flags);
3601 		cnic_get_v6_route(&saddr->remote.v6, &dst);
3602 
3603 		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3604 		       sizeof(struct in6_addr));
3605 		csk->dst_port = saddr->remote.v6.sin6_port;
3606 		local_port = saddr->local.v6.sin6_port;
3607 
3608 	} else {
3609 		cnic_get_v4_route(&saddr->remote.v4, &dst);
3610 
3611 		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3612 		csk->dst_port = saddr->remote.v4.sin_port;
3613 		local_port = saddr->local.v4.sin_port;
3614 	}
3615 
3616 	csk->vlan_id = 0;
3617 	csk->mtu = dev->netdev->mtu;
3618 	if (dst && dst->dev) {
3619 		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3620 		if (realdev == dev->netdev) {
3621 			csk->vlan_id = vlan;
3622 			csk->mtu = dst_mtu(dst);
3623 		}
3624 	}
3625 
3626 	port_id = be16_to_cpu(local_port);
3627 	if (port_id >= CNIC_LOCAL_PORT_MIN &&
3628 	    port_id < CNIC_LOCAL_PORT_MAX) {
3629 		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3630 			port_id = 0;
3631 	} else
3632 		port_id = 0;
3633 
3634 	if (!port_id) {
3635 		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3636 		if (port_id == -1) {
3637 			rc = -ENOMEM;
3638 			goto err_out;
3639 		}
3640 		local_port = cpu_to_be16(port_id);
3641 	}
3642 	csk->src_port = local_port;
3643 
3644 err_out:
3645 	dst_release(dst);
3646 	return rc;
3647 }
3648 
3649 static void cnic_init_csk_state(struct cnic_sock *csk)
3650 {
3651 	csk->state = 0;
3652 	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3653 	clear_bit(SK_F_CLOSING, &csk->flags);
3654 }
3655 
3656 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3657 {
3658 	struct cnic_local *cp = csk->dev->cnic_priv;
3659 	int err = 0;
3660 
3661 	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3662 		return -EOPNOTSUPP;
3663 
3664 	if (!cnic_in_use(csk))
3665 		return -EINVAL;
3666 
3667 	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3668 		return -EINVAL;
3669 
3670 	cnic_init_csk_state(csk);
3671 
3672 	err = cnic_get_route(csk, saddr);
3673 	if (err)
3674 		goto err_out;
3675 
3676 	err = cnic_resolve_addr(csk, saddr);
3677 	if (!err)
3678 		return 0;
3679 
3680 err_out:
3681 	clear_bit(SK_F_CONNECT_START, &csk->flags);
3682 	return err;
3683 }
3684 
3685 static int cnic_cm_abort(struct cnic_sock *csk)
3686 {
3687 	struct cnic_local *cp = csk->dev->cnic_priv;
3688 	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3689 
3690 	if (!cnic_in_use(csk))
3691 		return -EINVAL;
3692 
3693 	if (cnic_abort_prep(csk))
3694 		return cnic_cm_abort_req(csk);
3695 
3696 	/* Getting here means that we haven't started connect, or
3697 	 * connect was not successful.
3698 	 */
3699 
3700 	cp->close_conn(csk, opcode);
3701 	if (csk->state != opcode)
3702 		return -EALREADY;
3703 
3704 	return 0;
3705 }
3706 
3707 static int cnic_cm_close(struct cnic_sock *csk)
3708 {
3709 	if (!cnic_in_use(csk))
3710 		return -EINVAL;
3711 
3712 	if (cnic_close_prep(csk)) {
3713 		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3714 		return cnic_cm_close_req(csk);
3715 	} else {
3716 		return -EALREADY;
3717 	}
3719 }
3720 
3721 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3722 			   u8 opcode)
3723 {
3724 	struct cnic_ulp_ops *ulp_ops;
3725 	int ulp_type = csk->ulp_type;
3726 
3727 	rcu_read_lock();
3728 	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3729 	if (ulp_ops) {
3730 		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3731 			ulp_ops->cm_connect_complete(csk);
3732 		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3733 			ulp_ops->cm_close_complete(csk);
3734 		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3735 			ulp_ops->cm_remote_abort(csk);
3736 		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3737 			ulp_ops->cm_abort_complete(csk);
3738 		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3739 			ulp_ops->cm_remote_close(csk);
3740 	}
3741 	rcu_read_unlock();
3742 }
3743 
3744 static int cnic_cm_set_pg(struct cnic_sock *csk)
3745 {
3746 	if (cnic_offld_prep(csk)) {
3747 		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3748 			cnic_cm_update_pg(csk);
3749 		else
3750 			cnic_cm_offload_pg(csk);
3751 	}
3752 	return 0;
3753 }
3754 
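/* Handle OFFLOAD_PG and UPDATE_PG completions.  On context allocation
 * failure the ULP is notified with a failed CONNECT_COMPLETE; otherwise
 * the PG cid is saved and the TCP connect request is issued.
 */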
3755 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3756 {
3757 	struct cnic_local *cp = dev->cnic_priv;
3758 	u32 l5_cid = kcqe->pg_host_opaque;
3759 	u8 opcode = kcqe->op_code;
3760 	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3761 
3762 	csk_hold(csk);
3763 	if (!cnic_in_use(csk))
3764 		goto done;
3765 
3766 	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3767 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3768 		goto done;
3769 	}
3770 	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3771 	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3772 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3773 		cnic_cm_upcall(cp, csk,
3774 			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3775 		goto done;
3776 	}
3777 
3778 	csk->pg_cid = kcqe->pg_cid;
3779 	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3780 	cnic_cm_conn_req(csk);
3781 
3782 done:
3783 	csk_put(csk);
3784 }
3785 
3786 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3787 {
3788 	struct cnic_local *cp = dev->cnic_priv;
3789 	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3790 	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3791 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3792 
3793 	ctx->timestamp = jiffies;
3794 	ctx->wait_cond = 1;
3795 	wake_up(&ctx->waitq);
3796 }
3797 
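/* Dispatch an L4/L5 connection KCQE to the socket identified by
 * l5_cid (or by cid for opcodes with bit 7 set) and drive the
 * connection state machine.
 */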
3798 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3799 {
3800 	struct cnic_local *cp = dev->cnic_priv;
3801 	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3802 	u8 opcode = l4kcqe->op_code;
3803 	u32 l5_cid;
3804 	struct cnic_sock *csk;
3805 
3806 	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3807 		cnic_process_fcoe_term_conn(dev, kcqe);
3808 		return;
3809 	}
3810 	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3811 	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3812 		cnic_cm_process_offld_pg(dev, l4kcqe);
3813 		return;
3814 	}
3815 
3816 	l5_cid = l4kcqe->conn_id;
3817 	if (opcode & 0x80)
3818 		l5_cid = l4kcqe->cid;
3819 	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3820 		return;
3821 
3822 	csk = &cp->csk_tbl[l5_cid];
3823 	csk_hold(csk);
3824 
3825 	if (!cnic_in_use(csk)) {
3826 		csk_put(csk);
3827 		return;
3828 	}
3829 
3830 	switch (opcode) {
3831 	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3832 		if (l4kcqe->status != 0) {
3833 			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3834 			cnic_cm_upcall(cp, csk,
3835 				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3836 		}
3837 		break;
3838 	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3839 		if (l4kcqe->status == 0)
3840 			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3841 
3842 		smp_mb__before_clear_bit();
3843 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3844 		cnic_cm_upcall(cp, csk, opcode);
3845 		break;
3846 
3847 	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3848 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3849 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3850 	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3851 	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3852 		cp->close_conn(csk, opcode);
3853 		break;
3854 
3855 	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3856 		/* after we already sent CLOSE_REQ */
3857 		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
3858 		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
3859 		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3860 			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
3861 		else
3862 			cnic_cm_upcall(cp, csk, opcode);
3863 		break;
3864 	}
3865 	csk_put(csk);
3866 }
3867 
3868 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3869 {
3870 	struct cnic_dev *dev = data;
3871 	int i;
3872 
3873 	for (i = 0; i < num; i++)
3874 		cnic_cm_process_kcqe(dev, kcqe[i]);
3875 }
3876 
3877 static struct cnic_ulp_ops cm_ulp_ops = {
3878 	.indicate_kcqes		= cnic_cm_indicate_kcqe,
3879 };
3880 
3881 static void cnic_cm_free_mem(struct cnic_dev *dev)
3882 {
3883 	struct cnic_local *cp = dev->cnic_priv;
3884 
3885 	kfree(cp->csk_tbl);
3886 	cp->csk_tbl = NULL;
3887 	cnic_free_id_tbl(&cp->csk_port_tbl);
3888 }
3889 
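/* Allocate the connection socket table and initialize the local port
 * id table, starting at a random offset within the port range.
 */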
3890 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3891 {
3892 	struct cnic_local *cp = dev->cnic_priv;
3893 	u32 port_id;
3894 
3895 	cp->csk_tbl = kcalloc(MAX_CM_SK_TBL_SZ, sizeof(struct cnic_sock),
3896 			      GFP_KERNEL);
3897 	if (!cp->csk_tbl)
3898 		return -ENOMEM;
3899 
3900 	port_id = random32();
3901 	port_id %= CNIC_LOCAL_PORT_RANGE;
3902 	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
3903 			     CNIC_LOCAL_PORT_MIN, port_id)) {
3904 		cnic_cm_free_mem(dev);
3905 		return -ENOMEM;
3906 	}
3907 	return 0;
3908 }
3909 
3910 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
3911 {
3912 	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
3913 		/* Unsolicited RESET_COMP or RESET_RECEIVED */
3914 		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
3915 		csk->state = opcode;
3916 	}
3917 
3918 	/* Accept the event if:
3919 	 * 1. the event opcode matches the expected event in csk->state;
3920 	 * 2. the expected event is CLOSE_COMP or RESET_COMP (any event
3921 	 *    is then accepted); or
3922 	 * 3. the expected event is 0 (connection never established), in
3923 	 *    which case the opcode from cm_abort is accepted. */
3924 	if (opcode == csk->state || csk->state == 0 ||
3925 	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
3926 	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
3927 		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
3928 			if (csk->state == 0)
3929 				csk->state = opcode;
3930 			return 1;
3931 		}
3932 	}
3933 	return 0;
3934 }
3935 
3936 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
3937 {
3938 	struct cnic_dev *dev = csk->dev;
3939 	struct cnic_local *cp = dev->cnic_priv;
3940 
3941 	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
3942 		cnic_cm_upcall(cp, csk, opcode);
3943 		return;
3944 	}
3945 
3946 	clear_bit(SK_F_CONNECT_START, &csk->flags);
3947 	cnic_close_conn(csk);
3948 	csk->state = opcode;
3949 	cnic_cm_upcall(cp, csk, opcode);
3950 }
3951 
3952 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
3953 {
3954 }
3955 
3956 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
3957 {
3958 	u32 seed;
3959 
3960 	seed = random32();
3961 	cnic_ctx_wr(dev, 45, 0, seed);
3962 	return 0;
3963 }
3964 
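/* Tear down a bnx2x connection.  A close/reset completion first
 * triggers a SEARCHER_DELETE ramrod if the PG was offloaded, which in
 * turn triggers TERMINATE_OFFLOAD; the final completion releases the
 * connection and notifies the ULP.
 */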
3965 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
3966 {
3967 	struct cnic_dev *dev = csk->dev;
3968 	struct cnic_local *cp = dev->cnic_priv;
3969 	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
3970 	union l5cm_specific_data l5_data;
3971 	u32 cmd = 0;
3972 	int close_complete = 0;
3973 
3974 	switch (opcode) {
3975 	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3976 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3977 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3978 		if (cnic_ready_to_close(csk, opcode)) {
3979 			if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3980 				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
3981 			else
3982 				close_complete = 1;
3983 		}
3984 		break;
3985 	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3986 		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
3987 		break;
3988 	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3989 		close_complete = 1;
3990 		break;
3991 	}
3992 	if (cmd) {
3993 		memset(&l5_data, 0, sizeof(l5_data));
3994 
3995 		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
3996 				    &l5_data);
3997 	} else if (close_complete) {
3998 		ctx->timestamp = jiffies;
3999 		cnic_close_conn(csk);
4000 		cnic_cm_upcall(cp, csk, csk->state);
4001 	}
4002 }
4003 
4004 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4005 {
4006 	struct cnic_local *cp = dev->cnic_priv;
4007 
4008 	if (!cp->ctx_tbl)
4009 		return;
4010 
4011 	if (!netif_running(dev->netdev))
4012 		return;
4013 
4014 	cnic_bnx2x_delete_wait(dev, 0);
4015 
4016 	cancel_delayed_work(&cp->delete_task);
4017 	flush_workqueue(cnic_wq);
4018 
4019 	if (atomic_read(&cp->iscsi_conn) != 0)
4020 		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4021 			    atomic_read(&cp->iscsi_conn));
4022 }
4023 
4024 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4025 {
4026 	struct cnic_local *cp = dev->cnic_priv;
4027 	u32 pfid = cp->pfid;
4028 	u32 port = CNIC_PORT(cp);
4029 
4030 	cnic_init_bnx2x_mac(dev);
4031 	cnic_bnx2x_set_tcp_timestamp(dev, 1);
4032 
4033 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4034 		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4035 
4036 	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4037 		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4038 	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4039 		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4040 		DEF_MAX_DA_COUNT);
4041 
4042 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4043 		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4044 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4045 		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4046 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4047 		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4048 	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4049 		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4050 
4051 	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4052 		DEF_MAX_CWND);
4053 	return 0;
4054 }
4055 
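/* Delayed work to destroy offloaded connections.  Contexts marked
 * CTX_FL_DELETE_WAIT are destroyed once they are at least 2 seconds
 * old; the work reschedules itself while any are still pending.
 */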
4056 static void cnic_delete_task(struct work_struct *work)
4057 {
4058 	struct cnic_local *cp;
4059 	struct cnic_dev *dev;
4060 	u32 i;
4061 	int need_resched = 0;
4062 
4063 	cp = container_of(work, struct cnic_local, delete_task.work);
4064 	dev = cp->dev;
4065 
4066 	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4067 		struct drv_ctl_info info;
4068 
4069 		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4070 
4071 		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4072 		cp->ethdev->drv_ctl(dev->netdev, &info);
4073 	}
4074 
4075 	for (i = 0; i < cp->max_cid_space; i++) {
4076 		struct cnic_context *ctx = &cp->ctx_tbl[i];
4077 		int err;
4078 
4079 		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4080 		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4081 			continue;
4082 
4083 		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4084 			need_resched = 1;
4085 			continue;
4086 		}
4087 
4088 		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4089 			continue;
4090 
4091 		err = cnic_bnx2x_destroy_ramrod(dev, i);
4092 
4093 		cnic_free_bnx2x_conn_resc(dev, i);
4094 		if (!err) {
4095 			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4096 				atomic_dec(&cp->iscsi_conn);
4097 
4098 			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4099 		}
4100 	}
4101 
4102 	if (need_resched)
4103 		queue_delayed_work(cnic_wq, &cp->delete_task,
4104 				   msecs_to_jiffies(10));
4106 }
4107 
4108 static int cnic_cm_open(struct cnic_dev *dev)
4109 {
4110 	struct cnic_local *cp = dev->cnic_priv;
4111 	int err;
4112 
4113 	err = cnic_cm_alloc_mem(dev);
4114 	if (err)
4115 		return err;
4116 
4117 	err = cp->start_cm(dev);
4118 
4119 	if (err)
4120 		goto err_out;
4121 
4122 	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4123 
4124 	dev->cm_create = cnic_cm_create;
4125 	dev->cm_destroy = cnic_cm_destroy;
4126 	dev->cm_connect = cnic_cm_connect;
4127 	dev->cm_abort = cnic_cm_abort;
4128 	dev->cm_close = cnic_cm_close;
4129 	dev->cm_select_dev = cnic_cm_select_dev;
4130 
4131 	cp->ulp_handle[CNIC_ULP_L4] = dev;
4132 	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4133 	return 0;
4134 
4135 err_out:
4136 	cnic_cm_free_mem(dev);
4137 	return err;
4138 }
4139 
4140 static int cnic_cm_shutdown(struct cnic_dev *dev)
4141 {
4142 	struct cnic_local *cp = dev->cnic_priv;
4143 	int i;
4144 
4145 	cp->stop_cm(dev);
4146 
4147 	if (!cp->csk_tbl)
4148 		return 0;
4149 
4150 	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4151 		struct cnic_sock *csk = &cp->csk_tbl[i];
4152 
4153 		clear_bit(SK_F_INUSE, &csk->flags);
4154 		cnic_cm_cleanup(csk);
4155 	}
4156 	cnic_cm_free_mem(dev);
4157 
4158 	return 0;
4159 }
4160 
4161 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4162 {
4163 	u32 cid_addr;
4164 	int i;
4165 
4166 	cid_addr = GET_CID_ADDR(cid);
4167 
4168 	for (i = 0; i < CTX_SIZE; i += 4)
4169 		cnic_ctx_wr(dev, cid_addr, i, 0);
4170 }
4171 
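/* Program (or invalidate) the 5709 host page table entries for the
 * context memory, polling each write request for completion.
 */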
4172 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4173 {
4174 	struct cnic_local *cp = dev->cnic_priv;
4175 	int ret = 0, i;
4176 	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4177 
4178 	if (CHIP_NUM(cp) != CHIP_NUM_5709)
4179 		return 0;
4180 
4181 	for (i = 0; i < cp->ctx_blks; i++) {
4182 		int j;
4183 		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4184 		u32 val;
4185 
4186 		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
4187 
4188 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4189 			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4190 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4191 			(u64) cp->ctx_arr[i].mapping >> 32);
4192 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4193 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4194 		for (j = 0; j < 10; j++) {
4195 
4196 			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4197 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4198 				break;
4199 			udelay(5);
4200 		}
4201 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4202 			ret = -EBUSY;
4203 			break;
4204 		}
4205 	}
4206 	return ret;
4207 }
4208 
4209 static void cnic_free_irq(struct cnic_dev *dev)
4210 {
4211 	struct cnic_local *cp = dev->cnic_priv;
4212 	struct cnic_eth_dev *ethdev = cp->ethdev;
4213 
4214 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4215 		cp->disable_int_sync(dev);
4216 		tasklet_kill(&cp->cnic_irq_task);
4217 		free_irq(ethdev->irq_arr[0].vector, dev);
4218 	}
4219 }
4220 
4221 static int cnic_request_irq(struct cnic_dev *dev)
4222 {
4223 	struct cnic_local *cp = dev->cnic_priv;
4224 	struct cnic_eth_dev *ethdev = cp->ethdev;
4225 	int err;
4226 
4227 	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4228 	if (err)
4229 		tasklet_disable(&cp->cnic_irq_task);
4230 
4231 	return err;
4232 }
4233 
4234 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4235 {
4236 	struct cnic_local *cp = dev->cnic_priv;
4237 	struct cnic_eth_dev *ethdev = cp->ethdev;
4238 
4239 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4240 		int err, i = 0;
4241 		int sblk_num = cp->status_blk_num;
4242 		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4243 			   BNX2_HC_SB_CONFIG_1;
4244 
4245 		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4246 
4247 		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4248 		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4249 		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4250 
4251 		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4252 		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4253 			     (unsigned long) dev);
4254 		err = cnic_request_irq(dev);
4255 		if (err)
4256 			return err;
4257 
4258 		while (cp->status_blk.bnx2->status_completion_producer_index &&
4259 		       i < 10) {
4260 			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4261 				1 << (11 + sblk_num));
4262 			udelay(10);
4263 			i++;
4264 			barrier();
4265 		}
4266 		if (cp->status_blk.bnx2->status_completion_producer_index) {
4267 			cnic_free_irq(dev);
4268 			goto failed;
4269 		}
4270 
4271 	} else {
4272 		struct status_block *sblk = cp->status_blk.gen;
4273 		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4274 		int i = 0;
4275 
4276 		while (sblk->status_completion_producer_index && i < 10) {
4277 			CNIC_WR(dev, BNX2_HC_COMMAND,
4278 				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4279 			udelay(10);
4280 			i++;
4281 			barrier();
4282 		}
4283 		if (sblk->status_completion_producer_index)
4284 			goto failed;
4285 
4286 	}
4287 	return 0;
4288 
4289 failed:
4290 	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4291 	return -EBUSY;
4292 }
4293 
4294 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4295 {
4296 	struct cnic_local *cp = dev->cnic_priv;
4297 	struct cnic_eth_dev *ethdev = cp->ethdev;
4298 
4299 	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4300 		return;
4301 
4302 	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4303 		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4304 }
4305 
4306 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4307 {
4308 	struct cnic_local *cp = dev->cnic_priv;
4309 	struct cnic_eth_dev *ethdev = cp->ethdev;
4310 
4311 	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4312 		return;
4313 
4314 	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4315 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4316 	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4317 	synchronize_irq(ethdev->irq_arr[0].vector);
4318 }
4319 
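/* Set up the L2 TX ring used by the UIO interface: point every BD at
 * the single DMA buffer and program the ring address into the TX
 * context.
 */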
4320 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4321 {
4322 	struct cnic_local *cp = dev->cnic_priv;
4323 	struct cnic_eth_dev *ethdev = cp->ethdev;
4324 	struct cnic_uio_dev *udev = cp->udev;
4325 	u32 cid_addr, tx_cid, sb_id;
4326 	u32 val, offset0, offset1, offset2, offset3;
4327 	int i;
4328 	struct tx_bd *txbd;
4329 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4330 	struct status_block *s_blk = cp->status_blk.gen;
4331 
4332 	sb_id = cp->status_blk_num;
4333 	tx_cid = 20;
4334 	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4335 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4336 		struct status_block_msix *sblk = cp->status_blk.bnx2;
4337 
4338 		tx_cid = TX_TSS_CID + sb_id - 1;
4339 		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4340 			(TX_TSS_CID << 7));
4341 		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4342 	}
4343 	cp->tx_cons = *cp->tx_cons_ptr;
4344 
4345 	cid_addr = GET_CID_ADDR(tx_cid);
4346 	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
4347 		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4348 
4349 		for (i = 0; i < PHY_CTX_SIZE; i += 4)
4350 			cnic_ctx_wr(dev, cid_addr2, i, 0);
4351 
4352 		offset0 = BNX2_L2CTX_TYPE_XI;
4353 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4354 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4355 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4356 	} else {
4357 		cnic_init_context(dev, tx_cid);
4358 		cnic_init_context(dev, tx_cid + 1);
4359 
4360 		offset0 = BNX2_L2CTX_TYPE;
4361 		offset1 = BNX2_L2CTX_CMD_TYPE;
4362 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4363 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4364 	}
4365 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4366 	cnic_ctx_wr(dev, cid_addr, offset0, val);
4367 
4368 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4369 	cnic_ctx_wr(dev, cid_addr, offset1, val);
4370 
4371 	txbd = udev->l2_ring;
4372 
4373 	buf_map = udev->l2_buf_map;
4374 	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
4375 		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4376 		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4377 	}
4378 	val = (u64) ring_map >> 32;
4379 	cnic_ctx_wr(dev, cid_addr, offset2, val);
4380 	txbd->tx_bd_haddr_hi = val;
4381 
4382 	val = (u64) ring_map & 0xffffffff;
4383 	cnic_ctx_wr(dev, cid_addr, offset3, val);
4384 	txbd->tx_bd_haddr_lo = val;
4385 }
4386 
4387 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4388 {
4389 	struct cnic_local *cp = dev->cnic_priv;
4390 	struct cnic_eth_dev *ethdev = cp->ethdev;
4391 	struct cnic_uio_dev *udev = cp->udev;
4392 	u32 cid_addr, sb_id, val, coal_reg, coal_val;
4393 	int i;
4394 	struct rx_bd *rxbd;
4395 	struct status_block *s_blk = cp->status_blk.gen;
4396 	dma_addr_t ring_map = udev->l2_ring_map;
4397 
4398 	sb_id = cp->status_blk_num;
4399 	cnic_init_context(dev, 2);
4400 	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4401 	coal_reg = BNX2_HC_COMMAND;
4402 	coal_val = CNIC_RD(dev, coal_reg);
4403 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4404 		struct status_block_msix *sblk = cp->status_blk.bnx2;
4405 
4406 		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4407 		coal_reg = BNX2_HC_COALESCE_NOW;
4408 		coal_val = 1 << (11 + sb_id);
4409 	}
4410 	i = 0;
4411 	while (*cp->rx_cons_ptr == 0 && i < 10) {
4412 		CNIC_WR(dev, coal_reg, coal_val);
4413 		udelay(10);
4414 		i++;
4415 		barrier();
4416 	}
4417 	cp->rx_cons = *cp->rx_cons_ptr;
4418 
4419 	cid_addr = GET_CID_ADDR(2);
4420 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4421 	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4422 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4423 
4424 	if (sb_id == 0)
4425 		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4426 	else
4427 		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4428 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4429 
4430 	rxbd = udev->l2_ring + BCM_PAGE_SIZE;
4431 	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
4432 		dma_addr_t buf_map;
4433 		int n = (i % cp->l2_rx_ring_size) + 1;
4434 
4435 		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4436 		rxbd->rx_bd_len = cp->l2_single_buf_size;
4437 		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4438 		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4439 		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4440 	}
4441 	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4442 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4443 	rxbd->rx_bd_haddr_hi = val;
4444 
4445 	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4446 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4447 	rxbd->rx_bd_haddr_lo = val;
4448 
4449 	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4450 	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4451 }
4452 
4453 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4454 {
4455 	struct kwqe *wqes[1], l2kwqe;
4456 
4457 	memset(&l2kwqe, 0, sizeof(l2kwqe));
4458 	wqes[0] = &l2kwqe;
4459 	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4460 			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
4461 			       KWQE_OPCODE_SHIFT) | 2;
4462 	dev->submit_kwqes(dev, wqes, 1);
4463 }
4464 
4465 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
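/* Read the iSCSI MAC address from shared memory, program it into the
 * EMAC MAC-match registers, and enable the RPM sort rule for it.
 */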
4466 {
4467 	struct cnic_local *cp = dev->cnic_priv;
4468 	u32 val;
4469 
4470 	val = cp->func << 2;
4471 
4472 	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4473 
4474 	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4475 			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4476 	dev->mac_addr[0] = (u8) (val >> 8);
4477 	dev->mac_addr[1] = (u8) val;
4478 
4479 	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4480 
4481 	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4482 			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4483 	dev->mac_addr[2] = (u8) (val >> 24);
4484 	dev->mac_addr[3] = (u8) (val >> 16);
4485 	dev->mac_addr[4] = (u8) (val >> 8);
4486 	dev->mac_addr[5] = (u8) val;
4487 
4488 	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4489 
4490 	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4491 	if (CHIP_NUM(cp) != CHIP_NUM_5709)
4492 		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4493 
4494 	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4495 	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4496 	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4497 }
4498 
4499 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
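/* Bring up the bnx2 kernel queues: program the KWQ and KCQ contexts,
 * set up the L2 rings, release the CP and COM processors, and hook up
 * the interrupt.
 */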
4500 {
4501 	struct cnic_local *cp = dev->cnic_priv;
4502 	struct cnic_eth_dev *ethdev = cp->ethdev;
4503 	struct status_block *sblk = cp->status_blk.gen;
4504 	u32 val, kcq_cid_addr, kwq_cid_addr;
4505 	int err;
4506 
4507 	cnic_set_bnx2_mac(dev);
4508 
4509 	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4510 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4511 	if (BCM_PAGE_BITS > 12)
4512 		val |= (12 - 8)  << 4;
4513 	else
4514 		val |= (BCM_PAGE_BITS - 8)  << 4;
4515 
4516 	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4517 
4518 	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4519 	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4520 	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4521 
4522 	err = cnic_setup_5709_context(dev, 1);
4523 	if (err)
4524 		return err;
4525 
4526 	cnic_init_context(dev, KWQ_CID);
4527 	cnic_init_context(dev, KCQ_CID);
4528 
4529 	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4530 	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4531 
4532 	cp->max_kwq_idx = MAX_KWQ_IDX;
4533 	cp->kwq_prod_idx = 0;
4534 	cp->kwq_con_idx = 0;
4535 	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4536 
4537 	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
4538 		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4539 	else
4540 		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4541 
4542 	/* Initialize the kernel work queue context. */
4543 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4544 	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4545 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4546 
4547 	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4548 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4549 
4550 	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4551 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4552 
4553 	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4554 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4555 
4556 	val = (u32) cp->kwq_info.pgtbl_map;
4557 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4558 
4559 	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4560 	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4561 
4562 	cp->kcq1.sw_prod_idx = 0;
4563 	cp->kcq1.hw_prod_idx_ptr =
4564 		(u16 *) &sblk->status_completion_producer_index;
4565 
4566 	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
4567 
4568 	/* Initialize the kernel complete queue context. */
4569 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4570 	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4571 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4572 
4573 	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4574 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4575 
4576 	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4577 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4578 
4579 	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4580 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4581 
4582 	val = (u32) cp->kcq1.dma.pgtbl_map;
4583 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4584 
4585 	cp->int_num = 0;
4586 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4587 		struct status_block_msix *msblk = cp->status_blk.bnx2;
4588 		u32 sb_id = cp->status_blk_num;
4589 		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4590 
4591 		cp->kcq1.hw_prod_idx_ptr =
4592 			(u16 *) &msblk->status_completion_producer_index;
4593 		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
4594 		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
4595 		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4596 		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4597 		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4598 	}
4599 
4600 	/* Enable Command Scheduler notification when we write to the
4601 	 * host producer index of the kernel contexts. */
4602 	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4603 
4604 	/* Enable Command Scheduler notification when we write to either
4605 	 * the Send Queue or Receive Queue producer indexes of the kernel
4606 	 * bypass contexts. */
4607 	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4608 	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4609 
4610 	/* Notify COM when the driver posts an application buffer. */
4611 	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4612 
4613 	/* Set the CP and COM doorbells.  These two processors poll the
4614 	 * doorbell for a non-zero value before running.  This must be done
4615 	 * after setting up the kernel queue contexts. */
4616 	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4617 	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4618 
4619 	cnic_init_bnx2_tx_ring(dev);
4620 	cnic_init_bnx2_rx_ring(dev);
4621 
4622 	err = cnic_init_bnx2_irq(dev);
4623 	if (err) {
4624 		netdev_err(dev->netdev, "cnic_init_irq failed\n");
4625 		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4626 		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4627 		return err;
4628 	}
4629 
4630 	return 0;
4631 }
4632 
4633 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4634 {
4635 	struct cnic_local *cp = dev->cnic_priv;
4636 	struct cnic_eth_dev *ethdev = cp->ethdev;
4637 	u32 start_offset = ethdev->ctx_tbl_offset;
4638 	int i;
4639 
4640 	for (i = 0; i < cp->ctx_blks; i++) {
4641 		struct cnic_ctx *ctx = &cp->ctx_arr[i];
4642 		dma_addr_t map = ctx->mapping;
4643 
4644 		if (cp->ctx_align) {
4645 			unsigned long mask = cp->ctx_align - 1;
4646 
4647 			map = (map + mask) & ~mask;
4648 		}
4649 
4650 		cnic_ctx_tbl_wr(dev, start_offset + i, map);
4651 	}
4652 }
4653 
4654 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4655 {
4656 	struct cnic_local *cp = dev->cnic_priv;
4657 	struct cnic_eth_dev *ethdev = cp->ethdev;
4658 	int err = 0;
4659 
4660 	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4661 		     (unsigned long) dev);
4662 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4663 		err = cnic_request_irq(dev);
4664 
4665 	return err;
4666 }
4667 
4668 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4669 						u16 sb_id, u8 sb_index,
4670 						u8 disable)
4671 {
4673 	u32 addr = BAR_CSTRORM_INTMEM +
4674 			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4675 			offsetof(struct hc_status_block_data_e1x, index_data) +
4676 			sizeof(struct hc_index_data)*sb_index +
4677 			offsetof(struct hc_index_data, flags);
4678 	u16 flags = CNIC_RD16(dev, addr);
4679 	/* clear and set */
4680 	flags &= ~HC_INDEX_DATA_HC_ENABLED;
4681 	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4682 		  HC_INDEX_DATA_HC_ENABLED);
4683 	CNIC_WR16(dev, addr, flags);
4684 }
4685 
4686 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4687 {
4688 	struct cnic_local *cp = dev->cnic_priv;
4689 	u8 sb_id = cp->status_blk_num;
4690 
4691 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4692 			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4693 			offsetof(struct hc_status_block_data_e1x, index_data) +
4694 			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4695 			offsetof(struct hc_index_data, timeout), 64 / 4);
4696 	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4697 }
4698 
4699 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4700 {
4701 }
4702 
4703 static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4704 				    struct client_init_ramrod_data *data)
4705 {
4706 	struct cnic_local *cp = dev->cnic_priv;
4707 	struct cnic_uio_dev *udev = cp->udev;
4708 	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4709 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4710 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4711 	int i;
4712 	u32 cli = cp->ethdev->iscsi_l2_client_id;
4713 	u32 val;
4714 
4715 	memset(txbd, 0, BCM_PAGE_SIZE);
4716 
4717 	buf_map = udev->l2_buf_map;
4718 	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4719 		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4720 		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4721 
4722 		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4723 		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4724 		reg_bd->addr_hi = start_bd->addr_hi;
4725 		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4726 		start_bd->nbytes = cpu_to_le16(0x10);
4727 		start_bd->nbd = cpu_to_le16(3);
4728 		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4729 		start_bd->general_data = (UNICAST_ADDRESS <<
4730 			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
4731 		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4732 
4733 	}
4734 
4735 	val = (u64) ring_map >> 32;
4736 	txbd->next_bd.addr_hi = cpu_to_le32(val);
4737 
4738 	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4739 
4740 	val = (u64) ring_map & 0xffffffff;
4741 	txbd->next_bd.addr_lo = cpu_to_le32(val);
4742 
4743 	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4744 
4745 	/* Other ramrod params */
4746 	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4747 	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4748 
4749 	/* reset xstorm per client statistics */
4750 	if (cli < MAX_STAT_COUNTER_ID) {
4751 		data->general.statistics_zero_flg = 1;
4752 		data->general.statistics_en_flg = 1;
4753 		data->general.statistics_counter_id = cli;
4754 	}
4755 
4756 	cp->tx_cons_ptr =
4757 		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4758 }
4759 
4760 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4761 				    struct client_init_ramrod_data *data)
4762 {
4763 	struct cnic_local *cp = dev->cnic_priv;
4764 	struct cnic_uio_dev *udev = cp->udev;
4765 	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4766 				BCM_PAGE_SIZE);
4767 	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4768 				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
4769 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4770 	int i;
4771 	u32 cli = cp->ethdev->iscsi_l2_client_id;
4772 	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4773 	u32 val;
4774 	dma_addr_t ring_map = udev->l2_ring_map;
4775 
4776 	/* General data */
4777 	data->general.client_id = cli;
4778 	data->general.activate_flg = 1;
4779 	data->general.sp_client_id = cli;
4780 	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4781 	data->general.func_id = cp->pfid;
4782 
4783 	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
4784 		dma_addr_t buf_map;
4785 		int n = (i % cp->l2_rx_ring_size) + 1;
4786 
4787 		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4788 		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4789 		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4790 	}
4791 
4792 	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4793 	rxbd->addr_hi = cpu_to_le32(val);
4794 	data->rx.bd_page_base.hi = cpu_to_le32(val);
4795 
4796 	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4797 	rxbd->addr_lo = cpu_to_le32(val);
4798 	data->rx.bd_page_base.lo = cpu_to_le32(val);
4799 
4800 	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
4801 	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
4802 	rxcqe->addr_hi = cpu_to_le32(val);
4803 	data->rx.cqe_page_base.hi = cpu_to_le32(val);
4804 
4805 	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4806 	rxcqe->addr_lo = cpu_to_le32(val);
4807 	data->rx.cqe_page_base.lo = cpu_to_le32(val);
4808 
4809 	/* Other ramrod params */
4810 	data->rx.client_qzone_id = cl_qzone_id;
4811 	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
4812 	data->rx.status_block_id = BNX2X_DEF_SB_ID;
4813 
4814 	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
4815 
4816 	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
4817 	data->rx.outer_vlan_removal_enable_flg = 1;
4818 	data->rx.silent_vlan_removal_flg = 1;
4819 	data->rx.silent_vlan_value = 0;
4820 	data->rx.silent_vlan_mask = 0xffff;
4821 
4822 	cp->rx_cons_ptr =
4823 		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
4824 	cp->rx_cons = *cp->rx_cons_ptr;
4825 }
4826 
4827 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4828 {
4829 	struct cnic_local *cp = dev->cnic_priv;
4830 	u32 pfid = cp->pfid;
4831 
4832 	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4833 			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4834 	cp->kcq1.sw_prod_idx = 0;
4835 
4836 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4837 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4838 
4839 		cp->kcq1.hw_prod_idx_ptr =
4840 			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4841 		cp->kcq1.status_idx_ptr =
4842 			&sb->sb.running_index[SM_RX_ID];
4843 	} else {
4844 		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4845 
4846 		cp->kcq1.hw_prod_idx_ptr =
4847 			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4848 		cp->kcq1.status_idx_ptr =
4849 			&sb->sb.running_index[SM_RX_ID];
4850 	}
4851 
4852 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4853 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4854 
4855 		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
4856 					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
4857 		cp->kcq2.sw_prod_idx = 0;
4858 		cp->kcq2.hw_prod_idx_ptr =
4859 			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
4860 		cp->kcq2.status_idx_ptr =
4861 			&sb->sb.running_index[SM_RX_ID];
4862 	}
4863 }
4864 
4865 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4866 {
4867 	struct cnic_local *cp = dev->cnic_priv;
4868 	struct cnic_eth_dev *ethdev = cp->ethdev;
4869 	int func = CNIC_FUNC(cp), ret;
4870 	u32 pfid;
4871 
4872 	cp->port_mode = CHIP_PORT_MODE_NONE;
4873 
4874 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4875 		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
4876 
4877 		if (!(val & 1))
4878 			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
4879 		else
4880 			val = (val >> 1) & 1;
4881 
4882 		if (val) {
4883 			cp->port_mode = CHIP_4_PORT_MODE;
4884 			cp->pfid = func >> 1;
4885 		} else {
4886 			cp->port_mode = CHIP_2_PORT_MODE;
4887 			cp->pfid = func & 0x6;
4888 		}
4889 	} else {
4890 		cp->pfid = func;
4891 	}
4892 	pfid = cp->pfid;
4893 
4894 	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
4895 			       cp->iscsi_start_cid, 0);
4896 
4897 	if (ret)
4898 		return -ENOMEM;
4899 
4900 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4901 		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
4902 					cp->fcoe_start_cid, 0);
4903 
4904 		if (ret)
4905 			return -ENOMEM;
4906 	}
4907 
4908 	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
4909 
4910 	cnic_init_bnx2x_kcq(dev);
4911 
4912 	/* Only 1 EQ */
4913 	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
4914 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4915 		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
4916 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4917 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
4918 		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
4919 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4920 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
4921 		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
4922 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4923 		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
4924 		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
4925 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
4926 		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
4927 		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
4928 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4929 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
4930 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
4931 		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
4932 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4933 		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
4934 		HC_INDEX_ISCSI_EQ_CONS);
4935 
4936 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
4937 		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
4938 		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
4939 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
4940 		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
4941 		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
4942 
4943 	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
4944 		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
4945 
4946 	cnic_setup_bnx2x_context(dev);
4947 
4948 	ret = cnic_init_bnx2x_irq(dev);
4949 	if (ret)
4950 		return ret;
4951 
4952 	return 0;
4953 }
4954 
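/* Initialize the L2 rings for the UIO interface.  On bnx2x this
 * submits a CLIENT_SETUP ramrod and waits up to 10 ms for its
 * completion before enabling the ring.
 */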
4955 static void cnic_init_rings(struct cnic_dev *dev)
4956 {
4957 	struct cnic_local *cp = dev->cnic_priv;
4958 	struct cnic_uio_dev *udev = cp->udev;
4959 
4960 	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
4961 		return;
4962 
4963 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4964 		cnic_init_bnx2_tx_ring(dev);
4965 		cnic_init_bnx2_rx_ring(dev);
4966 		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4967 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4968 		u32 cli = cp->ethdev->iscsi_l2_client_id;
4969 		u32 cid = cp->ethdev->iscsi_l2_cid;
4970 		u32 cl_qzone_id;
4971 		struct client_init_ramrod_data *data;
4972 		union l5cm_specific_data l5_data;
4973 		struct ustorm_eth_rx_producers rx_prods = {0};
4974 		u32 off, i, *cid_ptr;
4975 
4976 		rx_prods.bd_prod = 0;
4977 		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
4978 		barrier();
4979 
4980 		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4981 
4982 		off = BAR_USTRORM_INTMEM +
4983 			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
4984 			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
4985 			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
4986 
4987 		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
4988 			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
4989 
4990 		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4991 
4992 		data = udev->l2_buf;
4993 		cid_ptr = udev->l2_buf + 12;
4994 
4995 		memset(data, 0, sizeof(*data));
4996 
4997 		cnic_init_bnx2x_tx_ring(dev, data);
4998 		cnic_init_bnx2x_rx_ring(dev, data);
4999 
5000 		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5001 		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
5002 
5003 		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5004 
5005 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5006 			cid, ETH_CONNECTION_TYPE, &l5_data);
5007 
5008 		i = 0;
5009 		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5010 		       ++i < 10)
5011 			msleep(1);
5012 
5013 		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5014 			netdev_err(dev->netdev,
5015 				"iSCSI CLIENT_SETUP did not complete\n");
5016 		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5017 		cnic_ring_ctl(dev, cid, cli, 1);
5018 		*cid_ptr = cid;
5019 	}
5020 }
5021 
5022 static void cnic_shutdown_rings(struct cnic_dev *dev)
5023 {
5024 	struct cnic_local *cp = dev->cnic_priv;
5025 	struct cnic_uio_dev *udev = cp->udev;
5026 	void *rx_ring;
5027 
5028 	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5029 		return;
5030 
5031 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5032 		cnic_shutdown_bnx2_rx_ring(dev);
5033 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5034 		u32 cli = cp->ethdev->iscsi_l2_client_id;
5035 		u32 cid = cp->ethdev->iscsi_l2_cid;
5036 		union l5cm_specific_data l5_data;
5037 		int i;
5038 
5039 		cnic_ring_ctl(dev, cid, cli, 0);
5040 
5041 		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5042 
5043 		l5_data.phy_address.lo = cli;
5044 		l5_data.phy_address.hi = 0;
5045 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5046 			cid, ETH_CONNECTION_TYPE, &l5_data);
5047 		i = 0;
5048 		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5049 		       ++i < 10)
5050 			msleep(1);
5051 
5052 		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5053 			netdev_err(dev->netdev,
5054 				"iSCSI CLIENT_HALT did not complete\n");
5055 		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5056 
5057 		memset(&l5_data, 0, sizeof(l5_data));
5058 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5059 			cid, NONE_CONNECTION_TYPE, &l5_data);
5060 		msleep(10);
5061 	}
5062 	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5063 	rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
5064 	memset(rx_ring, 0, BCM_PAGE_SIZE);
5065 }
5066 
5067 static int cnic_register_netdev(struct cnic_dev *dev)
5068 {
5069 	struct cnic_local *cp = dev->cnic_priv;
5070 	struct cnic_eth_dev *ethdev = cp->ethdev;
5071 	int err;
5072 
5073 	if (!ethdev)
5074 		return -ENODEV;
5075 
5076 	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5077 		return 0;
5078 
5079 	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5080 	if (err)
5081 		netdev_err(dev->netdev, "register_cnic failed\n");
5082 
5083 	return err;
5084 }
5085 
5086 static void cnic_unregister_netdev(struct cnic_dev *dev)
5087 {
5088 	struct cnic_local *cp = dev->cnic_priv;
5089 	struct cnic_eth_dev *ethdev = cp->ethdev;
5090 
5091 	if (!ethdev)
5092 		return;
5093 
5094 	ethdev->drv_unregister_cnic(dev->netdev);
5095 }
5096 
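/* Common bring-up path: grab the PCI device, allocate resources, start
 * the chip-specific hardware, and open the connection manager.
 */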
5097 static int cnic_start_hw(struct cnic_dev *dev)
5098 {
5099 	struct cnic_local *cp = dev->cnic_priv;
5100 	struct cnic_eth_dev *ethdev = cp->ethdev;
5101 	int err;
5102 
5103 	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5104 		return -EALREADY;
5105 
5106 	dev->regview = ethdev->io_base;
5107 	pci_dev_get(dev->pcidev);
5108 	cp->func = PCI_FUNC(dev->pcidev->devfn);
5109 	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5110 	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5111 
5112 	err = cp->alloc_resc(dev);
5113 	if (err) {
5114 		netdev_err(dev->netdev, "allocate resource failure\n");
5115 		goto err1;
5116 	}
5117 
5118 	err = cp->start_hw(dev);
5119 	if (err)
5120 		goto err1;
5121 
5122 	err = cnic_cm_open(dev);
5123 	if (err)
5124 		goto err1;
5125 
5126 	set_bit(CNIC_F_CNIC_UP, &dev->flags);
5127 
5128 	cp->enable_int(dev);
5129 
5130 	return 0;
5131 
5132 err1:
5133 	cp->free_resc(dev);
5134 	pci_dev_put(dev->pcidev);
5135 	return err;
5136 }
5137 
5138 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5139 {
5140 	cnic_disable_bnx2_int_sync(dev);
5141 
5142 	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5143 	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5144 
5145 	cnic_init_context(dev, KWQ_CID);
5146 	cnic_init_context(dev, KCQ_CID);
5147 
5148 	cnic_setup_5709_context(dev, 0);
5149 	cnic_free_irq(dev);
5150 
5151 	cnic_free_resc(dev);
5152 }
5153 
5154 
5155 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5156 {
5157 	struct cnic_local *cp = dev->cnic_priv;
5158 
5159 	cnic_free_irq(dev);
5160 	*cp->kcq1.hw_prod_idx_ptr = 0;
5161 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5162 		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
5163 	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5164 	cnic_free_resc(dev);
5165 }
5166 
5167 static void cnic_stop_hw(struct cnic_dev *dev)
5168 {
5169 	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5170 		struct cnic_local *cp = dev->cnic_priv;
5171 		int i = 0;
5172 
5173 		/* Need to wait for the ring shutdown event to complete
5174 		 * before clearing the CNIC_UP flag.
5175 		 */
5176 		while (cp->udev->uio_dev != -1 && i < 15) {
5177 			msleep(100);
5178 			i++;
5179 		}
5180 		cnic_shutdown_rings(dev);
5181 		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5182 		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5183 		synchronize_rcu();
5184 		cnic_cm_shutdown(dev);
5185 		cp->stop_hw(dev);
5186 		pci_dev_put(dev->pcidev);
5187 	}
5188 }
5189 
5190 static void cnic_free_dev(struct cnic_dev *dev)
5191 {
5192 	int i = 0;
5193 
5194 	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5195 		msleep(100);
5196 		i++;
5197 	}
5198 	if (atomic_read(&dev->ref_count) != 0)
5199 		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5200 
5201 	netdev_info(dev->netdev, "Removed CNIC device\n");
5202 	dev_put(dev->netdev);
5203 	kfree(dev);
5204 }
5205 
5206 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5207 				       struct pci_dev *pdev)
5208 {
5209 	struct cnic_dev *cdev;
5210 	struct cnic_local *cp;
5211 	int alloc_size;
5212 
5213 	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5214 
5215 	cdev = kzalloc(alloc_size, GFP_KERNEL);
5216 	if (cdev == NULL) {
5217 		netdev_err(dev, "allocate dev struct failure\n");
5218 		return NULL;
5219 	}
5220 
5221 	cdev->netdev = dev;
5222 	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5223 	cdev->register_device = cnic_register_device;
5224 	cdev->unregister_device = cnic_unregister_device;
5225 	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5226 
5227 	cp = cdev->cnic_priv;
5228 	cp->dev = cdev;
5229 	cp->l2_single_buf_size = 0x400;
5230 	cp->l2_rx_ring_size = 3;
5231 
5232 	spin_lock_init(&cp->cnic_ulp_lock);
5233 
5234 	netdev_info(dev, "Added CNIC device\n");
5235 
5236 	return cdev;
5237 }
5238 
5239 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5240 {
5241 	struct pci_dev *pdev;
5242 	struct cnic_dev *cdev;
5243 	struct cnic_local *cp;
5244 	struct cnic_eth_dev *ethdev = NULL;
5245 	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5246 
5247 	probe = symbol_get(bnx2_cnic_probe);
5248 	if (probe) {
5249 		ethdev = (*probe)(dev);
5250 		symbol_put(bnx2_cnic_probe);
5251 	}
5252 	if (!ethdev)
5253 		return NULL;
5254 
5255 	pdev = ethdev->pdev;
5256 	if (!pdev)
5257 		return NULL;
5258 
5259 	dev_hold(dev);
5260 	pci_dev_get(pdev);
5261 	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5262 	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5263 	    (pdev->revision < 0x10)) {
5264 		pci_dev_put(pdev);
5265 		goto cnic_err;
5266 	}
5267 	pci_dev_put(pdev);
5268 
5269 	cdev = cnic_alloc_dev(dev, pdev);
5270 	if (cdev == NULL)
5271 		goto cnic_err;
5272 
5273 	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5274 	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5275 
5276 	cp = cdev->cnic_priv;
5277 	cp->ethdev = ethdev;
5278 	cdev->pcidev = pdev;
5279 	cp->chip_id = ethdev->chip_id;
5280 
5281 	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5282 
5283 	cp->cnic_ops = &cnic_bnx2_ops;
5284 	cp->start_hw = cnic_start_bnx2_hw;
5285 	cp->stop_hw = cnic_stop_bnx2_hw;
5286 	cp->setup_pgtbl = cnic_setup_page_tbl;
5287 	cp->alloc_resc = cnic_alloc_bnx2_resc;
5288 	cp->free_resc = cnic_free_resc;
5289 	cp->start_cm = cnic_cm_init_bnx2_hw;
5290 	cp->stop_cm = cnic_cm_stop_bnx2_hw;
5291 	cp->enable_int = cnic_enable_bnx2_int;
5292 	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5293 	cp->close_conn = cnic_close_bnx2_conn;
5294 	return cdev;
5295 
5296 cnic_err:
5297 	dev_put(dev);
5298 	return NULL;
5299 }
5300 
5301 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5302 {
5303 	struct pci_dev *pdev;
5304 	struct cnic_dev *cdev;
5305 	struct cnic_local *cp;
5306 	struct cnic_eth_dev *ethdev = NULL;
5307 	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5308 
5309 	probe = symbol_get(bnx2x_cnic_probe);
5310 	if (probe) {
5311 		ethdev = (*probe)(dev);
5312 		symbol_put(bnx2x_cnic_probe);
5313 	}
5314 	if (!ethdev)
5315 		return NULL;
5316 
5317 	pdev = ethdev->pdev;
5318 	if (!pdev)
5319 		return NULL;
5320 
5321 	dev_hold(dev);
5322 	cdev = cnic_alloc_dev(dev, pdev);
5323 	if (cdev == NULL) {
5324 		dev_put(dev);
5325 		return NULL;
5326 	}
5327 
5328 	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5329 	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5330 
5331 	cp = cdev->cnic_priv;
5332 	cp->ethdev = ethdev;
5333 	cdev->pcidev = pdev;
5334 	cp->chip_id = ethdev->chip_id;
5335 
5336 	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
5337 
5338 	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5339 		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5340 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
5341 	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
5342 		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5343 
5344 	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
5345 		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
5346 
5347 	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
5348 
5349 	cp->cnic_ops = &cnic_bnx2x_ops;
5350 	cp->start_hw = cnic_start_bnx2x_hw;
5351 	cp->stop_hw = cnic_stop_bnx2x_hw;
5352 	cp->setup_pgtbl = cnic_setup_page_tbl_le;
5353 	cp->alloc_resc = cnic_alloc_bnx2x_resc;
5354 	cp->free_resc = cnic_free_resc;
5355 	cp->start_cm = cnic_cm_init_bnx2x_hw;
5356 	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5357 	cp->enable_int = cnic_enable_bnx2x_int;
5358 	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5359 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
5360 		cp->ack_int = cnic_ack_bnx2x_e2_msix;
5361 	else
5362 		cp->ack_int = cnic_ack_bnx2x_msix;
5363 	cp->close_conn = cnic_close_bnx2x_conn;
5364 	return cdev;
5365 }
5366 
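/* Identify bnx2/bnx2x devices by their ethtool driver name and create
 * a cnic device for them, adding it to the global device list.
 */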
5367 static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5368 {
5369 	struct ethtool_drvinfo drvinfo;
5370 	struct cnic_dev *cdev = NULL;
5371 
5372 	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5373 		memset(&drvinfo, 0, sizeof(drvinfo));
5374 		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5375 
5376 		if (!strcmp(drvinfo.driver, "bnx2"))
5377 			cdev = init_bnx2_cnic(dev);
5378 		if (!strcmp(drvinfo.driver, "bnx2x"))
5379 			cdev = init_bnx2x_cnic(dev);
5380 		if (cdev) {
5381 			write_lock(&cnic_dev_lock);
5382 			list_add(&cdev->list, &cnic_dev_list);
5383 			write_unlock(&cnic_dev_lock);
5384 		}
5385 	}
5386 	return cdev;
5387 }
5388 
5389 static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5390 			      u16 vlan_id)
5391 {
5392 	int if_type;
5393 
5394 	rcu_read_lock();
5395 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5396 		struct cnic_ulp_ops *ulp_ops;
5397 		void *ctx;
5398 
5399 		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
5400 		if (!ulp_ops || !ulp_ops->indicate_netevent)
5401 			continue;
5402 
5403 		ctx = cp->ulp_handle[if_type];
5404 
5405 		ulp_ops->indicate_netevent(ctx, event, vlan_id);
5406 	}
5407 	rcu_read_unlock();
5408 }
5409 
5410 /* netdev notifier: create/start the cnic device on register/up, stop
5411  * and free it on going-down/unregister, and forward netevents to the
5412  * registered ULPs. */
5413 static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5414 							 void *ptr)
5415 {
5416 	struct net_device *netdev = ptr;
5417 	struct cnic_dev *dev;
5418 	int new_dev = 0;
5419 
5420 	dev = cnic_from_netdev(netdev);
5421 
5422 	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
5423 		/* Check for the hot-plug device */
5424 		dev = is_cnic_dev(netdev);
5425 		if (dev) {
5426 			new_dev = 1;
5427 			cnic_hold(dev);
5428 		}
5429 	}
5430 	if (dev) {
5431 		struct cnic_local *cp = dev->cnic_priv;
5432 
5433 		if (new_dev)
5434 			cnic_ulp_init(dev);
5435 		else if (event == NETDEV_UNREGISTER)
5436 			cnic_ulp_exit(dev);
5437 
5438 		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
5439 			if (cnic_register_netdev(dev) != 0) {
5440 				cnic_put(dev);
5441 				goto done;
5442 			}
5443 			if (!cnic_start_hw(dev))
5444 				cnic_ulp_start(dev);
5445 		}
5446 
5447 		cnic_rcv_netevent(cp, event, 0);
5448 
5449 		if (event == NETDEV_GOING_DOWN) {
5450 			cnic_ulp_stop(dev);
5451 			cnic_stop_hw(dev);
5452 			cnic_unregister_netdev(dev);
5453 		} else if (event == NETDEV_UNREGISTER) {
5454 			write_lock(&cnic_dev_lock);
5455 			list_del_init(&dev->list);
5456 			write_unlock(&cnic_dev_lock);
5457 
5458 			cnic_put(dev);
5459 			cnic_free_dev(dev);
5460 			goto done;
5461 		}
5462 		cnic_put(dev);
5463 	} else {
5464 		struct net_device *realdev;
5465 		u16 vid;
5466 
5467 		vid = cnic_get_vlan(netdev, &realdev);
5468 		if (realdev) {
5469 			dev = cnic_from_netdev(realdev);
5470 			if (dev) {
5471 				vid |= VLAN_TAG_PRESENT;
5472 				cnic_rcv_netevent(dev->cnic_priv, event, vid);
5473 				cnic_put(dev);
5474 			}
5475 		}
5476 	}
5477 done:
5478 	return NOTIFY_DONE;
5479 }
5480 
5481 static struct notifier_block cnic_netdev_notifier = {
5482 	.notifier_call = cnic_netdev_event
5483 };
5484 
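/* Stop and free every cnic device and release all UIO devices (used on
 * module unload and on init failure).
 */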
5485 static void cnic_release(void)
5486 {
5487 	struct cnic_dev *dev;
5488 	struct cnic_uio_dev *udev;
5489 
5490 	while (!list_empty(&cnic_dev_list)) {
5491 		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
5492 		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5493 			cnic_ulp_stop(dev);
5494 			cnic_stop_hw(dev);
5495 		}
5496 
5497 		cnic_ulp_exit(dev);
5498 		cnic_unregister_netdev(dev);
5499 		list_del_init(&dev->list);
5500 		cnic_free_dev(dev);
5501 	}
5502 	while (!list_empty(&cnic_udev_list)) {
5503 		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5504 				  list);
5505 		cnic_free_uio(udev);
5506 	}
5507 }
5508 
5509 static int __init cnic_init(void)
5510 {
5511 	int rc = 0;
5512 
5513 	pr_info("%s", version);
5514 
5515 	rc = register_netdevice_notifier(&cnic_netdev_notifier);
5516 	if (rc) {
5517 		cnic_release();
5518 		return rc;
5519 	}
5520 
5521 	cnic_wq = create_singlethread_workqueue("cnic_wq");
5522 	if (!cnic_wq) {
5523 		cnic_release();
5524 		unregister_netdevice_notifier(&cnic_netdev_notifier);
5525 		return -ENOMEM;
5526 	}
5527 
5528 	return 0;
5529 }
5530 
5531 static void __exit cnic_exit(void)
5532 {
5533 	unregister_netdevice_notifier(&cnic_netdev_notifier);
5534 	cnic_release();
5535 	destroy_workqueue(cnic_wq);
5536 }
5537 
5538 module_init(cnic_init);
5539 module_exit(cnic_exit);
5540