xref: /linux/drivers/net/ethernet/broadcom/cnic.c (revision 800c5eb7b5eba6cb2a32738d763fd59f0fbcdde4)
1 /* cnic.c: Broadcom CNIC core network driver.
2  *
3  * Copyright (c) 2006-2012 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
10  * Modified and maintained by: Michael Chan <mchan@broadcom.com>
11  */
12 
13 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 
15 #include <linux/module.h>
16 
17 #include <linux/kernel.h>
18 #include <linux/errno.h>
19 #include <linux/list.h>
20 #include <linux/slab.h>
21 #include <linux/pci.h>
22 #include <linux/init.h>
23 #include <linux/netdevice.h>
24 #include <linux/uio_driver.h>
25 #include <linux/in.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/delay.h>
28 #include <linux/ethtool.h>
29 #include <linux/if_vlan.h>
30 #include <linux/prefetch.h>
31 #include <linux/random.h>
32 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
33 #define BCM_VLAN 1
34 #endif
35 #include <net/ip.h>
36 #include <net/tcp.h>
37 #include <net/route.h>
38 #include <net/ipv6.h>
39 #include <net/ip6_route.h>
40 #include <net/ip6_checksum.h>
41 #include <scsi/iscsi_if.h>
42 
43 #include "cnic_if.h"
44 #include "bnx2.h"
45 #include "bnx2x/bnx2x_reg.h"
46 #include "bnx2x/bnx2x_fw_defs.h"
47 #include "bnx2x/bnx2x_hsi.h"
48 #include "../../../scsi/bnx2i/57xx_iscsi_constants.h"
49 #include "../../../scsi/bnx2i/57xx_iscsi_hsi.h"
50 #include "cnic.h"
51 #include "cnic_defs.h"
52 
53 #define DRV_MODULE_NAME		"cnic"
54 
55 static char version[] __devinitdata =
56 	"Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
57 
58 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
59 	      "Chen (zongxi@broadcom.com");
60 MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
61 MODULE_LICENSE("GPL");
62 MODULE_VERSION(CNIC_MODULE_VERSION);
63 
64 /* cnic_dev_list is modified under both rtnl and cnic_dev_lock, so readers may hold either lock */
65 static LIST_HEAD(cnic_dev_list);
66 static LIST_HEAD(cnic_udev_list);
67 static DEFINE_RWLOCK(cnic_dev_lock);
68 static DEFINE_MUTEX(cnic_lock);
69 
70 static struct cnic_ulp_ops __rcu *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
71 
72 /* helper function, assuming cnic_lock is held */
73 static inline struct cnic_ulp_ops *cnic_ulp_tbl_prot(int type)
74 {
75 	return rcu_dereference_protected(cnic_ulp_tbl[type],
76 					 lockdep_is_held(&cnic_lock));
77 }
78 
79 static int cnic_service_bnx2(void *, void *);
80 static int cnic_service_bnx2x(void *, void *);
81 static int cnic_ctl(void *, struct cnic_ctl_info *);
82 
83 static struct cnic_ops cnic_bnx2_ops = {
84 	.cnic_owner	= THIS_MODULE,
85 	.cnic_handler	= cnic_service_bnx2,
86 	.cnic_ctl	= cnic_ctl,
87 };
88 
89 static struct cnic_ops cnic_bnx2x_ops = {
90 	.cnic_owner	= THIS_MODULE,
91 	.cnic_handler	= cnic_service_bnx2x,
92 	.cnic_ctl	= cnic_ctl,
93 };
94 
95 static struct workqueue_struct *cnic_wq;
96 
97 static void cnic_shutdown_rings(struct cnic_dev *);
98 static void cnic_init_rings(struct cnic_dev *);
99 static int cnic_cm_set_pg(struct cnic_sock *);
100 
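/* uio open handler for the userspace consumer of the cnic device.
 * Only one opener at a time is allowed (udev->uio_dev records the
 * active minor), and the rings are reinitialized under rtnl_lock so
 * the open cannot race with netdev events.
 */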
101 static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
102 {
103 	struct cnic_uio_dev *udev = uinfo->priv;
104 	struct cnic_dev *dev;
105 
106 	if (!capable(CAP_NET_ADMIN))
107 		return -EPERM;
108 
109 	if (udev->uio_dev != -1)
110 		return -EBUSY;
111 
112 	rtnl_lock();
113 	dev = udev->dev;
114 
115 	if (!dev || !test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
116 		rtnl_unlock();
117 		return -ENODEV;
118 	}
119 
120 	udev->uio_dev = iminor(inode);
121 
122 	cnic_shutdown_rings(dev);
123 	cnic_init_rings(dev);
124 	rtnl_unlock();
125 
126 	return 0;
127 }
128 
129 static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
130 {
131 	struct cnic_uio_dev *udev = uinfo->priv;
132 
133 	udev->uio_dev = -1;
134 	return 0;
135 }
136 
137 static inline void cnic_hold(struct cnic_dev *dev)
138 {
139 	atomic_inc(&dev->ref_count);
140 }
141 
142 static inline void cnic_put(struct cnic_dev *dev)
143 {
144 	atomic_dec(&dev->ref_count);
145 }
146 
147 static inline void csk_hold(struct cnic_sock *csk)
148 {
149 	atomic_inc(&csk->ref_count);
150 }
151 
152 static inline void csk_put(struct cnic_sock *csk)
153 {
154 	atomic_dec(&csk->ref_count);
155 }
156 
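/* Find the cnic_dev bound to @netdev.  On success a reference is
 * taken via cnic_hold(); the caller must balance it with cnic_put().
 */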
157 static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
158 {
159 	struct cnic_dev *cdev;
160 
161 	read_lock(&cnic_dev_lock);
162 	list_for_each_entry(cdev, &cnic_dev_list, list) {
163 		if (netdev == cdev->netdev) {
164 			cnic_hold(cdev);
165 			read_unlock(&cnic_dev_lock);
166 			return cdev;
167 		}
168 	}
169 	read_unlock(&cnic_dev_lock);
170 	return NULL;
171 }
172 
173 static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
174 {
175 	atomic_inc(&ulp_ops->ref_count);
176 }
177 
178 static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
179 {
180 	atomic_dec(&ulp_ops->ref_count);
181 }
182 
183 static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
184 {
185 	struct cnic_local *cp = dev->cnic_priv;
186 	struct cnic_eth_dev *ethdev = cp->ethdev;
187 	struct drv_ctl_info info;
188 	struct drv_ctl_io *io = &info.data.io;
189 
190 	info.cmd = DRV_CTL_CTX_WR_CMD;
191 	io->cid_addr = cid_addr;
192 	io->offset = off;
193 	io->data = val;
194 	ethdev->drv_ctl(dev->netdev, &info);
195 }
196 
197 static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
198 {
199 	struct cnic_local *cp = dev->cnic_priv;
200 	struct cnic_eth_dev *ethdev = cp->ethdev;
201 	struct drv_ctl_info info;
202 	struct drv_ctl_io *io = &info.data.io;
203 
204 	info.cmd = DRV_CTL_CTXTBL_WR_CMD;
205 	io->offset = off;
206 	io->dma_addr = addr;
207 	ethdev->drv_ctl(dev->netdev, &info);
208 }
209 
210 static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
211 {
212 	struct cnic_local *cp = dev->cnic_priv;
213 	struct cnic_eth_dev *ethdev = cp->ethdev;
214 	struct drv_ctl_info info;
215 	struct drv_ctl_l2_ring *ring = &info.data.ring;
216 
217 	if (start)
218 		info.cmd = DRV_CTL_START_L2_CMD;
219 	else
220 		info.cmd = DRV_CTL_STOP_L2_CMD;
221 
222 	ring->cid = cid;
223 	ring->client_id = cl_id;
224 	ethdev->drv_ctl(dev->netdev, &info);
225 }
226 
227 static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
228 {
229 	struct cnic_local *cp = dev->cnic_priv;
230 	struct cnic_eth_dev *ethdev = cp->ethdev;
231 	struct drv_ctl_info info;
232 	struct drv_ctl_io *io = &info.data.io;
233 
234 	info.cmd = DRV_CTL_IO_WR_CMD;
235 	io->offset = off;
236 	io->data = val;
237 	ethdev->drv_ctl(dev->netdev, &info);
238 }
239 
240 static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
241 {
242 	struct cnic_local *cp = dev->cnic_priv;
243 	struct cnic_eth_dev *ethdev = cp->ethdev;
244 	struct drv_ctl_info info;
245 	struct drv_ctl_io *io = &info.data.io;
246 
247 	info.cmd = DRV_CTL_IO_RD_CMD;
248 	io->offset = off;
249 	ethdev->drv_ctl(dev->netdev, &info);
250 	return io->data;
251 }
252 
253 static void cnic_ulp_ctl(struct cnic_dev *dev, int ulp_type, bool reg)
254 {
255 	struct cnic_local *cp = dev->cnic_priv;
256 	struct cnic_eth_dev *ethdev = cp->ethdev;
257 	struct drv_ctl_info info;
258 
259 	if (reg)
260 		info.cmd = DRV_CTL_ULP_REGISTER_CMD;
261 	else
262 		info.cmd = DRV_CTL_ULP_UNREGISTER_CMD;
263 
264 	info.data.ulp_type = ulp_type;
265 	ethdev->drv_ctl(dev->netdev, &info);
266 }
267 
268 static int cnic_in_use(struct cnic_sock *csk)
269 {
270 	return test_bit(SK_F_INUSE, &csk->flags);
271 }
272 
273 static void cnic_spq_completion(struct cnic_dev *dev, int cmd, u32 count)
274 {
275 	struct cnic_local *cp = dev->cnic_priv;
276 	struct cnic_eth_dev *ethdev = cp->ethdev;
277 	struct drv_ctl_info info;
278 
279 	info.cmd = cmd;
280 	info.data.credit.credit_count = count;
281 	ethdev->drv_ctl(dev->netdev, &info);
282 }
283 
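/* Reverse-map a hardware cid to its l5_cid (the index into ctx_tbl)
 * by linear search; returns -EINVAL if the cid is unknown.
 */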
284 static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
285 {
286 	u32 i;
287 
288 	for (i = 0; i < cp->max_cid_space; i++) {
289 		if (cp->ctx_tbl[i].cid == cid) {
290 			*l5_cid = i;
291 			return 0;
292 		}
293 	}
294 	return -EINVAL;
295 }
296 
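/* Send an iSCSI netlink event to the registered ULP.  With a non-NULL
 * @csk this builds an ISCSI_KEVENT_PATH_REQ carrying the destination
 * address and retries up to 3 times; otherwise a one-shot
 * ISCSI_KEVENT_IF_DOWN is sent.
 */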
297 static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
298 			   struct cnic_sock *csk)
299 {
300 	struct iscsi_path path_req;
301 	char *buf = NULL;
302 	u16 len = 0;
303 	u32 msg_type = ISCSI_KEVENT_IF_DOWN;
304 	struct cnic_ulp_ops *ulp_ops;
305 	struct cnic_uio_dev *udev = cp->udev;
306 	int rc = 0, retry = 0;
307 
308 	if (!udev || udev->uio_dev == -1)
309 		return -ENODEV;
310 
311 	if (csk) {
312 		len = sizeof(path_req);
313 		buf = (char *) &path_req;
314 		memset(&path_req, 0, len);
315 
316 		msg_type = ISCSI_KEVENT_PATH_REQ;
317 		path_req.handle = (u64) csk->l5_cid;
318 		if (test_bit(SK_F_IPV6, &csk->flags)) {
319 			memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
320 			       sizeof(struct in6_addr));
321 			path_req.ip_addr_len = 16;
322 		} else {
323 			memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
324 			       sizeof(struct in_addr));
325 			path_req.ip_addr_len = 4;
326 		}
327 		path_req.vlan_id = csk->vlan_id;
328 		path_req.pmtu = csk->mtu;
329 	}
330 
331 	while (retry < 3) {
332 		rc = 0;
333 		rcu_read_lock();
334 		ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
335 		if (ulp_ops)
336 			rc = ulp_ops->iscsi_nl_send_msg(
337 				cp->ulp_handle[CNIC_ULP_ISCSI],
338 				msg_type, buf, len);
339 		rcu_read_unlock();
340 		if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
341 			break;
342 
343 		msleep(100);
344 		retry++;
345 	}
346 	return rc;
347 }
348 
349 static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
350 
351 static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
352 				  char *buf, u16 len)
353 {
354 	int rc = -EINVAL;
355 
356 	switch (msg_type) {
357 	case ISCSI_UEVENT_PATH_UPDATE: {
358 		struct cnic_local *cp;
359 		u32 l5_cid;
360 		struct cnic_sock *csk;
361 		struct iscsi_path *path_resp;
362 
363 		if (len < sizeof(*path_resp))
364 			break;
365 
366 		path_resp = (struct iscsi_path *) buf;
367 		cp = dev->cnic_priv;
368 		l5_cid = (u32) path_resp->handle;
369 		if (l5_cid >= MAX_CM_SK_TBL_SZ)
370 			break;
371 
372 		rcu_read_lock();
373 		if (!rcu_dereference(cp->ulp_ops[CNIC_ULP_L4])) {
374 			rc = -ENODEV;
375 			rcu_read_unlock();
376 			break;
377 		}
378 		csk = &cp->csk_tbl[l5_cid];
379 		csk_hold(csk);
380 		if (cnic_in_use(csk) &&
381 		    test_bit(SK_F_CONNECT_START, &csk->flags)) {
382 
383 			memcpy(csk->ha, path_resp->mac_addr, ETH_ALEN);
384 			if (test_bit(SK_F_IPV6, &csk->flags))
385 				memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
386 				       sizeof(struct in6_addr));
387 			else
388 				memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
389 				       sizeof(struct in_addr));
390 
391 			if (is_valid_ether_addr(csk->ha)) {
392 				cnic_cm_set_pg(csk);
393 			} else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
394 				!test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
395 
396 				cnic_cm_upcall(cp, csk,
397 					L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
398 				clear_bit(SK_F_CONNECT_START, &csk->flags);
399 			}
400 		}
401 		csk_put(csk);
402 		rcu_read_unlock();
403 		rc = 0;
404 	}
405 	}
406 
407 	return rc;
408 }
409 
410 static int cnic_offld_prep(struct cnic_sock *csk)
411 {
412 	if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
413 		return 0;
414 
415 	if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
416 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
417 		return 0;
418 	}
419 
420 	return 1;
421 }
422 
423 static int cnic_close_prep(struct cnic_sock *csk)
424 {
425 	clear_bit(SK_F_CONNECT_START, &csk->flags);
426 	smp_mb__after_clear_bit();
427 
428 	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
429 		while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
430 			msleep(1);
431 
432 		return 1;
433 	}
434 	return 0;
435 }
436 
437 static int cnic_abort_prep(struct cnic_sock *csk)
438 {
439 	clear_bit(SK_F_CONNECT_START, &csk->flags);
440 	smp_mb__after_clear_bit();
441 
442 	while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
443 		msleep(1);
444 
445 	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
446 		csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
447 		return 1;
448 	}
449 
450 	return 0;
451 }
452 
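/* Register an upper layer protocol driver (iSCSI, FCoE or L4) with the
 * cnic core; its cnic_init() handler is called for every device already
 * on cnic_dev_list.  A minimal sketch of a caller (handler names here
 * are illustrative, not from this file):
 *
 *	static struct cnic_ulp_ops my_ulp_ops = {
 *		.cnic_init  = my_init,
 *		.cnic_start = my_start,
 *	};
 *	...
 *	err = cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
 */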
453 int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
454 {
455 	struct cnic_dev *dev;
456 
457 	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
458 		pr_err("%s: Bad type %d\n", __func__, ulp_type);
459 		return -EINVAL;
460 	}
461 	mutex_lock(&cnic_lock);
462 	if (cnic_ulp_tbl_prot(ulp_type)) {
463 		pr_err("%s: Type %d has already been registered\n",
464 		       __func__, ulp_type);
465 		mutex_unlock(&cnic_lock);
466 		return -EBUSY;
467 	}
468 
469 	read_lock(&cnic_dev_lock);
470 	list_for_each_entry(dev, &cnic_dev_list, list) {
471 		struct cnic_local *cp = dev->cnic_priv;
472 
473 		clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
474 	}
475 	read_unlock(&cnic_dev_lock);
476 
477 	atomic_set(&ulp_ops->ref_count, 0);
478 	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
479 	mutex_unlock(&cnic_lock);
480 
481 	/* Prevent race conditions with netdev_event */
482 	rtnl_lock();
483 	list_for_each_entry(dev, &cnic_dev_list, list) {
484 		struct cnic_local *cp = dev->cnic_priv;
485 
486 		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
487 			ulp_ops->cnic_init(dev);
488 	}
489 	rtnl_unlock();
490 
491 	return 0;
492 }
493 
494 int cnic_unregister_driver(int ulp_type)
495 {
496 	struct cnic_dev *dev;
497 	struct cnic_ulp_ops *ulp_ops;
498 	int i = 0;
499 
500 	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
501 		pr_err("%s: Bad type %d\n", __func__, ulp_type);
502 		return -EINVAL;
503 	}
504 	mutex_lock(&cnic_lock);
505 	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
506 	if (!ulp_ops) {
507 		pr_err("%s: Type %d has not been registered\n",
508 		       __func__, ulp_type);
509 		goto out_unlock;
510 	}
511 	read_lock(&cnic_dev_lock);
512 	list_for_each_entry(dev, &cnic_dev_list, list) {
513 		struct cnic_local *cp = dev->cnic_priv;
514 
515 		if (rcu_dereference_protected(cp->ulp_ops[ulp_type], lockdep_is_held(&cnic_lock))) {
516 			pr_err("%s: Type %d still has devices registered\n",
517 			       __func__, ulp_type);
518 			read_unlock(&cnic_dev_lock);
519 			goto out_unlock;
520 		}
521 	}
522 	read_unlock(&cnic_dev_lock);
523 
524 	RCU_INIT_POINTER(cnic_ulp_tbl[ulp_type], NULL);
525 
526 	mutex_unlock(&cnic_lock);
527 	synchronize_rcu();
528 	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
529 		msleep(100);
530 		i++;
531 	}
532 
533 	if (atomic_read(&ulp_ops->ref_count) != 0)
534 		pr_warn("%s: Failed waiting for ref count to go to zero\n", __func__);
535 	return 0;
536 
537 out_unlock:
538 	mutex_unlock(&cnic_lock);
539 	return -EINVAL;
540 }
541 
542 static int cnic_start_hw(struct cnic_dev *);
543 static void cnic_stop_hw(struct cnic_dev *);
544 
545 static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
546 				void *ulp_ctx)
547 {
548 	struct cnic_local *cp = dev->cnic_priv;
549 	struct cnic_ulp_ops *ulp_ops;
550 
551 	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
552 		pr_err("%s: Bad type %d\n", __func__, ulp_type);
553 		return -EINVAL;
554 	}
555 	mutex_lock(&cnic_lock);
556 	if (cnic_ulp_tbl_prot(ulp_type) == NULL) {
557 		pr_err("%s: Driver with type %d has not been registered\n",
558 		       __func__, ulp_type);
559 		mutex_unlock(&cnic_lock);
560 		return -EAGAIN;
561 	}
562 	if (rcu_dereference_protected(cp->ulp_ops[ulp_type], lockdep_is_held(&cnic_lock))) {
563 		pr_err("%s: Type %d has already been registered to this device\n",
564 		       __func__, ulp_type);
565 		mutex_unlock(&cnic_lock);
566 		return -EBUSY;
567 	}
568 
569 	clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
570 	cp->ulp_handle[ulp_type] = ulp_ctx;
571 	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
572 	rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
573 	cnic_hold(dev);
574 
575 	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
576 		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
577 			ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
578 
579 	mutex_unlock(&cnic_lock);
580 
581 	cnic_ulp_ctl(dev, ulp_type, true);
582 
583 	return 0;
584 
585 }
586 EXPORT_SYMBOL(cnic_register_driver);
587 
588 static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
589 {
590 	struct cnic_local *cp = dev->cnic_priv;
591 	int i = 0;
592 
593 	if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
594 		pr_err("%s: Bad type %d\n", __func__, ulp_type);
595 		return -EINVAL;
596 	}
597 	mutex_lock(&cnic_lock);
598 	if (rcu_dereference_protected(cp->ulp_ops[ulp_type], lockdep_is_held(&cnic_lock))) {
599 		RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
600 		cnic_put(dev);
601 	} else {
602 		pr_err("%s: device not registered to this ulp type %d\n",
603 		       __func__, ulp_type);
604 		mutex_unlock(&cnic_lock);
605 		return -EINVAL;
606 	}
607 	mutex_unlock(&cnic_lock);
608 
609 	if (ulp_type == CNIC_ULP_ISCSI)
610 		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
611 
612 	synchronize_rcu();
613 
614 	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
615 	       i < 20) {
616 		msleep(100);
617 		i++;
618 	}
619 	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
620 		netdev_warn(dev->netdev, "Failed waiting for ULP up call to complete\n");
621 
622 	cnic_ulp_ctl(dev, ulp_type, false);
623 
624 	return 0;
625 }
626 EXPORT_SYMBOL(cnic_unregister_driver);
627 
628 static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
629 			    u32 next)
630 {
631 	id_tbl->start = start_id;
632 	id_tbl->max = size;
633 	id_tbl->next = next;
634 	spin_lock_init(&id_tbl->lock);
635 	id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(unsigned long), GFP_KERNEL);
636 	if (!id_tbl->table)
637 		return -ENOMEM;
638 
639 	return 0;
640 }
641 
642 static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
643 {
644 	kfree(id_tbl->table);
645 	id_tbl->table = NULL;
646 }
647 
648 static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
649 {
650 	int ret = -1;
651 
652 	id -= id_tbl->start;
653 	if (id >= id_tbl->max)
654 		return ret;
655 
656 	spin_lock(&id_tbl->lock);
657 	if (!test_bit(id, id_tbl->table)) {
658 		set_bit(id, id_tbl->table);
659 		ret = 0;
660 	}
661 	spin_unlock(&id_tbl->lock);
662 	return ret;
663 }
664 
665 /* Allocate the next free id round-robin; returns -1 if the table is full (max must be a power of 2) */
666 static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
667 {
668 	u32 id;
669 
670 	spin_lock(&id_tbl->lock);
671 	id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
672 	if (id >= id_tbl->max) {
673 		id = -1;
674 		if (id_tbl->next != 0) {
675 			id = find_first_zero_bit(id_tbl->table, id_tbl->next);
676 			if (id >= id_tbl->next)
677 				id = -1;
678 		}
679 	}
680 
681 	if (id < id_tbl->max) {
682 		set_bit(id, id_tbl->table);
683 		id_tbl->next = (id + 1) & (id_tbl->max - 1);
684 		id += id_tbl->start;
685 	}
686 
687 	spin_unlock(&id_tbl->lock);
688 
689 	return id;
690 }
691 
692 static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
693 {
694 	if (id == -1)
695 		return;
696 
697 	id -= id_tbl->start;
698 	if (id >= id_tbl->max)
699 		return;
700 
701 	clear_bit(id, id_tbl->table);
702 }
703 
704 static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
705 {
706 	int i;
707 
708 	if (!dma->pg_arr)
709 		return;
710 
711 	for (i = 0; i < dma->num_pages; i++) {
712 		if (dma->pg_arr[i]) {
713 			dma_free_coherent(&dev->pcidev->dev, BCM_PAGE_SIZE,
714 					  dma->pg_arr[i], dma->pg_map_arr[i]);
715 			dma->pg_arr[i] = NULL;
716 		}
717 	}
718 	if (dma->pgtbl) {
719 		dma_free_coherent(&dev->pcidev->dev, dma->pgtbl_size,
720 				  dma->pgtbl, dma->pgtbl_map);
721 		dma->pgtbl = NULL;
722 	}
723 	kfree(dma->pg_arr);
724 	dma->pg_arr = NULL;
725 	dma->num_pages = 0;
726 }
727 
728 static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
729 {
730 	int i;
731 	__le32 *page_table = (__le32 *) dma->pgtbl;
732 
733 	for (i = 0; i < dma->num_pages; i++) {
734 		/* Each entry needs to be in big endian format. */
735 		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
736 		page_table++;
737 		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
738 		page_table++;
739 	}
740 }
741 
742 static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
743 {
744 	int i;
745 	__le32 *page_table = (__le32 *) dma->pgtbl;
746 
747 	for (i = 0; i < dma->num_pages; i++) {
748 		/* Each entry needs to be in little endian format. */
749 		*page_table = cpu_to_le32(dma->pg_map_arr[i] & 0xffffffff);
750 		page_table++;
751 		*page_table = cpu_to_le32((u64) dma->pg_map_arr[i] >> 32);
752 		page_table++;
753 	}
754 }
755 
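/* Allocate @pages coherent DMA pages into @dma and, when @use_pg_tbl
 * is set, a page table describing them, filled in by the chip-specific
 * cp->setup_pgtbl hook (cnic_setup_page_tbl or cnic_setup_page_tbl_le).
 * Everything allocated so far is freed on error.  Used e.g. as
 * cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1) below.
 */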
756 static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
757 			  int pages, int use_pg_tbl)
758 {
759 	int i, size;
760 	struct cnic_local *cp = dev->cnic_priv;
761 
762 	size = pages * (sizeof(void *) + sizeof(dma_addr_t));
763 	dma->pg_arr = kzalloc(size, GFP_ATOMIC);
764 	if (dma->pg_arr == NULL)
765 		return -ENOMEM;
766 
767 	dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
768 	dma->num_pages = pages;
769 
770 	for (i = 0; i < pages; i++) {
771 		dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
772 						    BCM_PAGE_SIZE,
773 						    &dma->pg_map_arr[i],
774 						    GFP_ATOMIC);
775 		if (dma->pg_arr[i] == NULL)
776 			goto error;
777 	}
778 	if (!use_pg_tbl)
779 		return 0;
780 
781 	dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
782 			  ~(BCM_PAGE_SIZE - 1);
783 	dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
784 					&dma->pgtbl_map, GFP_ATOMIC);
785 	if (dma->pgtbl == NULL)
786 		goto error;
787 
788 	cp->setup_pgtbl(dev, dma);
789 
790 	return 0;
791 
792 error:
793 	cnic_free_dma(dev, dma);
794 	return -ENOMEM;
795 }
796 
797 static void cnic_free_context(struct cnic_dev *dev)
798 {
799 	struct cnic_local *cp = dev->cnic_priv;
800 	int i;
801 
802 	for (i = 0; i < cp->ctx_blks; i++) {
803 		if (cp->ctx_arr[i].ctx) {
804 			dma_free_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
805 					  cp->ctx_arr[i].ctx,
806 					  cp->ctx_arr[i].mapping);
807 			cp->ctx_arr[i].ctx = NULL;
808 		}
809 	}
810 }
811 
812 static void __cnic_free_uio(struct cnic_uio_dev *udev)
813 {
814 	uio_unregister_device(&udev->cnic_uinfo);
815 
816 	if (udev->l2_buf) {
817 		dma_free_coherent(&udev->pdev->dev, udev->l2_buf_size,
818 				  udev->l2_buf, udev->l2_buf_map);
819 		udev->l2_buf = NULL;
820 	}
821 
822 	if (udev->l2_ring) {
823 		dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
824 				  udev->l2_ring, udev->l2_ring_map);
825 		udev->l2_ring = NULL;
826 	}
827 
828 	pci_dev_put(udev->pdev);
829 	kfree(udev);
830 }
831 
832 static void cnic_free_uio(struct cnic_uio_dev *udev)
833 {
834 	if (!udev)
835 		return;
836 
837 	write_lock(&cnic_dev_lock);
838 	list_del_init(&udev->list);
839 	write_unlock(&cnic_dev_lock);
840 	__cnic_free_uio(udev);
841 }
842 
843 static void cnic_free_resc(struct cnic_dev *dev)
844 {
845 	struct cnic_local *cp = dev->cnic_priv;
846 	struct cnic_uio_dev *udev = cp->udev;
847 
848 	if (udev) {
849 		udev->dev = NULL;
850 		cp->udev = NULL;
851 	}
852 
853 	cnic_free_context(dev);
854 	kfree(cp->ctx_arr);
855 	cp->ctx_arr = NULL;
856 	cp->ctx_blks = 0;
857 
858 	cnic_free_dma(dev, &cp->gbl_buf_info);
859 	cnic_free_dma(dev, &cp->kwq_info);
860 	cnic_free_dma(dev, &cp->kwq_16_data_info);
861 	cnic_free_dma(dev, &cp->kcq2.dma);
862 	cnic_free_dma(dev, &cp->kcq1.dma);
863 	kfree(cp->iscsi_tbl);
864 	cp->iscsi_tbl = NULL;
865 	kfree(cp->ctx_tbl);
866 	cp->ctx_tbl = NULL;
867 
868 	cnic_free_id_tbl(&cp->fcoe_cid_tbl);
869 	cnic_free_id_tbl(&cp->cid_tbl);
870 }
871 
872 static int cnic_alloc_context(struct cnic_dev *dev)
873 {
874 	struct cnic_local *cp = dev->cnic_priv;
875 
876 	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
877 		int i, k, arr_size;
878 
879 		cp->ctx_blk_size = BCM_PAGE_SIZE;
880 		cp->cids_per_blk = BCM_PAGE_SIZE / 128;
881 		arr_size = BNX2_MAX_CID / cp->cids_per_blk *
882 			   sizeof(struct cnic_ctx);
883 		cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
884 		if (cp->ctx_arr == NULL)
885 			return -ENOMEM;
886 
887 		k = 0;
888 		for (i = 0; i < 2; i++) {
889 			u32 j, reg, off, lo, hi;
890 
891 			if (i == 0)
892 				off = BNX2_PG_CTX_MAP;
893 			else
894 				off = BNX2_ISCSI_CTX_MAP;
895 
896 			reg = cnic_reg_rd_ind(dev, off);
897 			lo = reg >> 16;
898 			hi = reg & 0xffff;
899 			for (j = lo; j < hi && k < BNX2_MAX_CID / cp->cids_per_blk; j += cp->cids_per_blk, k++)
900 				cp->ctx_arr[k].cid = j;
901 		}
902 
903 		cp->ctx_blks = k;
904 		if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
905 			cp->ctx_blks = 0;
906 			return -ENOMEM;
907 		}
908 
909 		for (i = 0; i < cp->ctx_blks; i++) {
910 			cp->ctx_arr[i].ctx =
911 				dma_alloc_coherent(&dev->pcidev->dev,
912 						   BCM_PAGE_SIZE,
913 						   &cp->ctx_arr[i].mapping,
914 						   GFP_KERNEL);
915 			if (cp->ctx_arr[i].ctx == NULL)
916 				return -ENOMEM;
917 		}
918 	}
919 	return 0;
920 }
921 
922 static u16 cnic_bnx2_next_idx(u16 idx)
923 {
924 	return idx + 1;
925 }
926 
927 static u16 cnic_bnx2_hw_idx(u16 idx)
928 {
929 	return idx;
930 }
931 
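/* On bnx2x, the last entry of each KCQ page is reserved for the
 * next-page pointer (struct bnx2x_bd_chain_next), so the bnx2x index
 * helpers below skip over that position.
 */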
932 static u16 cnic_bnx2x_next_idx(u16 idx)
933 {
934 	idx++;
935 	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
936 		idx++;
937 
938 	return idx;
939 }
940 
941 static u16 cnic_bnx2x_hw_idx(u16 idx)
942 {
943 	if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
944 		idx++;
945 	return idx;
946 }
947 
948 static int cnic_alloc_kcq(struct cnic_dev *dev, struct kcq_info *info,
949 			  bool use_pg_tbl)
950 {
951 	int err, i, use_page_tbl = 0;
952 	struct kcqe **kcq;
953 
954 	if (use_pg_tbl)
955 		use_page_tbl = 1;
956 
957 	err = cnic_alloc_dma(dev, &info->dma, KCQ_PAGE_CNT, use_page_tbl);
958 	if (err)
959 		return err;
960 
961 	kcq = (struct kcqe **) info->dma.pg_arr;
962 	info->kcq = kcq;
963 
964 	info->next_idx = cnic_bnx2_next_idx;
965 	info->hw_idx = cnic_bnx2_hw_idx;
966 	if (use_pg_tbl)
967 		return 0;
968 
969 	info->next_idx = cnic_bnx2x_next_idx;
970 	info->hw_idx = cnic_bnx2x_hw_idx;
971 
972 	for (i = 0; i < KCQ_PAGE_CNT; i++) {
973 		struct bnx2x_bd_chain_next *next =
974 			(struct bnx2x_bd_chain_next *) &kcq[i][MAX_KCQE_CNT];
975 		int j = i + 1;
976 
977 		if (j >= KCQ_PAGE_CNT)
978 			j = 0;
979 		next->addr_hi = (u64) info->dma.pg_map_arr[j] >> 32;
980 		next->addr_lo = info->dma.pg_map_arr[j] & 0xffffffff;
981 	}
982 	return 0;
983 }
984 
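/* Allocate (or reuse) the uio-mapped L2 ring and buffer memory.  One
 * cnic_uio_dev is kept per PCI device on cnic_udev_list so that it can
 * be matched up again when the device is restarted.
 */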
985 static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
986 {
987 	struct cnic_local *cp = dev->cnic_priv;
988 	struct cnic_uio_dev *udev;
989 
990 	read_lock(&cnic_dev_lock);
991 	list_for_each_entry(udev, &cnic_udev_list, list) {
992 		if (udev->pdev == dev->pcidev) {
993 			udev->dev = dev;
994 			cp->udev = udev;
995 			read_unlock(&cnic_dev_lock);
996 			return 0;
997 		}
998 	}
999 	read_unlock(&cnic_dev_lock);
1000 
1001 	udev = kzalloc(sizeof(struct cnic_uio_dev), GFP_ATOMIC);
1002 	if (!udev)
1003 		return -ENOMEM;
1004 
1005 	udev->uio_dev = -1;
1006 
1007 	udev->dev = dev;
1008 	udev->pdev = dev->pcidev;
1009 	udev->l2_ring_size = pages * BCM_PAGE_SIZE;
1010 	udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
1011 					   &udev->l2_ring_map,
1012 					   GFP_KERNEL | __GFP_COMP);
1013 	if (!udev->l2_ring)
1014 		goto err_udev;
1015 
1016 	udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
1017 	udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
1018 	udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
1019 					  &udev->l2_buf_map,
1020 					  GFP_KERNEL | __GFP_COMP);
1021 	if (!udev->l2_buf)
1022 		goto err_dma;
1023 
1024 	write_lock(&cnic_dev_lock);
1025 	list_add(&udev->list, &cnic_udev_list);
1026 	write_unlock(&cnic_dev_lock);
1027 
1028 	pci_dev_get(udev->pdev);
1029 
1030 	cp->udev = udev;
1031 
1032 	return 0;
1033  err_dma:
1034 	dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
1035 			  udev->l2_ring, udev->l2_ring_map);
1036  err_udev:
1037 	kfree(udev);
1038 	return -ENOMEM;
1039 }
1040 
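/* Describe the register BAR, the status block, the L2 ring and the L2
 * buffer as uio memory maps and register the uio device once per udev.
 * If userspace already holds the uio device open, only the rings are
 * reinitialized.
 */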
1041 static int cnic_init_uio(struct cnic_dev *dev)
1042 {
1043 	struct cnic_local *cp = dev->cnic_priv;
1044 	struct cnic_uio_dev *udev = cp->udev;
1045 	struct uio_info *uinfo;
1046 	int ret = 0;
1047 
1048 	if (!udev)
1049 		return -ENOMEM;
1050 
1051 	uinfo = &udev->cnic_uinfo;
1052 
1053 	uinfo->mem[0].addr = dev->netdev->base_addr;
1054 	uinfo->mem[0].internal_addr = dev->regview;
1055 	uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
1056 	uinfo->mem[0].memtype = UIO_MEM_PHYS;
1057 
1058 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
1059 		uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
1060 					PAGE_MASK;
1061 		if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
1062 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
1063 		else
1064 			uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
1065 
1066 		uinfo->name = "bnx2_cnic";
1067 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
1068 		uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
1069 			PAGE_MASK;
1070 		uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
1071 
1072 		uinfo->name = "bnx2x_cnic";
1073 	}
1074 
1075 	uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
1076 
1077 	uinfo->mem[2].addr = (unsigned long) udev->l2_ring;
1078 	uinfo->mem[2].size = udev->l2_ring_size;
1079 	uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
1080 
1081 	uinfo->mem[3].addr = (unsigned long) udev->l2_buf;
1082 	uinfo->mem[3].size = udev->l2_buf_size;
1083 	uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
1084 
1085 	uinfo->version = CNIC_MODULE_VERSION;
1086 	uinfo->irq = UIO_IRQ_CUSTOM;
1087 
1088 	uinfo->open = cnic_uio_open;
1089 	uinfo->release = cnic_uio_close;
1090 
1091 	if (udev->uio_dev == -1) {
1092 		if (!uinfo->priv) {
1093 			uinfo->priv = udev;
1094 
1095 			ret = uio_register_device(&udev->pdev->dev, uinfo);
1096 		}
1097 	} else {
1098 		cnic_init_rings(dev);
1099 	}
1100 
1101 	return ret;
1102 }
1103 
1104 static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
1105 {
1106 	struct cnic_local *cp = dev->cnic_priv;
1107 	int ret;
1108 
1109 	ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
1110 	if (ret)
1111 		goto error;
1112 	cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
1113 
1114 	ret = cnic_alloc_kcq(dev, &cp->kcq1, true);
1115 	if (ret)
1116 		goto error;
1117 
1118 	ret = cnic_alloc_context(dev);
1119 	if (ret)
1120 		goto error;
1121 
1122 	ret = cnic_alloc_uio_rings(dev, 2);
1123 	if (ret)
1124 		goto error;
1125 
1126 	ret = cnic_init_uio(dev);
1127 	if (ret)
1128 		goto error;
1129 
1130 	return 0;
1131 
1132 error:
1133 	cnic_free_resc(dev);
1134 	return ret;
1135 }
1136 
1137 static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
1138 {
1139 	struct cnic_local *cp = dev->cnic_priv;
1140 	int ctx_blk_size = cp->ethdev->ctx_blk_size;
1141 	int total_mem, blks, i;
1142 
1143 	total_mem = BNX2X_CONTEXT_MEM_SIZE * cp->max_cid_space;
1144 	blks = total_mem / ctx_blk_size;
1145 	if (total_mem % ctx_blk_size)
1146 		blks++;
1147 
1148 	if (blks > cp->ethdev->ctx_tbl_len)
1149 		return -ENOMEM;
1150 
1151 	cp->ctx_arr = kcalloc(blks, sizeof(struct cnic_ctx), GFP_KERNEL);
1152 	if (cp->ctx_arr == NULL)
1153 		return -ENOMEM;
1154 
1155 	cp->ctx_blks = blks;
1156 	cp->ctx_blk_size = ctx_blk_size;
1157 	if (!BNX2X_CHIP_IS_57710(cp->chip_id))
1158 		cp->ctx_align = 0;
1159 	else
1160 		cp->ctx_align = ctx_blk_size;
1161 
1162 	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;
1163 
1164 	for (i = 0; i < blks; i++) {
1165 		cp->ctx_arr[i].ctx =
1166 			dma_alloc_coherent(&dev->pcidev->dev, cp->ctx_blk_size,
1167 					   &cp->ctx_arr[i].mapping,
1168 					   GFP_KERNEL);
1169 		if (cp->ctx_arr[i].ctx == NULL)
1170 			return -ENOMEM;
1171 
1172 		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
1173 			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
1174 				cnic_free_context(dev);
1175 				cp->ctx_blk_size += cp->ctx_align;
1176 				i = -1;
1177 				continue;
1178 			}
1179 		}
1180 	}
1181 	return 0;
1182 }
1183 
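/* Allocate all bnx2x-side resources: the iSCSI/FCoE context table, the
 * per-connection 16-byte KWQE data buffers, the KCQs, the global
 * buffer, the context memory and the uio rings.  cnic_free_resc()
 * unwinds everything on failure.
 */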
1184 static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1185 {
1186 	struct cnic_local *cp = dev->cnic_priv;
1187 	struct cnic_eth_dev *ethdev = cp->ethdev;
1188 	u32 start_cid = ethdev->starting_cid;
1189 	int i, j, n, ret, pages;
1190 	struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1191 
1192 	cp->iro_arr = ethdev->iro_arr;
1193 
1194 	cp->max_cid_space = MAX_ISCSI_TBL_SZ;
1195 	cp->iscsi_start_cid = start_cid;
1196 	cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1197 
1198 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
1199 		cp->max_cid_space += dev->max_fcoe_conn;
1200 		cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1201 		if (!cp->fcoe_init_cid)
1202 			cp->fcoe_init_cid = 0x10;
1203 	}
1204 
1205 	cp->iscsi_tbl = kcalloc(MAX_ISCSI_TBL_SZ, sizeof(struct cnic_iscsi),
1206 				GFP_KERNEL);
1207 	if (!cp->iscsi_tbl)
1208 		goto error;
1209 
1210 	cp->ctx_tbl = kcalloc(cp->max_cid_space, sizeof(struct cnic_context),
1211 			      GFP_KERNEL);
1212 	if (!cp->ctx_tbl)
1213 		goto error;
1214 
1215 	for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1216 		cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1217 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1218 	}
1219 
1220 	for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1221 		cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1222 
1223 	pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1224 		PAGE_SIZE;
1225 
1226 	ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1227 	if (ret)
1228 		goto error;
1229 
1230 	n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1231 	for (i = 0, j = 0; i < cp->max_cid_space; i++) {
1232 		long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1233 
1234 		cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1235 		cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1236 						   off;
1237 
1238 		if ((i % n) == (n - 1))
1239 			j++;
1240 	}
1241 
1242 	ret = cnic_alloc_kcq(dev, &cp->kcq1, false);
1243 	if (ret)
1244 		goto error;
1245 
1246 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
1247 		ret = cnic_alloc_kcq(dev, &cp->kcq2, true);
1248 		if (ret)
1249 			goto error;
1250 	}
1251 
1252 	pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
1253 	ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1254 	if (ret)
1255 		goto error;
1256 
1257 	ret = cnic_alloc_bnx2x_context(dev);
1258 	if (ret)
1259 		goto error;
1260 
1261 	cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1262 
1263 	cp->l2_rx_ring_size = 15;
1264 
1265 	ret = cnic_alloc_uio_rings(dev, 4);
1266 	if (ret)
1267 		goto error;
1268 
1269 	ret = cnic_init_uio(dev);
1270 	if (ret)
1271 		goto error;
1272 
1273 	return 0;
1274 
1275 error:
1276 	cnic_free_resc(dev);
1277 	return -ENOMEM;
1278 }
1279 
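/* Number of free slots in the kernel work queue.  max_kwq_idx doubles
 * as a power-of-2 ring mask, so (prod - con) & mask is the used count
 * even after the u16 indices wrap.
 */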
1280 static inline u32 cnic_kwq_avail(struct cnic_local *cp)
1281 {
1282 	return cp->max_kwq_idx -
1283 		((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
1284 }
1285 
1286 static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
1287 				  u32 num_wqes)
1288 {
1289 	struct cnic_local *cp = dev->cnic_priv;
1290 	struct kwqe *prod_qe;
1291 	u16 prod, sw_prod, i;
1292 
1293 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
1294 		return -EAGAIN;		/* bnx2 is down */
1295 
1296 	spin_lock_bh(&cp->cnic_ulp_lock);
1297 	if (num_wqes > cnic_kwq_avail(cp) &&
1298 	    !test_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags)) {
1299 		spin_unlock_bh(&cp->cnic_ulp_lock);
1300 		return -EAGAIN;
1301 	}
1302 
1303 	clear_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
1304 
1305 	prod = cp->kwq_prod_idx;
1306 	sw_prod = prod & MAX_KWQ_IDX;
1307 	for (i = 0; i < num_wqes; i++) {
1308 		prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
1309 		memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
1310 		prod++;
1311 		sw_prod = prod & MAX_KWQ_IDX;
1312 	}
1313 	cp->kwq_prod_idx = prod;
1314 
1315 	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
1316 
1317 	spin_unlock_bh(&cp->cnic_ulp_lock);
1318 	return 0;
1319 }
1320 
1321 static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1322 				   union l5cm_specific_data *l5_data)
1323 {
1324 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1325 	dma_addr_t map;
1326 
1327 	map = ctx->kwqe_data_mapping;
1328 	l5_data->phy_address.lo = (u64) map & 0xffffffff;
1329 	l5_data->phy_address.hi = (u64) map >> 32;
1330 	return ctx->kwqe_data;
1331 }
1332 
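/* Build a single 16-byte slow-path element for the bnx2x firmware and
 * submit it through the ethdev.  @l5_data typically points at the
 * per-connection buffer returned by cnic_get_kwqe_16_data().
 */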
1333 static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1334 				u32 type, union l5cm_specific_data *l5_data)
1335 {
1336 	struct cnic_local *cp = dev->cnic_priv;
1337 	struct l5cm_spe kwqe;
1338 	struct kwqe_16 *kwq[1];
1339 	u16 type_16;
1340 	int ret;
1341 
1342 	kwqe.hdr.conn_and_cmd_data =
1343 		cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1344 			     BNX2X_HW_CID(cp, cid)));
1345 
1346 	type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1347 	type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1348 		   SPE_HDR_FUNCTION_ID;
1349 
1350 	kwqe.hdr.type = cpu_to_le16(type_16);
1351 	kwqe.hdr.reserved1 = 0;
1352 	kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1353 	kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1354 
1355 	kwq[0] = (struct kwqe_16 *) &kwqe;
1356 
1357 	spin_lock_bh(&cp->cnic_ulp_lock);
1358 	ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1359 	spin_unlock_bh(&cp->cnic_ulp_lock);
1360 
1361 	if (ret == 1)
1362 		return 0;
1363 
1364 	return ret;
1365 }
1366 
1367 static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1368 				   struct kcqe *cqes[], u32 num_cqes)
1369 {
1370 	struct cnic_local *cp = dev->cnic_priv;
1371 	struct cnic_ulp_ops *ulp_ops;
1372 
1373 	rcu_read_lock();
1374 	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1375 	if (likely(ulp_ops)) {
1376 		ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1377 					  cqes, num_cqes);
1378 	}
1379 	rcu_read_unlock();
1380 }
1381 
1382 static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
1383 {
1384 	struct cnic_local *cp = dev->cnic_priv;
1385 	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
1386 	int hq_bds, pages;
1387 	u32 pfid = cp->pfid;
1388 
1389 	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
1390 	cp->num_ccells = req1->num_ccells_per_conn;
1391 	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
1392 			      cp->num_iscsi_tasks;
1393 	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
1394 			BNX2X_ISCSI_R2TQE_SIZE;
1395 	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
1396 	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1397 	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
1398 	cp->num_cqs = req1->num_cqs;
1399 
1400 	if (!dev->max_iscsi_conn)
1401 		return 0;
1402 
1403 	/* init Tstorm RAM */
1404 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1405 		  req1->rq_num_wqes);
1406 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1407 		  PAGE_SIZE);
1408 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
1409 		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1410 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1411 		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1412 		  req1->num_tasks_per_conn);
1413 
1414 	/* init Ustorm RAM */
1415 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1416 		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
1417 		  req1->rq_buffer_size);
1418 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1419 		  PAGE_SIZE);
1420 	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
1421 		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1422 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1423 		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1424 		  req1->num_tasks_per_conn);
1425 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
1426 		  req1->rq_num_wqes);
1427 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1428 		  req1->cq_num_wqes);
1429 	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1430 		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1431 
1432 	/* init Xstorm RAM */
1433 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1434 		  PAGE_SIZE);
1435 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1436 		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1437 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
1438 		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1439 		  req1->num_tasks_per_conn);
1440 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1441 		  hq_bds);
1442 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(pfid),
1443 		  req1->num_tasks_per_conn);
1444 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(pfid),
1445 		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);
1446 
1447 	/* init Cstorm RAM */
1448 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
1449 		  PAGE_SIZE);
1450 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
1451 		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
1452 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1453 		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
1454 		  req1->num_tasks_per_conn);
1455 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(pfid),
1456 		  req1->cq_num_wqes);
1457 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(pfid),
1458 		  hq_bds);
1459 
1460 	return 0;
1461 }
1462 
1463 static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
1464 {
1465 	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
1466 	struct cnic_local *cp = dev->cnic_priv;
1467 	u32 pfid = cp->pfid;
1468 	struct iscsi_kcqe kcqe;
1469 	struct kcqe *cqes[1];
1470 
1471 	memset(&kcqe, 0, sizeof(kcqe));
1472 	if (!dev->max_iscsi_conn) {
1473 		kcqe.completion_status =
1474 			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
1475 		goto done;
1476 	}
1477 
1478 	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1479 		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1480 	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
1481 		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1482 		req2->error_bit_map[1]);
1483 
1484 	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
1485 		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1486 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1487 		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid), req2->error_bit_map[0]);
1488 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
1489 		USTORM_ISCSI_ERROR_BITMAP_OFFSET(pfid) + 4,
1490 		req2->error_bit_map[1]);
1491 
1492 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
1493 		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(pfid), req2->max_cq_sqn);
1494 
1495 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1496 
1497 done:
1498 	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
1499 	cqes[0] = (struct kcqe *) &kcqe;
1500 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1501 
1502 	return 0;
1503 }
1504 
1505 static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1506 {
1507 	struct cnic_local *cp = dev->cnic_priv;
1508 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1509 
1510 	if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1511 		struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1512 
1513 		cnic_free_dma(dev, &iscsi->hq_info);
1514 		cnic_free_dma(dev, &iscsi->r2tq_info);
1515 		cnic_free_dma(dev, &iscsi->task_array_info);
1516 		cnic_free_id(&cp->cid_tbl, ctx->cid);
1517 	} else {
1518 		cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
1519 	}
1520 
1521 	ctx->cid = 0;
1522 }
1523 
1524 static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1525 {
1526 	u32 cid;
1527 	int ret, pages;
1528 	struct cnic_local *cp = dev->cnic_priv;
1529 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1530 	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1531 
1532 	if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1533 		cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1534 		if (cid == -1) {
1535 			ret = -ENOMEM;
1536 			goto error;
1537 		}
1538 		ctx->cid = cid;
1539 		return 0;
1540 	}
1541 
1542 	cid = cnic_alloc_new_id(&cp->cid_tbl);
1543 	if (cid == -1) {
1544 		ret = -ENOMEM;
1545 		goto error;
1546 	}
1547 
1548 	ctx->cid = cid;
1549 	pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
1550 
1551 	ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1552 	if (ret)
1553 		goto error;
1554 
1555 	pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
1556 	ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1557 	if (ret)
1558 		goto error;
1559 
1560 	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1561 	ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1562 	if (ret)
1563 		goto error;
1564 
1565 	return 0;
1566 
1567 error:
1568 	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1569 	return ret;
1570 }
1571 
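/* Return the virtual address, and the bus address through @ctx_addr,
 * of the context memory for @cid, compensating for any alignment
 * padding at the start of the context block.  Zeroes the context when
 * @init is set.
 */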
1572 static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
1573 				struct regpair *ctx_addr)
1574 {
1575 	struct cnic_local *cp = dev->cnic_priv;
1576 	struct cnic_eth_dev *ethdev = cp->ethdev;
1577 	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
1578 	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
1579 	unsigned long align_off = 0;
1580 	dma_addr_t ctx_map;
1581 	void *ctx;
1582 
1583 	if (cp->ctx_align) {
1584 		unsigned long mask = cp->ctx_align - 1;
1585 
1586 		if (cp->ctx_arr[blk].mapping & mask)
1587 			align_off = cp->ctx_align -
1588 				    (cp->ctx_arr[blk].mapping & mask);
1589 	}
1590 	ctx_map = cp->ctx_arr[blk].mapping + align_off +
1591 		(off * BNX2X_CONTEXT_MEM_SIZE);
1592 	ctx = cp->ctx_arr[blk].ctx + align_off +
1593 	      (off * BNX2X_CONTEXT_MEM_SIZE);
1594 	if (init)
1595 		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);
1596 
1597 	ctx_addr->lo = ctx_map & 0xffffffff;
1598 	ctx_addr->hi = (u64) ctx_map >> 32;
1599 	return ctx;
1600 }
1601 
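/* Program the per-connection iSCSI context (all four storm sections
 * plus the timers block) from the offload KWQEs.
 * req2->num_additional_wqes says how many extra KWQEs carry the
 * remaining CQ page-table entries.
 */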
1602 static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
1603 				u32 num)
1604 {
1605 	struct cnic_local *cp = dev->cnic_priv;
1606 	struct iscsi_kwqe_conn_offload1 *req1 =
1607 			(struct iscsi_kwqe_conn_offload1 *) wqes[0];
1608 	struct iscsi_kwqe_conn_offload2 *req2 =
1609 			(struct iscsi_kwqe_conn_offload2 *) wqes[1];
1610 	struct iscsi_kwqe_conn_offload3 *req3;
1611 	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
1612 	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1613 	u32 cid = ctx->cid;
1614 	u32 hw_cid = BNX2X_HW_CID(cp, cid);
1615 	struct iscsi_context *ictx;
1616 	struct regpair context_addr;
1617 	int i, j, n = 2, n_max;
1618 	u8 port = CNIC_PORT(cp);
1619 
1620 	ctx->ctx_flags = 0;
1621 	if (!req2->num_additional_wqes)
1622 		return -EINVAL;
1623 
1624 	n_max = req2->num_additional_wqes + 2;
1625 
1626 	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
1627 	if (ictx == NULL)
1628 		return -ENOMEM;
1629 
1630 	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1631 
1632 	ictx->xstorm_ag_context.hq_prod = 1;
1633 
1634 	ictx->xstorm_st_context.iscsi.first_burst_length =
1635 		ISCSI_DEF_FIRST_BURST_LEN;
1636 	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
1637 		ISCSI_DEF_MAX_RECV_SEG_LEN;
1638 	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
1639 		req1->sq_page_table_addr_lo;
1640 	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
1641 		req1->sq_page_table_addr_hi;
1642 	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
1643 	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
1644 	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
1645 		iscsi->hq_info.pgtbl_map & 0xffffffff;
1646 	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
1647 		(u64) iscsi->hq_info.pgtbl_map >> 32;
1648 	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
1649 		iscsi->hq_info.pgtbl[0];
1650 	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
1651 		iscsi->hq_info.pgtbl[1];
1652 	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
1653 		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1654 	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
1655 		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1656 	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
1657 		iscsi->r2tq_info.pgtbl[0];
1658 	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
1659 		iscsi->r2tq_info.pgtbl[1];
1660 	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
1661 		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1662 	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
1663 		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1664 	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
1665 		BNX2X_ISCSI_PBL_NOT_CACHED;
1666 	ictx->xstorm_st_context.iscsi.flags.flags |=
1667 		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
1668 	ictx->xstorm_st_context.iscsi.flags.flags |=
1669 		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;
1670 	ictx->xstorm_st_context.common.ethernet.reserved_vlan_type =
1671 		ETH_P_8021Q;
1672 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
1673 		cp->port_mode == CHIP_2_PORT_MODE) {
1674 
1675 		port = 0;
1676 	}
1677 	ictx->xstorm_st_context.common.flags =
1678 		1 << XSTORM_COMMON_CONTEXT_SECTION_PHYSQ_INITIALIZED_SHIFT;
1679 	ictx->xstorm_st_context.common.flags |=
1680 		port << XSTORM_COMMON_CONTEXT_SECTION_PBF_PORT_SHIFT;
1681 
1682 	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
1683 	/* TSTORM requires the RQ DB base address, not the PTE address */
1684 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
1685 		req2->rq_page_table_addr_lo & PAGE_MASK;
1686 	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
1687 		req2->rq_page_table_addr_hi;
1688 	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
1689 	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
1690 	ictx->tstorm_st_context.tcp.flags2 |=
1691 		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;
1692 	ictx->tstorm_st_context.tcp.ooo_support_mode =
1693 		TCP_TSTORM_OOO_DROP_AND_PROC_ACK;
1694 
1695 	ictx->timers_context.flags |= TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;
1696 
1697 	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
1698 		req2->rq_page_table_addr_lo;
1699 	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
1700 		req2->rq_page_table_addr_hi;
1701 	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
1702 	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
1703 	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
1704 		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
1705 	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
1706 		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
1707 	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
1708 		iscsi->r2tq_info.pgtbl[0];
1709 	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
1710 		iscsi->r2tq_info.pgtbl[1];
1711 	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
1712 		req1->cq_page_table_addr_lo;
1713 	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
1714 		req1->cq_page_table_addr_hi;
1715 	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
1716 	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
1717 	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
1718 	ictx->ustorm_st_context.task_pbe_cache_index =
1719 		BNX2X_ISCSI_PBL_NOT_CACHED;
1720 	ictx->ustorm_st_context.task_pdu_cache_index =
1721 		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;
1722 
1723 	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
1724 		if (j == 3) {
1725 			if (n >= n_max)
1726 				break;
1727 			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
1728 			j = 0;
1729 		}
1730 		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
1731 		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
1732 			req3->qp_first_pte[j].hi;
1733 		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
1734 			req3->qp_first_pte[j].lo;
1735 	}
1736 
1737 	ictx->ustorm_st_context.task_pbl_base.lo =
1738 		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1739 	ictx->ustorm_st_context.task_pbl_base.hi =
1740 		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1741 	ictx->ustorm_st_context.tce_phy_addr.lo =
1742 		iscsi->task_array_info.pgtbl[0];
1743 	ictx->ustorm_st_context.tce_phy_addr.hi =
1744 		iscsi->task_array_info.pgtbl[1];
1745 	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1746 	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
1747 	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
1748 	ictx->ustorm_st_context.negotiated_rx_and_flags |=
1749 		ISCSI_DEF_MAX_BURST_LEN;
1750 	ictx->ustorm_st_context.negotiated_rx |=
1751 		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
1752 		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;
1753 
1754 	ictx->cstorm_st_context.hq_pbl_base.lo =
1755 		iscsi->hq_info.pgtbl_map & 0xffffffff;
1756 	ictx->cstorm_st_context.hq_pbl_base.hi =
1757 		(u64) iscsi->hq_info.pgtbl_map >> 32;
1758 	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
1759 	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
1760 	ictx->cstorm_st_context.task_pbl_base.lo =
1761 		iscsi->task_array_info.pgtbl_map & 0xffffffff;
1762 	ictx->cstorm_st_context.task_pbl_base.hi =
1763 		(u64) iscsi->task_array_info.pgtbl_map >> 32;
1764 	/* CSTORM and USTORM initialization differ: CSTORM requires the
1765 	 * CQ DB base address, not the PTE address */
1766 	ictx->cstorm_st_context.cq_db_base.lo =
1767 		req1->cq_page_table_addr_lo & PAGE_MASK;
1768 	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
1769 	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
1770 	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
1771 	for (i = 0; i < cp->num_cqs; i++) {
1772 		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
1773 			ISCSI_INITIAL_SN;
1774 		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
1775 			ISCSI_INITIAL_SN;
1776 	}
1777 
1778 	ictx->xstorm_ag_context.cdu_reserved =
1779 		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
1780 				       ISCSI_CONNECTION_TYPE);
1781 	ictx->ustorm_ag_context.cdu_usage =
1782 		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
1783 				       ISCSI_CONNECTION_TYPE);
1784 	return 0;
1785 
1786 }
1787 
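/* Handle an iSCSI offload request spanning two or more KWQEs: reserve
 * a connection slot, allocate the per-connection DMA resources and
 * program the context.  Failures are reported back to the ULP in an
 * OFFLOAD_CONN KCQE rather than as an error return.
 */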
1788 static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1789 				   u32 num, int *work)
1790 {
1791 	struct iscsi_kwqe_conn_offload1 *req1;
1792 	struct iscsi_kwqe_conn_offload2 *req2;
1793 	struct cnic_local *cp = dev->cnic_priv;
1794 	struct cnic_context *ctx;
1795 	struct iscsi_kcqe kcqe;
1796 	struct kcqe *cqes[1];
1797 	u32 l5_cid;
1798 	int ret = 0;
1799 
1800 	if (num < 2) {
1801 		*work = num;
1802 		return -EINVAL;
1803 	}
1804 
1805 	req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1806 	req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1807 	if ((num - 2) < req2->num_additional_wqes) {
1808 		*work = num;
1809 		return -EINVAL;
1810 	}
1811 	*work = 2 + req2->num_additional_wqes;
1812 
1813 	l5_cid = req1->iscsi_conn_id;
1814 	if (l5_cid >= MAX_ISCSI_TBL_SZ)
1815 		return -EINVAL;
1816 
1817 	memset(&kcqe, 0, sizeof(kcqe));
1818 	kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1819 	kcqe.iscsi_conn_id = l5_cid;
1820 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1821 
1822 	ctx = &cp->ctx_tbl[l5_cid];
1823 	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags)) {
1824 		kcqe.completion_status =
1825 			ISCSI_KCQE_COMPLETION_STATUS_CID_BUSY;
1826 		goto done;
1827 	}
1828 
1829 	if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1830 		atomic_dec(&cp->iscsi_conn);
1831 		goto done;
1832 	}
1833 	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1834 	if (ret) {
1835 		atomic_dec(&cp->iscsi_conn);
1836 		ret = 0;
1837 		goto done;
1838 	}
1839 	ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1840 	if (ret < 0) {
1841 		cnic_free_bnx2x_conn_resc(dev, l5_cid);
1842 		atomic_dec(&cp->iscsi_conn);
1843 		goto done;
1844 	}
1845 
1846 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1847 	kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp, cp->ctx_tbl[l5_cid].cid);
1848 
1849 done:
1850 	cqes[0] = (struct kcqe *) &kcqe;
1851 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1852 	return 0;
1853 }
1854 
1856 static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1857 {
1858 	struct cnic_local *cp = dev->cnic_priv;
1859 	struct iscsi_kwqe_conn_update *req =
1860 		(struct iscsi_kwqe_conn_update *) kwqe;
1861 	void *data;
1862 	union l5cm_specific_data l5_data;
1863 	u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1864 	int ret;
1865 
1866 	if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1867 		return -EINVAL;
1868 
1869 	data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1870 	if (!data)
1871 		return -ENOMEM;
1872 
1873 	memcpy(data, kwqe, sizeof(struct kwqe));
1874 
1875 	ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1876 			req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1877 	return ret;
1878 }
1879 
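/* Issue a CFC delete ramrod for the connection and wait, with a
 * timeout, for the completion handler to set ctx->wait_cond.
 */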
1880 static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1881 {
1882 	struct cnic_local *cp = dev->cnic_priv;
1883 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1884 	union l5cm_specific_data l5_data;
1885 	int ret;
1886 	u32 hw_cid;
1887 
1888 	init_waitqueue_head(&ctx->waitq);
1889 	ctx->wait_cond = 0;
1890 	memset(&l5_data, 0, sizeof(l5_data));
1891 	hw_cid = BNX2X_HW_CID(cp, ctx->cid);
1892 
1893 	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1894 				  hw_cid, NONE_CONNECTION_TYPE, &l5_data);
1895 
1896 	if (ret == 0) {
1897 		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
1898 		if (unlikely(test_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags)))
1899 			return -EBUSY;
1900 	}
1901 
1902 	return 0;
1903 }
1904 
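/* Destroy an iSCSI connection.  If the context was used less than
 * 2 * HZ ago, the CFC delete is deferred to the delete_task worker
 * instead of blocking here; the DESTROY_CONN KCQE is reported as
 * SUCCESS in either case.
 */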
1905 static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
1906 {
1907 	struct cnic_local *cp = dev->cnic_priv;
1908 	struct iscsi_kwqe_conn_destroy *req =
1909 		(struct iscsi_kwqe_conn_destroy *) kwqe;
1910 	u32 l5_cid = req->reserved0;
1911 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1912 	int ret = 0;
1913 	struct iscsi_kcqe kcqe;
1914 	struct kcqe *cqes[1];
1915 
1916 	if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
1917 		goto skip_cfc_delete;
1918 
1919 	if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
1920 		unsigned long delta = ctx->timestamp + (2 * HZ) - jiffies;
1921 
1922 		if (delta > (2 * HZ))
1923 			delta = 0;
1924 
1925 		set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
1926 		queue_delayed_work(cnic_wq, &cp->delete_task, delta);
1927 		goto destroy_reply;
1928 	}
1929 
1930 	ret = cnic_bnx2x_destroy_ramrod(dev, l5_cid);
1931 
1932 skip_cfc_delete:
1933 	cnic_free_bnx2x_conn_resc(dev, l5_cid);
1934 
1935 	if (!ret) {
1936 		atomic_dec(&cp->iscsi_conn);
1937 		clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
1938 	}
1939 
1940 destroy_reply:
1941 	memset(&kcqe, 0, sizeof(kcqe));
1942 	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
1943 	kcqe.iscsi_conn_id = l5_cid;
1944 	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1945 	kcqe.iscsi_conn_context_id = req->context_id;
1946 
1947 	cqes[0] = (struct kcqe *) &kcqe;
1948 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1949 
1950 	return 0;
1951 }
1952 
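/* Fill in the xstorm/tstorm portions of the active connection buffer
 * from the connect KWQEs.  The pseudo-header checksum is precomputed
 * over the full 128-bit address fields, so IPv4 addresses rely on the
 * caller having zeroed the buffer to keep the upper words zero.
 */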
1953 static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
1954 				      struct l4_kwq_connect_req1 *kwqe1,
1955 				      struct l4_kwq_connect_req3 *kwqe3,
1956 				      struct l5cm_active_conn_buffer *conn_buf)
1957 {
1958 	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
1959 	struct l5cm_xstorm_conn_buffer *xstorm_buf =
1960 		&conn_buf->xstorm_conn_buffer;
1961 	struct l5cm_tstorm_conn_buffer *tstorm_buf =
1962 		&conn_buf->tstorm_conn_buffer;
1963 	struct regpair context_addr;
1964 	u32 cid = BNX2X_SW_CID(kwqe1->cid);
1965 	struct in6_addr src_ip, dst_ip;
1966 	int i;
1967 	u32 *addrp;
1968 
1969 	addrp = (u32 *) &conn_addr->local_ip_addr;
1970 	for (i = 0; i < 4; i++, addrp++)
1971 		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
1972 
1973 	addrp = (u32 *) &conn_addr->remote_ip_addr;
1974 	for (i = 0; i < 4; i++, addrp++)
1975 		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);
1976 
1977 	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);
1978 
1979 	xstorm_buf->context_addr.hi = context_addr.hi;
1980 	xstorm_buf->context_addr.lo = context_addr.lo;
1981 	xstorm_buf->mss = 0xffff;
1982 	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
1983 	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
1984 		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
1985 	xstorm_buf->pseudo_header_checksum =
1986 		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));
1987 
1988 	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
1989 		tstorm_buf->params |=
1990 			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
1991 	if (kwqe3->ka_timeout) {
1992 		tstorm_buf->ka_enable = 1;
1993 		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
1994 		tstorm_buf->ka_interval = kwqe3->ka_interval;
1995 		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
1996 	}
1997 	tstorm_buf->max_rt_time = 0xffffffff;
1998 }
1999 
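/* Program the local MAC address into the storm internal memories.
 * XSTORM takes the bytes in address order; TSTORM takes them in
 * reverse order via the LSB/MID/MSB variable offsets.
 */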
2000 static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
2001 {
2002 	struct cnic_local *cp = dev->cnic_priv;
2003 	u32 pfid = cp->pfid;
2004 	u8 *mac = dev->mac_addr;
2005 
2006 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2007 		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(pfid), mac[0]);
2008 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2009 		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(pfid), mac[1]);
2010 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2011 		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(pfid), mac[2]);
2012 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2013 		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(pfid), mac[3]);
2014 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2015 		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(pfid), mac[4]);
2016 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2017 		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(pfid), mac[5]);
2018 
2019 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2020 		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[5]);
2021 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2022 		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2023 		 mac[4]);
2024 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2025 		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid), mac[3]);
2026 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2027 		 TSTORM_ISCSI_TCP_VARS_MID_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2028 		 mac[2]);
2029 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2030 		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid), mac[1]);
2031 	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
2032 		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(pfid) + 1,
2033 		 mac[0]);
2034 }
2035 
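/* Set the per-function TCP flags in the storms.  Window scaling is
 * always enabled; the timestamp option is enabled only when requested
 * by the connect KWQE.
 */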
2036 static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
2037 {
2038 	struct cnic_local *cp = dev->cnic_priv;
2039 	u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
2040 	u16 tstorm_flags = 0;
2041 
2042 	if (tcp_ts) {
2043 		xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
2044 		tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
2045 	}
2046 
2047 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
2048 		 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), xstorm_flags);
2049 
2050 	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
2051 		  TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->pfid), tstorm_flags);
2052 }
2053 
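/* Handle an L4 TCP connect request.  The request is two KWQEs for
 * IPv4 or three for IPv6 (connect_req2 carries the upper address
 * words).  The connection buffer is built in the kwqe-16 data area
 * and handed to the L5CM TCP_CONNECT ramrod.
 */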
2054 static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
2055 			      u32 num, int *work)
2056 {
2057 	struct cnic_local *cp = dev->cnic_priv;
2058 	struct l4_kwq_connect_req1 *kwqe1 =
2059 		(struct l4_kwq_connect_req1 *) wqes[0];
2060 	struct l4_kwq_connect_req3 *kwqe3;
2061 	struct l5cm_active_conn_buffer *conn_buf;
2062 	struct l5cm_conn_addr_params *conn_addr;
2063 	union l5cm_specific_data l5_data;
2064 	u32 l5_cid = kwqe1->pg_cid;
2065 	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
2066 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2067 	int ret;
2068 
2069 	if (num < 2) {
2070 		*work = num;
2071 		return -EINVAL;
2072 	}
2073 
2074 	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
2075 		*work = 3;
2076 	else
2077 		*work = 2;
2078 
2079 	if (num < *work) {
2080 		*work = num;
2081 		return -EINVAL;
2082 	}
2083 
2084 	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
2085 		netdev_err(dev->netdev, "conn_buf size too big\n");
2086 		return -ENOMEM;
2087 	}
2088 	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2089 	if (!conn_buf)
2090 		return -ENOMEM;
2091 
2092 	memset(conn_buf, 0, sizeof(*conn_buf));
2093 
2094 	conn_addr = &conn_buf->conn_addr_buf;
2095 	conn_addr->remote_addr_0 = csk->ha[0];
2096 	conn_addr->remote_addr_1 = csk->ha[1];
2097 	conn_addr->remote_addr_2 = csk->ha[2];
2098 	conn_addr->remote_addr_3 = csk->ha[3];
2099 	conn_addr->remote_addr_4 = csk->ha[4];
2100 	conn_addr->remote_addr_5 = csk->ha[5];
2101 
2102 	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
2103 		struct l4_kwq_connect_req2 *kwqe2 =
2104 			(struct l4_kwq_connect_req2 *) wqes[1];
2105 
2106 		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
2107 		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
2108 		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;
2109 
2110 		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
2111 		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
2112 		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
2113 		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
2114 	}
2115 	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];
2116 
2117 	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
2118 	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
2119 	conn_addr->local_tcp_port = kwqe1->src_port;
2120 	conn_addr->remote_tcp_port = kwqe1->dst_port;
2121 
2122 	conn_addr->pmtu = kwqe3->pmtu;
2123 	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);
2124 
2125 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
2126 		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->pfid), csk->vlan_id);
2127 
2128 	cnic_bnx2x_set_tcp_timestamp(dev,
2129 		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);
2130 
2131 	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
2132 			kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2133 	if (!ret)
2134 		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2135 
2136 	return ret;
2137 }
2138 
2139 static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
2140 {
2141 	struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
2142 	union l5cm_specific_data l5_data;
2143 	int ret;
2144 
2145 	memset(&l5_data, 0, sizeof(l5_data));
2146 	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
2147 			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2148 	return ret;
2149 }
2150 
2151 static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
2152 {
2153 	struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
2154 	union l5cm_specific_data l5_data;
2155 	int ret;
2156 
2157 	memset(&l5_data, 0, sizeof(l5_data));
2158 	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
2159 			req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
2160 	return ret;
}

2162 static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2163 {
2164 	struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
2165 	struct l4_kcq kcqe;
2166 	struct kcqe *cqes[1];
2167 
2168 	memset(&kcqe, 0, sizeof(kcqe));
2169 	kcqe.pg_host_opaque = req->host_opaque;
2170 	kcqe.pg_cid = req->host_opaque;
2171 	kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
2172 	cqes[0] = (struct kcqe *) &kcqe;
2173 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2174 	return 0;
2175 }
2176 
2177 static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2178 {
2179 	struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
2180 	struct l4_kcq kcqe;
2181 	struct kcqe *cqes[1];
2182 
2183 	memset(&kcqe, 0, sizeof(kcqe));
2184 	kcqe.pg_host_opaque = req->pg_host_opaque;
2185 	kcqe.pg_cid = req->pg_cid;
2186 	kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
2187 	cqes[0] = (struct kcqe *) &kcqe;
2188 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
2189 	return 0;
2190 }
2191 
2192 static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2193 {
2194 	struct fcoe_kwqe_stat *req;
2195 	struct fcoe_stat_ramrod_params *fcoe_stat;
2196 	union l5cm_specific_data l5_data;
2197 	struct cnic_local *cp = dev->cnic_priv;
2198 	int ret;
2199 	u32 cid;
2200 
2201 	req = (struct fcoe_kwqe_stat *) kwqe;
2202 	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2203 
2204 	fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2205 	if (!fcoe_stat)
2206 		return -ENOMEM;
2207 
2208 	memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2209 	memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2210 
2211 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT_FUNC, cid,
2212 				  FCOE_CONNECTION_TYPE, &l5_data);
2213 	return ret;
2214 }
2215 
2216 static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2217 				 u32 num, int *work)
2218 {
2219 	int ret;
2220 	struct cnic_local *cp = dev->cnic_priv;
2221 	u32 cid;
2222 	struct fcoe_init_ramrod_params *fcoe_init;
2223 	struct fcoe_kwqe_init1 *req1;
2224 	struct fcoe_kwqe_init2 *req2;
2225 	struct fcoe_kwqe_init3 *req3;
2226 	union l5cm_specific_data l5_data;
2227 
2228 	if (num < 3) {
2229 		*work = num;
2230 		return -EINVAL;
2231 	}
2232 	req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2233 	req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2234 	req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2235 	if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2236 		*work = 1;
2237 		return -EINVAL;
2238 	}
2239 	if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2240 		*work = 2;
2241 		return -EINVAL;
2242 	}
2243 
2244 	if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2245 		netdev_err(dev->netdev, "fcoe_init size too big\n");
2246 		return -ENOMEM;
2247 	}
2248 	fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2249 	if (!fcoe_init)
2250 		return -ENOMEM;
2251 
2252 	memset(fcoe_init, 0, sizeof(*fcoe_init));
2253 	memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2254 	memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2255 	memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2256 	fcoe_init->eq_pbl_base.lo = cp->kcq2.dma.pgtbl_map & 0xffffffff;
2257 	fcoe_init->eq_pbl_base.hi = (u64) cp->kcq2.dma.pgtbl_map >> 32;
2258 	fcoe_init->eq_pbl_size = cp->kcq2.dma.num_pages;
2259 
2260 	fcoe_init->sb_num = cp->status_blk_num;
2261 	fcoe_init->eq_prod = MAX_KCQ_IDX;
2262 	fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2263 	cp->kcq2.sw_prod_idx = 0;
2264 
2265 	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2266 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT_FUNC, cid,
2267 				  FCOE_CONNECTION_TYPE, &l5_data);
2268 	*work = 3;
2269 	return ret;
2270 }
2271 
2272 static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2273 				 u32 num, int *work)
2274 {
2275 	int ret = 0;
2276 	u32 cid = -1, l5_cid;
2277 	struct cnic_local *cp = dev->cnic_priv;
2278 	struct fcoe_kwqe_conn_offload1 *req1;
2279 	struct fcoe_kwqe_conn_offload2 *req2;
2280 	struct fcoe_kwqe_conn_offload3 *req3;
2281 	struct fcoe_kwqe_conn_offload4 *req4;
2282 	struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2283 	struct cnic_context *ctx;
2284 	struct fcoe_context *fctx;
2285 	struct regpair ctx_addr;
2286 	union l5cm_specific_data l5_data;
2287 	struct fcoe_kcqe kcqe;
2288 	struct kcqe *cqes[1];
2289 
2290 	if (num < 4) {
2291 		*work = num;
2292 		return -EINVAL;
2293 	}
2294 	req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2295 	req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2296 	req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2297 	req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2298 
2299 	*work = 4;
2300 
2301 	l5_cid = req1->fcoe_conn_id;
2302 	if (l5_cid >= dev->max_fcoe_conn)
2303 		goto err_reply;
2304 
2305 	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2306 
2307 	ctx = &cp->ctx_tbl[l5_cid];
2308 	if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2309 		goto err_reply;
2310 
2311 	ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2312 	if (ret) {
2313 		ret = 0;
2314 		goto err_reply;
2315 	}
2316 	cid = ctx->cid;
2317 
2318 	fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2319 	if (fctx) {
2320 		u32 hw_cid = BNX2X_HW_CID(cp, cid);
2321 		u32 val;
2322 
2323 		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2324 					     FCOE_CONNECTION_TYPE);
2325 		fctx->xstorm_ag_context.cdu_reserved = val;
2326 		val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2327 					     FCOE_CONNECTION_TYPE);
2328 		fctx->ustorm_ag_context.cdu_usage = val;
2329 	}
2330 	if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2331 		netdev_err(dev->netdev, "fcoe_offload size too big\n");
2332 		goto err_reply;
2333 	}
2334 	fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2335 	if (!fcoe_offload)
2336 		goto err_reply;
2337 
2338 	memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2339 	memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2340 	memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2341 	memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2342 	memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2343 
2344 	cid = BNX2X_HW_CID(cp, cid);
2345 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2346 				  FCOE_CONNECTION_TYPE, &l5_data);
2347 	if (!ret)
2348 		set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2349 
2350 	return ret;
2351 
2352 err_reply:
2353 	if (cid != -1)
2354 		cnic_free_bnx2x_conn_resc(dev, l5_cid);
2355 
2356 	memset(&kcqe, 0, sizeof(kcqe));
2357 	kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2358 	kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2359 	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2360 
2361 	cqes[0] = (struct kcqe *) &kcqe;
2362 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2363 	return ret;
2364 }
2365 
2366 static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2367 {
2368 	struct fcoe_kwqe_conn_enable_disable *req;
2369 	struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2370 	union l5cm_specific_data l5_data;
2371 	int ret;
2372 	u32 cid, l5_cid;
2373 	struct cnic_local *cp = dev->cnic_priv;
2374 
2375 	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2376 	cid = req->context_id;
2377 	l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2378 
2379 	if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2380 		netdev_err(dev->netdev, "fcoe_enable size too big\n");
2381 		return -ENOMEM;
2382 	}
2383 	fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2384 	if (!fcoe_enable)
2385 		return -ENOMEM;
2386 
2387 	memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2388 	memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2389 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2390 				  FCOE_CONNECTION_TYPE, &l5_data);
2391 	return ret;
2392 }
2393 
2394 static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2395 {
2396 	struct fcoe_kwqe_conn_enable_disable *req;
2397 	struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2398 	union l5cm_specific_data l5_data;
2399 	int ret;
2400 	u32 cid, l5_cid;
2401 	struct cnic_local *cp = dev->cnic_priv;
2402 
2403 	req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2404 	cid = req->context_id;
2405 	l5_cid = req->conn_id;
2406 	if (l5_cid >= dev->max_fcoe_conn)
2407 		return -EINVAL;
2408 
2409 	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2410 
2411 	if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2412 		netdev_err(dev->netdev, "fcoe_disable size too big\n");
2413 		return -ENOMEM;
2414 	}
2415 	fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2416 	if (!fcoe_disable)
2417 		return -ENOMEM;
2418 
2419 	memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2420 	memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2421 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2422 				  FCOE_CONNECTION_TYPE, &l5_data);
2423 	return ret;
2424 }
2425 
2426 static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2427 {
2428 	struct fcoe_kwqe_conn_destroy *req;
2429 	union l5cm_specific_data l5_data;
2430 	int ret;
2431 	u32 cid, l5_cid;
2432 	struct cnic_local *cp = dev->cnic_priv;
2433 	struct cnic_context *ctx;
2434 	struct fcoe_kcqe kcqe;
2435 	struct kcqe *cqes[1];
2436 
2437 	req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2438 	cid = req->context_id;
2439 	l5_cid = req->conn_id;
2440 	if (l5_cid >= dev->max_fcoe_conn)
2441 		return -EINVAL;
2442 
2443 	l5_cid += BNX2X_FCOE_L5_CID_BASE;
2444 
2445 	ctx = &cp->ctx_tbl[l5_cid];
2446 
2447 	init_waitqueue_head(&ctx->waitq);
2448 	ctx->wait_cond = 0;
2449 
2450 	memset(&kcqe, 0, sizeof(kcqe));
2451 	kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_ERROR;
2452 	memset(&l5_data, 0, sizeof(l5_data));
2453 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2454 				  FCOE_CONNECTION_TYPE, &l5_data);
2455 	if (ret == 0) {
2456 		wait_event_timeout(ctx->waitq, ctx->wait_cond, CNIC_RAMROD_TMO);
2457 		if (ctx->wait_cond)
2458 			kcqe.completion_status = 0;
2459 	}
2460 
2461 	set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2462 	queue_delayed_work(cnic_wq, &cp->delete_task, msecs_to_jiffies(2000));
2463 
2464 	kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2465 	kcqe.fcoe_conn_id = req->conn_id;
2466 	kcqe.fcoe_conn_context_id = cid;
2467 
2468 	cqes[0] = (struct kcqe *) &kcqe;
2469 	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2470 	return ret;
2471 }
2472 
2473 static void cnic_bnx2x_delete_wait(struct cnic_dev *dev, u32 start_cid)
2474 {
2475 	struct cnic_local *cp = dev->cnic_priv;
2476 	u32 i;
2477 
2478 	for (i = start_cid; i < cp->max_cid_space; i++) {
2479 		struct cnic_context *ctx = &cp->ctx_tbl[i];
2480 		int j;
2481 
2482 		while (test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
2483 			msleep(10);
2484 
2485 		for (j = 0; j < 5; j++) {
2486 			if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2487 				break;
2488 			msleep(20);
2489 		}
2490 
2491 		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2492 			netdev_warn(dev->netdev, "CID %x not deleted\n",
2493 				   ctx->cid);
2494 	}
2495 }
2496 
2497 static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2498 {
2499 	struct fcoe_kwqe_destroy *req;
2500 	union l5cm_specific_data l5_data;
2501 	struct cnic_local *cp = dev->cnic_priv;
2502 	int ret;
2503 	u32 cid;
2504 
2505 	cnic_bnx2x_delete_wait(dev, MAX_ISCSI_TBL_SZ);
2506 
2507 	req = (struct fcoe_kwqe_destroy *) kwqe;
2508 	cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2509 
2510 	memset(&l5_data, 0, sizeof(l5_data));
2511 	ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY_FUNC, cid,
2512 				  FCOE_CONNECTION_TYPE, &l5_data);
2513 	return ret;
2514 }
2515 
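/* A KWQE could not be submitted (e.g. chip in parity error recovery).
 * Build the matching completion KCQE with a NIC_ERROR status and
 * deliver it to the owning ULP so it can clean up the request.
 */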
2516 static void cnic_bnx2x_kwqe_err(struct cnic_dev *dev, struct kwqe *kwqe)
2517 {
2518 	struct cnic_local *cp = dev->cnic_priv;
2519 	struct kcqe kcqe;
2520 	struct kcqe *cqes[1];
2521 	u32 cid;
2522 	u32 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2523 	u32 layer_code = kwqe->kwqe_op_flag & KWQE_LAYER_MASK;
2524 	u32 kcqe_op;
2525 	int ulp_type;
2526 
2527 	cid = kwqe->kwqe_info0;
2528 	memset(&kcqe, 0, sizeof(kcqe));
2529 
2530 	if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_FCOE) {
2531 		u32 l5_cid = 0;
2532 
2533 		ulp_type = CNIC_ULP_FCOE;
2534 		if (opcode == FCOE_KWQE_OPCODE_DISABLE_CONN) {
2535 			struct fcoe_kwqe_conn_enable_disable *req;
2536 
2537 			req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2538 			kcqe_op = FCOE_KCQE_OPCODE_DISABLE_CONN;
2539 			cid = req->context_id;
2540 			l5_cid = req->conn_id;
2541 		} else if (opcode == FCOE_KWQE_OPCODE_DESTROY) {
2542 			kcqe_op = FCOE_KCQE_OPCODE_DESTROY_FUNC;
2543 		} else {
2544 			return;
2545 		}
2546 		kcqe.kcqe_op_flag = kcqe_op << KCQE_FLAGS_OPCODE_SHIFT;
2547 		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_FCOE;
2548 		kcqe.kcqe_info1 = FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR;
2549 		kcqe.kcqe_info2 = cid;
2550 		kcqe.kcqe_info0 = l5_cid;
2551 
2552 	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L5_ISCSI) {
2553 		ulp_type = CNIC_ULP_ISCSI;
2554 		if (opcode == ISCSI_KWQE_OPCODE_UPDATE_CONN)
2555 			cid = kwqe->kwqe_info1;
2556 
2557 		kcqe.kcqe_op_flag = (opcode + 0x10) << KCQE_FLAGS_OPCODE_SHIFT;
2558 		kcqe.kcqe_op_flag |= KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
2559 		kcqe.kcqe_info1 = ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR;
2560 		kcqe.kcqe_info2 = cid;
2561 		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &kcqe.kcqe_info0);
2562 
2563 	} else if (layer_code == KWQE_FLAGS_LAYER_MASK_L4) {
2564 		struct l4_kcq *l4kcqe = (struct l4_kcq *) &kcqe;
2565 
2566 		ulp_type = CNIC_ULP_L4;
2567 		if (opcode == L4_KWQE_OPCODE_VALUE_CONNECT1)
2568 			kcqe_op = L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE;
2569 		else if (opcode == L4_KWQE_OPCODE_VALUE_RESET)
2570 			kcqe_op = L4_KCQE_OPCODE_VALUE_RESET_COMP;
2571 		else if (opcode == L4_KWQE_OPCODE_VALUE_CLOSE)
2572 			kcqe_op = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
2573 		else
2574 			return;
2575 
2576 		kcqe.kcqe_op_flag = (kcqe_op << KCQE_FLAGS_OPCODE_SHIFT) |
2577 				    KCQE_FLAGS_LAYER_MASK_L4;
2578 		l4kcqe->status = L4_KCQE_COMPLETION_STATUS_NIC_ERROR;
2579 		l4kcqe->cid = cid;
2580 		cnic_get_l5_cid(cp, BNX2X_SW_CID(cid), &l4kcqe->conn_id);
2581 	} else {
2582 		return;
2583 	}
2584 
2585 	cqes[0] = (struct kcqe *) &kcqe;
2586 	cnic_reply_bnx2x_kcqes(dev, ulp_type, cqes, 1);
2587 }
2588 
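/* Dispatch loop for iSCSI and L4 KWQEs.  Multi-WQE commands report
 * how many entries they consumed through 'work'; a failed KWQE is
 * logged but does not stop the processing of later entries.
 */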
2589 static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2590 					 struct kwqe *wqes[], u32 num_wqes)
2591 {
2592 	int i, work, ret;
2593 	u32 opcode;
2594 	struct kwqe *kwqe;
2595 
2596 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */
2598 
2599 	for (i = 0; i < num_wqes; ) {
2600 		kwqe = wqes[i];
2601 		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2602 		work = 1;
2603 
2604 		switch (opcode) {
2605 		case ISCSI_KWQE_OPCODE_INIT1:
2606 			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
2607 			break;
2608 		case ISCSI_KWQE_OPCODE_INIT2:
2609 			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
2610 			break;
2611 		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
2612 			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
2613 						     num_wqes - i, &work);
2614 			break;
2615 		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
2616 			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
2617 			break;
2618 		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
2619 			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
2620 			break;
2621 		case L4_KWQE_OPCODE_VALUE_CONNECT1:
2622 			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
2623 						 &work);
2624 			break;
2625 		case L4_KWQE_OPCODE_VALUE_CLOSE:
2626 			ret = cnic_bnx2x_close(dev, kwqe);
2627 			break;
2628 		case L4_KWQE_OPCODE_VALUE_RESET:
2629 			ret = cnic_bnx2x_reset(dev, kwqe);
2630 			break;
2631 		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
2632 			ret = cnic_bnx2x_offload_pg(dev, kwqe);
2633 			break;
2634 		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
2635 			ret = cnic_bnx2x_update_pg(dev, kwqe);
2636 			break;
2637 		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
2638 			ret = 0;
2639 			break;
2640 		default:
2641 			ret = 0;
2642 			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2643 				   opcode);
2644 			break;
2645 		}
2646 		if (ret < 0) {
2647 			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2648 				   opcode);
2649 
2650 			/* Possibly bnx2x parity error, send completion
2651 			 * to ulp drivers with error code to speed up
2652 			 * cleanup and reset recovery.
2653 			 */
2654 			if (ret == -EIO || ret == -EAGAIN)
2655 				cnic_bnx2x_kwqe_err(dev, kwqe);
2656 		}
2657 		i += work;
2658 	}
2659 	return 0;
2660 }
2661 
2662 static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2663 					struct kwqe *wqes[], u32 num_wqes)
2664 {
2665 	struct cnic_local *cp = dev->cnic_priv;
2666 	int i, work, ret;
2667 	u32 opcode;
2668 	struct kwqe *kwqe;
2669 
2670 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2x is down */
2672 
2673 	if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
2674 		return -EINVAL;
2675 
2676 	for (i = 0; i < num_wqes; ) {
2677 		kwqe = wqes[i];
2678 		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2679 		work = 1;
2680 
2681 		switch (opcode) {
2682 		case FCOE_KWQE_OPCODE_INIT1:
2683 			ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2684 						    num_wqes - i, &work);
2685 			break;
2686 		case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2687 			ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2688 						    num_wqes - i, &work);
2689 			break;
2690 		case FCOE_KWQE_OPCODE_ENABLE_CONN:
2691 			ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2692 			break;
2693 		case FCOE_KWQE_OPCODE_DISABLE_CONN:
2694 			ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2695 			break;
2696 		case FCOE_KWQE_OPCODE_DESTROY_CONN:
2697 			ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2698 			break;
2699 		case FCOE_KWQE_OPCODE_DESTROY:
2700 			ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2701 			break;
2702 		case FCOE_KWQE_OPCODE_STAT:
2703 			ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2704 			break;
2705 		default:
2706 			ret = 0;
2707 			netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2708 				   opcode);
2709 			break;
2710 		}
2711 		if (ret < 0) {
2712 			netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2713 				   opcode);
2714 
2715 			/* Possibly bnx2x parity error, send completion
2716 			 * to ulp drivers with error code to speed up
2717 			 * cleanup and reset recovery.
2718 			 */
2719 			if (ret == -EIO || ret == -EAGAIN)
2720 				cnic_bnx2x_kwqe_err(dev, kwqe);
2721 		}
2722 		i += work;
2723 	}
2724 	return 0;
2725 }
2726 
2727 static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2728 				   u32 num_wqes)
2729 {
2730 	int ret = -EINVAL;
2731 	u32 layer_code;
2732 
2733 	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2734 		return -EAGAIN;		/* bnx2x is down */
2735 
2736 	if (!num_wqes)
2737 		return 0;
2738 
2739 	layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2740 	switch (layer_code) {
2741 	case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2742 	case KWQE_FLAGS_LAYER_MASK_L4:
2743 	case KWQE_FLAGS_LAYER_MASK_L2:
2744 		ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2745 		break;
2746 
2747 	case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2748 		ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2749 		break;
2750 	}
2751 	return ret;
2752 }
2753 
2754 static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2755 {
2756 	if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2757 		return KCQE_FLAGS_LAYER_MASK_L4;
2758 
2759 	return opflag & KCQE_FLAGS_LAYER_MASK;
2760 }
2761 
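/* Deliver completed KCQEs to the ULP drivers.  Consecutive KCQEs
 * belonging to the same layer are batched into a single
 * indicate_kcqes() call, and any ramrod completions seen along the
 * way are returned to the SPQ credit pool afterwards.
 */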
2762 static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2763 {
2764 	struct cnic_local *cp = dev->cnic_priv;
2765 	int i, j, comp = 0;
2766 
2767 	i = 0;
2768 	j = 1;
2769 	while (num_cqes) {
2770 		struct cnic_ulp_ops *ulp_ops;
2771 		int ulp_type;
2772 		u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2773 		u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2774 
2775 		if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2776 			comp++;
2777 
2778 		while (j < num_cqes) {
2779 			u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2780 
2781 			if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2782 				break;
2783 
2784 			if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
2785 				comp++;
2786 			j++;
2787 		}
2788 
2789 		if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
2790 			ulp_type = CNIC_ULP_RDMA;
2791 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2792 			ulp_type = CNIC_ULP_ISCSI;
2793 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2794 			ulp_type = CNIC_ULP_FCOE;
2795 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2796 			ulp_type = CNIC_ULP_L4;
2797 		else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
2798 			goto end;
2799 		else {
2800 			netdev_err(dev->netdev, "Unknown type of KCQE(0x%x)\n",
2801 				   kcqe_op_flag);
2802 			goto end;
2803 		}
2804 
2805 		rcu_read_lock();
2806 		ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
2807 		if (likely(ulp_ops)) {
2808 			ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
2809 						  cp->completed_kcq + i, j);
2810 		}
2811 		rcu_read_unlock();
2812 end:
2813 		num_cqes -= j;
2814 		i += j;
2815 		j = 1;
2816 	}
2817 	if (unlikely(comp))
2818 		cnic_spq_completion(dev, DRV_CTL_RET_L5_SPQ_CREDIT_CMD, comp);
2819 }
2820 
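/* Gather completed KCQEs from the queue into cp->completed_kcq[].
 * KCQEs flagged with KCQE_FLAGS_NEXT continue into the following
 * entry, so sw_prod_idx is only advanced past fully assembled groups
 * and last_cnt excludes any trailing partial group.
 */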
2821 static int cnic_get_kcqes(struct cnic_dev *dev, struct kcq_info *info)
2822 {
2823 	struct cnic_local *cp = dev->cnic_priv;
2824 	u16 i, ri, hw_prod, last;
2825 	struct kcqe *kcqe;
2826 	int kcqe_cnt = 0, last_cnt = 0;
2827 
2828 	i = ri = last = info->sw_prod_idx;
2829 	ri &= MAX_KCQ_IDX;
2830 	hw_prod = *info->hw_prod_idx_ptr;
2831 	hw_prod = info->hw_idx(hw_prod);
2832 
2833 	while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
2834 		kcqe = &info->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
2835 		cp->completed_kcq[kcqe_cnt++] = kcqe;
2836 		i = info->next_idx(i);
2837 		ri = i & MAX_KCQ_IDX;
2838 		if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
2839 			last_cnt = kcqe_cnt;
2840 			last = i;
2841 		}
2842 	}
2843 
2844 	info->sw_prod_idx = last;
2845 	return last_cnt;
2846 }
2847 
2848 static int cnic_l2_completion(struct cnic_local *cp)
2849 {
2850 	u16 hw_cons, sw_cons;
2851 	struct cnic_uio_dev *udev = cp->udev;
2852 	union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
2853 					(udev->l2_ring + (2 * BCM_PAGE_SIZE));
2854 	u32 cmd;
2855 	int comp = 0;
2856 
2857 	if (!test_bit(CNIC_F_BNX2X_CLASS, &cp->dev->flags))
2858 		return 0;
2859 
2860 	hw_cons = *cp->rx_cons_ptr;
2861 	if ((hw_cons & BNX2X_MAX_RCQ_DESC_CNT) == BNX2X_MAX_RCQ_DESC_CNT)
2862 		hw_cons++;
2863 
2864 	sw_cons = cp->rx_cons;
2865 	while (sw_cons != hw_cons) {
2866 		u8 cqe_fp_flags;
2867 
2868 		cqe = &cqe_ring[sw_cons & BNX2X_MAX_RCQ_DESC_CNT];
2869 		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
2870 		if (cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE) {
2871 			cmd = le32_to_cpu(cqe->ramrod_cqe.conn_and_cmd_data);
2872 			cmd >>= COMMON_RAMROD_ETH_RX_CQE_CMD_ID_SHIFT;
2873 			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP ||
2874 			    cmd == RAMROD_CMD_ID_ETH_HALT)
2875 				comp++;
2876 		}
2877 		sw_cons = BNX2X_NEXT_RCQE(sw_cons);
2878 	}
2879 	return comp;
2880 }
2881 
2882 static void cnic_chk_pkt_rings(struct cnic_local *cp)
2883 {
2884 	u16 rx_cons, tx_cons;
2885 	int comp = 0;
2886 
2887 	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
2888 		return;
2889 
2890 	rx_cons = *cp->rx_cons_ptr;
2891 	tx_cons = *cp->tx_cons_ptr;
2892 	if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
2893 		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
2894 			comp = cnic_l2_completion(cp);
2895 
2896 		cp->tx_cons = tx_cons;
2897 		cp->rx_cons = rx_cons;
2898 
2899 		if (cp->udev)
2900 			uio_event_notify(&cp->udev->cnic_uinfo);
2901 	}
2902 	if (comp)
2903 		clear_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
2904 }
2905 
2906 static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2907 {
2908 	struct cnic_local *cp = dev->cnic_priv;
2909 	u32 status_idx = (u16) *cp->kcq1.status_idx_ptr;
2910 	int kcqe_cnt;
2911 
2912 	/* status block index must be read before reading other fields */
2913 	rmb();
2914 	cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2915 
2916 	while ((kcqe_cnt = cnic_get_kcqes(dev, &cp->kcq1))) {
2917 
2918 		service_kcqes(dev, kcqe_cnt);
2919 
2920 		/* Tell compiler that status_blk fields can change. */
2921 		barrier();
2922 		status_idx = (u16) *cp->kcq1.status_idx_ptr;
2923 		/* status block index must be read first */
2924 		rmb();
2925 		cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
2926 	}
2927 
2928 	CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx);
2929 
2930 	cnic_chk_pkt_rings(cp);
2931 
2932 	return status_idx;
2933 }
2934 
2935 static int cnic_service_bnx2(void *data, void *status_blk)
2936 {
2937 	struct cnic_dev *dev = data;
2938 
2939 	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2940 		struct status_block *sblk = status_blk;
2941 
2942 		return sblk->status_idx;
2943 	}
2944 
2945 	return cnic_service_bnx2_queues(dev);
2946 }
2947 
2948 static void cnic_service_bnx2_msix(unsigned long data)
2949 {
2950 	struct cnic_dev *dev = (struct cnic_dev *) data;
2951 	struct cnic_local *cp = dev->cnic_priv;
2952 
2953 	cp->last_status_idx = cnic_service_bnx2_queues(dev);
2954 
2955 	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
2956 		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
2957 }
2958 
2959 static void cnic_doirq(struct cnic_dev *dev)
2960 {
2961 	struct cnic_local *cp = dev->cnic_priv;
2962 
2963 	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2964 		u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2965 
2966 		prefetch(cp->status_blk.gen);
2967 		prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2968 
2969 		tasklet_schedule(&cp->cnic_irq_task);
2970 	}
2971 }
2972 
2973 static irqreturn_t cnic_irq(int irq, void *dev_instance)
2974 {
2975 	struct cnic_dev *dev = dev_instance;
2976 	struct cnic_local *cp = dev->cnic_priv;
2977 
2978 	if (cp->ack_int)
2979 		cp->ack_int(dev);
2980 
2981 	cnic_doirq(dev);
2982 
2983 	return IRQ_HANDLED;
2984 }
2985 
2986 static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
2987 				      u16 index, u8 op, u8 update)
2988 {
2989 	struct cnic_local *cp = dev->cnic_priv;
2990 	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
2991 		       COMMAND_REG_INT_ACK);
2992 	struct igu_ack_register igu_ack;
2993 
2994 	igu_ack.status_block_index = index;
2995 	igu_ack.sb_id_and_flags =
2996 			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
2997 			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
2998 			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
2999 			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
3000 
3001 	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
3002 }
3003 
3004 static void cnic_ack_igu_sb(struct cnic_dev *dev, u8 igu_sb_id, u8 segment,
3005 			    u16 index, u8 op, u8 update)
3006 {
3007 	struct igu_regular cmd_data;
3008 	u32 igu_addr = BAR_IGU_INTMEM + (IGU_CMD_INT_ACK_BASE + igu_sb_id) * 8;
3009 
3010 	cmd_data.sb_id_and_flags =
3011 		(index << IGU_REGULAR_SB_INDEX_SHIFT) |
3012 		(segment << IGU_REGULAR_SEGMENT_ACCESS_SHIFT) |
3013 		(update << IGU_REGULAR_BUPDATE_SHIFT) |
3014 		(op << IGU_REGULAR_ENABLE_INT_SHIFT);
3015 
3017 	CNIC_WR(dev, igu_addr, cmd_data.sb_id_and_flags);
3018 }
3019 
3020 static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
3021 {
3022 	struct cnic_local *cp = dev->cnic_priv;
3023 
3024 	cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, CSTORM_ID, 0,
3025 			   IGU_INT_DISABLE, 0);
3026 }
3027 
3028 static void cnic_ack_bnx2x_e2_msix(struct cnic_dev *dev)
3029 {
3030 	struct cnic_local *cp = dev->cnic_priv;
3031 
3032 	cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 0,
3033 			IGU_INT_DISABLE, 0);
3034 }
3035 
3036 static u32 cnic_service_bnx2x_kcq(struct cnic_dev *dev, struct kcq_info *info)
3037 {
3038 	u32 last_status = *info->status_idx_ptr;
3039 	int kcqe_cnt;
3040 
3041 	/* status block index must be read before reading the KCQ */
3042 	rmb();
3043 	while ((kcqe_cnt = cnic_get_kcqes(dev, info))) {
3044 
3045 		service_kcqes(dev, kcqe_cnt);
3046 
3047 		/* Tell compiler that sblk fields can change. */
3048 		barrier();
3049 
3050 		last_status = *info->status_idx_ptr;
3051 		/* status block index must be read before reading the KCQ */
3052 		rmb();
3053 	}
3054 	return last_status;
3055 }
3056 
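/* Interrupt bottom half for bnx2x.  kcq1 is always serviced; on E2
 * and later chips kcq2 is serviced as well, and the loop repeats
 * until the status index is stable across both queues before the IGU
 * interrupt is re-enabled.
 */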
3057 static void cnic_service_bnx2x_bh(unsigned long data)
3058 {
3059 	struct cnic_dev *dev = (struct cnic_dev *) data;
3060 	struct cnic_local *cp = dev->cnic_priv;
3061 	u32 status_idx, new_status_idx;
3062 
3063 	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
3064 		return;
3065 
3066 	while (1) {
3067 		status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
3068 
3069 		CNIC_WR16(dev, cp->kcq1.io_addr,
3070 			  cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
3071 
3072 		if (!BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
3073 			cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
3074 					   status_idx, IGU_INT_ENABLE, 1);
3075 			break;
3076 		}
3077 
3078 		new_status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
3079 
3080 		if (new_status_idx != status_idx)
3081 			continue;
3082 
3083 		CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
3084 			  MAX_KCQ_IDX);
3085 
3086 		cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
3087 				status_idx, IGU_INT_ENABLE, 1);
3088 
3089 		break;
3090 	}
3091 }
3092 
3093 static int cnic_service_bnx2x(void *data, void *status_blk)
3094 {
3095 	struct cnic_dev *dev = data;
3096 	struct cnic_local *cp = dev->cnic_priv;
3097 
3098 	if (!(cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
3099 		cnic_doirq(dev);
3100 
3101 	cnic_chk_pkt_rings(cp);
3102 
3103 	return 0;
3104 }
3105 
3106 static void cnic_ulp_stop_one(struct cnic_local *cp, int if_type)
3107 {
3108 	struct cnic_ulp_ops *ulp_ops;
3109 
3110 	if (if_type == CNIC_ULP_ISCSI)
3111 		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
3112 
3113 	mutex_lock(&cnic_lock);
3114 	ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3115 					    lockdep_is_held(&cnic_lock));
3116 	if (!ulp_ops) {
3117 		mutex_unlock(&cnic_lock);
3118 		return;
3119 	}
3120 	set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3121 	mutex_unlock(&cnic_lock);
3122 
3123 	if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3124 		ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
3125 
3126 	clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3127 }
3128 
3129 static void cnic_ulp_stop(struct cnic_dev *dev)
3130 {
3131 	struct cnic_local *cp = dev->cnic_priv;
3132 	int if_type;
3133 
3134 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++)
3135 		cnic_ulp_stop_one(cp, if_type);
3136 }
3137 
3138 static void cnic_ulp_start(struct cnic_dev *dev)
3139 {
3140 	struct cnic_local *cp = dev->cnic_priv;
3141 	int if_type;
3142 
3143 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
3144 		struct cnic_ulp_ops *ulp_ops;
3145 
3146 		mutex_lock(&cnic_lock);
3147 		ulp_ops = rcu_dereference_protected(cp->ulp_ops[if_type],
3148 						    lockdep_is_held(&cnic_lock));
3149 		if (!ulp_ops || !ulp_ops->cnic_start) {
3150 			mutex_unlock(&cnic_lock);
3151 			continue;
3152 		}
3153 		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3154 		mutex_unlock(&cnic_lock);
3155 
3156 		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
3157 			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
3158 
3159 		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
3160 	}
3161 }
3162 
3163 static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
3164 {
3165 	struct cnic_local *cp = dev->cnic_priv;
3166 	struct cnic_ulp_ops *ulp_ops;
3167 	int rc;
3168 
3169 	mutex_lock(&cnic_lock);
3170 	ulp_ops = cnic_ulp_tbl_prot(ulp_type);
3171 	if (ulp_ops && ulp_ops->cnic_get_stats)
3172 		rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
3173 	else
3174 		rc = -ENODEV;
3175 	mutex_unlock(&cnic_lock);
3176 	return rc;
3177 }
3178 
3179 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
3180 {
3181 	struct cnic_dev *dev = data;
3182 	int ulp_type = CNIC_ULP_ISCSI;
3183 
3184 	switch (info->cmd) {
3185 	case CNIC_CTL_STOP_CMD:
3186 		cnic_hold(dev);
3187 
3188 		cnic_ulp_stop(dev);
3189 		cnic_stop_hw(dev);
3190 
3191 		cnic_put(dev);
3192 		break;
3193 	case CNIC_CTL_START_CMD:
3194 		cnic_hold(dev);
3195 
3196 		if (!cnic_start_hw(dev))
3197 			cnic_ulp_start(dev);
3198 
3199 		cnic_put(dev);
3200 		break;
3201 	case CNIC_CTL_STOP_ISCSI_CMD: {
3202 		struct cnic_local *cp = dev->cnic_priv;
3203 		set_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags);
3204 		queue_delayed_work(cnic_wq, &cp->delete_task, 0);
3205 		break;
3206 	}
3207 	case CNIC_CTL_COMPLETION_CMD: {
3208 		struct cnic_ctl_completion *comp = &info->data.comp;
3209 		u32 cid = BNX2X_SW_CID(comp->cid);
3210 		u32 l5_cid;
3211 		struct cnic_local *cp = dev->cnic_priv;
3212 
3213 		if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
3214 			struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3215 
3216 			if (unlikely(comp->error)) {
3217 				set_bit(CTX_FL_CID_ERROR, &ctx->ctx_flags);
3218 				netdev_err(dev->netdev,
3219 					   "CID %x CFC delete comp error %x\n",
3220 					   cid, comp->error);
3221 			}
3222 
3223 			ctx->wait_cond = 1;
3224 			wake_up(&ctx->waitq);
3225 		}
3226 		break;
3227 	}
3228 	case CNIC_CTL_FCOE_STATS_GET_CMD:
3229 		ulp_type = CNIC_ULP_FCOE;
3230 		/* fall through */
3231 	case CNIC_CTL_ISCSI_STATS_GET_CMD:
3232 		cnic_hold(dev);
3233 		cnic_copy_ulp_stats(dev, ulp_type);
3234 		cnic_put(dev);
3235 		break;
3236 
3237 	default:
3238 		return -EINVAL;
3239 	}
3240 	return 0;
3241 }
3242 
3243 static void cnic_ulp_init(struct cnic_dev *dev)
3244 {
3245 	int i;
3246 	struct cnic_local *cp = dev->cnic_priv;
3247 
3248 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3249 		struct cnic_ulp_ops *ulp_ops;
3250 
3251 		mutex_lock(&cnic_lock);
3252 		ulp_ops = cnic_ulp_tbl_prot(i);
3253 		if (!ulp_ops || !ulp_ops->cnic_init) {
3254 			mutex_unlock(&cnic_lock);
3255 			continue;
3256 		}
3257 		ulp_get(ulp_ops);
3258 		mutex_unlock(&cnic_lock);
3259 
3260 		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3261 			ulp_ops->cnic_init(dev);
3262 
3263 		ulp_put(ulp_ops);
3264 	}
3265 }
3266 
3267 static void cnic_ulp_exit(struct cnic_dev *dev)
3268 {
3269 	int i;
3270 	struct cnic_local *cp = dev->cnic_priv;
3271 
3272 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
3273 		struct cnic_ulp_ops *ulp_ops;
3274 
3275 		mutex_lock(&cnic_lock);
3276 		ulp_ops = cnic_ulp_tbl_prot(i);
3277 		if (!ulp_ops || !ulp_ops->cnic_exit) {
3278 			mutex_unlock(&cnic_lock);
3279 			continue;
3280 		}
3281 		ulp_get(ulp_ops);
3282 		mutex_unlock(&cnic_lock);
3283 
3284 		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
3285 			ulp_ops->cnic_exit(dev);
3286 
3287 		ulp_put(ulp_ops);
3288 	}
3289 }
3290 
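/* Build an OFFLOAD_PG KWQE describing the L2 path for a connection:
 * destination and source MAC, ethertype, and optionally a VLAN tag,
 * which adds 4 bytes to the L2 header length.
 */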
3291 static int cnic_cm_offload_pg(struct cnic_sock *csk)
3292 {
3293 	struct cnic_dev *dev = csk->dev;
3294 	struct l4_kwq_offload_pg *l4kwqe;
3295 	struct kwqe *wqes[1];
3296 
3297 	l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
3298 	memset(l4kwqe, 0, sizeof(*l4kwqe));
3299 	wqes[0] = (struct kwqe *) l4kwqe;
3300 
3301 	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
3302 	l4kwqe->flags =
3303 		L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
3304 	l4kwqe->l2hdr_nbytes = ETH_HLEN;
3305 
3306 	l4kwqe->da0 = csk->ha[0];
3307 	l4kwqe->da1 = csk->ha[1];
3308 	l4kwqe->da2 = csk->ha[2];
3309 	l4kwqe->da3 = csk->ha[3];
3310 	l4kwqe->da4 = csk->ha[4];
3311 	l4kwqe->da5 = csk->ha[5];
3312 
3313 	l4kwqe->sa0 = dev->mac_addr[0];
3314 	l4kwqe->sa1 = dev->mac_addr[1];
3315 	l4kwqe->sa2 = dev->mac_addr[2];
3316 	l4kwqe->sa3 = dev->mac_addr[3];
3317 	l4kwqe->sa4 = dev->mac_addr[4];
3318 	l4kwqe->sa5 = dev->mac_addr[5];
3319 
3320 	l4kwqe->etype = ETH_P_IP;
3321 	l4kwqe->ipid_start = DEF_IPID_START;
3322 	l4kwqe->host_opaque = csk->l5_cid;
3323 
3324 	if (csk->vlan_id) {
3325 		l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
3326 		l4kwqe->vlan_tag = csk->vlan_id;
3327 		l4kwqe->l2hdr_nbytes += 4;
3328 	}
3329 
3330 	return dev->submit_kwqes(dev, wqes, 1);
3331 }
3332 
3333 static int cnic_cm_update_pg(struct cnic_sock *csk)
3334 {
3335 	struct cnic_dev *dev = csk->dev;
3336 	struct l4_kwq_update_pg *l4kwqe;
3337 	struct kwqe *wqes[1];
3338 
3339 	l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
3340 	memset(l4kwqe, 0, sizeof(*l4kwqe));
3341 	wqes[0] = (struct kwqe *) l4kwqe;
3342 
3343 	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
3344 	l4kwqe->flags =
3345 		L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
3346 	l4kwqe->pg_cid = csk->pg_cid;
3347 
3348 	l4kwqe->da0 = csk->ha[0];
3349 	l4kwqe->da1 = csk->ha[1];
3350 	l4kwqe->da2 = csk->ha[2];
3351 	l4kwqe->da3 = csk->ha[3];
3352 	l4kwqe->da4 = csk->ha[4];
3353 	l4kwqe->da5 = csk->ha[5];
3354 
3355 	l4kwqe->pg_host_opaque = csk->l5_cid;
3356 	l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
3357 
3358 	return dev->submit_kwqes(dev, wqes, 1);
3359 }
3360 
3361 static int cnic_cm_upload_pg(struct cnic_sock *csk)
3362 {
3363 	struct cnic_dev *dev = csk->dev;
3364 	struct l4_kwq_upload *l4kwqe;
3365 	struct kwqe *wqes[1];
3366 
3367 	l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
3368 	memset(l4kwqe, 0, sizeof(*l4kwqe));
3369 	wqes[0] = (struct kwqe *) l4kwqe;
3370 
3371 	l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
3372 	l4kwqe->flags =
3373 		L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
3374 	l4kwqe->cid = csk->pg_cid;
3375 
3376 	return dev->submit_kwqes(dev, wqes, 1);
3377 }
3378 
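/* Build the CONNECT KWQE chain for a socket: CONNECT1 + CONNECT3 for
 * IPv4, with CONNECT2 inserted for IPv6 to carry the upper address
 * words.  The MSS is derived from the path MTU minus the IP and TCP
 * header sizes.
 */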
3379 static int cnic_cm_conn_req(struct cnic_sock *csk)
3380 {
3381 	struct cnic_dev *dev = csk->dev;
3382 	struct l4_kwq_connect_req1 *l4kwqe1;
3383 	struct l4_kwq_connect_req2 *l4kwqe2;
3384 	struct l4_kwq_connect_req3 *l4kwqe3;
3385 	struct kwqe *wqes[3];
3386 	u8 tcp_flags = 0;
3387 	int num_wqes = 2;
3388 
3389 	l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
3390 	l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
3391 	l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
3392 	memset(l4kwqe1, 0, sizeof(*l4kwqe1));
3393 	memset(l4kwqe2, 0, sizeof(*l4kwqe2));
3394 	memset(l4kwqe3, 0, sizeof(*l4kwqe3));
3395 
3396 	l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
3397 	l4kwqe3->flags =
3398 		L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
3399 	l4kwqe3->ka_timeout = csk->ka_timeout;
3400 	l4kwqe3->ka_interval = csk->ka_interval;
3401 	l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
3402 	l4kwqe3->tos = csk->tos;
3403 	l4kwqe3->ttl = csk->ttl;
3404 	l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
3405 	l4kwqe3->pmtu = csk->mtu;
3406 	l4kwqe3->rcv_buf = csk->rcv_buf;
3407 	l4kwqe3->snd_buf = csk->snd_buf;
3408 	l4kwqe3->seed = csk->seed;
3409 
3410 	wqes[0] = (struct kwqe *) l4kwqe1;
3411 	if (test_bit(SK_F_IPV6, &csk->flags)) {
3412 		wqes[1] = (struct kwqe *) l4kwqe2;
3413 		wqes[2] = (struct kwqe *) l4kwqe3;
3414 		num_wqes = 3;
3415 
3416 		l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
3417 		l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
3418 		l4kwqe2->flags =
3419 			L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
3420 			L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
3421 		l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
3422 		l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
3423 		l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
3424 		l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
3425 		l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
3426 		l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
3427 		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
3428 			       sizeof(struct tcphdr);
3429 	} else {
3430 		wqes[1] = (struct kwqe *) l4kwqe3;
3431 		l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
3432 			       sizeof(struct tcphdr);
3433 	}
3434 
3435 	l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
3436 	l4kwqe1->flags =
3437 		(L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
3438 		 L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
3439 	l4kwqe1->cid = csk->cid;
3440 	l4kwqe1->pg_cid = csk->pg_cid;
3441 	l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
3442 	l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
3443 	l4kwqe1->src_port = be16_to_cpu(csk->src_port);
3444 	l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
3445 	if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
3446 		tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
3447 	if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
3448 		tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
3449 	if (csk->tcp_flags & SK_TCP_NAGLE)
3450 		tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
3451 	if (csk->tcp_flags & SK_TCP_TIMESTAMP)
3452 		tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
3453 	if (csk->tcp_flags & SK_TCP_SACK)
3454 		tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
3455 	if (csk->tcp_flags & SK_TCP_SEG_SCALING)
3456 		tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
3457 
3458 	l4kwqe1->tcp_flags = tcp_flags;
3459 
3460 	return dev->submit_kwqes(dev, wqes, num_wqes);
3461 }
3462 
3463 static int cnic_cm_close_req(struct cnic_sock *csk)
3464 {
3465 	struct cnic_dev *dev = csk->dev;
3466 	struct l4_kwq_close_req *l4kwqe;
3467 	struct kwqe *wqes[1];
3468 
3469 	l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
3470 	memset(l4kwqe, 0, sizeof(*l4kwqe));
3471 	wqes[0] = (struct kwqe *) l4kwqe;
3472 
3473 	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
3474 	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
3475 	l4kwqe->cid = csk->cid;
3476 
3477 	return dev->submit_kwqes(dev, wqes, 1);
3478 }
3479 
3480 static int cnic_cm_abort_req(struct cnic_sock *csk)
3481 {
3482 	struct cnic_dev *dev = csk->dev;
3483 	struct l4_kwq_reset_req *l4kwqe;
3484 	struct kwqe *wqes[1];
3485 
3486 	l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
3487 	memset(l4kwqe, 0, sizeof(*l4kwqe));
3488 	wqes[0] = (struct kwqe *) l4kwqe;
3489 
3490 	l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
3491 	l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
3492 	l4kwqe->cid = csk->cid;
3493 
3494 	return dev->submit_kwqes(dev, wqes, 1);
3495 }
3496 
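/* Claim a socket slot for a new L4 connection.  Fails with -EAGAIN
 * while a previous user still holds references (or the context is
 * still offloaded) and with -EBUSY if the slot is already in use.
 */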
3497 static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
3498 			  u32 l5_cid, struct cnic_sock **csk, void *context)
3499 {
3500 	struct cnic_local *cp = dev->cnic_priv;
3501 	struct cnic_sock *csk1;
3502 
3503 	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3504 		return -EINVAL;
3505 
3506 	if (cp->ctx_tbl) {
3507 		struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3508 
3509 		if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
3510 			return -EAGAIN;
3511 	}
3512 
3513 	csk1 = &cp->csk_tbl[l5_cid];
3514 	if (atomic_read(&csk1->ref_count))
3515 		return -EAGAIN;
3516 
3517 	if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
3518 		return -EBUSY;
3519 
3520 	csk1->dev = dev;
3521 	csk1->cid = cid;
3522 	csk1->l5_cid = l5_cid;
3523 	csk1->ulp_type = ulp_type;
3524 	csk1->context = context;
3525 
3526 	csk1->ka_timeout = DEF_KA_TIMEOUT;
3527 	csk1->ka_interval = DEF_KA_INTERVAL;
3528 	csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
3529 	csk1->tos = DEF_TOS;
3530 	csk1->ttl = DEF_TTL;
3531 	csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
3532 	csk1->rcv_buf = DEF_RCV_BUF;
3533 	csk1->snd_buf = DEF_SND_BUF;
3534 	csk1->seed = DEF_SEED;
3535 
3536 	*csk = csk1;
3537 	return 0;
3538 }
3539 
3540 static void cnic_cm_cleanup(struct cnic_sock *csk)
3541 {
3542 	if (csk->src_port) {
3543 		struct cnic_dev *dev = csk->dev;
3544 		struct cnic_local *cp = dev->cnic_priv;
3545 
3546 		cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
3547 		csk->src_port = 0;
3548 	}
3549 }
3550 
3551 static void cnic_close_conn(struct cnic_sock *csk)
3552 {
3553 	if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
3554 		cnic_cm_upload_pg(csk);
3555 		clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3556 	}
3557 	cnic_cm_cleanup(csk);
3558 }
3559 
3560 static int cnic_cm_destroy(struct cnic_sock *csk)
3561 {
3562 	if (!cnic_in_use(csk))
3563 		return -EINVAL;
3564 
3565 	csk_hold(csk);
3566 	clear_bit(SK_F_INUSE, &csk->flags);
3567 	smp_mb__after_clear_bit();
3568 	while (atomic_read(&csk->ref_count) != 1)
3569 		msleep(1);
3570 	cnic_cm_cleanup(csk);
3571 
3572 	csk->flags = 0;
3573 	csk_put(csk);
3574 	return 0;
3575 }
3576 
3577 static inline u16 cnic_get_vlan(struct net_device *dev,
3578 				struct net_device **vlan_dev)
3579 {
3580 	if (dev->priv_flags & IFF_802_1Q_VLAN) {
3581 		*vlan_dev = vlan_dev_real_dev(dev);
3582 		return vlan_dev_vlan_id(dev);
3583 	}
3584 	*vlan_dev = dev;
3585 	return 0;
3586 }
3587 
3588 static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
3589 			     struct dst_entry **dst)
3590 {
3591 #if defined(CONFIG_INET)
3592 	struct rtable *rt;
3593 
3594 	rt = ip_route_output(&init_net, dst_addr->sin_addr.s_addr, 0, 0, 0);
3595 	if (!IS_ERR(rt)) {
3596 		*dst = &rt->dst;
3597 		return 0;
3598 	}
3599 	return PTR_ERR(rt);
3600 #else
3601 	return -ENETUNREACH;
3602 #endif
3603 }
3604 
3605 static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
3606 			     struct dst_entry **dst)
3607 {
3608 #if defined(CONFIG_IPV6) || (defined(CONFIG_IPV6_MODULE) && defined(MODULE))
3609 	struct flowi6 fl6;
3610 
3611 	memset(&fl6, 0, sizeof(fl6));
3612 	fl6.daddr = dst_addr->sin6_addr;
3613 	if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL)
3614 		fl6.flowi6_oif = dst_addr->sin6_scope_id;
3615 
3616 	*dst = ip6_route_output(&init_net, NULL, &fl6);
	/* ip6_route_output() never returns NULL; failures are reported
	 * through the dst's error field, so check that instead of the
	 * pointer.
	 */
	if (!(*dst)->error)
		return 0;

	dst_release(*dst);
	*dst = NULL;
3619 #endif
3620 
3621 	return -ENETUNREACH;
3622 }
3623 
3624 static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
3625 					   int ulp_type)
3626 {
3627 	struct cnic_dev *dev = NULL;
3628 	struct dst_entry *dst;
3629 	struct net_device *netdev = NULL;
3630 	int err = -ENETUNREACH;
3631 
3632 	if (dst_addr->sin_family == AF_INET)
3633 		err = cnic_get_v4_route(dst_addr, &dst);
3634 	else if (dst_addr->sin_family == AF_INET6) {
3635 		struct sockaddr_in6 *dst_addr6 =
3636 			(struct sockaddr_in6 *) dst_addr;
3637 
3638 		err = cnic_get_v6_route(dst_addr6, &dst);
3639 	} else
3640 		return NULL;
3641 
3642 	if (err)
3643 		return NULL;
3644 
3645 	if (!dst->dev)
3646 		goto done;
3647 
3648 	cnic_get_vlan(dst->dev, &netdev);
3649 
3650 	dev = cnic_from_netdev(netdev);
3651 
3652 done:
3653 	dst_release(dst);
3654 	if (dev)
3655 		cnic_put(dev);
3656 	return dev;
3657 }
3658 
3659 static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3660 {
3661 	struct cnic_dev *dev = csk->dev;
3662 	struct cnic_local *cp = dev->cnic_priv;
3663 
3664 	return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
3665 }
3666 
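/* Resolve the route for a connection to pick up the VLAN id and path
 * MTU, then reserve a local port: the caller-supplied port is used if
 * it falls within [CNIC_LOCAL_PORT_MIN, CNIC_LOCAL_PORT_MAX),
 * otherwise a free port is allocated from the csk_port_tbl.
 */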
3667 static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3668 {
3669 	struct cnic_dev *dev = csk->dev;
3670 	struct cnic_local *cp = dev->cnic_priv;
3671 	int is_v6, rc = 0;
3672 	struct dst_entry *dst = NULL;
3673 	struct net_device *realdev;
3674 	__be16 local_port;
3675 	u32 port_id;
3676 
3677 	if (saddr->local.v6.sin6_family == AF_INET6 &&
3678 	    saddr->remote.v6.sin6_family == AF_INET6)
3679 		is_v6 = 1;
3680 	else if (saddr->local.v4.sin_family == AF_INET &&
3681 		 saddr->remote.v4.sin_family == AF_INET)
3682 		is_v6 = 0;
3683 	else
3684 		return -EINVAL;
3685 
3686 	clear_bit(SK_F_IPV6, &csk->flags);
3687 
3688 	if (is_v6) {
3689 		set_bit(SK_F_IPV6, &csk->flags);
3690 		cnic_get_v6_route(&saddr->remote.v6, &dst);
3691 
3692 		memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
3693 		       sizeof(struct in6_addr));
3694 		csk->dst_port = saddr->remote.v6.sin6_port;
3695 		local_port = saddr->local.v6.sin6_port;
3696 
3697 	} else {
3698 		cnic_get_v4_route(&saddr->remote.v4, &dst);
3699 
3700 		csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
3701 		csk->dst_port = saddr->remote.v4.sin_port;
3702 		local_port = saddr->local.v4.sin_port;
3703 	}
3704 
3705 	csk->vlan_id = 0;
3706 	csk->mtu = dev->netdev->mtu;
3707 	if (dst && dst->dev) {
3708 		u16 vlan = cnic_get_vlan(dst->dev, &realdev);
3709 		if (realdev == dev->netdev) {
3710 			csk->vlan_id = vlan;
3711 			csk->mtu = dst_mtu(dst);
3712 		}
3713 	}
3714 
3715 	port_id = be16_to_cpu(local_port);
3716 	if (port_id >= CNIC_LOCAL_PORT_MIN &&
3717 	    port_id < CNIC_LOCAL_PORT_MAX) {
3718 		if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3719 			port_id = 0;
3720 	} else
3721 		port_id = 0;
3722 
3723 	if (!port_id) {
3724 		port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3725 		if (port_id == -1) {
3726 			rc = -ENOMEM;
3727 			goto err_out;
3728 		}
3729 		local_port = cpu_to_be16(port_id);
3730 	}
3731 	csk->src_port = local_port;
3732 
3733 err_out:
3734 	dst_release(dst);
3735 	return rc;
3736 }
3737 
3738 static void cnic_init_csk_state(struct cnic_sock *csk)
3739 {
3740 	csk->state = 0;
3741 	clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3742 	clear_bit(SK_F_CLOSING, &csk->flags);
3743 }
3744 
3745 static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3746 {
3747 	struct cnic_local *cp = csk->dev->cnic_priv;
3748 	int err = 0;
3749 
3750 	if (cp->ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI)
3751 		return -EOPNOTSUPP;
3752 
3753 	if (!cnic_in_use(csk))
3754 		return -EINVAL;
3755 
3756 	if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
3757 		return -EINVAL;
3758 
3759 	cnic_init_csk_state(csk);
3760 
3761 	err = cnic_get_route(csk, saddr);
3762 	if (err)
3763 		goto err_out;
3764 
3765 	err = cnic_resolve_addr(csk, saddr);
3766 	if (!err)
3767 		return 0;
3768 
3769 err_out:
3770 	clear_bit(SK_F_CONNECT_START, &csk->flags);
3771 	return err;
3772 }
3773 
3774 static int cnic_cm_abort(struct cnic_sock *csk)
3775 {
3776 	struct cnic_local *cp = csk->dev->cnic_priv;
3777 	u32 opcode = L4_KCQE_OPCODE_VALUE_RESET_COMP;
3778 
3779 	if (!cnic_in_use(csk))
3780 		return -EINVAL;
3781 
3782 	if (cnic_abort_prep(csk))
3783 		return cnic_cm_abort_req(csk);
3784 
3785 	/* Getting here means that we haven't started the connect, or
3786 	 * the connect was not successful.
3787 	 */
3788 
3789 	cp->close_conn(csk, opcode);
3790 	if (csk->state != opcode)
3791 		return -EALREADY;
3792 
3793 	return 0;
3794 }
3795 
3796 static int cnic_cm_close(struct cnic_sock *csk)
3797 {
3798 	if (!cnic_in_use(csk))
3799 		return -EINVAL;
3800 
3801 	if (cnic_close_prep(csk)) {
3802 		csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
3803 		return cnic_cm_close_req(csk);
3804 	}
3805 	return -EALREADY;
3808 }
3809 
3810 static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
3811 			   u8 opcode)
3812 {
3813 	struct cnic_ulp_ops *ulp_ops;
3814 	int ulp_type = csk->ulp_type;
3815 
3816 	rcu_read_lock();
3817 	ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
3818 	if (ulp_ops) {
3819 		if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
3820 			ulp_ops->cm_connect_complete(csk);
3821 		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3822 			ulp_ops->cm_close_complete(csk);
3823 		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
3824 			ulp_ops->cm_remote_abort(csk);
3825 		else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
3826 			ulp_ops->cm_abort_complete(csk);
3827 		else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
3828 			ulp_ops->cm_remote_close(csk);
3829 	}
3830 	rcu_read_unlock();
3831 }
3832 
3833 static int cnic_cm_set_pg(struct cnic_sock *csk)
3834 {
3835 	if (cnic_offld_prep(csk)) {
3836 		if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
3837 			cnic_cm_update_pg(csk);
3838 		else
3839 			cnic_cm_offload_pg(csk);
3840 	}
3841 	return 0;
3842 }
3843 
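/* Handle OFFLOAD_PG/UPDATE_PG completions.  A context allocation
 * failure is reported to the ULP as a failed connect; on success the
 * PG cid is saved and the TCP connect request is issued.
 */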
3844 static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
3845 {
3846 	struct cnic_local *cp = dev->cnic_priv;
3847 	u32 l5_cid = kcqe->pg_host_opaque;
3848 	u8 opcode = kcqe->op_code;
3849 	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
3850 
3851 	csk_hold(csk);
3852 	if (!cnic_in_use(csk))
3853 		goto done;
3854 
3855 	if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3856 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3857 		goto done;
3858 	}
3859 	/* Possible PG kcqe status:  SUCCESS, OFFLOADED_PG, or CTX_ALLOC_FAIL */
3860 	if (kcqe->status == L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL) {
3861 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3862 		cnic_cm_upcall(cp, csk,
3863 			       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3864 		goto done;
3865 	}
3866 
3867 	csk->pg_cid = kcqe->pg_cid;
3868 	set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
3869 	cnic_cm_conn_req(csk);
3870 
3871 done:
3872 	csk_put(csk);
3873 }
3874 
3875 static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3876 {
3877 	struct cnic_local *cp = dev->cnic_priv;
3878 	struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3879 	u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3880 	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3881 
3882 	ctx->timestamp = jiffies;
3883 	ctx->wait_cond = 1;
3884 	wake_up(&ctx->waitq);
3885 }
3886 
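/* Dispatch one L4/L5 KCQE to the matching cnic_sock.  KCQEs with bit 7
 * set in the opcode carry the L5 connection id in the cid field rather
 * than in conn_id.
 */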
3887 static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3888 {
3889 	struct cnic_local *cp = dev->cnic_priv;
3890 	struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
3891 	u8 opcode = l4kcqe->op_code;
3892 	u32 l5_cid;
3893 	struct cnic_sock *csk;
3894 
3895 	if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3896 		cnic_process_fcoe_term_conn(dev, kcqe);
3897 		return;
3898 	}
3899 	if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3900 	    opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3901 		cnic_cm_process_offld_pg(dev, l4kcqe);
3902 		return;
3903 	}
3904 
3905 	l5_cid = l4kcqe->conn_id;
3906 	if (opcode & 0x80)
3907 		l5_cid = l4kcqe->cid;
3908 	if (l5_cid >= MAX_CM_SK_TBL_SZ)
3909 		return;
3910 
3911 	csk = &cp->csk_tbl[l5_cid];
3912 	csk_hold(csk);
3913 
3914 	if (!cnic_in_use(csk)) {
3915 		csk_put(csk);
3916 		return;
3917 	}
3918 
3919 	switch (opcode) {
3920 	case L5CM_RAMROD_CMD_ID_TCP_CONNECT:
3921 		if (l4kcqe->status != 0) {
3922 			clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3923 			cnic_cm_upcall(cp, csk,
3924 				       L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
3925 		}
3926 		break;
3927 	case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
3928 		if (l4kcqe->status == 0)
3929 			set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
3930 
3931 		smp_mb__before_clear_bit();
3932 		clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
3933 		cnic_cm_upcall(cp, csk, opcode);
3934 		break;
3935 
3936 	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
3937 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
3938 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3939 	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3940 	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
3941 		if (l4kcqe->status == L4_KCQE_COMPLETION_STATUS_NIC_ERROR)
3942 			set_bit(SK_F_HW_ERR, &csk->flags);
3943 
3944 		cp->close_conn(csk, opcode);
3945 		break;
3946 
3947 	case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
3948 		/* after we already sent CLOSE_REQ */
3949 		if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags) &&
3950 		    !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags) &&
3951 		    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
3952 			cp->close_conn(csk, L4_KCQE_OPCODE_VALUE_RESET_COMP);
3953 		else
3954 			cnic_cm_upcall(cp, csk, opcode);
3955 		break;
3956 	}
3957 	csk_put(csk);
3958 }
3959 
3960 static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
3961 {
3962 	struct cnic_dev *dev = data;
3963 	int i;
3964 
3965 	for (i = 0; i < num; i++)
3966 		cnic_cm_process_kcqe(dev, kcqe[i]);
3967 }
3968 
3969 static struct cnic_ulp_ops cm_ulp_ops = {
3970 	.indicate_kcqes		= cnic_cm_indicate_kcqe,
3971 };
3972 
3973 static void cnic_cm_free_mem(struct cnic_dev *dev)
3974 {
3975 	struct cnic_local *cp = dev->cnic_priv;
3976 
3977 	kfree(cp->csk_tbl);
3978 	cp->csk_tbl = NULL;
3979 	cnic_free_id_tbl(&cp->csk_port_tbl);
3980 }
3981 
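/* Allocate the socket table and the local port id table, seeding the
 * port allocator at a random offset within CNIC_LOCAL_PORT_RANGE.
 */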
3982 static int cnic_cm_alloc_mem(struct cnic_dev *dev)
3983 {
3984 	struct cnic_local *cp = dev->cnic_priv;
3985 	u32 port_id;
3986 
3987 	cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
3988 			      GFP_KERNEL);
3989 	if (!cp->csk_tbl)
3990 		return -ENOMEM;
3991 
3992 	port_id = random32();
3993 	port_id %= CNIC_LOCAL_PORT_RANGE;
3994 	if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
3995 			     CNIC_LOCAL_PORT_MIN, port_id)) {
3996 		cnic_cm_free_mem(dev);
3997 		return -ENOMEM;
3998 	}
3999 	return 0;
4000 }
4001 
4002 static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
4003 {
4004 	if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
4005 		/* Unsolicited RESET_COMP or RESET_RECEIVED */
4006 		opcode = L4_KCQE_OPCODE_VALUE_RESET_RECEIVED;
4007 		csk->state = opcode;
4008 	}
4009 
4010 	/* 1. If event opcode matches the expected event in csk->state
4011 	 * 2. If the expected event is CLOSE_COMP or RESET_COMP, we accept any
4012 	 *    event
4013 	 * 3. If the expected event is 0, meaning the connection was never
4014 	 *    established, we accept the opcode from cm_abort.
4015 	 */
4016 	if (opcode == csk->state || csk->state == 0 ||
4017 	    csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP ||
4018 	    csk->state == L4_KCQE_OPCODE_VALUE_RESET_COMP) {
4019 		if (!test_and_set_bit(SK_F_CLOSING, &csk->flags)) {
4020 			if (csk->state == 0)
4021 				csk->state = opcode;
4022 			return 1;
4023 		}
4024 	}
4025 	return 0;
4026 }
4027 
4028 static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
4029 {
4030 	struct cnic_dev *dev = csk->dev;
4031 	struct cnic_local *cp = dev->cnic_priv;
4032 
4033 	if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED) {
4034 		cnic_cm_upcall(cp, csk, opcode);
4035 		return;
4036 	}
4037 
4038 	clear_bit(SK_F_CONNECT_START, &csk->flags);
4039 	cnic_close_conn(csk);
4040 	csk->state = opcode;
4041 	cnic_cm_upcall(cp, csk, opcode);
4042 }
4043 
4044 static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
4045 {
4046 }
4047 
4048 static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
4049 {
4050 	u32 seed;
4051 
4052 	seed = random32();
4053 	cnic_ctx_wr(dev, 45, 0, seed);
4054 	return 0;
4055 }
4056 
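/* Drive the bnx2x teardown sequence: a close/reset completion first
 * triggers a SEARCHER_DELETE ramrod (or goes straight to completion if
 * the PG was never offloaded or the HW failed), SEARCHER_DELETE then
 * triggers TERMINATE_OFFLOAD, which finally completes the close and
 * notifies the ULP.
 */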
4057 static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
4058 {
4059 	struct cnic_dev *dev = csk->dev;
4060 	struct cnic_local *cp = dev->cnic_priv;
4061 	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
4062 	union l5cm_specific_data l5_data;
4063 	u32 cmd = 0;
4064 	int close_complete = 0;
4065 
4066 	switch (opcode) {
4067 	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
4068 	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
4069 	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
4070 		if (cnic_ready_to_close(csk, opcode)) {
4071 			if (test_bit(SK_F_HW_ERR, &csk->flags))
4072 				close_complete = 1;
4073 			else if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
4074 				cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
4075 			else
4076 				close_complete = 1;
4077 		}
4078 		break;
4079 	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
4080 		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
4081 		break;
4082 	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
4083 		close_complete = 1;
4084 		break;
4085 	}
4086 	if (cmd) {
4087 		memset(&l5_data, 0, sizeof(l5_data));
4088 
4089 		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
4090 				    &l5_data);
4091 	} else if (close_complete) {
4092 		ctx->timestamp = jiffies;
4093 		cnic_close_conn(csk);
4094 		cnic_cm_upcall(cp, csk, csk->state);
4095 	}
4096 }
4097 
4098 static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
4099 {
4100 	struct cnic_local *cp = dev->cnic_priv;
4101 
4102 	if (!cp->ctx_tbl)
4103 		return;
4104 
4105 	if (!netif_running(dev->netdev))
4106 		return;
4107 
4108 	cnic_bnx2x_delete_wait(dev, 0);
4109 
4110 	cancel_delayed_work(&cp->delete_task);
4111 	flush_workqueue(cnic_wq);
4112 
4113 	if (atomic_read(&cp->iscsi_conn) != 0)
4114 		netdev_warn(dev->netdev, "%d iSCSI connections not destroyed\n",
4115 			    atomic_read(&cp->iscsi_conn));
4116 }
4117 
4118 static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
4119 {
4120 	struct cnic_local *cp = dev->cnic_priv;
4121 	u32 pfid = cp->pfid;
4122 	u32 port = CNIC_PORT(cp);
4123 
4124 	cnic_init_bnx2x_mac(dev);
4125 	cnic_bnx2x_set_tcp_timestamp(dev, 1);
4126 
4127 	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
4128 		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(pfid), 0);
4129 
4130 	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4131 		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(port), 1);
4132 	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4133 		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(port),
4134 		DEF_MAX_DA_COUNT);
4135 
4136 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4137 		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(pfid), DEF_TTL);
4138 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4139 		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(pfid), DEF_TOS);
4140 	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
4141 		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(pfid), 2);
4142 	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
4143 		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(pfid), DEF_SWS_TIMER);
4144 
4145 	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(pfid),
4146 		DEF_MAX_CWND);
4147 	return 0;
4148 }
4149 
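/* Delayed work that finishes connection teardown: contexts flagged
 * CTX_FL_DELETE_WAIT are destroyed once they are at least 2 seconds
 * old, and the work reschedules itself every 10 ms while younger
 * ones remain.
 */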
4150 static void cnic_delete_task(struct work_struct *work)
4151 {
4152 	struct cnic_local *cp;
4153 	struct cnic_dev *dev;
4154 	u32 i;
4155 	int need_resched = 0;
4156 
4157 	cp = container_of(work, struct cnic_local, delete_task.work);
4158 	dev = cp->dev;
4159 
4160 	if (test_and_clear_bit(CNIC_LCL_FL_STOP_ISCSI, &cp->cnic_local_flags)) {
4161 		struct drv_ctl_info info;
4162 
4163 		cnic_ulp_stop_one(cp, CNIC_ULP_ISCSI);
4164 
4165 		info.cmd = DRV_CTL_ISCSI_STOPPED_CMD;
4166 		cp->ethdev->drv_ctl(dev->netdev, &info);
4167 	}
4168 
4169 	for (i = 0; i < cp->max_cid_space; i++) {
4170 		struct cnic_context *ctx = &cp->ctx_tbl[i];
4171 		int err;
4172 
4173 		if (!test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags) ||
4174 		    !test_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4175 			continue;
4176 
4177 		if (!time_after(jiffies, ctx->timestamp + (2 * HZ))) {
4178 			need_resched = 1;
4179 			continue;
4180 		}
4181 
4182 		if (!test_and_clear_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags))
4183 			continue;
4184 
4185 		err = cnic_bnx2x_destroy_ramrod(dev, i);
4186 
4187 		cnic_free_bnx2x_conn_resc(dev, i);
4188 		if (!err) {
4189 			if (ctx->ulp_proto_id == CNIC_ULP_ISCSI)
4190 				atomic_dec(&cp->iscsi_conn);
4191 
4192 			clear_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
4193 		}
4194 	}
4195 
4196 	if (need_resched)
4197 		queue_delayed_work(cnic_wq, &cp->delete_task,
4198 				   msecs_to_jiffies(10));
4199 
4200 }
4201 
4202 static int cnic_cm_open(struct cnic_dev *dev)
4203 {
4204 	struct cnic_local *cp = dev->cnic_priv;
4205 	int err;
4206 
4207 	err = cnic_cm_alloc_mem(dev);
4208 	if (err)
4209 		return err;
4210 
4211 	err = cp->start_cm(dev);
4212 
4213 	if (err)
4214 		goto err_out;
4215 
4216 	INIT_DELAYED_WORK(&cp->delete_task, cnic_delete_task);
4217 
4218 	dev->cm_create = cnic_cm_create;
4219 	dev->cm_destroy = cnic_cm_destroy;
4220 	dev->cm_connect = cnic_cm_connect;
4221 	dev->cm_abort = cnic_cm_abort;
4222 	dev->cm_close = cnic_cm_close;
4223 	dev->cm_select_dev = cnic_cm_select_dev;
4224 
4225 	cp->ulp_handle[CNIC_ULP_L4] = dev;
4226 	rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
4227 	return 0;
4228 
4229 err_out:
4230 	cnic_cm_free_mem(dev);
4231 	return err;
4232 }
4233 
4234 static int cnic_cm_shutdown(struct cnic_dev *dev)
4235 {
4236 	struct cnic_local *cp = dev->cnic_priv;
4237 	int i;
4238 
4239 	cp->stop_cm(dev);
4240 
4241 	if (!cp->csk_tbl)
4242 		return 0;
4243 
4244 	for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
4245 		struct cnic_sock *csk = &cp->csk_tbl[i];
4246 
4247 		clear_bit(SK_F_INUSE, &csk->flags);
4248 		cnic_cm_cleanup(csk);
4249 	}
4250 	cnic_cm_free_mem(dev);
4251 
4252 	return 0;
4253 }
4254 
4255 static void cnic_init_context(struct cnic_dev *dev, u32 cid)
4256 {
4257 	u32 cid_addr;
4258 	int i;
4259 
4260 	cid_addr = GET_CID_ADDR(cid);
4261 
4262 	for (i = 0; i < CTX_SIZE; i += 4)
4263 		cnic_ctx_wr(dev, cid_addr, i, 0);
4264 }
4265 
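/* On 5709 chips, program (or invalidate) the host page table entries
 * backing context memory, polling up to 10 times per entry for the
 * WRITE_REQ bit to clear.
 */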
4266 static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
4267 {
4268 	struct cnic_local *cp = dev->cnic_priv;
4269 	int ret = 0, i;
4270 	u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
4271 
4272 	if (CHIP_NUM(cp) != CHIP_NUM_5709)
4273 		return 0;
4274 
4275 	for (i = 0; i < cp->ctx_blks; i++) {
4276 		int j;
4277 		u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
4278 		u32 val;
4279 
4280 		memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
4281 
4282 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
4283 			(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
4284 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
4285 			(u64) cp->ctx_arr[i].mapping >> 32);
4286 		CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
4287 			BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
4288 		for (j = 0; j < 10; j++) {
4289 
4290 			val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
4291 			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
4292 				break;
4293 			udelay(5);
4294 		}
4295 		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
4296 			ret = -EBUSY;
4297 			break;
4298 		}
4299 	}
4300 	return ret;
4301 }
4302 
4303 static void cnic_free_irq(struct cnic_dev *dev)
4304 {
4305 	struct cnic_local *cp = dev->cnic_priv;
4306 	struct cnic_eth_dev *ethdev = cp->ethdev;
4307 
4308 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4309 		cp->disable_int_sync(dev);
4310 		tasklet_kill(&cp->cnic_irq_task);
4311 		free_irq(ethdev->irq_arr[0].vector, dev);
4312 	}
4313 }
4314 
4315 static int cnic_request_irq(struct cnic_dev *dev)
4316 {
4317 	struct cnic_local *cp = dev->cnic_priv;
4318 	struct cnic_eth_dev *ethdev = cp->ethdev;
4319 	int err;
4320 
4321 	err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0, "cnic", dev);
4322 	if (err)
4323 		tasklet_disable(&cp->cnic_irq_task);
4324 
4325 	return err;
4326 }
4327 
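/* Set up the cnic interrupt path.  In both the MSI-X and shared-IRQ
 * cases, coalescing is forced until the status block's completion
 * producer index resets to 0; if it never does, bail out with -EBUSY.
 */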
4328 static int cnic_init_bnx2_irq(struct cnic_dev *dev)
4329 {
4330 	struct cnic_local *cp = dev->cnic_priv;
4331 	struct cnic_eth_dev *ethdev = cp->ethdev;
4332 
4333 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4334 		int err, i = 0;
4335 		int sblk_num = cp->status_blk_num;
4336 		u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4337 			   BNX2_HC_SB_CONFIG_1;
4338 
4339 		CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4340 
4341 		CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
4342 		CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
4343 		CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
4344 
4345 		cp->last_status_idx = cp->status_blk.bnx2->status_idx;
4346 		tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2_msix,
4347 			     (unsigned long) dev);
4348 		err = cnic_request_irq(dev);
4349 		if (err)
4350 			return err;
4351 
4352 		while (cp->status_blk.bnx2->status_completion_producer_index &&
4353 		       i < 10) {
4354 			CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
4355 				1 << (11 + sblk_num));
4356 			udelay(10);
4357 			i++;
4358 			barrier();
4359 		}
4360 		if (cp->status_blk.bnx2->status_completion_producer_index) {
4361 			cnic_free_irq(dev);
4362 			goto failed;
4363 		}
4364 
4365 	} else {
4366 		struct status_block *sblk = cp->status_blk.gen;
4367 		u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
4368 		int i = 0;
4369 
4370 		while (sblk->status_completion_producer_index && i < 10) {
4371 			CNIC_WR(dev, BNX2_HC_COMMAND,
4372 				hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4373 			udelay(10);
4374 			i++;
4375 			barrier();
4376 		}
4377 		if (sblk->status_completion_producer_index)
4378 			goto failed;
4379 
4380 	}
4381 	return 0;
4382 
4383 failed:
4384 	netdev_err(dev->netdev, "KCQ index not resetting to 0\n");
4385 	return -EBUSY;
4386 }
4387 
4388 static void cnic_enable_bnx2_int(struct cnic_dev *dev)
4389 {
4390 	struct cnic_local *cp = dev->cnic_priv;
4391 	struct cnic_eth_dev *ethdev = cp->ethdev;
4392 
4393 	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4394 		return;
4395 
4396 	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4397 		BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
4398 }
4399 
4400 static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
4401 {
4402 	struct cnic_local *cp = dev->cnic_priv;
4403 	struct cnic_eth_dev *ethdev = cp->ethdev;
4404 
4405 	if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
4406 		return;
4407 
4408 	CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
4409 		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4410 	CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
4411 	synchronize_irq(ethdev->irq_arr[0].vector);
4412 }
4413 
4414 static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
4415 {
4416 	struct cnic_local *cp = dev->cnic_priv;
4417 	struct cnic_eth_dev *ethdev = cp->ethdev;
4418 	struct cnic_uio_dev *udev = cp->udev;
4419 	u32 cid_addr, tx_cid, sb_id;
4420 	u32 val, offset0, offset1, offset2, offset3;
4421 	int i;
4422 	struct tx_bd *txbd;
4423 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4424 	struct status_block *s_blk = cp->status_blk.gen;
4425 
4426 	sb_id = cp->status_blk_num;
4427 	tx_cid = 20;
4428 	cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
4429 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4430 		struct status_block_msix *sblk = cp->status_blk.bnx2;
4431 
4432 		tx_cid = TX_TSS_CID + sb_id - 1;
4433 		CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
4434 			(TX_TSS_CID << 7));
4435 		cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
4436 	}
4437 	cp->tx_cons = *cp->tx_cons_ptr;
4438 
4439 	cid_addr = GET_CID_ADDR(tx_cid);
4440 	if (CHIP_NUM(cp) == CHIP_NUM_5709) {
4441 		u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
4442 
4443 		for (i = 0; i < PHY_CTX_SIZE; i += 4)
4444 			cnic_ctx_wr(dev, cid_addr2, i, 0);
4445 
4446 		offset0 = BNX2_L2CTX_TYPE_XI;
4447 		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4448 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4449 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4450 	} else {
4451 		cnic_init_context(dev, tx_cid);
4452 		cnic_init_context(dev, tx_cid + 1);
4453 
4454 		offset0 = BNX2_L2CTX_TYPE;
4455 		offset1 = BNX2_L2CTX_CMD_TYPE;
4456 		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4457 		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4458 	}
4459 	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4460 	cnic_ctx_wr(dev, cid_addr, offset0, val);
4461 
4462 	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4463 	cnic_ctx_wr(dev, cid_addr, offset1, val);
4464 
4465 	txbd = udev->l2_ring;
4466 
4467 	buf_map = udev->l2_buf_map;
4468 	for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
4469 		txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
4470 		txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4471 	}
4472 	val = (u64) ring_map >> 32;
4473 	cnic_ctx_wr(dev, cid_addr, offset2, val);
4474 	txbd->tx_bd_haddr_hi = val;
4475 
4476 	val = (u64) ring_map & 0xffffffff;
4477 	cnic_ctx_wr(dev, cid_addr, offset3, val);
4478 	txbd->tx_bd_haddr_lo = val;
4479 }
4480 
4481 static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
4482 {
4483 	struct cnic_local *cp = dev->cnic_priv;
4484 	struct cnic_eth_dev *ethdev = cp->ethdev;
4485 	struct cnic_uio_dev *udev = cp->udev;
4486 	u32 cid_addr, sb_id, val, coal_reg, coal_val;
4487 	int i;
4488 	struct rx_bd *rxbd;
4489 	struct status_block *s_blk = cp->status_blk.gen;
4490 	dma_addr_t ring_map = udev->l2_ring_map;
4491 
4492 	sb_id = cp->status_blk_num;
4493 	cnic_init_context(dev, 2);
4494 	cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
4495 	coal_reg = BNX2_HC_COMMAND;
4496 	coal_val = CNIC_RD(dev, coal_reg);
4497 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4498 		struct status_block_msix *sblk = cp->status_blk.bnx2;
4499 
4500 		cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
4501 		coal_reg = BNX2_HC_COALESCE_NOW;
4502 		coal_val = 1 << (11 + sb_id);
4503 	}
4504 	i = 0;
4505 	while (*cp->rx_cons_ptr == 0 && i < 10) {
4506 		CNIC_WR(dev, coal_reg, coal_val);
4507 		udelay(10);
4508 		i++;
4509 		barrier();
4510 	}
4511 	cp->rx_cons = *cp->rx_cons_ptr;
4512 
4513 	cid_addr = GET_CID_ADDR(2);
4514 	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
4515 	      BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
4516 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
4517 
4518 	if (sb_id == 0)
4519 		val = 2 << BNX2_L2CTX_L2_STATUSB_NUM_SHIFT;
4520 	else
4521 		val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
4522 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
4523 
4524 	rxbd = udev->l2_ring + BCM_PAGE_SIZE;
4525 	for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
4526 		dma_addr_t buf_map;
4527 		int n = (i % cp->l2_rx_ring_size) + 1;
4528 
4529 		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4530 		rxbd->rx_bd_len = cp->l2_single_buf_size;
4531 		rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4532 		rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
4533 		rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
4534 	}
4535 	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4536 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
4537 	rxbd->rx_bd_haddr_hi = val;
4538 
4539 	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4540 	cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
4541 	rxbd->rx_bd_haddr_lo = val;
4542 
4543 	val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
4544 	cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
4545 }
4546 
4547 static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
4548 {
4549 	struct kwqe *wqes[1], l2kwqe;
4550 
4551 	memset(&l2kwqe, 0, sizeof(l2kwqe));
4552 	wqes[0] = &l2kwqe;
4553 	l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
4554 			      (L2_KWQE_OPCODE_VALUE_FLUSH <<
4555 			       KWQE_OPCODE_SHIFT) | 2;
4556 	dev->submit_kwqes(dev, wqes, 1);
4557 }
4558 
4559 static void cnic_set_bnx2_mac(struct cnic_dev *dev)
4560 {
4561 	struct cnic_local *cp = dev->cnic_priv;
4562 	u32 val;
4563 
4564 	val = cp->func << 2;
4565 
4566 	cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
4567 
4568 	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4569 			      BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
4570 	dev->mac_addr[0] = (u8) (val >> 8);
4571 	dev->mac_addr[1] = (u8) val;
4572 
4573 	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
4574 
4575 	val = cnic_reg_rd_ind(dev, cp->shmem_base +
4576 			      BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
4577 	dev->mac_addr[2] = (u8) (val >> 24);
4578 	dev->mac_addr[3] = (u8) (val >> 16);
4579 	dev->mac_addr[4] = (u8) (val >> 8);
4580 	dev->mac_addr[5] = (u8) val;
4581 
4582 	CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
4583 
4584 	val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
4585 	if (CHIP_NUM(cp) != CHIP_NUM_5709)
4586 		val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
4587 
4588 	CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
4589 	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
4590 	CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
4591 }
4592 
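/* One-time bnx2 bring-up: program the iSCSI MAC, size the kernel
 * bypass block, initialize the KWQ/KCQ contexts and the L2 rings,
 * then ring the CP and COM doorbells and enable the cnic IRQ.
 */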
4593 static int cnic_start_bnx2_hw(struct cnic_dev *dev)
4594 {
4595 	struct cnic_local *cp = dev->cnic_priv;
4596 	struct cnic_eth_dev *ethdev = cp->ethdev;
4597 	struct status_block *sblk = cp->status_blk.gen;
4598 	u32 val, kcq_cid_addr, kwq_cid_addr;
4599 	int err;
4600 
4601 	cnic_set_bnx2_mac(dev);
4602 
4603 	val = CNIC_RD(dev, BNX2_MQ_CONFIG);
4604 	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4605 	if (BCM_PAGE_BITS > 12)
4606 		val |= (12 - 8)  << 4;
4607 	else
4608 		val |= (BCM_PAGE_BITS - 8)  << 4;
4609 
4610 	CNIC_WR(dev, BNX2_MQ_CONFIG, val);
4611 
4612 	CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
4613 	CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
4614 	CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
4615 
4616 	err = cnic_setup_5709_context(dev, 1);
4617 	if (err)
4618 		return err;
4619 
4620 	cnic_init_context(dev, KWQ_CID);
4621 	cnic_init_context(dev, KCQ_CID);
4622 
4623 	kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
4624 	cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
4625 
4626 	cp->max_kwq_idx = MAX_KWQ_IDX;
4627 	cp->kwq_prod_idx = 0;
4628 	cp->kwq_con_idx = 0;
4629 	set_bit(CNIC_LCL_FL_KWQ_INIT, &cp->cnic_local_flags);
4630 
4631 	if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
4632 		cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
4633 	else
4634 		cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
4635 
4636 	/* Initialize the kernel work queue context. */
4637 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4638 	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4639 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
4640 
4641 	val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
4642 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4643 
4644 	val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
4645 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4646 
4647 	val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
4648 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4649 
4650 	val = (u32) cp->kwq_info.pgtbl_map;
4651 	cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4652 
4653 	kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
4654 	cp->kcq1.io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
4655 
4656 	cp->kcq1.sw_prod_idx = 0;
4657 	cp->kcq1.hw_prod_idx_ptr =
4658 		(u16 *) &sblk->status_completion_producer_index;
4659 
4660 	cp->kcq1.status_idx_ptr = (u16 *) &sblk->status_idx;
4661 
4662 	/* Initialize the kernel complete queue context. */
4663 	val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
4664 	      (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
4665 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
4666 
4667 	val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
4668 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
4669 
4670 	val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
4671 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
4672 
4673 	val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
4674 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
4675 
4676 	val = (u32) cp->kcq1.dma.pgtbl_map;
4677 	cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
4678 
4679 	cp->int_num = 0;
4680 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
4681 		struct status_block_msix *msblk = cp->status_blk.bnx2;
4682 		u32 sb_id = cp->status_blk_num;
4683 		u32 sb = BNX2_L2CTX_L5_STATUSB_NUM(sb_id);
4684 
4685 		cp->kcq1.hw_prod_idx_ptr =
4686 			(u16 *) &msblk->status_completion_producer_index;
4687 		cp->kcq1.status_idx_ptr = (u16 *) &msblk->status_idx;
4688 		cp->kwq_con_idx_ptr = (u16 *) &msblk->status_cmd_consumer_index;
4689 		cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
4690 		cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4691 		cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
4692 	}
4693 
4694 	/* Enable Command Scheduler notification when we write to the
4695 	 * host producer index of the kernel contexts. */
4696 	CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
4697 
4698 	/* Enable Command Scheduler notification when we write to either
4699 	 * the Send Queue or Receive Queue producer indexes of the kernel
4700 	 * bypass contexts. */
4701 	CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
4702 	CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
4703 
4704 	/* Notify COM when the driver posts an application buffer. */
4705 	CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
4706 
4707 	/* Set the CP and COM doorbells.  These two processors poll the
4708 	 * doorbell for a non-zero value before running.  This must be done
4709 	 * after setting up the kernel queue contexts. */
4710 	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
4711 	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
4712 
4713 	cnic_init_bnx2_tx_ring(dev);
4714 	cnic_init_bnx2_rx_ring(dev);
4715 
4716 	err = cnic_init_bnx2_irq(dev);
4717 	if (err) {
4718 		netdev_err(dev->netdev, "cnic_init_irq failed\n");
4719 		cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
4720 		cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
4721 		return err;
4722 	}
4723 
4724 	return 0;
4725 }
4726 
4727 static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
4728 {
4729 	struct cnic_local *cp = dev->cnic_priv;
4730 	struct cnic_eth_dev *ethdev = cp->ethdev;
4731 	u32 start_offset = ethdev->ctx_tbl_offset;
4732 	int i;
4733 
4734 	for (i = 0; i < cp->ctx_blks; i++) {
4735 		struct cnic_ctx *ctx = &cp->ctx_arr[i];
4736 		dma_addr_t map = ctx->mapping;
4737 
4738 		if (cp->ctx_align) {
4739 			unsigned long mask = cp->ctx_align - 1;
4740 
4741 			map = (map + mask) & ~mask;
4742 		}
4743 
4744 		cnic_ctx_tbl_wr(dev, start_offset + i, map);
4745 	}
4746 }
4747 
4748 static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
4749 {
4750 	struct cnic_local *cp = dev->cnic_priv;
4751 	struct cnic_eth_dev *ethdev = cp->ethdev;
4752 	int err = 0;
4753 
4754 	tasklet_init(&cp->cnic_irq_task, cnic_service_bnx2x_bh,
4755 		     (unsigned long) dev);
4756 	if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
4757 		err = cnic_request_irq(dev);
4758 
4759 	return err;
4760 }
4761 
4762 static inline void cnic_storm_memset_hc_disable(struct cnic_dev *dev,
4763 						u16 sb_id, u8 sb_index,
4764 						u8 disable)
4765 {
4766 
4767 	u32 addr = BAR_CSTRORM_INTMEM +
4768 			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4769 			offsetof(struct hc_status_block_data_e1x, index_data) +
4770 			sizeof(struct hc_index_data)*sb_index +
4771 			offsetof(struct hc_index_data, flags);
4772 	u16 flags = CNIC_RD16(dev, addr);
4773 	/* clear and set */
4774 	flags &= ~HC_INDEX_DATA_HC_ENABLED;
4775 	flags |= (((~disable) << HC_INDEX_DATA_HC_ENABLED_SHIFT) &
4776 		  HC_INDEX_DATA_HC_ENABLED);
4777 	CNIC_WR16(dev, addr, flags);
4778 }
4779 
4780 static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
4781 {
4782 	struct cnic_local *cp = dev->cnic_priv;
4783 	u8 sb_id = cp->status_blk_num;
4784 
4785 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
4786 			CSTORM_STATUS_BLOCK_DATA_OFFSET(sb_id) +
4787 			offsetof(struct hc_status_block_data_e1x, index_data) +
4788 			sizeof(struct hc_index_data)*HC_INDEX_ISCSI_EQ_CONS +
4789 			offsetof(struct hc_index_data, timeout), 64 / 4);
4790 	cnic_storm_memset_hc_disable(dev, sb_id, HC_INDEX_ISCSI_EQ_CONS, 0);
4791 }
4792 
4793 static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
4794 {
4795 }
4796 
4797 static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4798 				    struct client_init_ramrod_data *data)
4799 {
4800 	struct cnic_local *cp = dev->cnic_priv;
4801 	struct cnic_uio_dev *udev = cp->udev;
4802 	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) udev->l2_ring;
4803 	dma_addr_t buf_map, ring_map = udev->l2_ring_map;
4804 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4805 	int i;
4806 	u32 cli = cp->ethdev->iscsi_l2_client_id;
4807 	u32 val;
4808 
4809 	memset(txbd, 0, BCM_PAGE_SIZE);
4810 
4811 	buf_map = udev->l2_buf_map;
4812 	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
4813 		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
4814 		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);
4815 
4816 		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4817 		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4818 		reg_bd->addr_hi = start_bd->addr_hi;
4819 		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
4820 		start_bd->nbytes = cpu_to_le16(0x10);
4821 		start_bd->nbd = cpu_to_le16(3);
4822 		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
4823 		start_bd->general_data = (UNICAST_ADDRESS <<
4824 			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
4825 		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
4826 
4827 	}
4828 
4829 	val = (u64) ring_map >> 32;
4830 	txbd->next_bd.addr_hi = cpu_to_le32(val);
4831 
4832 	data->tx.tx_bd_page_base.hi = cpu_to_le32(val);
4833 
4834 	val = (u64) ring_map & 0xffffffff;
4835 	txbd->next_bd.addr_lo = cpu_to_le32(val);
4836 
4837 	data->tx.tx_bd_page_base.lo = cpu_to_le32(val);
4838 
4839 	/* Other ramrod params */
4840 	data->tx.tx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_CQ_CONS;
4841 	data->tx.tx_status_block_id = BNX2X_DEF_SB_ID;
4842 
4843 	/* reset xstorm per client statistics */
4844 	if (cli < MAX_STAT_COUNTER_ID) {
4845 		data->general.statistics_zero_flg = 1;
4846 		data->general.statistics_en_flg = 1;
4847 		data->general.statistics_counter_id = cli;
4848 	}
4849 
4850 	cp->tx_cons_ptr =
4851 		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_CQ_CONS];
4852 }
4853 
4854 static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4855 				    struct client_init_ramrod_data *data)
4856 {
4857 	struct cnic_local *cp = dev->cnic_priv;
4858 	struct cnic_uio_dev *udev = cp->udev;
4859 	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
4860 				BCM_PAGE_SIZE);
4861 	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
4862 				(udev->l2_ring + (2 * BCM_PAGE_SIZE));
4863 	struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4864 	int i;
4865 	u32 cli = cp->ethdev->iscsi_l2_client_id;
4866 	int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4867 	u32 val;
4868 	dma_addr_t ring_map = udev->l2_ring_map;
4869 
4870 	/* General data */
4871 	data->general.client_id = cli;
4872 	data->general.activate_flg = 1;
4873 	data->general.sp_client_id = cli;
4874 	data->general.mtu = cpu_to_le16(cp->l2_single_buf_size - 14);
4875 	data->general.func_id = cp->pfid;
4876 
4877 	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
4878 		dma_addr_t buf_map;
4879 		int n = (i % cp->l2_rx_ring_size) + 1;
4880 
4881 		buf_map = udev->l2_buf_map + (n * cp->l2_single_buf_size);
4882 		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
4883 		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
4884 	}
4885 
4886 	val = (u64) (ring_map + BCM_PAGE_SIZE) >> 32;
4887 	rxbd->addr_hi = cpu_to_le32(val);
4888 	data->rx.bd_page_base.hi = cpu_to_le32(val);
4889 
4890 	val = (u64) (ring_map + BCM_PAGE_SIZE) & 0xffffffff;
4891 	rxbd->addr_lo = cpu_to_le32(val);
4892 	data->rx.bd_page_base.lo = cpu_to_le32(val);
4893 
4894 	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
4895 	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
4896 	rxcqe->addr_hi = cpu_to_le32(val);
4897 	data->rx.cqe_page_base.hi = cpu_to_le32(val);
4898 
4899 	val = (u64) (ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
4900 	rxcqe->addr_lo = cpu_to_le32(val);
4901 	data->rx.cqe_page_base.lo = cpu_to_le32(val);
4902 
4903 	/* Other ramrod params */
4904 	data->rx.client_qzone_id = cl_qzone_id;
4905 	data->rx.rx_sb_index_number = HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS;
4906 	data->rx.status_block_id = BNX2X_DEF_SB_ID;
4907 
4908 	data->rx.cache_line_alignment_log_size = L1_CACHE_SHIFT;
4909 
4910 	data->rx.max_bytes_on_bd = cpu_to_le16(cp->l2_single_buf_size);
4911 	data->rx.outer_vlan_removal_enable_flg = 1;
4912 	data->rx.silent_vlan_removal_flg = 1;
4913 	data->rx.silent_vlan_value = 0;
4914 	data->rx.silent_vlan_mask = 0xffff;
4915 
4916 	cp->rx_cons_ptr =
4917 		&sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
4918 	cp->rx_cons = *cp->rx_cons_ptr;
4919 }
4920 
4921 static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4922 {
4923 	struct cnic_local *cp = dev->cnic_priv;
4924 	u32 pfid = cp->pfid;
4925 
4926 	cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4927 			   CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4928 	cp->kcq1.sw_prod_idx = 0;
4929 
4930 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4931 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4932 
4933 		cp->kcq1.hw_prod_idx_ptr =
4934 			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4935 		cp->kcq1.status_idx_ptr =
4936 			&sb->sb.running_index[SM_RX_ID];
4937 	} else {
4938 		struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4939 
4940 		cp->kcq1.hw_prod_idx_ptr =
4941 			&sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4942 		cp->kcq1.status_idx_ptr =
4943 			&sb->sb.running_index[SM_RX_ID];
4944 	}
4945 
4946 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4947 		struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4948 
4949 		cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
4950 					USTORM_FCOE_EQ_PROD_OFFSET(pfid);
4951 		cp->kcq2.sw_prod_idx = 0;
4952 		cp->kcq2.hw_prod_idx_ptr =
4953 			&sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
4954 		cp->kcq2.status_idx_ptr =
4955 			&sb->sb.running_index[SM_RX_ID];
4956 	}
4957 }
4958 
4959 static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4960 {
4961 	struct cnic_local *cp = dev->cnic_priv;
4962 	struct cnic_eth_dev *ethdev = cp->ethdev;
4963 	int func = CNIC_FUNC(cp), ret;
4964 	u32 pfid;
4965 
4966 	dev->stats_addr = ethdev->addr_drv_info_to_mcp;
4967 	cp->port_mode = CHIP_PORT_MODE_NONE;
4968 
4969 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4970 		u32 val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN_OVWR);
4971 
4972 		if (!(val & 1))
4973 			val = CNIC_RD(dev, MISC_REG_PORT4MODE_EN);
4974 		else
4975 			val = (val >> 1) & 1;
4976 
4977 		if (val) {
4978 			cp->port_mode = CHIP_4_PORT_MODE;
4979 			cp->pfid = func >> 1;
4980 		} else {
4981 			cp->port_mode = CHIP_2_PORT_MODE;
4982 			cp->pfid = func & 0x6;
4983 		}
4984 	} else {
4985 		cp->pfid = func;
4986 	}
4987 	pfid = cp->pfid;
4988 
4989 	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
4990 			       cp->iscsi_start_cid, 0);
4991 
4992 	if (ret)
4993 		return -ENOMEM;
4994 
4995 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id)) {
4996 		ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl, dev->max_fcoe_conn,
4997 					cp->fcoe_start_cid, 0);
4998 
4999 		if (ret)
5000 			return -ENOMEM;
5001 	}
5002 
5003 	cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
5004 
5005 	cnic_init_bnx2x_kcq(dev);
5006 
5007 	/* Only 1 EQ */
5008 	CNIC_WR16(dev, cp->kcq1.io_addr, MAX_KCQ_IDX);
5009 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5010 		CSTORM_ISCSI_EQ_CONS_OFFSET(pfid, 0), 0);
5011 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5012 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0),
5013 		cp->kcq1.dma.pg_map_arr[1] & 0xffffffff);
5014 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5015 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(pfid, 0) + 4,
5016 		(u64) cp->kcq1.dma.pg_map_arr[1] >> 32);
5017 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5018 		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0),
5019 		cp->kcq1.dma.pg_map_arr[0] & 0xffffffff);
5020 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5021 		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(pfid, 0) + 4,
5022 		(u64) cp->kcq1.dma.pg_map_arr[0] >> 32);
5023 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5024 		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(pfid, 0), 1);
5025 	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
5026 		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(pfid, 0), cp->status_blk_num);
5027 	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
5028 		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(pfid, 0),
5029 		HC_INDEX_ISCSI_EQ_CONS);
5030 
5031 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5032 		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid),
5033 		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
5034 	CNIC_WR(dev, BAR_USTRORM_INTMEM +
5035 		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(pfid) + 4,
5036 		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);
5037 
5038 	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
5039 		TSTORM_ISCSI_TCP_LOCAL_ADV_WND_OFFSET(pfid), DEF_RCV_BUF);
5040 
5041 	cnic_setup_bnx2x_context(dev);
5042 
5043 	ret = cnic_init_bnx2x_irq(dev);
5044 	if (ret)
5045 		return ret;
5046 
5047 	return 0;
5048 }
5049 
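/* Bring up the iSCSI L2 rings.  The bnx2x path posts a CLIENT_SETUP
 * ramrod and polls for up to ~10 ms for its completion before enabling
 * the ring via cnic_ring_ctl().
 */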
5050 static void cnic_init_rings(struct cnic_dev *dev)
5051 {
5052 	struct cnic_local *cp = dev->cnic_priv;
5053 	struct cnic_uio_dev *udev = cp->udev;
5054 
5055 	if (test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5056 		return;
5057 
5058 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5059 		cnic_init_bnx2_tx_ring(dev);
5060 		cnic_init_bnx2_rx_ring(dev);
5061 		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5062 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5063 		u32 cli = cp->ethdev->iscsi_l2_client_id;
5064 		u32 cid = cp->ethdev->iscsi_l2_cid;
5065 		u32 cl_qzone_id;
5066 		struct client_init_ramrod_data *data;
5067 		union l5cm_specific_data l5_data;
5068 		struct ustorm_eth_rx_producers rx_prods = {0};
5069 		u32 off, i, *cid_ptr;
5070 
5071 		rx_prods.bd_prod = 0;
5072 		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
5073 		barrier();
5074 
5075 		cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
5076 
5077 		off = BAR_USTRORM_INTMEM +
5078 			(BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) ?
5079 			 USTORM_RX_PRODS_E2_OFFSET(cl_qzone_id) :
5080 			 USTORM_RX_PRODS_E1X_OFFSET(CNIC_PORT(cp), cli));
5081 
5082 		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
5083 			CNIC_WR(dev, off + i * 4, ((u32 *) &rx_prods)[i]);
5084 
5085 		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5086 
5087 		data = udev->l2_buf;
5088 		cid_ptr = udev->l2_buf + 12;
5089 
5090 		memset(data, 0, sizeof(*data));
5091 
5092 		cnic_init_bnx2x_tx_ring(dev, data);
5093 		cnic_init_bnx2x_rx_ring(dev, data);
5094 
5095 		l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
5096 		l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
5097 
5098 		set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5099 
5100 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
5101 			cid, ETH_CONNECTION_TYPE, &l5_data);
5102 
5103 		i = 0;
5104 		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5105 		       ++i < 10)
5106 			msleep(1);
5107 
5108 		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5109 			netdev_err(dev->netdev,
5110 				"iSCSI CLIENT_SETUP did not complete\n");
5111 		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5112 		cnic_ring_ctl(dev, cid, cli, 1);
5113 		*cid_ptr = cid;
5114 	}
5115 }
5116 
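/* Tear down the L2 rings.  The bnx2x path halts the client, polls up
 * to ~10 ms for the HALT completion, then issues a CFC_DEL ramrod.
 * The RX ring page is cleared in either case.
 */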
5117 static void cnic_shutdown_rings(struct cnic_dev *dev)
5118 {
5119 	struct cnic_local *cp = dev->cnic_priv;
5120 	struct cnic_uio_dev *udev = cp->udev;
5121 	void *rx_ring;
5122 
5123 	if (!test_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags))
5124 		return;
5125 
5126 	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
5127 		cnic_shutdown_bnx2_rx_ring(dev);
5128 	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
5129 		u32 cli = cp->ethdev->iscsi_l2_client_id;
5130 		u32 cid = cp->ethdev->iscsi_l2_cid;
5131 		union l5cm_specific_data l5_data;
5132 		int i;
5133 
5134 		cnic_ring_ctl(dev, cid, cli, 0);
5135 
5136 		set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
5137 
5138 		l5_data.phy_address.lo = cli;
5139 		l5_data.phy_address.hi = 0;
5140 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
5141 			cid, ETH_CONNECTION_TYPE, &l5_data);
5142 		i = 0;
5143 		while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
5144 		       ++i < 10)
5145 			msleep(1);
5146 
5147 		if (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags))
5148 			netdev_err(dev->netdev,
5149 				"iSCSI CLIENT_HALT did not complete\n");
5150 		cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
5151 
5152 		memset(&l5_data, 0, sizeof(l5_data));
5153 		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
5154 			cid, NONE_CONNECTION_TYPE, &l5_data);
5155 		msleep(10);
5156 	}
5157 	clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
5158 	rx_ring = udev->l2_ring + BCM_PAGE_SIZE;
5159 	memset(rx_ring, 0, BCM_PAGE_SIZE);
5160 }
5161 
5162 static int cnic_register_netdev(struct cnic_dev *dev)
5163 {
5164 	struct cnic_local *cp = dev->cnic_priv;
5165 	struct cnic_eth_dev *ethdev = cp->ethdev;
5166 	int err;
5167 
5168 	if (!ethdev)
5169 		return -ENODEV;
5170 
5171 	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
5172 		return 0;
5173 
5174 	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
5175 	if (err)
5176 		netdev_err(dev->netdev, "register_cnic failed\n");
5177 
5178 	return err;
5179 }
5180 
5181 static void cnic_unregister_netdev(struct cnic_dev *dev)
5182 {
5183 	struct cnic_local *cp = dev->cnic_priv;
5184 	struct cnic_eth_dev *ethdev = cp->ethdev;
5185 
5186 	if (!ethdev)
5187 		return;
5188 
5189 	ethdev->drv_unregister_cnic(dev->netdev);
5190 }
5191 
5192 static int cnic_start_hw(struct cnic_dev *dev)
5193 {
5194 	struct cnic_local *cp = dev->cnic_priv;
5195 	struct cnic_eth_dev *ethdev = cp->ethdev;
5196 	int err;
5197 
5198 	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
5199 		return -EALREADY;
5200 
5201 	dev->regview = ethdev->io_base;
5202 	pci_dev_get(dev->pcidev);
5203 	cp->func = PCI_FUNC(dev->pcidev->devfn);
5204 	cp->status_blk.gen = ethdev->irq_arr[0].status_blk;
5205 	cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
5206 
5207 	err = cp->alloc_resc(dev);
5208 	if (err) {
5209 		netdev_err(dev->netdev, "allocate resource failure\n");
5210 		goto err1;
5211 	}
5212 
5213 	err = cp->start_hw(dev);
5214 	if (err)
5215 		goto err1;
5216 
5217 	err = cnic_cm_open(dev);
5218 	if (err)
5219 		goto err1;
5220 
5221 	set_bit(CNIC_F_CNIC_UP, &dev->flags);
5222 
5223 	cp->enable_int(dev);
5224 
5225 	return 0;
5226 
5227 err1:
5228 	cp->free_resc(dev);
5229 	pci_dev_put(dev->pcidev);
5230 	return err;
5231 }
5232 
5233 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
5234 {
5235 	cnic_disable_bnx2_int_sync(dev);
5236 
5237 	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
5238 	cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
5239 
5240 	cnic_init_context(dev, KWQ_CID);
5241 	cnic_init_context(dev, KCQ_CID);
5242 
5243 	cnic_setup_5709_context(dev, 0);
5244 	cnic_free_irq(dev);
5245 
5246 	cnic_free_resc(dev);
5247 }
5248 
5249 
5250 static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
5251 {
5252 	struct cnic_local *cp = dev->cnic_priv;
5253 
5254 	cnic_free_irq(dev);
5255 	*cp->kcq1.hw_prod_idx_ptr = 0;
5256 	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
5257 		CSTORM_ISCSI_EQ_CONS_OFFSET(cp->pfid, 0), 0);
5258 	CNIC_WR16(dev, cp->kcq1.io_addr, 0);
5259 	cnic_free_resc(dev);
5260 }
5261 
5262 static void cnic_stop_hw(struct cnic_dev *dev)
5263 {
5264 	if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5265 		struct cnic_local *cp = dev->cnic_priv;
5266 		int i = 0;
5267 
5268 		/* Need to wait for the ring shutdown event to complete
5269 		 * before clearing the CNIC_UP flag.
5270 		 */
5271 		while (cp->udev->uio_dev != -1 && i < 15) {
5272 			msleep(100);
5273 			i++;
5274 		}
5275 		cnic_shutdown_rings(dev);
5276 		clear_bit(CNIC_F_CNIC_UP, &dev->flags);
5277 		RCU_INIT_POINTER(cp->ulp_ops[CNIC_ULP_L4], NULL);
5278 		synchronize_rcu();
5279 		cnic_cm_shutdown(dev);
5280 		cp->stop_hw(dev);
5281 		pci_dev_put(dev->pcidev);
5282 	}
5283 }
5284 
5285 static void cnic_free_dev(struct cnic_dev *dev)
5286 {
5287 	int i = 0;
5288 
5289 	while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
5290 		msleep(100);
5291 		i++;
5292 	}
5293 	if (atomic_read(&dev->ref_count) != 0)
5294 		netdev_err(dev->netdev, "Failed waiting for ref count to go to zero\n");
5295 
5296 	netdev_info(dev->netdev, "Removed CNIC device\n");
5297 	dev_put(dev->netdev);
5298 	kfree(dev);
5299 }
5300 
5301 static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
5302 				       struct pci_dev *pdev)
5303 {
5304 	struct cnic_dev *cdev;
5305 	struct cnic_local *cp;
5306 	int alloc_size;
5307 
5308 	alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
5309 
5310 	cdev = kzalloc(alloc_size, GFP_KERNEL);
5311 	if (cdev == NULL) {
5312 		netdev_err(dev, "allocate dev struct failure\n");
5313 		return NULL;
5314 	}
5315 
5316 	cdev->netdev = dev;
5317 	cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
5318 	cdev->register_device = cnic_register_device;
5319 	cdev->unregister_device = cnic_unregister_device;
5320 	cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
5321 
5322 	cp = cdev->cnic_priv;
5323 	cp->dev = cdev;
5324 	cp->l2_single_buf_size = 0x400;
5325 	cp->l2_rx_ring_size = 3;
5326 
5327 	spin_lock_init(&cp->cnic_ulp_lock);
5328 
5329 	netdev_info(dev, "Added CNIC device\n");
5330 
5331 	return cdev;
5332 }
5333 
5334 static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
5335 {
5336 	struct pci_dev *pdev;
5337 	struct cnic_dev *cdev;
5338 	struct cnic_local *cp;
5339 	struct cnic_eth_dev *ethdev = NULL;
5340 	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5341 
5342 	probe = symbol_get(bnx2_cnic_probe);
5343 	if (probe) {
5344 		ethdev = (*probe)(dev);
5345 		symbol_put(bnx2_cnic_probe);
5346 	}
5347 	if (!ethdev)
5348 		return NULL;
5349 
5350 	pdev = ethdev->pdev;
5351 	if (!pdev)
5352 		return NULL;
5353 
5354 	dev_hold(dev);
5355 	pci_dev_get(pdev);
5356 	if ((pdev->device == PCI_DEVICE_ID_NX2_5709 ||
5357 	     pdev->device == PCI_DEVICE_ID_NX2_5709S) &&
5358 	    (pdev->revision < 0x10)) {
5359 		pci_dev_put(pdev);
5360 		goto cnic_err;
5361 	}
5362 	pci_dev_put(pdev);
5363 
5364 	cdev = cnic_alloc_dev(dev, pdev);
5365 	if (cdev == NULL)
5366 		goto cnic_err;
5367 
5368 	set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
5369 	cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
5370 
5371 	cp = cdev->cnic_priv;
5372 	cp->ethdev = ethdev;
5373 	cdev->pcidev = pdev;
5374 	cp->chip_id = ethdev->chip_id;
5375 
5376 	cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5377 
5378 	cp->cnic_ops = &cnic_bnx2_ops;
5379 	cp->start_hw = cnic_start_bnx2_hw;
5380 	cp->stop_hw = cnic_stop_bnx2_hw;
5381 	cp->setup_pgtbl = cnic_setup_page_tbl;
5382 	cp->alloc_resc = cnic_alloc_bnx2_resc;
5383 	cp->free_resc = cnic_free_resc;
5384 	cp->start_cm = cnic_cm_init_bnx2_hw;
5385 	cp->stop_cm = cnic_cm_stop_bnx2_hw;
5386 	cp->enable_int = cnic_enable_bnx2_int;
5387 	cp->disable_int_sync = cnic_disable_bnx2_int_sync;
5388 	cp->close_conn = cnic_close_bnx2_conn;
5389 	return cdev;
5390 
5391 cnic_err:
5392 	dev_put(dev);
5393 	return NULL;
5394 }
5395 
5396 static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
5397 {
5398 	struct pci_dev *pdev;
5399 	struct cnic_dev *cdev;
5400 	struct cnic_local *cp;
5401 	struct cnic_eth_dev *ethdev = NULL;
5402 	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;
5403 
5404 	probe = symbol_get(bnx2x_cnic_probe);
5405 	if (probe) {
5406 		ethdev = (*probe)(dev);
5407 		symbol_put(bnx2x_cnic_probe);
5408 	}
5409 	if (!ethdev)
5410 		return NULL;
5411 
5412 	pdev = ethdev->pdev;
5413 	if (!pdev)
5414 		return NULL;
5415 
5416 	dev_hold(dev);
5417 	cdev = cnic_alloc_dev(dev, pdev);
5418 	if (cdev == NULL) {
5419 		dev_put(dev);
5420 		return NULL;
5421 	}
5422 
5423 	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
5424 	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;
5425 
5426 	cp = cdev->cnic_priv;
5427 	cp->ethdev = ethdev;
5428 	cdev->pcidev = pdev;
5429 	cp->chip_id = ethdev->chip_id;
5430 
5431 	cdev->stats_addr = ethdev->addr_drv_info_to_mcp;
5432 
5433 	if (!(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
5434 		cdev->max_iscsi_conn = ethdev->max_iscsi_conn;
5435 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id) &&
5436 	    !(ethdev->drv_state & CNIC_DRV_STATE_NO_FCOE))
5437 		cdev->max_fcoe_conn = ethdev->max_fcoe_conn;
5438 
5439 	if (cdev->max_fcoe_conn > BNX2X_FCOE_NUM_CONNECTIONS)
5440 		cdev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
5441 
5442 	memcpy(cdev->mac_addr, ethdev->iscsi_mac, 6);
5443 
5444 	cp->cnic_ops = &cnic_bnx2x_ops;
5445 	cp->start_hw = cnic_start_bnx2x_hw;
5446 	cp->stop_hw = cnic_stop_bnx2x_hw;
5447 	cp->setup_pgtbl = cnic_setup_page_tbl_le;
5448 	cp->alloc_resc = cnic_alloc_bnx2x_resc;
5449 	cp->free_resc = cnic_free_resc;
5450 	cp->start_cm = cnic_cm_init_bnx2x_hw;
5451 	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
5452 	cp->enable_int = cnic_enable_bnx2x_int;
5453 	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
5454 	if (BNX2X_CHIP_IS_E2_PLUS(cp->chip_id))
5455 		cp->ack_int = cnic_ack_bnx2x_e2_msix;
5456 	else
5457 		cp->ack_int = cnic_ack_bnx2x_msix;
5458 	cp->close_conn = cnic_close_bnx2x_conn;
5459 	return cdev;
5460 }
5461 
5462 static struct cnic_dev *is_cnic_dev(struct net_device *dev)
5463 {
5464 	struct ethtool_drvinfo drvinfo;
5465 	struct cnic_dev *cdev = NULL;
5466 
5467 	if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
5468 		memset(&drvinfo, 0, sizeof(drvinfo));
5469 		dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
5470 
5471 		if (!strcmp(drvinfo.driver, "bnx2"))
5472 			cdev = init_bnx2_cnic(dev);
5473 		if (!strcmp(drvinfo.driver, "bnx2x"))
5474 			cdev = init_bnx2x_cnic(dev);
5475 		if (cdev) {
5476 			write_lock(&cnic_dev_lock);
5477 			list_add(&cdev->list, &cnic_dev_list);
5478 			write_unlock(&cnic_dev_lock);
5479 		}
5480 	}
5481 	return cdev;
5482 }
5483 
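/*
 * Fan a netdev event out to every registered ULP (e.g. bnx2i, bnx2fc)
 * that implements an indicate_netevent() callback; the ulp_ops table is
 * protected by RCU.
 */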
5484 static void cnic_rcv_netevent(struct cnic_local *cp, unsigned long event,
5485 			      u16 vlan_id)
5486 {
5487 	int if_type;
5488 
5489 	rcu_read_lock();
5490 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
5491 		struct cnic_ulp_ops *ulp_ops;
5492 		void *ctx;
5493 
5494 		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
5495 		if (!ulp_ops || !ulp_ops->indicate_netevent)
5496 			continue;
5497 
5498 		ctx = cp->ulp_handle[if_type];
5499 
5500 		ulp_ops->indicate_netevent(ctx, event, vlan_id);
5501 	}
5502 	rcu_read_unlock();
5503 }
5504 
5505 /*
5506  * netdev event handler, also creates/destroys cnic devices as needed
5507  */
5508 static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
5509 							 void *ptr)
5510 {
5511 	struct net_device *netdev = ptr;
5512 	struct cnic_dev *dev;
5513 	int new_dev = 0;
5514 
5515 	dev = cnic_from_netdev(netdev);
5516 
5517 	if (!dev && (event == NETDEV_REGISTER || netif_running(netdev))) {
5518 		/* Check for the hot-plug device */
5519 		dev = is_cnic_dev(netdev);
5520 		if (dev) {
5521 			new_dev = 1;
5522 			cnic_hold(dev);
5523 		}
5524 	}
5525 	if (dev) {
5526 		struct cnic_local *cp = dev->cnic_priv;
5527 
5528 		if (new_dev)
5529 			cnic_ulp_init(dev);
5530 		else if (event == NETDEV_UNREGISTER)
5531 			cnic_ulp_exit(dev);
5532 
5533 		if (event == NETDEV_UP || (new_dev && netif_running(netdev))) {
5534 			if (cnic_register_netdev(dev) != 0) {
5535 				cnic_put(dev);
5536 				goto done;
5537 			}
5538 			if (!cnic_start_hw(dev))
5539 				cnic_ulp_start(dev);
5540 		}
5541 
5542 		cnic_rcv_netevent(cp, event, 0);
5543 
5544 		if (event == NETDEV_GOING_DOWN) {
5545 			cnic_ulp_stop(dev);
5546 			cnic_stop_hw(dev);
5547 			cnic_unregister_netdev(dev);
5548 		} else if (event == NETDEV_UNREGISTER) {
5549 			write_lock(&cnic_dev_lock);
5550 			list_del_init(&dev->list);
5551 			write_unlock(&cnic_dev_lock);
5552 
5553 			cnic_put(dev);
5554 			cnic_free_dev(dev);
5555 			goto done;
5556 		}
5557 		cnic_put(dev);
5558 	} else {
5559 		struct net_device *realdev;
5560 		u16 vid;
5561 
5562 		vid = cnic_get_vlan(netdev, &realdev);
5563 		if (realdev) {
5564 			dev = cnic_from_netdev(realdev);
5565 			if (dev) {
5566 				vid |= VLAN_TAG_PRESENT;
5567 				cnic_rcv_netevent(dev->cnic_priv, event, vid);
5568 				cnic_put(dev);
5569 			}
5570 		}
5571 	}
5572 done:
5573 	return NOTIFY_DONE;
5574 }
5575 
5576 static struct notifier_block cnic_netdev_notifier = {
5577 	.notifier_call = cnic_netdev_event
5578 };
5579 
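/*
 * Release all remaining cnic devices and UIO devices.  Called on module
 * unload and on a failed module init.
 */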
5580 static void cnic_release(void)
5581 {
5582 	struct cnic_dev *dev;
5583 	struct cnic_uio_dev *udev;
5584 
5585 	while (!list_empty(&cnic_dev_list)) {
5586 		dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
5587 		if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
5588 			cnic_ulp_stop(dev);
5589 			cnic_stop_hw(dev);
5590 		}
5591 
5592 		cnic_ulp_exit(dev);
5593 		cnic_unregister_netdev(dev);
5594 		list_del_init(&dev->list);
5595 		cnic_free_dev(dev);
5596 	}
5597 	while (!list_empty(&cnic_udev_list)) {
5598 		udev = list_entry(cnic_udev_list.next, struct cnic_uio_dev,
5599 				  list);
5600 		cnic_free_uio(udev);
5601 	}
5602 }
5603 
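/*
 * Note that register_netdevice_notifier() replays NETDEV_REGISTER (and
 * NETDEV_UP for running interfaces) for netdevs that already exist, so
 * cnic discovers bnx2/bnx2x devices that were brought up before this
 * module loaded.
 */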
5604 static int __init cnic_init(void)
5605 {
5606 	int rc = 0;
5607 
5608 	pr_info("%s", version);
5609 
5610 	rc = register_netdevice_notifier(&cnic_netdev_notifier);
5611 	if (rc) {
5612 		cnic_release();
5613 		return rc;
5614 	}
5615 
5616 	cnic_wq = create_singlethread_workqueue("cnic_wq");
5617 	if (!cnic_wq) {
5618 		unregister_netdevice_notifier(&cnic_netdev_notifier);
5619 		cnic_release();
5620 		return -ENOMEM;
5621 	}
5622 
5623 	return 0;
5624 }
5625 
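/*
 * Teardown order matters: unregister the notifier first so no new cnic
 * devices can be created, release the devices, then destroy the
 * workqueue once nothing can queue further work.
 */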
5626 static void __exit cnic_exit(void)
5627 {
5628 	unregister_netdevice_notifier(&cnic_netdev_notifier);
5629 	cnic_release();
5630 	destroy_workqueue(cnic_wq);
5631 }
5632 
5633 module_init(cnic_init);
5634 module_exit(cnic_exit);
5635