/**
 * Copyright (C) 2005 - 2016 Broadcom
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Written by: Jayamohan Kallickal (jayamohan.kallickal@broadcom.com)
 *
 * Contact Information:
 * linux-drivers@broadcom.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/reboot.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/semaphore.h>
#include <linux/iscsi_boot_sysfs.h>
#include <linux/module.h>
#include <linux/bsg-lib.h>
#include <linux/irq_poll.h>

#include <scsi/libiscsi.h>
#include <scsi/scsi_bsg_iscsi.h>
#include <scsi/scsi_netlink.h>
#include <scsi/scsi_transport_iscsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include "be_main.h"
#include "be_iscsi.h"
#include "be_mgmt.h"
#include "be_cmds.h"

static unsigned int be_iopoll_budget = 10;
static unsigned int be_max_phys_size = 64;
static unsigned int enable_msix = 1;

MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
MODULE_VERSION(BUILD_STR);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, int, 0);
module_param(enable_msix, int, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size,
		"Maximum Size (In Kilobytes) of physically contiguous "
		"memory that can be allocated. Range is 16 - 128");

#define beiscsi_disp_param(_name)\
static ssize_t	\
beiscsi_##_name##_disp(struct device *dev,\
			struct device_attribute *attrib, char *buf)	\
{	\
	struct Scsi_Host *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost); \
	return snprintf(buf, PAGE_SIZE, "%d\n",\
			phba->attr_##_name);\
}

#define beiscsi_change_param(_name, _minval, _maxval, _defaval)\
static int \
beiscsi_##_name##_change(struct beiscsi_hba *phba, uint32_t val)\
{\
	if (val >= _minval && val <= _maxval) {\
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
			    "BA_%d : beiscsi_"#_name" updated "\
			    "from 0x%x ==> 0x%x\n",\
			    phba->attr_##_name, val); \
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT, \
		    "BA_%d beiscsi_"#_name" attribute "\
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	return -EINVAL;\
}

#define beiscsi_store_param(_name)  \
static ssize_t \
beiscsi_##_name##_store(struct device *dev,\
			 struct device_attribute *attr, const char *buf,\
			 size_t count) \
{ \
	struct Scsi_Host  *shost = class_to_shost(dev);\
	struct beiscsi_hba *phba = iscsi_host_priv(shost);\
	uint32_t param_val = 0;\
	if (!isdigit(buf[0]))\
		return -EINVAL;\
	if (sscanf(buf, "%i", &param_val) != 1)\
		return -EINVAL;\
	if (beiscsi_##_name##_change(phba, param_val) == 0) \
		return strlen(buf);\
	else \
		return -EINVAL;\
}

#define beiscsi_init_param(_name, _minval, _maxval, _defval) \
static int \
beiscsi_##_name##_init(struct beiscsi_hba *phba, uint32_t val) \
{ \
	if (val >= _minval && val <= _maxval) {\
		phba->attr_##_name = val;\
		return 0;\
	} \
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,\
		    "BA_%d beiscsi_"#_name" attribute " \
		    "cannot be updated to 0x%x, "\
		    "range allowed is ["#_minval" - "#_maxval"]\n", val);\
	phba->attr_##_name = _defval;\
	return -EINVAL;\
}

#define BEISCSI_RW_ATTR(_name, _minval, _maxval, _defval, _descp) \
static uint beiscsi_##_name = _defval;\
module_param(beiscsi_##_name, uint, S_IRUGO);\
MODULE_PARM_DESC(beiscsi_##_name, _descp);\
beiscsi_disp_param(_name)\
beiscsi_change_param(_name, _minval, _maxval, _defval)\
beiscsi_store_param(_name)\
beiscsi_init_param(_name, _minval, _maxval, _defval)\
DEVICE_ATTR(beiscsi_##_name, S_IRUGO | S_IWUSR,\
	      beiscsi_##_name##_disp, beiscsi_##_name##_store)
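
/*
 * For reference, a BEISCSI_RW_ATTR(foo, ...) invocation expands roughly to
 * the following (illustrative sketch only, not literal preprocessor output):
 *
 *	static uint beiscsi_foo = _defval;
 *	module_param(beiscsi_foo, uint, S_IRUGO);
 *	static ssize_t beiscsi_foo_disp(...);	// sysfs show
 *	static int beiscsi_foo_change(...);	// bounds-checked runtime update
 *	static ssize_t beiscsi_foo_store(...);	// sysfs store -> _change
 *	static int beiscsi_foo_init(...);	// bounds-checked init, falls
 *						// back to _defval on error
 *	DEVICE_ATTR(beiscsi_foo, S_IRUGO | S_IWUSR,
 *		    beiscsi_foo_disp, beiscsi_foo_store);
 */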

/*
 * When a new log level is added, update the
 * MAX allowed value for log_enable
 */
BEISCSI_RW_ATTR(log_enable, 0x00,
		0xFF, 0x00, "Enable logging Bit Mask\n"
		"\t\t\t\tInitialization Events	: 0x01\n"
		"\t\t\t\tMailbox Events		: 0x02\n"
		"\t\t\t\tMiscellaneous Events	: 0x04\n"
		"\t\t\t\tError Handling		: 0x08\n"
		"\t\t\t\tIO Path Events		: 0x10\n"
		"\t\t\t\tConfiguration Path	: 0x20\n"
		"\t\t\t\tiSCSI Protocol		: 0x40\n");
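
/*
 * Usage example (assuming host number 0; the sysfs path may vary): enable
 * initialization, configuration-path and iSCSI-protocol logging at runtime
 * with
 *
 *	echo 0x61 > /sys/class/scsi_host/host0/beiscsi_log_enable
 *
 * or at load time with the module parameter
 *
 *	modprobe be2iscsi beiscsi_log_enable=0x61
 */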

DEVICE_ATTR(beiscsi_drvr_ver, S_IRUGO, beiscsi_drvr_ver_disp, NULL);
DEVICE_ATTR(beiscsi_adapter_family, S_IRUGO, beiscsi_adap_family_disp, NULL);
DEVICE_ATTR(beiscsi_fw_ver, S_IRUGO, beiscsi_fw_ver_disp, NULL);
DEVICE_ATTR(beiscsi_phys_port, S_IRUGO, beiscsi_phys_port_disp, NULL);
DEVICE_ATTR(beiscsi_active_session_count, S_IRUGO,
	     beiscsi_active_session_disp, NULL);
DEVICE_ATTR(beiscsi_free_session_count, S_IRUGO,
	     beiscsi_free_session_disp, NULL);
struct device_attribute *beiscsi_attrs[] = {
	&dev_attr_beiscsi_log_enable,
	&dev_attr_beiscsi_drvr_ver,
	&dev_attr_beiscsi_adapter_family,
	&dev_attr_beiscsi_fw_ver,
	&dev_attr_beiscsi_active_session_count,
	&dev_attr_beiscsi_free_session_count,
	&dev_attr_beiscsi_phys_port,
	NULL,
};

static char const *cqe_desc[] = {
	"RESERVED_DESC",
	"SOL_CMD_COMPLETE",
	"SOL_CMD_KILLED_DATA_DIGEST_ERR",
	"CXN_KILLED_PDU_SIZE_EXCEEDS_DSL",
	"CXN_KILLED_BURST_LEN_MISMATCH",
	"CXN_KILLED_AHS_RCVD",
	"CXN_KILLED_HDR_DIGEST_ERR",
	"CXN_KILLED_UNKNOWN_HDR",
	"CXN_KILLED_STALE_ITT_TTT_RCVD",
	"CXN_KILLED_INVALID_ITT_TTT_RCVD",
	"CXN_KILLED_RST_RCVD",
	"CXN_KILLED_TIMED_OUT",
	"CXN_KILLED_RST_SENT",
	"CXN_KILLED_FIN_RCVD",
	"CXN_KILLED_BAD_UNSOL_PDU_RCVD",
	"CXN_KILLED_BAD_WRB_INDEX_ERROR",
	"CXN_KILLED_OVER_RUN_RESIDUAL",
	"CXN_KILLED_UNDER_RUN_RESIDUAL",
	"CMD_KILLED_INVALID_STATSN_RCVD",
	"CMD_KILLED_INVALID_R2T_RCVD",
	"CMD_CXN_KILLED_LUN_INVALID",
	"CMD_CXN_KILLED_ICD_INVALID",
	"CMD_CXN_KILLED_ITT_INVALID",
	"CMD_CXN_KILLED_SEQ_OUTOFORDER",
	"CMD_CXN_KILLED_INVALID_DATASN_RCVD",
	"CXN_INVALIDATE_NOTIFY",
	"CXN_INVALIDATE_INDEX_NOTIFY",
	"CMD_INVALIDATED_NOTIFY",
	"UNSOL_HDR_NOTIFY",
	"UNSOL_DATA_NOTIFY",
	"UNSOL_DATA_DIGEST_ERROR_NOTIFY",
	"DRIVERMSG_NOTIFY",
	"CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN",
	"SOL_CMD_KILLED_DIF_ERR",
	"CXN_KILLED_SYN_RCVD",
	"CXN_KILLED_IMM_DATA_RCVD"
};

static int beiscsi_slave_configure(struct scsi_device *sdev)
{
	blk_queue_max_segment_size(sdev->request_queue, 65536);
	return 0;
}

static int beiscsi_eh_abort(struct scsi_cmnd *sc)
{
	struct iscsi_task *abrt_task = (struct iscsi_task *)sc->SCp.ptr;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_io_task *abrt_io_task;
	struct beiscsi_conn *beiscsi_conn;
	struct iscsi_session *session;
	struct invldt_cmd_tbl inv_tbl;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	int rc;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	/* check if we raced, task just got cleaned up under us */
	spin_lock_bh(&session->back_lock);
	if (!abrt_task || !abrt_task->sc) {
		spin_unlock_bh(&session->back_lock);
		return SUCCESS;
	}
	/* get a task ref till FW processes the req for the ICD used */
	__iscsi_get_task(abrt_task);
	abrt_io_task = abrt_task->dd_data;
	conn = abrt_task->conn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;
	/* mark the WRB invalid if it has not been processed by FW yet */
	if (is_chip_be2_be3r(phba)) {
		AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	} else {
		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
			      abrt_io_task->pwrb_handle->pwrb, 1);
	}
	inv_tbl.cid = beiscsi_conn->beiscsi_conn_cid;
	inv_tbl.icd = abrt_io_task->psgl_handle->sgl_index;
	spin_unlock_bh(&session->back_lock);

	rc = beiscsi_mgmt_invalidate_icds(phba, &inv_tbl, 1);
	iscsi_put_task(abrt_task);
	if (rc) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : sc %p invalidation failed %d\n",
			    sc, rc);
		return FAILED;
	}

	return iscsi_eh_abort(sc);
}
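
/*
 * Abort flow summary (as implemented above): the WRB for the victim task is
 * marked invalid so firmware won't execute it, the ICD is invalidated via an
 * MCC command (beiscsi_mgmt_invalidate_icds), and only then is the abort
 * handed to libiscsi's iscsi_eh_abort() to run the TMF protocol. The task
 * reference taken under back_lock keeps the ICD from being reused while
 * firmware processes the invalidation.
 */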

static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
{
	struct beiscsi_invldt_cmd_tbl {
		struct invldt_cmd_tbl tbl[BE_INVLDT_CMD_TBL_SZ];
		struct iscsi_task *task[BE_INVLDT_CMD_TBL_SZ];
	} *inv_tbl;
	struct iscsi_cls_session *cls_session;
	struct beiscsi_conn *beiscsi_conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_session *session;
	struct beiscsi_hba *phba;
	struct iscsi_conn *conn;
	struct iscsi_task *task;
	unsigned int i, nents;
	int rc, more = 0;

	cls_session = starget_to_session(scsi_target(sc->device));
	session = cls_session->dd_data;

	spin_lock_bh(&session->frwd_lock);
	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN) {
		spin_unlock_bh(&session->frwd_lock);
		return FAILED;
	}

	conn = session->leadconn;
	beiscsi_conn = conn->dd_data;
	phba = beiscsi_conn->phba;

	inv_tbl = kzalloc(sizeof(*inv_tbl), GFP_ATOMIC);
	if (!inv_tbl) {
		spin_unlock_bh(&session->frwd_lock);
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : invldt_cmd_tbl alloc failed\n");
		return FAILED;
	}
	nents = 0;
	/* take back_lock to prevent task from getting cleaned up under us */
	spin_lock(&session->back_lock);
	for (i = 0; i < conn->session->cmds_max; i++) {
		task = conn->session->cmds[i];
		if (!task->sc)
			continue;

		if (sc->device->lun != task->sc->device->lun)
			continue;
		/*
		 * Can't fit any more cmds? Normally this won't happen because
		 * BEISCSI_CMD_PER_LUN is the same as BE_INVLDT_CMD_TBL_SZ.
		 */
		if (nents == BE_INVLDT_CMD_TBL_SZ) {
			more = 1;
			break;
		}

		/* get a task ref till FW processes the req for the ICD used */
		__iscsi_get_task(task);
		io_task = task->dd_data;
		/* mark the WRB invalid if it has not been processed by FW yet */
		if (is_chip_be2_be3r(phba)) {
			AMAP_SET_BITS(struct amap_iscsi_wrb, invld,
				      io_task->pwrb_handle->pwrb, 1);
		} else {
			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, invld,
				      io_task->pwrb_handle->pwrb, 1);
		}

		inv_tbl->tbl[nents].cid = beiscsi_conn->beiscsi_conn_cid;
		inv_tbl->tbl[nents].icd = io_task->psgl_handle->sgl_index;
		inv_tbl->task[nents] = task;
		nents++;
	}
	spin_unlock(&session->back_lock);
	spin_unlock_bh(&session->frwd_lock);

	rc = SUCCESS;
	if (!nents)
		goto end_reset;

	if (more) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_EH,
			    "BM_%d : number of cmds exceeds size of invalidation table\n");
		rc = FAILED;
		goto end_reset;
	}

	if (beiscsi_mgmt_invalidate_icds(phba, &inv_tbl->tbl[0], nents)) {
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_EH,
			    "BM_%d : cid %u scmds invalidation failed\n",
			    beiscsi_conn->beiscsi_conn_cid);
		rc = FAILED;
	}

end_reset:
	for (i = 0; i < nents; i++)
		iscsi_put_task(inv_tbl->task[i]);
	kfree(inv_tbl);

	if (rc == SUCCESS)
		rc = iscsi_eh_device_reset(sc);
	return rc;
}

/*------------------- PCI Driver operations and data ----------------- */
static const struct pci_device_id beiscsi_pci_id_table[] = {
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
	{ PCI_DEVICE(ELX_VENDOR_ID, OC_SKH_ID1) },
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);

static struct scsi_host_template beiscsi_sht = {
	.module = THIS_MODULE,
	.name = "Emulex 10Gbe open-iscsi Initiator Driver",
	.proc_name = DRV_NAME,
	.queuecommand = iscsi_queuecommand,
	.change_queue_depth = scsi_change_queue_depth,
	.slave_configure = beiscsi_slave_configure,
	.target_alloc = iscsi_target_alloc,
	.eh_timed_out = iscsi_eh_cmd_timed_out,
	.eh_abort_handler = beiscsi_eh_abort,
	.eh_device_reset_handler = beiscsi_eh_device_reset,
	.eh_target_reset_handler = iscsi_eh_session_reset,
	.shost_attrs = beiscsi_attrs,
	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
	.can_queue = BE2_IO_DEPTH,
	.this_id = -1,
	.max_sectors = BEISCSI_MAX_SECTORS,
	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
	.use_clustering = ENABLE_CLUSTERING,
	.vendor_id = SCSI_NL_VID_TYPE_PCI | BE_VENDOR_ID,
	.track_queue_depth = 1,
};

static struct scsi_transport_template *beiscsi_scsi_transport;

static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
{
	struct beiscsi_hba *phba;
	struct Scsi_Host *shost;

	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
		return NULL;
	}
	shost->max_id = BE2_MAX_SESSIONS;
	shost->max_channel = 0;
	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
	shost->max_lun = BEISCSI_NUM_MAX_LUN;
	shost->transportt = beiscsi_scsi_transport;
	phba = iscsi_host_priv(shost);
	memset(phba, 0, sizeof(*phba));
	phba->shost = shost;
	phba->pcidev = pci_dev_get(pcidev);
	pci_set_drvdata(pcidev, phba);
	phba->interface_handle = 0xFFFFFFFF;

	return phba;
}

static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
{
	if (phba->csr_va) {
		iounmap(phba->csr_va);
		phba->csr_va = NULL;
	}
	if (phba->db_va) {
		iounmap(phba->db_va);
		phba->db_va = NULL;
	}
	if (phba->pci_va) {
		iounmap(phba->pci_va);
		phba->pci_va = NULL;
	}
}

static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
				struct pci_dev *pcidev)
{
	u8 __iomem *addr;
	int pcicfg_reg;

	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
			       pci_resource_len(pcidev, 2));
	if (addr == NULL)
		return -ENOMEM;
	phba->ctrl.csr = addr;
	phba->csr_va = addr;
	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);

	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.db = addr;
	phba->db_va = addr;
	phba->db_pa.u.a64.address = pci_resource_start(pcidev, 4);

	if (phba->generation == BE_GEN2)
		pcicfg_reg = 1;
	else
		pcicfg_reg = 0;

	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
			       pci_resource_len(pcidev, pcicfg_reg));

	if (addr == NULL)
		goto pci_map_err;
	phba->ctrl.pcicfg = addr;
	phba->pci_va = addr;
	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
	return 0;

pci_map_err:
	beiscsi_unmap_pci_function(phba);
	return -ENOMEM;
}

static int beiscsi_enable_pci(struct pci_dev *pcidev)
{
	int ret;

	ret = pci_enable_device(pcidev);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
		return ret;
	}

	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - request region failed\n");
		goto pci_dev_disable;
	}

	pci_set_master(pcidev);
	ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
	if (ret) {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		} else {
			ret = pci_set_consistent_dma_mask(pcidev,
							  DMA_BIT_MASK(32));
		}
	} else {
		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret) {
			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
			goto pci_region_release;
		}
	}
	return 0;

pci_region_release:
	pci_release_regions(pcidev);
pci_dev_disable:
	pci_disable_device(pcidev);

	return ret;
}
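
/*
 * Note: the DMA mask setup above is the common 64-bit-with-32-bit-fallback
 * pattern. On kernels where it is available, it could be collapsed into a
 * single call per mask (sketch only, not how this driver is written):
 *
 *	if (dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))
 *		goto pci_region_release;
 */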

static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
{
	struct be_ctrl_info *ctrl = &phba->ctrl;
	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
	int status = 0;

	ctrl->pdev = pdev;
	status = beiscsi_map_pci_bars(phba, pdev);
	if (status)
		return status;
	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
						  mbox_mem_alloc->size,
						  &mbox_mem_alloc->dma);
	if (!mbox_mem_alloc->va) {
		beiscsi_unmap_pci_function(phba);
		return -ENOMEM;
	}

	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
	mutex_init(&ctrl->mbox_lock);
	spin_lock_init(&phba->ctrl.mcc_lock);

	return status;
}
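
/*
 * Mailbox alignment note: the MCC mailbox must be 16-byte aligned, so
 * be_ctrl_init() above over-allocates by 16 bytes and rounds up with
 * PTR_ALIGN. For example (illustrative addresses), if the allocation
 * returned 0x...1008, PTR_ALIGN(0x...1008, 16) yields 0x...1010, which
 * still leaves sizeof(struct be_mcc_mailbox) usable bytes inside the
 * over-sized buffer.
 */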

/**
 * beiscsi_get_params()- Set the config parameters
 * @phba: ptr to device priv structure
 **/
static void beiscsi_get_params(struct beiscsi_hba *phba)
{
	uint32_t total_cid_count = 0;
	uint32_t total_icd_count = 0;
	uint8_t ulp_num = 0;

	total_cid_count = BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP0) +
			  BEISCSI_GET_CID_COUNT(phba, BEISCSI_ULP1);

	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
		uint32_t align_mask = 0;
		uint32_t icd_post_per_page = 0;
		uint32_t icd_count_unavailable = 0;
		uint32_t icd_start = 0, icd_count = 0;
		uint32_t icd_start_align = 0, icd_count_align = 0;

		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
			icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
			icd_count = phba->fw_config.iscsi_icd_count[ulp_num];

			/* Get ICD count that can be posted on each page */
			icd_post_per_page = (PAGE_SIZE / (BE2_SGE *
					     sizeof(struct iscsi_sge)));
			align_mask = (icd_post_per_page - 1);

			/* Check if icd_start is aligned to the per-page ICD posting */
			if (icd_start % icd_post_per_page) {
				icd_start_align = ((icd_start +
						    icd_post_per_page) &
						    ~(align_mask));
				phba->fw_config.
					iscsi_icd_start[ulp_num] =
					icd_start_align;
			}

			icd_count_align = (icd_count & ~align_mask);

			/* ICD discarded in the process of alignment */
			if (icd_start_align)
				icd_count_unavailable = ((icd_start_align -
							  icd_start) +
							 (icd_count -
							  icd_count_align));

			/* Updated ICD count available */
			phba->fw_config.iscsi_icd_count[ulp_num] = (icd_count -
					icd_count_unavailable);

			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
					"BM_%d : Aligned ICD values\n"
					"\t ICD Start : %d\n"
					"\t ICD Count : %d\n"
					"\t ICD Discarded : %d\n",
					phba->fw_config.
					iscsi_icd_start[ulp_num],
					phba->fw_config.
					iscsi_icd_count[ulp_num],
					icd_count_unavailable);
			break;
		}
	}

	total_icd_count = phba->fw_config.iscsi_icd_count[ulp_num];
	phba->params.ios_per_ctrl = (total_icd_count -
				    (total_cid_count +
				     BE2_TMFS + BE2_NOPOUT_REQ));
	phba->params.cxns_per_ctrl = total_cid_count;
	phba->params.asyncpdus_per_ctrl = total_cid_count;
	phba->params.icds_per_ctrl = total_icd_count;
	phba->params.num_sge_per_io = BE2_SGE;
	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
	phba->params.num_eq_entries = 1024;
	phba->params.num_cq_entries = 1024;
	phba->params.wrbs_per_cxn = 256;
}
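
/*
 * Worked example for the ICD alignment above (illustrative, assuming 4 KiB
 * pages, BE2_SGE = 32 and sizeof(struct iscsi_sge) = 16):
 *
 *	icd_post_per_page = 4096 / (32 * 16) = 8, align_mask = 7
 *
 * An unaligned icd_start of 10 is rounded up to (10 + 8) & ~7 = 16, an
 * icd_count of 100 is rounded down to 96, and the ICDs skipped at the head
 * plus those trimmed at the tail, (16 - 10) + (100 - 96) = 10, are reported
 * as "ICD Discarded", leaving 90 usable ICDs.
 */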

static void hwi_ring_eq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int clr_interrupt,
			   unsigned int num_processed,
			   unsigned char rearm, unsigned char event)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_EQ_REARM_SHIFT;
	if (clr_interrupt)
		val |= 1 << DB_EQ_CLR_SHIFT;
	if (event)
		val |= 1 << DB_EQ_EVNT_SHIFT;

	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
	/* Setting lower order EQ_ID Bits */
	val |= (id & DB_EQ_RING_ID_LOW_MASK);

	/* Setting Higher order EQ_ID Bits */
	val |= (((id >> DB_EQ_HIGH_FEILD_SHIFT) &
		  DB_EQ_RING_ID_HIGH_MASK)
		  << DB_EQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
}
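
/*
 * EQ doorbell composition sketch (shift/mask names as used above; the exact
 * bit positions live in be_main.h): the EQ id is split into a low field plus
 * a high field for ids wider than the low field, followed by the
 * clear/event/num-popped/rearm control bits. For example, rearming EQ 5
 * after consuming 3 events would write
 *
 *	val = (5 & DB_EQ_RING_ID_LOW_MASK)
 *	    | (3 << DB_EQ_NUM_POPPED_SHIFT)
 *	    | (1 << DB_EQ_REARM_SHIFT);
 */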

/**
 * be_isr_mcc - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_mcc(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;
	phba = pbe_eq->phba;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		     resource_id) / 32] &
		     EQE_RESID_MASK) >> 16) == mcc->id) {
			mcc_events++;
		}
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}

	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		hwi_ring_eq_db(phba, eq->id, 1, mcc_events, 1, 1);
	}
	return IRQ_HANDLED;
}

/**
 * be_isr_msix - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr_msix(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct be_queue_info *eq;
	struct be_eq_obj *pbe_eq;

	pbe_eq = dev_id;
	eq = &pbe_eq->q;

	phba = pbe_eq->phba;
	/* disable interrupt till iopoll completes */
	hwi_ring_eq_db(phba, eq->id, 1, 0, 0, 1);
	irq_poll_sched(&pbe_eq->iopoll);

	return IRQ_HANDLED;
}

/**
 * be_isr - The isr routine of the driver.
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
static irqreturn_t be_isr(int irq, void *dev_id)
{
	struct beiscsi_hba *phba;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	struct be_eq_entry *eqe;
	struct be_queue_info *eq;
	struct be_queue_info *mcc;
	unsigned int mcc_events, io_events;
	struct be_ctrl_info *ctrl;
	struct be_eq_obj *pbe_eq;
	int isr, rearm;

	phba = dev_id;
	ctrl = &phba->ctrl;
	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
	if (!isr)
		return IRQ_NONE;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;
	pbe_eq = &phwi_context->be_eq[0];

	eq = &phwi_context->be_eq[0].q;
	mcc = &phba->ctrl.mcc_obj.cq;
	eqe = queue_tail_node(eq);

	io_events = 0;
	mcc_events = 0;
	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
				& EQE_VALID_MASK) {
		if (((eqe->dw[offsetof(struct amap_eq_entry,
		      resource_id) / 32] & EQE_RESID_MASK) >> 16) == mcc->id)
			mcc_events++;
		else
			io_events++;
		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
		queue_tail_inc(eq);
		eqe = queue_tail_node(eq);
	}
	if (!io_events && !mcc_events)
		return IRQ_NONE;

	/* no need to rearm if interrupt is only for IOs */
	rearm = 0;
	if (mcc_events) {
		queue_work(phba->wq, &pbe_eq->mcc_work);
		/* rearm for MCCQ */
		rearm = 1;
	}
	if (io_events)
		irq_poll_sched(&pbe_eq->iopoll);
	hwi_ring_eq_db(phba, eq->id, 0, (io_events + mcc_events), rearm, 1);
	return IRQ_HANDLED;
}

static int beiscsi_init_irqs(struct beiscsi_hba *phba)
{
	struct pci_dev *pcidev = phba->pcidev;
	struct hwi_controller *phwi_ctrlr;
	struct hwi_context_memory *phwi_context;
	int ret, msix_vec, i, j;

	phwi_ctrlr = phba->phwi_ctrlr;
	phwi_context = phwi_ctrlr->phwi_ctxt;

	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME,
						    GFP_KERNEL);
			if (!phba->msi_name[i]) {
				ret = -ENOMEM;
				goto free_msix_irqs;
			}

			sprintf(phba->msi_name[i], "beiscsi_%02x_%02x",
				phba->shost->host_no, i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0,
					  phba->msi_name[i],
					  &phwi_context->be_eq[i]);
			if (ret) {
				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
					    "BM_%d : beiscsi_init_irqs-Failed to "
					    "register msix for i = %d\n",
					    i);
				kfree(phba->msi_name[i]);
				goto free_msix_irqs;
			}
		}
		phba->msi_name[i] = kzalloc(BEISCSI_MSI_NAME, GFP_KERNEL);
		if (!phba->msi_name[i]) {
			ret = -ENOMEM;
			goto free_msix_irqs;
		}
		sprintf(phba->msi_name[i], "beiscsi_mcc_%02x",
			phba->shost->host_no);
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, phba->msi_name[i],
				  &phwi_context->be_eq[i]);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register beiscsi_msix_mcc\n");
			kfree(phba->msi_name[i]);
			goto free_msix_irqs;
		}

	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
				    "BM_%d : beiscsi_init_irqs-"
				    "Failed to register irq\n");
			return ret;
		}
	}
	return 0;
free_msix_irqs:
	for (j = i - 1; j >= 0; j--) {
		kfree(phba->msi_name[j]);
		msix_vec = phba->msix_entries[j].vector;
		free_irq(msix_vec, &phwi_context->be_eq[j]);
	}
	return ret;
}

void hwi_ring_cq_db(struct beiscsi_hba *phba,
			   unsigned int id, unsigned int num_processed,
			   unsigned char rearm)
{
	u32 val = 0;

	if (rearm)
		val |= 1 << DB_CQ_REARM_SHIFT;

	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;

	/* Setting lower order CQ_ID Bits */
	val |= (id & DB_CQ_RING_ID_LOW_MASK);

	/* Setting Higher order CQ_ID Bits */
	val |= (((id >> DB_CQ_HIGH_FEILD_SHIFT) &
		  DB_CQ_RING_ID_HIGH_MASK)
		  << DB_CQ_HIGH_SET_SHIFT);

	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
}

static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	if (phba->io_sgl_hndl_avbl) {
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : In alloc_io_sgl_handle,"
			    " io_sgl_alloc_index=%d\n",
			    phba->io_sgl_alloc_index);

		psgl_handle = phba->io_sgl_hndl_base[phba->
						io_sgl_alloc_index];
		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
		phba->io_sgl_hndl_avbl--;
		if (phba->io_sgl_alloc_index == (phba->params.
						 ios_per_ctrl - 1))
			phba->io_sgl_alloc_index = 0;
		else
			phba->io_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
	return psgl_handle;
}

static void
free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->io_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
		    "BM_%d : In free_io_sgl_handle, io_sgl_free_index=%d\n",
		    phba->io_sgl_free_index);

	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_IO,
			    "BM_%d : Double Free in IO SGL io_sgl_free_index=%d, "
			    "value there=%p\n", phba->io_sgl_free_index,
			    phba->io_sgl_hndl_base
			    [phba->io_sgl_free_index]);
		spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
		return;
	}
	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
	phba->io_sgl_hndl_avbl++;
	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
		phba->io_sgl_free_index = 0;
	else
		phba->io_sgl_free_index++;
	spin_unlock_irqrestore(&phba->io_sgl_lock, flags);
}

static inline struct wrb_handle *
beiscsi_get_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       unsigned int wrbs_per_cxn)
{
	struct wrb_handle *pwrb_handle;
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	if (!pwrb_context->wrb_handles_available) {
		spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
		return NULL;
	}
	pwrb_handle = pwrb_context->pwrb_handle_base[pwrb_context->alloc_index];
	pwrb_context->wrb_handles_available--;
	if (pwrb_context->alloc_index == (wrbs_per_cxn - 1))
		pwrb_context->alloc_index = 0;
	else
		pwrb_context->alloc_index++;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);

	if (pwrb_handle)
		memset(pwrb_handle->pwrb, 0, sizeof(*pwrb_handle->pwrb));

	return pwrb_handle;
}
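
/*
 * Pool bookkeeping note (applies to the SGL and WRB pools above): each pool
 * is an array of handle pointers managed as a ring with separate alloc and
 * free cursors. Allocation NULLs the slot it hands out and freeing stores
 * the handle back at free_index, so a double free shows up as a non-NULL
 * slot at free_index. Both cursors simply wrap at the pool size.
 */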

/**
 * alloc_wrb_handle - To allocate a wrb handle
 * @phba: The hba pointer
 * @cid: The cid to use for allocation
 * @pcontext: ptr to ptr to wrb context
 *
 * This happens under session_lock until submission to chip
 */
struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid,
				    struct hwi_wrb_context **pcontext)
{
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	uint16_t cri_index = BE_GET_CRI_FROM_CID(cid);

	phwi_ctrlr = phba->phwi_ctrlr;
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	/* return the context address */
	*pcontext = pwrb_context;
	return beiscsi_get_wrb_handle(pwrb_context, phba->params.wrbs_per_cxn);
}

static inline void
beiscsi_put_wrb_handle(struct hwi_wrb_context *pwrb_context,
		       struct wrb_handle *pwrb_handle,
		       unsigned int wrbs_per_cxn)
{
	unsigned long flags;

	spin_lock_irqsave(&pwrb_context->wrb_lock, flags);
	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
	pwrb_context->wrb_handles_available++;
	if (pwrb_context->free_index == (wrbs_per_cxn - 1))
		pwrb_context->free_index = 0;
	else
		pwrb_context->free_index++;
	pwrb_handle->pio_handle = NULL;
	spin_unlock_irqrestore(&pwrb_context->wrb_lock, flags);
}

/**
 * free_wrb_handle - To free the wrb handle back to pool
 * @phba: The hba pointer
 * @pwrb_context: The context to free from
 * @pwrb_handle: The wrb_handle to free
 *
 * This happens under session_lock until submission to chip
 */
static void
free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
		struct wrb_handle *pwrb_handle)
{
	beiscsi_put_wrb_handle(pwrb_context,
			       pwrb_handle,
			       phba->params.wrbs_per_cxn);
	beiscsi_log(phba, KERN_INFO,
		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
		    "BM_%d : FREE WRB: pwrb_handle=%p free_index=0x%x "
		    "wrb_handles_available=%d\n",
		    pwrb_handle, pwrb_context->free_index,
		    pwrb_context->wrb_handles_available);
}

static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
{
	struct sgl_handle *psgl_handle;
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	if (phba->eh_sgl_hndl_avbl) {
		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
			    "BM_%d : mgmt_sgl_alloc_index=%d=0x%x\n",
			    phba->eh_sgl_alloc_index,
			    phba->eh_sgl_alloc_index);

		phba->eh_sgl_hndl_avbl--;
		if (phba->eh_sgl_alloc_index ==
		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
		     1))
			phba->eh_sgl_alloc_index = 0;
		else
			phba->eh_sgl_alloc_index++;
	} else
		psgl_handle = NULL;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
	return psgl_handle;
}

void
free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
{
	unsigned long flags;

	spin_lock_irqsave(&phba->mgmt_sgl_lock, flags);
	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
		    "BM_%d : In free_mgmt_sgl_handle, "
		    "eh_sgl_free_index=%d\n",
		    phba->eh_sgl_free_index);

	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
		/*
		 * this can happen if clean_task is called on a task that
		 * failed in xmit_task or alloc_pdu.
		 */
		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_CONFIG,
			    "BM_%d : Double Free in eh SGL, "
			    "eh_sgl_free_index=%d\n",
			    phba->eh_sgl_free_index);
		spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
		return;
	}
	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
	phba->eh_sgl_hndl_avbl++;
	if (phba->eh_sgl_free_index ==
	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
		phba->eh_sgl_free_index = 0;
	else
		phba->eh_sgl_free_index++;
	spin_unlock_irqrestore(&phba->mgmt_sgl_lock, flags);
}

static void
be_complete_io(struct beiscsi_conn *beiscsi_conn,
		struct iscsi_task *task,
		struct common_sol_cqe *csol_cqe)
{
	struct beiscsi_io_task *io_task = task->dd_data;
	struct be_status_bhs *sts_bhs =
				(struct be_status_bhs *)io_task->cmd_bhs;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	unsigned char *sense;
	u32 resid = 0, exp_cmdsn, max_cmdsn;
	u8 rsp, status, flags;

	exp_cmdsn = csol_cqe->exp_cmdsn;
	max_cmdsn = (csol_cqe->exp_cmdsn +
		     csol_cqe->cmd_wnd - 1);
	rsp = csol_cqe->i_resp;
	status = csol_cqe->i_sts;
	flags = csol_cqe->i_flags;
	resid = csol_cqe->res_cnt;

	if (!task->sc) {
		if (io_task->scsi_cmnd) {
			scsi_dma_unmap(io_task->scsi_cmnd);
			io_task->scsi_cmnd = NULL;
		}

		return;
	}
	task->sc->result = (DID_OK << 16) | status;
	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
		task->sc->result = DID_ERROR << 16;
		goto unmap;
	}

	/* bidi not initially supported */
	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
			task->sc->result = DID_ERROR << 16;

		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
			scsi_set_resid(task->sc, resid);
			if (!status && (scsi_bufflen(task->sc) - resid <
			    task->sc->underflow))
				task->sc->result = DID_ERROR << 16;
		}
	}

	if (status == SAM_STAT_CHECK_CONDITION) {
		u16 sense_len;
		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;

		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
		memcpy(task->sc->sense_buffer, sense,
		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
	}

	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ)
		conn->rxdata_octets += resid;
unmap:
	if (io_task->scsi_cmnd) {
		scsi_dma_unmap(io_task->scsi_cmnd);
		io_task->scsi_cmnd = NULL;
	}
	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
}

static void
be_complete_logout(struct beiscsi_conn *beiscsi_conn,
		    struct iscsi_task *task,
		    struct common_sol_cqe *csol_cqe)
{
	struct iscsi_logout_rsp *hdr;
	struct beiscsi_io_task *io_task = task->dd_data;
	struct iscsi_conn *conn = beiscsi_conn->conn;

	hdr = (struct iscsi_logout_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
	hdr->t2wait = 5;
	hdr->t2retain = 0;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->dlength[0] = 0;
	hdr->dlength[1] = 0;
	hdr->dlength[2] = 0;
	hdr->hlength = 0;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
		 struct iscsi_task *task,
		 struct common_sol_cqe *csol_cqe)
{
	struct iscsi_tm_rsp *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_tm_rsp *)task->hdr;
	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
	hdr->flags = csol_cqe->i_flags;
	hdr->response = csol_cqe->i_resp;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void
hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
		       struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct hwi_wrb_context *pwrb_context;
	uint16_t wrb_index, cid, cri_index;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_session *session;
	struct iscsi_task *task;

	phwi_ctrlr = phba->phwi_ctrlr;
	if (is_chip_be2_be3r(phba)) {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe,
				    cid, psol);
	} else {
		wrb_index = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
					  wrb_idx, psol);
		cid = AMAP_GET_BITS(struct amap_it_dmsg_cqe_v2,
				    cid, psol);
	}

	cri_index = BE_GET_CRI_FROM_CID(cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
	pwrb_handle = pwrb_context->pwrb_handle_basestd[wrb_index];
	session = beiscsi_conn->conn->session;
	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (task)
		__iscsi_put_task(task);
	spin_unlock_bh(&session->back_lock);
}

static void
be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
			struct iscsi_task *task,
			struct common_sol_cqe *csol_cqe)
{
	struct iscsi_nopin *hdr;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task = task->dd_data;

	hdr = (struct iscsi_nopin *)task->hdr;
	hdr->flags = csol_cqe->i_flags;
	hdr->exp_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn);
	hdr->max_cmdsn = cpu_to_be32(csol_cqe->exp_cmdsn +
				     csol_cqe->cmd_wnd - 1);

	hdr->opcode = ISCSI_OP_NOOP_IN;
	hdr->itt = io_task->libiscsi_itt;
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
}

static void adapter_get_sol_cqe(struct beiscsi_hba *phba,
		struct sol_cqe *psol,
		struct common_sol_cqe *csol_cqe)
{
	if (is_chip_be2_be3r(phba)) {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_res_cnt, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_cmd_wnd, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						 hw_sts, psol);
		csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe,
						 i_resp, psol);
		csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe,
						i_sts, psol);
		csol_cqe->i_flags = AMAP_GET_BITS(struct amap_sol_cqe,
						  i_flags, psol);
	} else {
		csol_cqe->exp_cmdsn = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    i_exp_cmd_sn, psol);
		csol_cqe->res_cnt = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_res_cnt, psol);
		csol_cqe->wrb_index = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						    wrb_index, psol);
		csol_cqe->cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
					      cid, psol);
		csol_cqe->hw_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						 hw_sts, psol);
		csol_cqe->cmd_wnd = AMAP_GET_BITS(struct amap_sol_cqe_v2,
						  i_cmd_wnd, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  cmd_cmpl, psol))
			csol_cqe->i_sts = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							i_sts, psol);
		else
			csol_cqe->i_resp = AMAP_GET_BITS(struct amap_sol_cqe_v2,
							 i_sts, psol);
		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  u, psol))
			csol_cqe->i_flags = ISCSI_FLAG_CMD_UNDERFLOW;

		if (AMAP_GET_BITS(struct amap_sol_cqe_v2,
				  o, psol))
			csol_cqe->i_flags |= ISCSI_FLAG_CMD_OVERFLOW;
	}
}

static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
			     struct beiscsi_hba *phba, struct sol_cqe *psol)
{
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct iscsi_session *session = conn->session;
	struct common_sol_cqe csol_cqe = {0};
	struct hwi_wrb_context *pwrb_context;
	struct hwi_controller *phwi_ctrlr;
	struct wrb_handle *pwrb_handle;
	struct iscsi_task *task;
	uint16_t cri_index = 0;
	uint8_t type;

	phwi_ctrlr = phba->phwi_ctrlr;

	/* Copy the elements to a common structure */
	adapter_get_sol_cqe(phba, psol, &csol_cqe);

	cri_index = BE_GET_CRI_FROM_CID(csol_cqe.cid);
	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];

	pwrb_handle = pwrb_context->pwrb_handle_basestd[
		      csol_cqe.wrb_index];

	spin_lock_bh(&session->back_lock);
	task = pwrb_handle->pio_handle;
	if (!task) {
		spin_unlock_bh(&session->back_lock);
		return;
	}
	type = ((struct beiscsi_io_task *)task->dd_data)->wrb_type;

	switch (type) {
	case HWH_TYPE_IO:
	case HWH_TYPE_IO_RD:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
		     ISCSI_OP_NOOP_OUT)
			be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_io(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGOUT:
		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
			be_complete_logout(beiscsi_conn, task, &csol_cqe);
		else
			be_complete_tmf(beiscsi_conn, task, &csol_cqe);
		break;

	case HWH_TYPE_LOGIN:
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d :\t\t No HWH_TYPE_LOGIN Expected in"
			    " hwi_complete_cmd- Solicited path\n");
		break;

	case HWH_TYPE_NOP:
		be_complete_nopin_resp(beiscsi_conn, task, &csol_cqe);
		break;

	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In hwi_complete_cmd, unknown type = %d "
			    "wrb_index 0x%x CID 0x%x\n", type,
			    csol_cqe.wrb_index,
			    csol_cqe.cid);
		break;
	}

	spin_unlock_bh(&session->back_lock);
}

/**
 * ASYNC PDUs include
 * a. Unsolicited NOP-In (target initiated NOP-In)
 * b. ASYNC Messages
 * c. Reject PDU
 * d. Login response
 * These headers arrive unprocessed by the EP firmware.
 * iSCSI layer processes them.
 */
static unsigned int
beiscsi_complete_pdu(struct beiscsi_conn *beiscsi_conn,
		struct pdu_base *phdr, void *pdata, unsigned int dlen)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct iscsi_conn *conn = beiscsi_conn->conn;
	struct beiscsi_io_task *io_task;
	struct iscsi_hdr *login_hdr;
	struct iscsi_task *task;
	u8 code;

	code = AMAP_GET_BITS(struct amap_pdu_base, opcode, phdr);
	switch (code) {
	case ISCSI_OP_NOOP_IN:
		pdata = NULL;
		dlen = 0;
		break;
	case ISCSI_OP_ASYNC_EVENT:
		break;
	case ISCSI_OP_REJECT:
		WARN_ON(!pdata);
		WARN_ON(dlen != 48);
		beiscsi_log(phba, KERN_ERR,
			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
			    "BM_%d : In ISCSI_OP_REJECT\n");
		break;
	case ISCSI_OP_LOGIN_RSP:
	case ISCSI_OP_TEXT_RSP:
		task = conn->login_task;
		io_task = task->dd_data;
		login_hdr = (struct iscsi_hdr *)phdr;
		login_hdr->itt = io_task->libiscsi_itt;
		break;
	default:
		beiscsi_log(phba, KERN_WARNING,
			    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
			    "BM_%d : unrecognized async PDU opcode 0x%x\n",
			    code);
		return 1;
	}
	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)phdr, pdata, dlen);
	return 0;
}

static inline void
beiscsi_hdl_put_handle(struct hd_async_context *pasync_ctx,
			 struct hd_async_handle *pasync_handle)
{
	if (pasync_handle->is_header) {
		list_add_tail(&pasync_handle->link,
				&pasync_ctx->async_header.free_list);
		pasync_ctx->async_header.free_entries++;
	} else {
		list_add_tail(&pasync_handle->link,
				&pasync_ctx->async_data.free_list);
		pasync_ctx->async_data.free_entries++;
	}
}

static struct hd_async_handle *
beiscsi_hdl_get_handle(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct i_t_dpdu_cqe *pdpdu_cqe)
{
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	struct hd_async_handle *pasync_handle;
	struct be_bus_address phys_addr;
	u8 final, error = 0;
	u16 cid, code, ci;
	u32 dpl;

	cid = beiscsi_conn->beiscsi_conn_cid;
	/**
	 * This function is invoked to get the right async_handle structure
	 * from a given DEF PDU CQ entry.
	 *
	 * - index in CQ entry gives the vertical index
	 * - address in CQ entry is the offset where the DMA last ended
	 * - final - no more notifications for this PDU
	 */
	if (is_chip_be2_be3r(phba)) {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				      index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
				      final, pdpdu_cqe);
	} else {
		dpl = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				    dpl, pdpdu_cqe);
		ci = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				      index, pdpdu_cqe);
		final = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe_v2,
				      final, pdpdu_cqe);
	}

	/**
	 * DB addr Hi/Lo is same for BE and SKH.
	 * Subtract the dataplacementlength to get to the base.
	 */
	phys_addr.u.a32.address_lo = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_lo, pdpdu_cqe);
	phys_addr.u.a32.address_lo -= dpl;
	phys_addr.u.a32.address_hi = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe,
						   db_addr_hi, pdpdu_cqe);

	code = AMAP_GET_BITS(struct amap_i_t_dpdu_cqe, code, pdpdu_cqe);
	switch (code) {
	case UNSOL_HDR_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].header;
		break;
	case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
		error = 1;
		/* fall through */
	case UNSOL_DATA_NOTIFY:
		pasync_handle = pasync_ctx->async_entry[ci].data;
		break;
	/* called only for above codes */
	default:
		pasync_handle = NULL;
		break;
	}

	if (!pasync_handle) {
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %d async PDU handle not found - code %d ci %d addr %llx\n",
			    cid, code, ci, phys_addr.u.a64.address);
		return pasync_handle;
	}

	if (pasync_handle->pa.u.a64.address != phys_addr.u.a64.address ||
	    pasync_handle->index != ci) {
		/* driver bug - if ci does not match async handle index */
		error = 1;
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u async PDU handle mismatch - addr in %cQE %llx at %u:addr in CQE %llx ci %u\n",
			    cid, pasync_handle->is_header ? 'H' : 'D',
			    pasync_handle->pa.u.a64.address,
			    pasync_handle->index,
			    phys_addr.u.a64.address, ci);
		/* FW has stale address - attempt continuing by dropping */
	}

	/**
	 * Each CID is associated with unique CRI.
	 * ASYNC_CRI_FROM_CID mapping and CRI_FROM_CID are totally different.
	 **/
	pasync_handle->cri = BE_GET_ASYNC_CRI_FROM_CID(cid);
	pasync_handle->is_final = final;
	pasync_handle->buffer_len = dpl;
	/* empty the slot */
	if (pasync_handle->is_header)
		pasync_ctx->async_entry[ci].header = NULL;
	else
		pasync_ctx->async_entry[ci].data = NULL;

	/**
	 * DEF PDU header and data buffers with errors should be simply
	 * dropped as there are no consumers for it.
	 */
	if (error) {
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
		pasync_handle = NULL;
	}
	return pasync_handle;
}

static void
beiscsi_hdl_purge_handles(struct beiscsi_hba *phba,
			  struct hd_async_context *pasync_ctx,
			  u16 cri)
{
	struct hd_async_handle *pasync_handle, *tmp_handle;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
		list_del(&pasync_handle->link);
		beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
	}

	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wq.list);
	pasync_ctx->async_entry[cri].wq.hdr_len = 0;
	pasync_ctx->async_entry[cri].wq.bytes_received = 0;
	pasync_ctx->async_entry[cri].wq.bytes_needed = 0;
}

static unsigned int
beiscsi_hdl_fwd_pdu(struct beiscsi_conn *beiscsi_conn,
		    struct hd_async_context *pasync_ctx,
		    u16 cri)
{
	struct iscsi_session *session = beiscsi_conn->conn->session;
	struct hd_async_handle *pasync_handle, *plast_handle;
	struct beiscsi_hba *phba = beiscsi_conn->phba;
	void *phdr = NULL, *pdata = NULL;
	u32 dlen = 0, status = 0;
	struct list_head *plist;

	plist = &pasync_ctx->async_entry[cri].wq.list;
	plast_handle = NULL;
	list_for_each_entry(pasync_handle, plist, link) {
		plast_handle = pasync_handle;
		/* get the header, the first entry */
		if (!phdr) {
			phdr = pasync_handle->pbuffer;
			continue;
		}
		/* use first buffer to collect all the data */
		if (!pdata) {
			pdata = pasync_handle->pbuffer;
			dlen = pasync_handle->buffer_len;
			continue;
		}
		memcpy(pdata + dlen, pasync_handle->pbuffer,
		       pasync_handle->buffer_len);
		dlen += pasync_handle->buffer_len;
	}

	if (!plast_handle->is_final) {
		/* last handle should have final PDU notification from FW */
		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
			    "BM_%d : cid %u %p fwd async PDU with last handle missing - HL%u:DN%u:DR%u\n",
			    beiscsi_conn->beiscsi_conn_cid, plast_handle,
			    pasync_ctx->async_entry[cri].wq.hdr_len,
			    pasync_ctx->async_entry[cri].wq.bytes_needed,
			    pasync_ctx->async_entry[cri].wq.bytes_received);
	}
	spin_lock_bh(&session->back_lock);
	status = beiscsi_complete_pdu(beiscsi_conn, phdr, pdata, dlen);
	spin_unlock_bh(&session->back_lock);
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	return status;
}

static unsigned int
beiscsi_hdl_gather_pdu(struct beiscsi_conn *beiscsi_conn,
		       struct hd_async_context *pasync_ctx,
		       struct hd_async_handle *pasync_handle)
{
	unsigned int bytes_needed = 0, status = 0;
	u16 cri = pasync_handle->cri;
	struct cri_wait_queue *wq;
	struct beiscsi_hba *phba;
	struct pdu_base *ppdu;
	char *err = "";

	phba = beiscsi_conn->phba;
	wq = &pasync_ctx->async_entry[cri].wq;
	if (pasync_handle->is_header) {
		/* check if PDU hdr is rcv'd when old hdr not completed */
		if (wq->hdr_len) {
			err = "incomplete";
			goto drop_pdu;
		}
		ppdu = pasync_handle->pbuffer;
		bytes_needed = AMAP_GET_BITS(struct amap_pdu_base,
					     data_len_hi, ppdu);
		bytes_needed <<= 16;
		bytes_needed |= be16_to_cpu(AMAP_GET_BITS(struct amap_pdu_base,
							  data_len_lo, ppdu));
		wq->hdr_len = pasync_handle->buffer_len;
		wq->bytes_received = 0;
		wq->bytes_needed = bytes_needed;
		list_add_tail(&pasync_handle->link, &wq->list);
		if (!bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	} else {
		/* check if data received has header and is needed */
		if (!wq->hdr_len || !wq->bytes_needed) {
			err = "header less";
			goto drop_pdu;
		}
		wq->bytes_received += pasync_handle->buffer_len;
		/* Something got overwritten? Better catch it here. */
		if (wq->bytes_received > wq->bytes_needed) {
			err = "overflow";
			goto drop_pdu;
		}
		list_add_tail(&pasync_handle->link, &wq->list);
		if (wq->bytes_received == wq->bytes_needed)
			status = beiscsi_hdl_fwd_pdu(beiscsi_conn,
						     pasync_ctx, cri);
	}
	return status;

drop_pdu:
	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
		    "BM_%d : cid %u async PDU %s - def-%c:HL%u:DN%u:DR%u\n",
		    beiscsi_conn->beiscsi_conn_cid, err,
		    pasync_handle->is_header ? 'H' : 'D',
		    wq->hdr_len, wq->bytes_needed,
		    pasync_handle->buffer_len);
	/* discard this handle */
	beiscsi_hdl_put_handle(pasync_ctx, pasync_handle);
	/* free all the other handles in cri_wait_queue */
	beiscsi_hdl_purge_handles(phba, pasync_ctx, cri);
	/* try continuing */
	return status;
}
1709 
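/**
 * beiscsi_hdq_post_handles()- Replenish a default PDU ring
 * @phba: ptr to HBA struct
 * @header: non-zero to post to the header ring, zero for the data ring
 * @ulp_num: ULP whose ring is being replenished
 *
 * Handles are re-posted from the free list only when the free count is a
 * multiple of 8, and the default PDU doorbell is rung with the count posted.
 **/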
1710 static void
1711 beiscsi_hdq_post_handles(struct beiscsi_hba *phba,
1712 			 u8 header, u8 ulp_num)
1713 {
1714 	struct hd_async_handle *pasync_handle, *tmp, **slot;
1715 	struct hd_async_context *pasync_ctx;
1716 	struct hwi_controller *phwi_ctrlr;
1717 	struct list_head *hfree_list;
1718 	struct phys_addr *pasync_sge;
1719 	u32 ring_id, doorbell = 0;
1720 	u32 doorbell_offset;
1721 	u16 prod = 0, cons;
1722 	u16 index;
1723 
1724 	phwi_ctrlr = phba->phwi_ctrlr;
1725 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
1726 	if (header) {
1727 		cons = pasync_ctx->async_header.free_entries;
1728 		hfree_list = &pasync_ctx->async_header.free_list;
1729 		ring_id = phwi_ctrlr->default_pdu_hdr[ulp_num].id;
1730 		doorbell_offset = phwi_ctrlr->default_pdu_hdr[ulp_num].
1731 					doorbell_offset;
1732 	} else {
1733 		cons = pasync_ctx->async_data.free_entries;
1734 		hfree_list = &pasync_ctx->async_data.free_list;
1735 		ring_id = phwi_ctrlr->default_pdu_data[ulp_num].id;
1736 		doorbell_offset = phwi_ctrlr->default_pdu_data[ulp_num].
1737 					doorbell_offset;
1738 	}
1739 	/* number of entries posted must be in multiples of 8 */
1740 	if (cons % 8)
1741 		return;
1742 
1743 	list_for_each_entry_safe(pasync_handle, tmp, hfree_list, link) {
1744 		list_del_init(&pasync_handle->link);
1745 		pasync_handle->is_final = 0;
1746 		pasync_handle->buffer_len = 0;
1747 
1748 		/* handles can be consumed out of order, use index in handle */
1749 		index = pasync_handle->index;
1750 		WARN_ON(pasync_handle->is_header != header);
1751 		if (header)
1752 			slot = &pasync_ctx->async_entry[index].header;
1753 		else
1754 			slot = &pasync_ctx->async_entry[index].data;
1755 		/*
1756 		 * The slot just tracks the handle's hold and release, so
1757 		 * overwriting at the same index does no harm, but it still
1758 		 * needs to be caught.
1759 		 */
1760 		if (*slot != NULL) {
1761 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_ISCSI,
1762 				    "BM_%d : async PDU %s slot at %u not empty\n",
1763 				    header ? "header" : "data", index);
1764 		}
1765 		/*
1766 		 * We use the same freed index as in completion to post, so
1767 		 * this operation is not required for refills; it's required
1768 		 * only for ring creation.
1769 		 */
1770 		if (header)
1771 			pasync_sge = pasync_ctx->async_header.ring_base;
1772 		else
1773 			pasync_sge = pasync_ctx->async_data.ring_base;
1774 		pasync_sge += index;
1775 		/* if it's a refill, the address is unchanged; note hi and lo are swapped */
1776 		WARN_ON(pasync_sge->hi &&
1777 			pasync_sge->hi != pasync_handle->pa.u.a32.address_lo);
1778 		WARN_ON(pasync_sge->lo &&
1779 			pasync_sge->lo != pasync_handle->pa.u.a32.address_hi);
1780 		pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1781 		pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1782 
1783 		*slot = pasync_handle;
1784 		if (++prod == cons)
1785 			break;
1786 	}
1787 	if (header)
1788 		pasync_ctx->async_header.free_entries -= prod;
1789 	else
1790 		pasync_ctx->async_data.free_entries -= prod;
1791 
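	/* ring the doorbell: ring id, re-arm, and the number of entries posted */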
1792 	doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1793 	doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1794 	doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1795 	doorbell |= (prod & DB_DEF_PDU_CQPROC_MASK) << DB_DEF_PDU_CQPROC_SHIFT;
1796 	iowrite32(doorbell, phba->db_va + doorbell_offset);
1797 }
1798 
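/**
 * beiscsi_hdq_process_compl()- Consume one unsolicited default PDU CQE
 * @beiscsi_conn: connection the completion belongs to
 * @pdpdu_cqe: default PDU CQE posted by FW
 *
 * Looks up the async handle for this CQE, gathers/forwards the PDU and
 * re-posts free handles to the ring the handle came from.
 **/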
1799 static void
1800 beiscsi_hdq_process_compl(struct beiscsi_conn *beiscsi_conn,
1801 			  struct i_t_dpdu_cqe *pdpdu_cqe)
1802 {
1803 	struct beiscsi_hba *phba = beiscsi_conn->phba;
1804 	struct hd_async_handle *pasync_handle = NULL;
1805 	struct hd_async_context *pasync_ctx;
1806 	struct hwi_controller *phwi_ctrlr;
1807 	u16 cid_cri;
1808 	u8 ulp_num;
1809 
1810 	phwi_ctrlr = phba->phwi_ctrlr;
1811 	cid_cri = BE_GET_CRI_FROM_CID(beiscsi_conn->beiscsi_conn_cid);
1812 	ulp_num = BEISCSI_GET_ULP_FROM_CRI(phwi_ctrlr, cid_cri);
1813 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr, ulp_num);
1814 	pasync_handle = beiscsi_hdl_get_handle(beiscsi_conn, pasync_ctx,
1815 					       pdpdu_cqe);
1816 	if (!pasync_handle)
1817 		return;
1818 
1819 	beiscsi_hdl_gather_pdu(beiscsi_conn, pasync_ctx, pasync_handle);
1820 	beiscsi_hdq_post_handles(phba, pasync_handle->is_header, ulp_num);
1821 }
1822 
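/**
 * beiscsi_process_mcc_cq()- Drain the MCC completion queue
 * @phba: ptr to HBA struct
 *
 * Dispatches async events and MCC command completions. The CQ doorbell
 * is rung every 32 entries and once more with re-arm when done.
 **/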
1823 void beiscsi_process_mcc_cq(struct beiscsi_hba *phba)
1824 {
1825 	struct be_queue_info *mcc_cq;
1826 	struct  be_mcc_compl *mcc_compl;
1827 	unsigned int num_processed = 0;
1828 
1829 	mcc_cq = &phba->ctrl.mcc_obj.cq;
1830 	mcc_compl = queue_tail_node(mcc_cq);
1831 	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1832 	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1833 		if (beiscsi_hba_in_error(phba))
1834 			return;
1835 
1836 		if (num_processed >= 32) {
1837 			hwi_ring_cq_db(phba, mcc_cq->id,
1838 					num_processed, 0);
1839 			num_processed = 0;
1840 		}
1841 		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1842 			beiscsi_process_async_event(phba, mcc_compl);
1843 		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1844 			beiscsi_process_mcc_compl(&phba->ctrl, mcc_compl);
1845 		}
1846 
1847 		mcc_compl->flags = 0;
1848 		queue_tail_inc(mcc_cq);
1849 		mcc_compl = queue_tail_node(mcc_cq);
1850 		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1851 		num_processed++;
1852 	}
1853 
1854 	if (num_processed > 0)
1855 		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1);
1856 }
1857 
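/* Worker to drain the MCC CQ in process context and re-arm its EQ. */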
1858 static void beiscsi_mcc_work(struct work_struct *work)
1859 {
1860 	struct be_eq_obj *pbe_eq;
1861 	struct beiscsi_hba *phba;
1862 
1863 	pbe_eq = container_of(work, struct be_eq_obj, mcc_work);
1864 	phba = pbe_eq->phba;
1865 	beiscsi_process_mcc_cq(phba);
1866 	/* rearm EQ for further interrupts */
1867 	if (!beiscsi_hba_in_error(phba))
1868 		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1869 }
1870 
1871 /**
1872  * beiscsi_process_cq()- Process the Completion Queue
1873  * @pbe_eq: Event Q on which the Completion has come
1874  * @budget: Maximum number of CQ entries to process
1875  *
1876  * Returns:
1877  *     Number of Completion Entries processed.
1878  **/
1879 unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq, int budget)
1880 {
1881 	struct be_queue_info *cq;
1882 	struct sol_cqe *sol;
1884 	unsigned int total = 0;
1885 	unsigned int num_processed = 0;
1886 	unsigned short code = 0, cid = 0;
1887 	uint16_t cri_index = 0;
1888 	struct beiscsi_conn *beiscsi_conn;
1889 	struct beiscsi_endpoint *beiscsi_ep;
1890 	struct iscsi_endpoint *ep;
1891 	struct beiscsi_hba *phba;
1892 
1893 	cq = pbe_eq->cq;
1894 	sol = queue_tail_node(cq);
1895 	phba = pbe_eq->phba;
1896 
1897 	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1898 	       CQE_VALID_MASK) {
1899 		if (beiscsi_hba_in_error(phba))
1900 			return 0;
1901 
1902 		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1903 
1904 		code = (sol->dw[offsetof(struct amap_sol_cqe, code) /
1905 			32] & CQE_CODE_MASK);
1906 
1907 		/* Get the CID */
1908 		if (is_chip_be2_be3r(phba)) {
1909 			cid = AMAP_GET_BITS(struct amap_sol_cqe, cid, sol);
1910 		} else {
1911 			if ((code == DRIVERMSG_NOTIFY) ||
1912 			    (code == UNSOL_HDR_NOTIFY) ||
1913 			    (code == UNSOL_DATA_NOTIFY))
1914 				cid = AMAP_GET_BITS(
1915 						    struct amap_i_t_dpdu_cqe_v2,
1916 						    cid, sol);
1917 			else
1918 				cid = AMAP_GET_BITS(struct amap_sol_cqe_v2,
1919 						    cid, sol);
1920 		}
1921 
1922 		cri_index = BE_GET_CRI_FROM_CID(cid);
1923 		ep = phba->ep_array[cri_index];
1924 
1925 		if (ep == NULL) {
1926 			/* connection has already been freed
1927 			 * just move on to next one
1928 			 */
1929 			beiscsi_log(phba, KERN_WARNING,
1930 				    BEISCSI_LOG_INIT,
1931 				    "BM_%d : proc cqe of disconn ep: cid %d\n",
1932 				    cid);
1933 			goto proc_next_cqe;
1934 		}
1935 
1936 		beiscsi_ep = ep->dd_data;
1937 		beiscsi_conn = beiscsi_ep->conn;
1938 
1939 		/* replenish cq */
1940 		if (num_processed == 32) {
1941 			hwi_ring_cq_db(phba, cq->id, 32, 0);
1942 			num_processed = 0;
1943 		}
1944 		total++;
1945 
1946 		switch (code) {
1947 		case SOL_CMD_COMPLETE:
1948 			hwi_complete_cmd(beiscsi_conn, phba, sol);
1949 			break;
1950 		case DRIVERMSG_NOTIFY:
1951 			beiscsi_log(phba, KERN_INFO,
1952 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1953 				    "BM_%d : Received %s[%d] on CID : %d\n",
1954 				    cqe_desc[code], code, cid);
1955 
1957 			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1958 			break;
1959 		case UNSOL_HDR_NOTIFY:
1960 			beiscsi_log(phba, KERN_INFO,
1961 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1962 				    "BM_%d : Received %s[%d] on CID : %d\n",
1963 				    cqe_desc[code], code, cid);
1964 
1965 			spin_lock_bh(&phba->async_pdu_lock);
1966 			beiscsi_hdq_process_compl(beiscsi_conn,
1967 						  (struct i_t_dpdu_cqe *)sol);
1968 			spin_unlock_bh(&phba->async_pdu_lock);
1969 			break;
1970 		case UNSOL_DATA_NOTIFY:
1971 			beiscsi_log(phba, KERN_INFO,
1972 				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1973 				    "BM_%d : Received %s[%d] on CID : %d\n",
1974 				    cqe_desc[code], code, cid);
1975 
1976 			spin_lock_bh(&phba->async_pdu_lock);
1977 			beiscsi_hdq_process_compl(beiscsi_conn,
1978 						  (struct i_t_dpdu_cqe *)sol);
1979 			spin_unlock_bh(&phba->async_pdu_lock);
1980 			break;
1981 		case CXN_INVALIDATE_INDEX_NOTIFY:
1982 		case CMD_INVALIDATED_NOTIFY:
1983 		case CXN_INVALIDATE_NOTIFY:
1984 			beiscsi_log(phba, KERN_ERR,
1985 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
1986 				    "BM_%d : Ignoring %s[%d] on CID : %d\n",
1987 				    cqe_desc[code], code, cid);
1988 			break;
1989 		case CXN_KILLED_HDR_DIGEST_ERR:
1990 		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1991 			beiscsi_log(phba, KERN_ERR,
1992 				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
1993 				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
1994 				    cqe_desc[code], code,  cid);
1995 			break;
1996 		case CMD_KILLED_INVALID_STATSN_RCVD:
1997 		case CMD_KILLED_INVALID_R2T_RCVD:
1998 		case CMD_CXN_KILLED_LUN_INVALID:
1999 		case CMD_CXN_KILLED_ICD_INVALID:
2000 		case CMD_CXN_KILLED_ITT_INVALID:
2001 		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
2002 		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
2003 			beiscsi_log(phba, KERN_ERR,
2004 				    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2005 				    "BM_%d : Cmd Notification %s[%d] on CID : %d\n",
2006 				    cqe_desc[code], code,  cid);
2007 			break;
2008 		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
2009 			beiscsi_log(phba, KERN_ERR,
2010 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2011 				    "BM_%d : Dropping %s[%d] on DPDU ring on CID : %d\n",
2012 				    cqe_desc[code], code, cid);
2013 			spin_lock_bh(&phba->async_pdu_lock);
2014 			/* driver consumes the entry and drops the contents */
2015 			beiscsi_hdq_process_compl(beiscsi_conn,
2016 						  (struct i_t_dpdu_cqe *)sol);
2017 			spin_unlock_bh(&phba->async_pdu_lock);
2018 			break;
2019 		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
2020 		case CXN_KILLED_BURST_LEN_MISMATCH:
2021 		case CXN_KILLED_AHS_RCVD:
2022 		case CXN_KILLED_UNKNOWN_HDR:
2023 		case CXN_KILLED_STALE_ITT_TTT_RCVD:
2024 		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
2025 		case CXN_KILLED_TIMED_OUT:
2026 		case CXN_KILLED_FIN_RCVD:
2027 		case CXN_KILLED_RST_SENT:
2028 		case CXN_KILLED_RST_RCVD:
2029 		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
2030 		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
2031 		case CXN_KILLED_OVER_RUN_RESIDUAL:
2032 		case CXN_KILLED_UNDER_RUN_RESIDUAL:
2033 		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
2034 			beiscsi_log(phba, KERN_ERR,
2035 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2036 				    "BM_%d : Event %s[%d] received on CID : %d\n",
2037 				    cqe_desc[code], code, cid);
2038 			if (beiscsi_conn)
2039 				iscsi_conn_failure(beiscsi_conn->conn,
2040 						   ISCSI_ERR_CONN_FAILED);
2041 			break;
2042 		default:
2043 			beiscsi_log(phba, KERN_ERR,
2044 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
2045 				    "BM_%d : Invalid CQE Event Received Code : %d "
2046 				    "CID 0x%x...\n",
2047 				    code, cid);
2048 			break;
2049 		}
2050 
2051 proc_next_cqe:
2052 		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
2053 		queue_tail_inc(cq);
2054 		sol = queue_tail_node(cq);
2055 		num_processed++;
2056 		if (total == budget)
2057 			break;
2058 	}
2059 
2060 	hwi_ring_cq_db(phba, cq->id, num_processed, 1);
2061 	return total;
2062 }
2063 
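/*
 * irq_poll handler: consume valid EQEs, ring the EQ doorbell without
 * re-arm, then process up to budget CQEs. If fewer than budget entries
 * were processed, polling is done and the EQ is re-armed.
 */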
2064 static int be_iopoll(struct irq_poll *iop, int budget)
2065 {
2066 	unsigned int ret, io_events;
2067 	struct beiscsi_hba *phba;
2068 	struct be_eq_obj *pbe_eq;
2069 	struct be_eq_entry *eqe = NULL;
2070 	struct be_queue_info *eq;
2071 
2072 	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
2073 	phba = pbe_eq->phba;
2074 	if (beiscsi_hba_in_error(phba)) {
2075 		irq_poll_complete(iop);
2076 		return 0;
2077 	}
2078 
2079 	io_events = 0;
2080 	eq = &pbe_eq->q;
2081 	eqe = queue_tail_node(eq);
2082 	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32] &
2083 			EQE_VALID_MASK) {
2084 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
2085 		queue_tail_inc(eq);
2086 		eqe = queue_tail_node(eq);
2087 		io_events++;
2088 	}
2089 	hwi_ring_eq_db(phba, eq->id, 1, io_events, 0, 1);
2090 
2091 	ret = beiscsi_process_cq(pbe_eq, budget);
2092 	pbe_eq->cq_count += ret;
2093 	if (ret < budget) {
2094 		irq_poll_complete(iop);
2095 		beiscsi_log(phba, KERN_INFO,
2096 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_IO,
2097 			    "BM_%d : rearm pbe_eq->q.id =%d ret %d\n",
2098 			    pbe_eq->q.id, ret);
2099 		if (!beiscsi_hba_in_error(phba))
2100 			hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
2101 	}
2102 	return ret;
2103 }
2104 
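/*
 * Populate a v2 WRB for an I/O task: program the BHS address, place the
 * first two SGEs inline in the WRB, then build the complete SGL in the
 * ICD with running offsets and mark the final entry with last_sge.
 */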
2105 static void
2106 hwi_write_sgl_v2(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2107 		  unsigned int num_sg, struct beiscsi_io_task *io_task)
2108 {
2109 	struct iscsi_sge *psgl;
2110 	unsigned int sg_len, index;
2111 	unsigned int sge_len = 0;
2112 	unsigned long long addr;
2113 	struct scatterlist *l_sg;
2114 	unsigned int offset;
2115 
2116 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_lo, pwrb,
2117 		      io_task->bhs_pa.u.a32.address_lo);
2118 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, iscsi_bhs_addr_hi, pwrb,
2119 		      io_task->bhs_pa.u.a32.address_hi);
2120 
2121 	l_sg = sg;
2122 	for (index = 0; (index < num_sg) && (index < 2); index++,
2123 			sg = sg_next(sg)) {
2124 		if (index == 0) {
2125 			sg_len = sg_dma_len(sg);
2126 			addr = (u64) sg_dma_address(sg);
2127 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2128 				      sge0_addr_lo, pwrb,
2129 				      lower_32_bits(addr));
2130 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2131 				      sge0_addr_hi, pwrb,
2132 				      upper_32_bits(addr));
2133 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2134 				      sge0_len, pwrb,
2135 				      sg_len);
2136 			sge_len = sg_len;
2137 		} else {
2138 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_r2t_offset,
2139 				      pwrb, sge_len);
2140 			sg_len = sg_dma_len(sg);
2141 			addr = (u64) sg_dma_address(sg);
2142 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2143 				      sge1_addr_lo, pwrb,
2144 				      lower_32_bits(addr));
2145 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2146 				      sge1_addr_hi, pwrb,
2147 				      upper_32_bits(addr));
2148 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
2149 				      sge1_len, pwrb,
2150 				      sg_len);
2151 		}
2152 	}
2153 	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2154 	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2155 
2156 	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2157 
2158 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2159 		      io_task->bhs_pa.u.a32.address_hi);
2160 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2161 		      io_task->bhs_pa.u.a32.address_lo);
2162 
2163 	if (num_sg == 1) {
2164 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2165 			      1);
2166 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2167 			      0);
2168 	} else if (num_sg == 2) {
2169 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2170 			      0);
2171 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2172 			      1);
2173 	} else {
2174 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge0_last, pwrb,
2175 			      0);
2176 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sge1_last, pwrb,
2177 			      0);
2178 	}
2179 
2180 	sg = l_sg;
2181 	psgl++;
2182 	psgl++;
2183 	offset = 0;
2184 	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2185 		sg_len = sg_dma_len(sg);
2186 		addr = (u64) sg_dma_address(sg);
2187 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2188 			      lower_32_bits(addr));
2189 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2190 			      upper_32_bits(addr));
2191 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2192 		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2193 		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2194 		offset += sg_len;
2195 	}
2196 	psgl--;
2197 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2198 }
2199 
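/* BE2/BE3-R counterpart of hwi_write_sgl_v2() using the v1 WRB fields. */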
2200 static void
2201 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
2202 	      unsigned int num_sg, struct beiscsi_io_task *io_task)
2203 {
2204 	struct iscsi_sge *psgl;
2205 	unsigned int sg_len, index;
2206 	unsigned int sge_len = 0;
2207 	unsigned long long addr;
2208 	struct scatterlist *l_sg;
2209 	unsigned int offset;
2210 
2211 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2212 				      io_task->bhs_pa.u.a32.address_lo);
2213 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2214 				      io_task->bhs_pa.u.a32.address_hi);
2215 
2216 	l_sg = sg;
2217 	for (index = 0; (index < num_sg) && (index < 2); index++,
2218 							 sg = sg_next(sg)) {
2219 		if (index == 0) {
2220 			sg_len = sg_dma_len(sg);
2221 			addr = (u64) sg_dma_address(sg);
2222 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2223 						((u32)(addr & 0xFFFFFFFF)));
2224 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2225 							((u32)(addr >> 32)));
2226 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2227 							sg_len);
2228 			sge_len = sg_len;
2229 		} else {
2230 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
2231 							pwrb, sge_len);
2232 			sg_len = sg_dma_len(sg);
2233 			addr = (u64) sg_dma_address(sg);
2234 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
2235 						((u32)(addr & 0xFFFFFFFF)));
2236 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
2237 							((u32)(addr >> 32)));
2238 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
2239 							sg_len);
2240 		}
2241 	}
2242 	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2243 	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
2244 
2245 	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
2246 
2247 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2248 			io_task->bhs_pa.u.a32.address_hi);
2249 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2250 			io_task->bhs_pa.u.a32.address_lo);
2251 
2252 	if (num_sg == 1) {
2253 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2254 								1);
2255 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2256 								0);
2257 	} else if (num_sg == 2) {
2258 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2259 								0);
2260 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2261 								1);
2262 	} else {
2263 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
2264 								0);
2265 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
2266 								0);
2267 	}
2268 	sg = l_sg;
2269 	psgl++;
2270 	psgl++;
2271 	offset = 0;
2272 	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
2273 		sg_len = sg_dma_len(sg);
2274 		addr = (u64) sg_dma_address(sg);
2275 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2276 						(addr & 0xFFFFFFFF));
2277 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2278 						(addr >> 32));
2279 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
2280 		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
2281 		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2282 		offset += sg_len;
2283 	}
2284 	psgl--;
2285 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2286 }
2287 
2288 /**
2289  * hwi_write_buffer()- Populate the WRB with task info
2290  * @pwrb: ptr to the WRB entry
2291  * @task: iscsi task which is to be executed
 *
 * Returns 0 on success, -ENOMEM if the task data cannot be DMA-mapped.
2292  **/
2293 static int hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
2294 {
2295 	struct iscsi_sge *psgl;
2296 	struct beiscsi_io_task *io_task = task->dd_data;
2297 	struct beiscsi_conn *beiscsi_conn = io_task->conn;
2298 	struct beiscsi_hba *phba = beiscsi_conn->phba;
2299 	uint8_t dsp_value = 0;
2300 
2301 	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
2302 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
2303 				io_task->bhs_pa.u.a32.address_lo);
2304 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
2305 				io_task->bhs_pa.u.a32.address_hi);
2306 
2307 	if (task->data) {
2308 
2309 		/* Check for the data_count */
2310 		dsp_value = (task->data_count) ? 1 : 0;
2311 
2312 		if (is_chip_be2_be3r(phba))
2313 			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp,
2314 				      pwrb, dsp_value);
2315 		else
2316 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp,
2317 				      pwrb, dsp_value);
2318 
2319 		/* Map addr only if there is data_count */
2320 		if (dsp_value) {
2321 			io_task->mtask_addr = pci_map_single(phba->pcidev,
2322 							     task->data,
2323 							     task->data_count,
2324 							     PCI_DMA_TODEVICE);
2325 			if (pci_dma_mapping_error(phba->pcidev,
2326 						  io_task->mtask_addr))
2327 				return -ENOMEM;
2328 			io_task->mtask_data_count = task->data_count;
2329 		} else
2330 			io_task->mtask_addr = 0;
2331 
2332 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
2333 			      lower_32_bits(io_task->mtask_addr));
2334 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
2335 			      upper_32_bits(io_task->mtask_addr));
2336 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
2337 						task->data_count);
2338 
2339 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
2340 	} else {
2341 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
2342 		io_task->mtask_addr = 0;
2343 	}
2344 
2345 	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
2346 
2347 	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
2348 
2349 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2350 		      io_task->bhs_pa.u.a32.address_hi);
2351 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2352 		      io_task->bhs_pa.u.a32.address_lo);
2353 	if (task->data) {
2354 		psgl++;
2355 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
2356 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
2357 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
2358 		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
2359 		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
2360 		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
2361 
2362 		psgl++;
2363 		/* task->data is already known non-NULL here */
2364 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
2365 			      lower_32_bits(io_task->mtask_addr));
2366 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
2367 			      upper_32_bits(io_task->mtask_addr));
2369 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
2370 	}
2371 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
2372 	return 0;
2373 }
2374 
2375 /**
2376  * beiscsi_find_mem_req()- Find mem needed
2377  * @phba: ptr to HBA struct
2378  **/
2379 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
2380 {
2381 	uint8_t mem_descr_index, ulp_num;
2382 	unsigned int num_async_pdu_buf_pages;
2383 	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
2384 	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
2385 
2386 	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
2387 
2388 	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
2389 						 BE_ISCSI_PDU_HEADER_SIZE;
2390 	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
2391 					    sizeof(struct hwi_context_memory);
2392 
2393 
2394 	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
2395 	    * (phba->params.wrbs_per_cxn)
2396 	    * phba->params.cxns_per_ctrl;
2397 	wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
2398 				 (phba->params.wrbs_per_cxn);
2399 	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
2400 				phba->params.cxns_per_ctrl);
2401 
2402 	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
2403 		phba->params.icds_per_ctrl;
2404 	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
2405 		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
2406 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2407 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2408 
2409 			num_async_pdu_buf_sgl_pages =
2410 				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2411 					       phba, ulp_num) *
2412 					       sizeof(struct phys_addr));
2413 
2414 			num_async_pdu_buf_pages =
2415 				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2416 					       phba, ulp_num) *
2417 					       phba->params.defpdu_hdr_sz);
2418 
2419 			num_async_pdu_data_pages =
2420 				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2421 					       phba, ulp_num) *
2422 					       phba->params.defpdu_data_sz);
2423 
2424 			num_async_pdu_data_sgl_pages =
2425 				PAGES_REQUIRED(BEISCSI_GET_CID_COUNT(
2426 					       phba, ulp_num) *
2427 					       sizeof(struct phys_addr));
2428 
2429 			mem_descr_index = (HWI_MEM_TEMPLATE_HDR_ULP0 +
2430 					  (ulp_num * MEM_DESCR_OFFSET));
2431 			phba->mem_req[mem_descr_index] =
2432 					BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2433 					BEISCSI_TEMPLATE_HDR_PER_CXN_SIZE;
2434 
2435 			mem_descr_index = (HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2436 					  (ulp_num * MEM_DESCR_OFFSET));
2437 			phba->mem_req[mem_descr_index] =
2438 					  num_async_pdu_buf_pages *
2439 					  PAGE_SIZE;
2440 
2441 			mem_descr_index = (HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2442 					  (ulp_num * MEM_DESCR_OFFSET));
2443 			phba->mem_req[mem_descr_index] =
2444 					  num_async_pdu_data_pages *
2445 					  PAGE_SIZE;
2446 
2447 			mem_descr_index = (HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2448 					  (ulp_num * MEM_DESCR_OFFSET));
2449 			phba->mem_req[mem_descr_index] =
2450 					  num_async_pdu_buf_sgl_pages *
2451 					  PAGE_SIZE;
2452 
2453 			mem_descr_index = (HWI_MEM_ASYNC_DATA_RING_ULP0 +
2454 					  (ulp_num * MEM_DESCR_OFFSET));
2455 			phba->mem_req[mem_descr_index] =
2456 					  num_async_pdu_data_sgl_pages *
2457 					  PAGE_SIZE;
2458 
2459 			mem_descr_index = (HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2460 					  (ulp_num * MEM_DESCR_OFFSET));
2461 			phba->mem_req[mem_descr_index] =
2462 					  BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2463 					  sizeof(struct hd_async_handle);
2464 
2465 			mem_descr_index = (HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2466 					  (ulp_num * MEM_DESCR_OFFSET));
2467 			phba->mem_req[mem_descr_index] =
2468 					  BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2469 					  sizeof(struct hd_async_handle);
2470 
2471 			mem_descr_index = (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2472 					  (ulp_num * MEM_DESCR_OFFSET));
2473 			phba->mem_req[mem_descr_index] =
2474 					  sizeof(struct hd_async_context) +
2475 					 (BEISCSI_GET_CID_COUNT(phba, ulp_num) *
2476 					  sizeof(struct hd_async_entry));
2477 		}
2478 	}
2479 }
2480 
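/*
 * Allocate the DMA-coherent memory described in phba->mem_req[]. Each
 * requirement may be satisfied in up to BEISCSI_MAX_FRAGS_INIT fragments,
 * shrinking the per-fragment size when a contiguous allocation fails.
 */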
2481 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
2482 {
2483 	dma_addr_t bus_add;
2484 	struct hwi_controller *phwi_ctrlr;
2485 	struct be_mem_descriptor *mem_descr;
2486 	struct mem_array *mem_arr, *mem_arr_orig;
2487 	unsigned int i, j, alloc_size, curr_alloc_size;
2488 
2489 	phba->phwi_ctrlr = kzalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
2490 	if (!phba->phwi_ctrlr)
2491 		return -ENOMEM;
2492 
2493 	/* Allocate memory for wrb_context */
2494 	phwi_ctrlr = phba->phwi_ctrlr;
2495 	phwi_ctrlr->wrb_context = kzalloc(sizeof(struct hwi_wrb_context) *
2496 					  phba->params.cxns_per_ctrl,
2497 					  GFP_KERNEL);
2498 	if (!phwi_ctrlr->wrb_context) {
2499 		kfree(phba->phwi_ctrlr);
2500 		return -ENOMEM;
2501 	}
2502 
2503 	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
2504 				 GFP_KERNEL);
2505 	if (!phba->init_mem) {
2506 		kfree(phwi_ctrlr->wrb_context);
2507 		kfree(phba->phwi_ctrlr);
2508 		return -ENOMEM;
2509 	}
2510 
2511 	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
2512 			       GFP_KERNEL);
2513 	if (!mem_arr_orig) {
2514 		kfree(phba->init_mem);
2515 		kfree(phwi_ctrlr->wrb_context);
2516 		kfree(phba->phwi_ctrlr);
2517 		return -ENOMEM;
2518 	}
2519 
2520 	mem_descr = phba->init_mem;
2521 	for (i = 0; i < SE_MEM_MAX; i++) {
2522 		if (!phba->mem_req[i]) {
2523 			mem_descr->mem_array = NULL;
2524 			mem_descr++;
2525 			continue;
2526 		}
2527 
2528 		j = 0;
2529 		mem_arr = mem_arr_orig;
2530 		alloc_size = phba->mem_req[i];
2531 		memset(mem_arr, 0, sizeof(struct mem_array) *
2532 		       BEISCSI_MAX_FRAGS_INIT);
2533 		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2534 		do {
2535 			mem_arr->virtual_address = pci_alloc_consistent(
2536 							phba->pcidev,
2537 							curr_alloc_size,
2538 							&bus_add);
2539 			if (!mem_arr->virtual_address) {
2540 				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2541 					goto free_mem;
2542 				if (curr_alloc_size !=
2543 				    rounddown_pow_of_two(curr_alloc_size))
2544 					curr_alloc_size =
2545 					    rounddown_pow_of_two(curr_alloc_size);
2546 				else
2547 					curr_alloc_size /= 2;
2548 			} else {
2549 				mem_arr->bus_address.u.
2550 				    a64.address = (__u64) bus_add;
2551 				mem_arr->size = curr_alloc_size;
2552 				alloc_size -= curr_alloc_size;
2553 				curr_alloc_size = min(be_max_phys_size *
2554 						      1024, alloc_size);
2555 				j++;
2556 				mem_arr++;
2557 			}
2558 		} while (alloc_size);
2559 		mem_descr->num_elements = j;
2560 		mem_descr->size_in_bytes = phba->mem_req[i];
2561 		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2562 					       GFP_KERNEL);
2563 		if (!mem_descr->mem_array)
2564 			goto free_mem;
2565 
2566 		memcpy(mem_descr->mem_array, mem_arr_orig,
2567 		       sizeof(struct mem_array) * j);
2568 		mem_descr++;
2569 	}
2570 	kfree(mem_arr_orig);
2571 	return 0;
2572 free_mem:
2573 	mem_descr->num_elements = j;
2574 	while ((i) || (j)) {
2575 		for (j = mem_descr->num_elements; j > 0; j--) {
2576 			pci_free_consistent(phba->pcidev,
2577 					    mem_descr->mem_array[j - 1].size,
2578 					    mem_descr->mem_array[j - 1].
2579 					    virtual_address,
2580 					    (unsigned long)mem_descr->
2581 					    mem_array[j - 1].
2582 					    bus_address.u.a64.address);
2583 		}
2584 		if (i) {
2585 			i--;
2586 			kfree(mem_descr->mem_array);
2587 			mem_descr--;
2588 		}
2589 	}
2590 	kfree(mem_arr_orig);
2591 	kfree(phba->init_mem);
2592 	kfree(phba->phwi_ctrlr->wrb_context);
2593 	kfree(phba->phwi_ctrlr);
2594 	return -ENOMEM;
2595 }
2596 
2597 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2598 {
2599 	beiscsi_find_mem_req(phba);
2600 	return beiscsi_alloc_mem(phba);
2601 }
2602 
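/*
 * Initialize the two global PDU template headers in ISCSI_MEM_GLOBAL_HEADER:
 * a Data-Out header and a Nop-Out header (TTT 0xFFFFFFFF, F bit set).
 */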
2603 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2604 {
2605 	struct pdu_data_out *pdata_out;
2606 	struct pdu_nop_out *pnop_out;
2607 	struct be_mem_descriptor *mem_descr;
2608 
2609 	mem_descr = phba->init_mem;
2610 	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2611 	pdata_out =
2612 	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2613 	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2614 
2615 	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2616 		      IIOC_SCSI_DATA);
2617 
2618 	pnop_out =
2619 	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2620 				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2621 
2622 	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2623 	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2624 	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2625 	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2626 }
2627 
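/*
 * Carve per-connection WRB handles out of HWI_MEM_WRBH and WRBs out of
 * HWI_MEM_WRB, then link each handle to its WRB.
 */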
2628 static int beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2629 {
2630 	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2631 	struct hwi_context_memory *phwi_ctxt;
2632 	struct wrb_handle *pwrb_handle = NULL;
2633 	struct hwi_controller *phwi_ctrlr;
2634 	struct hwi_wrb_context *pwrb_context;
2635 	struct iscsi_wrb *pwrb = NULL;
2636 	unsigned int num_cxn_wrbh = 0;
2637 	unsigned int num_cxn_wrb = 0, j, idx = 0, index;
2638 
2639 	mem_descr_wrbh = phba->init_mem;
2640 	mem_descr_wrbh += HWI_MEM_WRBH;
2641 
2642 	mem_descr_wrb = phba->init_mem;
2643 	mem_descr_wrb += HWI_MEM_WRB;
2644 	phwi_ctrlr = phba->phwi_ctrlr;
2645 
2646 	/* Allocate memory for WRBQ */
2647 	phwi_ctxt = phwi_ctrlr->phwi_ctxt;
2648 	phwi_ctxt->be_wrbq = kzalloc(sizeof(struct be_queue_info) *
2649 				     phba->params.cxns_per_ctrl,
2650 				     GFP_KERNEL);
2651 	if (!phwi_ctxt->be_wrbq) {
2652 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2653 			    "BM_%d : WRBQ Mem Alloc Failed\n");
2654 		return -ENOMEM;
2655 	}
2656 
2657 	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2658 		pwrb_context = &phwi_ctrlr->wrb_context[index];
2659 		pwrb_context->pwrb_handle_base =
2660 				kzalloc(sizeof(struct wrb_handle *) *
2661 					phba->params.wrbs_per_cxn, GFP_KERNEL);
2662 		if (!pwrb_context->pwrb_handle_base) {
2663 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2664 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
2665 			goto init_wrb_hndl_failed;
2666 		}
2667 		pwrb_context->pwrb_handle_basestd =
2668 				kzalloc(sizeof(struct wrb_handle *) *
2669 					phba->params.wrbs_per_cxn, GFP_KERNEL);
2670 		if (!pwrb_context->pwrb_handle_basestd) {
2671 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
2672 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
2673 			goto init_wrb_hndl_failed;
2674 		}
2675 		if (!num_cxn_wrbh) {
2676 			pwrb_handle =
2677 				mem_descr_wrbh->mem_array[idx].virtual_address;
2678 			num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2679 					((sizeof(struct wrb_handle)) *
2680 					 phba->params.wrbs_per_cxn));
2681 			idx++;
2682 		}
2683 		pwrb_context->alloc_index = 0;
2684 		pwrb_context->wrb_handles_available = 0;
2685 		pwrb_context->free_index = 0;
2686 
2687 		if (num_cxn_wrbh) {
2688 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2689 				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2690 				pwrb_context->pwrb_handle_basestd[j] =
2691 								pwrb_handle;
2692 				pwrb_context->wrb_handles_available++;
2693 				pwrb_handle->wrb_index = j;
2694 				pwrb_handle++;
2695 			}
2696 			num_cxn_wrbh--;
2697 		}
2698 		spin_lock_init(&pwrb_context->wrb_lock);
2699 	}
2700 	idx = 0;
2701 	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
2702 		pwrb_context = &phwi_ctrlr->wrb_context[index];
2703 		if (!num_cxn_wrb) {
2704 			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2705 			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2706 				((sizeof(struct iscsi_wrb) *
2707 				  phba->params.wrbs_per_cxn));
2708 			idx++;
2709 		}
2710 
2711 		if (num_cxn_wrb) {
2712 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2713 				pwrb_handle = pwrb_context->pwrb_handle_base[j];
2714 				pwrb_handle->pwrb = pwrb;
2715 				pwrb++;
2716 			}
2717 			num_cxn_wrb--;
2718 		}
2719 	}
2720 	return 0;
2721 init_wrb_hndl_failed:
	/* free handle arrays for every context touched so far, including 0 */
2722 	for (j = 0; j <= index; j++) {
2723 		pwrb_context = &phwi_ctrlr->wrb_context[j];
2724 		kfree(pwrb_context->pwrb_handle_base);
2725 		kfree(pwrb_context->pwrb_handle_basestd);
2726 	}
2727 	return -ENOMEM;
2728 }
2729 
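/*
 * For each supported ULP, lay out the async PDU context in init_mem:
 * header/data buffers, ring bases and handles, and seed both free lists
 * with one header and one data handle per CID.
 */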
2730 static int hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2731 {
2732 	uint8_t ulp_num;
2733 	struct hwi_controller *phwi_ctrlr;
2734 	struct hba_parameters *p = &phba->params;
2735 	struct hd_async_context *pasync_ctx;
2736 	struct hd_async_handle *pasync_header_h, *pasync_data_h;
2737 	unsigned int index, idx, num_per_mem, num_async_data;
2738 	struct be_mem_descriptor *mem_descr;
2739 
2740 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
2741 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
2742 			/* get async_ctx for each ULP */
2743 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2744 			mem_descr += (HWI_MEM_ASYNC_PDU_CONTEXT_ULP0 +
2745 				     (ulp_num * MEM_DESCR_OFFSET));
2746 
2747 			phwi_ctrlr = phba->phwi_ctrlr;
2748 			phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num] =
2749 				(struct hd_async_context *)
2750 				 mem_descr->mem_array[0].virtual_address;
2751 
2752 			pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx[ulp_num];
2753 			memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2754 
2755 			pasync_ctx->async_entry =
2756 					(struct hd_async_entry *)
2757 					((long unsigned int)pasync_ctx +
2758 					sizeof(struct hd_async_context));
2759 
2760 			pasync_ctx->num_entries = BEISCSI_GET_CID_COUNT(phba,
2761 						  ulp_num);
2762 			/* setup header buffers */
2763 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2764 			mem_descr += HWI_MEM_ASYNC_HEADER_BUF_ULP0 +
2765 				(ulp_num * MEM_DESCR_OFFSET);
2766 			if (mem_descr->mem_array[0].virtual_address) {
2767 				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2768 					    "BM_%d : hwi_init_async_pdu_ctx"
2769 					    " HWI_MEM_ASYNC_HEADER_BUF_ULP%d va=%p\n",
2770 					    ulp_num,
2771 					    mem_descr->mem_array[0].
2772 					    virtual_address);
2773 			} else
2774 				beiscsi_log(phba, KERN_WARNING,
2775 					    BEISCSI_LOG_INIT,
2776 					    "BM_%d : No Virtual address for ULP : %d\n",
2777 					    ulp_num);
2778 
2779 			pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2780 			pasync_ctx->async_header.va_base =
2781 				mem_descr->mem_array[0].virtual_address;
2782 
2783 			pasync_ctx->async_header.pa_base.u.a64.address =
2784 				mem_descr->mem_array[0].
2785 				bus_address.u.a64.address;
2786 
2787 			/* setup header buffer sgls */
2788 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2789 			mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
2790 				     (ulp_num * MEM_DESCR_OFFSET);
2791 			if (mem_descr->mem_array[0].virtual_address) {
2792 				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2793 					    "BM_%d : hwi_init_async_pdu_ctx"
2794 					    " HWI_MEM_ASYNC_HEADER_RING_ULP%d va=%p\n",
2795 					    ulp_num,
2796 					    mem_descr->mem_array[0].
2797 					    virtual_address);
2798 			} else
2799 				beiscsi_log(phba, KERN_WARNING,
2800 					    BEISCSI_LOG_INIT,
2801 					    "BM_%d : No Virtual address for ULP : %d\n",
2802 					    ulp_num);
2803 
2804 			pasync_ctx->async_header.ring_base =
2805 				mem_descr->mem_array[0].virtual_address;
2806 
2807 			/* setup header buffer handles */
2808 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2809 			mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE_ULP0 +
2810 				     (ulp_num * MEM_DESCR_OFFSET);
2811 			if (mem_descr->mem_array[0].virtual_address) {
2812 				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2813 					    "BM_%d : hwi_init_async_pdu_ctx"
2814 					    " HWI_MEM_ASYNC_HEADER_HANDLE_ULP%d va=%p\n",
2815 					    ulp_num,
2816 					    mem_descr->mem_array[0].
2817 					    virtual_address);
2818 			} else
2819 				beiscsi_log(phba, KERN_WARNING,
2820 					    BEISCSI_LOG_INIT,
2821 					    "BM_%d : No Virtual address for ULP : %d\n",
2822 					    ulp_num);
2823 
2824 			pasync_ctx->async_header.handle_base =
2825 				mem_descr->mem_array[0].virtual_address;
2826 			INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2827 
2828 			/* setup data buffer sgls */
2829 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2830 			mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
2831 				     (ulp_num * MEM_DESCR_OFFSET);
2832 			if (mem_descr->mem_array[0].virtual_address) {
2833 				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2834 					    "BM_%d : hwi_init_async_pdu_ctx"
2835 					    " HWI_MEM_ASYNC_DATA_RING_ULP%d va=%p\n",
2836 					    ulp_num,
2837 					    mem_descr->mem_array[0].
2838 					    virtual_address);
2839 			} else
2840 				beiscsi_log(phba, KERN_WARNING,
2841 					    BEISCSI_LOG_INIT,
2842 					    "BM_%d : No Virtual address for ULP : %d\n",
2843 					    ulp_num);
2844 
2845 			pasync_ctx->async_data.ring_base =
2846 				mem_descr->mem_array[0].virtual_address;
2847 
2848 			/* setup data buffer handles */
2849 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2850 			mem_descr += HWI_MEM_ASYNC_DATA_HANDLE_ULP0 +
2851 				     (ulp_num * MEM_DESCR_OFFSET);
2852 			if (!mem_descr->mem_array[0].virtual_address)
2853 				beiscsi_log(phba, KERN_WARNING,
2854 					    BEISCSI_LOG_INIT,
2855 					    "BM_%d : No Virtual address for ULP : %d\n",
2856 					    ulp_num);
2857 
2858 			pasync_ctx->async_data.handle_base =
2859 				mem_descr->mem_array[0].virtual_address;
2860 			INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2861 
2862 			pasync_header_h =
2863 				(struct hd_async_handle *)
2864 				pasync_ctx->async_header.handle_base;
2865 			pasync_data_h =
2866 				(struct hd_async_handle *)
2867 				pasync_ctx->async_data.handle_base;
2868 
2869 			/* setup data buffers */
2870 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2871 			mem_descr += HWI_MEM_ASYNC_DATA_BUF_ULP0 +
2872 				     (ulp_num * MEM_DESCR_OFFSET);
2873 			if (mem_descr->mem_array[0].virtual_address) {
2874 				beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
2875 					    "BM_%d : hwi_init_async_pdu_ctx"
2876 					    " HWI_MEM_ASYNC_DATA_BUF_ULP%d va=%p\n",
2877 					    ulp_num,
2878 					    mem_descr->mem_array[0].
2879 					    virtual_address);
2880 			} else
2881 				beiscsi_log(phba, KERN_WARNING,
2882 					    BEISCSI_LOG_INIT,
2883 					    "BM_%d : No Virtual address for ULP : %d\n",
2884 					    ulp_num);
2885 
2886 			idx = 0;
2887 			pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2888 			pasync_ctx->async_data.va_base =
2889 				mem_descr->mem_array[idx].virtual_address;
2890 			pasync_ctx->async_data.pa_base.u.a64.address =
2891 				mem_descr->mem_array[idx].
2892 				bus_address.u.a64.address;
2893 
2894 			num_async_data = ((mem_descr->mem_array[idx].size) /
2895 					phba->params.defpdu_data_sz);
2896 			num_per_mem = 0;
2897 
2898 			for (index = 0;	index < BEISCSI_GET_CID_COUNT
2899 					(phba, ulp_num); index++) {
2900 				pasync_header_h->cri = -1;
2901 				pasync_header_h->is_header = 1;
2902 				pasync_header_h->index = index;
2903 				INIT_LIST_HEAD(&pasync_header_h->link);
2904 				pasync_header_h->pbuffer =
2905 					(void *)((unsigned long)
2906 						 (pasync_ctx->
2907 						  async_header.va_base) +
2908 						 (p->defpdu_hdr_sz * index));
2909 
2910 				pasync_header_h->pa.u.a64.address =
2911 					pasync_ctx->async_header.pa_base.u.a64.
2912 					address + (p->defpdu_hdr_sz * index);
2913 
2914 				list_add_tail(&pasync_header_h->link,
2915 					      &pasync_ctx->async_header.
2916 					      free_list);
2917 				pasync_header_h++;
2918 				pasync_ctx->async_header.free_entries++;
2919 				INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2920 						wq.list);
2921 				pasync_ctx->async_entry[index].header = NULL;
2922 
2923 				pasync_data_h->cri = -1;
2924 				pasync_data_h->is_header = 0;
2925 				pasync_data_h->index = index;
2926 				INIT_LIST_HEAD(&pasync_data_h->link);
2927 
2928 				if (!num_async_data) {
2929 					num_per_mem = 0;
2930 					idx++;
2931 					pasync_ctx->async_data.va_base =
2932 						mem_descr->mem_array[idx].
2933 						virtual_address;
2934 					pasync_ctx->async_data.pa_base.u.
2935 						a64.address =
2936 						mem_descr->mem_array[idx].
2937 						bus_address.u.a64.address;
2938 					num_async_data =
2939 						((mem_descr->mem_array[idx].
2940 						  size) /
2941 						 phba->params.defpdu_data_sz);
2942 				}
2943 				pasync_data_h->pbuffer =
2944 					(void *)((unsigned long)
2945 					(pasync_ctx->async_data.va_base) +
2946 					(p->defpdu_data_sz * num_per_mem));
2947 
2948 				pasync_data_h->pa.u.a64.address =
2949 					pasync_ctx->async_data.pa_base.u.a64.
2950 					address + (p->defpdu_data_sz *
2951 					num_per_mem);
2952 				num_per_mem++;
2953 				num_async_data--;
2954 
2955 				list_add_tail(&pasync_data_h->link,
2956 					      &pasync_ctx->async_data.
2957 					      free_list);
2958 				pasync_data_h++;
2959 				pasync_ctx->async_data.free_entries++;
2960 				pasync_ctx->async_entry[index].data = NULL;
2961 			}
2962 		}
2963 	}
2964 
2965 	return 0;
2966 }
2967 
2968 static int
2969 be_sgl_create_contiguous(void *virtual_address,
2970 			 u64 physical_address, u32 length,
2971 			 struct be_dma_mem *sgl)
2972 {
2973 	WARN_ON(!virtual_address);
2974 	WARN_ON(!physical_address);
2975 	WARN_ON(!length);
2976 	WARN_ON(!sgl);
2977 
2978 	sgl->va = virtual_address;
2979 	sgl->dma = (unsigned long)physical_address;
2980 	sgl->size = length;
2981 
2982 	return 0;
2983 }
2984 
2985 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2986 {
2987 	memset(sgl, 0, sizeof(*sgl));
2988 }
2989 
2990 static void
2991 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2992 		     struct mem_array *pmem, struct be_dma_mem *sgl)
2993 {
2994 	if (sgl->va)
2995 		be_sgl_destroy_contiguous(sgl);
2996 
2997 	be_sgl_create_contiguous(pmem->virtual_address,
2998 				 pmem->bus_address.u.a64.address,
2999 				 pmem->size, sgl);
3000 }
3001 
3002 static void
3003 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
3004 			   struct mem_array *pmem, struct be_dma_mem *sgl)
3005 {
3006 	if (sgl->va)
3007 		be_sgl_destroy_contiguous(sgl);
3008 
3009 	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
3010 				 pmem->bus_address.u.a64.address,
3011 				 pmem->size, sgl);
3012 }
3013 
3014 static int be_fill_queue(struct be_queue_info *q,
3015 		u16 len, u16 entry_size, void *vaddress)
3016 {
3017 	struct be_dma_mem *mem = &q->dma_mem;
3018 
3019 	memset(q, 0, sizeof(*q));
3020 	q->len = len;
3021 	q->entry_size = entry_size;
3022 	mem->size = len * entry_size;
3023 	mem->va = vaddress;
3024 	if (!mem->va)
3025 		return -ENOMEM;
3026 	memset(mem->va, 0, mem->size);
3027 	return 0;
3028 }
3029 
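/* Create one EQ per CPU, plus one for the MCC when MSI-X is enabled. */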
3030 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
3031 			     struct hwi_context_memory *phwi_context)
3032 {
3033 	int ret = -ENOMEM, eq_for_mcc;
3034 	unsigned int i, num_eq_pages;
3035 	struct be_queue_info *eq;
3036 	struct be_dma_mem *mem;
3037 	void *eq_vaddress;
3038 	dma_addr_t paddr;
3039 
3040 	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries * \
3041 				      sizeof(struct be_eq_entry));
3042 
3043 	if (phba->msix_enabled)
3044 		eq_for_mcc = 1;
3045 	else
3046 		eq_for_mcc = 0;
3047 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3048 		eq = &phwi_context->be_eq[i].q;
3049 		mem = &eq->dma_mem;
3050 		phwi_context->be_eq[i].phba = phba;
3051 		eq_vaddress = pci_alloc_consistent(phba->pcidev,
3052 						   num_eq_pages * PAGE_SIZE,
3053 						   &paddr);
3054 		if (!eq_vaddress) {
3055 			ret = -ENOMEM;
3056 			goto create_eq_error;
3057 		}
3058 
3059 		mem->va = eq_vaddress;
3060 		ret = be_fill_queue(eq, phba->params.num_eq_entries,
3061 				    sizeof(struct be_eq_entry), eq_vaddress);
3062 		if (ret) {
3063 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3064 				    "BM_%d : be_fill_queue Failed for EQ\n");
3065 			goto create_eq_error;
3066 		}
3067 
3068 		mem->dma = paddr;
3069 		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
3070 					    phwi_context->cur_eqd);
3071 		if (ret) {
3072 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3073 				    "BM_%d : beiscsi_cmd_eq_create "
3074 				    "Failed for EQ\n");
3075 			goto create_eq_error;
3076 		}
3077 
3078 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3079 			    "BM_%d : eqid = %d\n",
3080 			    phwi_context->be_eq[i].q.id);
3081 	}
3082 	return 0;
3083 
3084 create_eq_error:
3085 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3086 		eq = &phwi_context->be_eq[i].q;
3087 		mem = &eq->dma_mem;
3088 		if (mem->va)
3089 			pci_free_consistent(phba->pcidev, num_eq_pages
3090 					    * PAGE_SIZE,
3091 					    mem->va, mem->dma);
3092 	}
3093 	return ret;
3094 }
3095 
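/* Create one iSCSI CQ per CPU, each bound to the matching EQ. */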
3096 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
3097 			     struct hwi_context_memory *phwi_context)
3098 {
3099 	unsigned int i, num_cq_pages;
3100 	struct be_queue_info *cq, *eq;
3101 	struct be_dma_mem *mem;
3102 	struct be_eq_obj *pbe_eq;
3103 	void *cq_vaddress;
3104 	int ret = -ENOMEM;
3105 	dma_addr_t paddr;
3106 
3107 	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries * \
3108 				      sizeof(struct sol_cqe));
3109 
3110 	for (i = 0; i < phba->num_cpus; i++) {
3111 		cq = &phwi_context->be_cq[i];
3112 		eq = &phwi_context->be_eq[i].q;
3113 		pbe_eq = &phwi_context->be_eq[i];
3114 		pbe_eq->cq = cq;
3115 		pbe_eq->phba = phba;
3116 		mem = &cq->dma_mem;
3117 		cq_vaddress = pci_alloc_consistent(phba->pcidev,
3118 						   num_cq_pages * PAGE_SIZE,
3119 						   &paddr);
3120 		if (!cq_vaddress) {
3121 			ret = -ENOMEM;
3122 			goto create_cq_error;
3123 		}
3124 
3125 		ret = be_fill_queue(cq, phba->params.num_cq_entries,
3126 				    sizeof(struct sol_cqe), cq_vaddress);
3127 		if (ret) {
3128 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3129 				    "BM_%d : be_fill_queue Failed "
3130 				    "for ISCSI CQ\n");
3131 			goto create_cq_error;
3132 		}
3133 
3134 		mem->dma = paddr;
3135 		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
3136 					    false, 0);
3137 		if (ret) {
3138 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3139 				    "BM_%d : beiscsi_cmd_cq_create "
3140 				    "Failed for ISCSI CQ\n");
3141 			goto create_cq_error;
3142 		}
3143 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3144 			    "BM_%d : iscsi cq_id is %d for eq_id %d\n"
3145 			    "iSCSI CQ CREATED\n", cq->id, eq->id);
3146 	}
3147 	return 0;
3148 
3149 create_cq_error:
3150 	for (i = 0; i < phba->num_cpus; i++) {
3151 		cq = &phwi_context->be_cq[i];
3152 		mem = &cq->dma_mem;
3153 		if (mem->va)
3154 			pci_free_consistent(phba->pcidev, num_cq_pages
3155 					    * PAGE_SIZE,
3156 					    mem->va, mem->dma);
3157 	}
3158 	return ret;
3159 }
3160 
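/* Create the default PDU header ring for @ulp_num, completing on CQ 0. */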
3161 static int
3162 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
3163 		       struct hwi_context_memory *phwi_context,
3164 		       struct hwi_controller *phwi_ctrlr,
3165 		       unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3166 {
3167 	unsigned int idx;
3168 	int ret;
3169 	struct be_queue_info *dq, *cq;
3170 	struct be_dma_mem *mem;
3171 	struct be_mem_descriptor *mem_descr;
3172 	void *dq_vaddress;
3173 
3174 	idx = 0;
3175 	dq = &phwi_context->be_def_hdrq[ulp_num];
3176 	cq = &phwi_context->be_cq[0];
3177 	mem = &dq->dma_mem;
3178 	mem_descr = phba->init_mem;
3179 	mem_descr += HWI_MEM_ASYNC_HEADER_RING_ULP0 +
3180 		    (ulp_num * MEM_DESCR_OFFSET);
3181 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3182 	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
3183 			    sizeof(struct phys_addr),
3184 			    sizeof(struct phys_addr), dq_vaddress);
3185 	if (ret) {
3186 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3187 			    "BM_%d : be_fill_queue Failed for DEF PDU HDR on ULP : %d\n",
3188 			    ulp_num);
3189 
3190 		return ret;
3191 	}
3192 	mem->dma = (unsigned long)mem_descr->mem_array[idx].
3193 				  bus_address.u.a64.address;
3194 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
3195 					      def_pdu_ring_sz,
3196 					      phba->params.defpdu_hdr_sz,
3197 					      BEISCSI_DEFQ_HDR, ulp_num);
3198 	if (ret) {
3199 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3200 			    "BM_%d : be_cmd_create_default_pdu_queue Failed DEFHDR on ULP : %d\n",
3201 			    ulp_num);
3202 
3203 		return ret;
3204 	}
3205 
3206 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3207 		    "BM_%d : iscsi hdr def pdu id for ULP : %d is %d\n",
3208 		    ulp_num,
3209 		    phwi_context->be_def_hdrq[ulp_num].id);
3210 	return 0;
3211 }
3212 
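/* Create the default PDU data ring for @ulp_num, completing on CQ 0. */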
3213 static int
3214 beiscsi_create_def_data(struct beiscsi_hba *phba,
3215 			struct hwi_context_memory *phwi_context,
3216 			struct hwi_controller *phwi_ctrlr,
3217 			unsigned int def_pdu_ring_sz, uint8_t ulp_num)
3218 {
3219 	unsigned int idx;
3220 	int ret;
3221 	struct be_queue_info *dataq, *cq;
3222 	struct be_dma_mem *mem;
3223 	struct be_mem_descriptor *mem_descr;
3224 	void *dq_vaddress;
3225 
3226 	idx = 0;
3227 	dataq = &phwi_context->be_def_dataq[ulp_num];
3228 	cq = &phwi_context->be_cq[0];
3229 	mem = &dataq->dma_mem;
3230 	mem_descr = phba->init_mem;
3231 	mem_descr += HWI_MEM_ASYNC_DATA_RING_ULP0 +
3232 		    (ulp_num * MEM_DESCR_OFFSET);
3233 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
3234 	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
3235 			    sizeof(struct phys_addr),
3236 			    sizeof(struct phys_addr), dq_vaddress);
3237 	if (ret) {
3238 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3239 			    "BM_%d : be_fill_queue Failed for DEF PDU "
3240 			    "DATA on ULP : %d\n",
3241 			    ulp_num);
3242 
3243 		return ret;
3244 	}
3245 	mem->dma = (unsigned long)mem_descr->mem_array[idx].
3246 				  bus_address.u.a64.address;
3247 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
3248 					      def_pdu_ring_sz,
3249 					      phba->params.defpdu_data_sz,
3250 					      BEISCSI_DEFQ_DATA, ulp_num);
3251 	if (ret) {
3252 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3253 				    "BM_%d : be_cmd_create_default_pdu_queue"
3254 			    " Failed for DEF PDU DATA on ULP : %d\n",
3255 			    ulp_num);
3256 		return ret;
3257 	}
3258 
3259 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3260 		    "BM_%d : iscsi def data id on ULP : %d is  %d\n",
3261 		    ulp_num,
3262 		    phwi_context->be_def_dataq[ulp_num].id);
3263 
3264 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3265 		    "BM_%d : DEFAULT PDU DATA RING CREATED "
3266 		    "on ULP : %d\n", ulp_num);
3267 	return 0;
3268 }
3269 
3270 
3271 static int
3272 beiscsi_post_template_hdr(struct beiscsi_hba *phba)
3273 {
3274 	struct be_mem_descriptor *mem_descr;
3275 	struct mem_array *pm_arr;
3276 	struct be_dma_mem sgl;
3277 	int status, ulp_num;
3278 
3279 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3280 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3281 			mem_descr = (struct be_mem_descriptor *)phba->init_mem;
3282 			mem_descr += HWI_MEM_TEMPLATE_HDR_ULP0 +
3283 				    (ulp_num * MEM_DESCR_OFFSET);
3284 			pm_arr = mem_descr->mem_array;
3285 
3286 			hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3287 			status = be_cmd_iscsi_post_template_hdr(
3288 				 &phba->ctrl, &sgl);
3289 
3290 			if (status != 0) {
3291 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3292 					    "BM_%d : Post Template HDR Failed for "
3293 					    "ULP_%d\n", ulp_num);
3294 				return status;
3295 			}
3296 
3297 			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3298 				    "BM_%d : Template HDR Pages Posted for "
3299 				    "ULP_%d\n", ulp_num);
3300 		}
3301 	}
3302 	return 0;
3303 }
3304 
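/*
 * Post the SGL (ICD) pages to the adapter, starting at the page offset
 * that corresponds to the first supported ULP's starting ICD index.
 */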
3305 static int
3306 beiscsi_post_pages(struct beiscsi_hba *phba)
3307 {
3308 	struct be_mem_descriptor *mem_descr;
3309 	struct mem_array *pm_arr;
3310 	unsigned int page_offset, i;
3311 	struct be_dma_mem sgl;
3312 	int status, ulp_num = 0;
3313 
3314 	mem_descr = phba->init_mem;
3315 	mem_descr += HWI_MEM_SGE;
3316 	pm_arr = mem_descr->mem_array;
3317 
3318 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3319 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3320 			break;
3321 
3322 	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
3323 			phba->fw_config.iscsi_icd_start[ulp_num]) / PAGE_SIZE;
3324 	for (i = 0; i < mem_descr->num_elements; i++) {
3325 		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
3326 		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
3327 						page_offset,
3328 						(pm_arr->size / PAGE_SIZE));
3329 		page_offset += pm_arr->size / PAGE_SIZE;
3330 		if (status != 0) {
3331 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3332 				    "BM_%d : post sgl failed.\n");
3333 			return status;
3334 		}
3335 		pm_arr++;
3336 	}
3337 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3338 		    "BM_%d : POSTED PAGES\n");
3339 	return 0;
3340 }
3341 
3342 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
3343 {
3344 	struct be_dma_mem *mem = &q->dma_mem;
3345 	if (mem->va) {
3346 		pci_free_consistent(phba->pcidev, mem->size,
3347 			mem->va, mem->dma);
3348 		mem->va = NULL;
3349 	}
3350 }
3351 
3352 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
3353 		u16 len, u16 entry_size)
3354 {
3355 	struct be_dma_mem *mem = &q->dma_mem;
3356 
3357 	memset(q, 0, sizeof(*q));
3358 	q->len = len;
3359 	q->entry_size = entry_size;
3360 	mem->size = len * entry_size;
3361 	mem->va = pci_zalloc_consistent(phba->pcidev, mem->size, &mem->dma);
3362 	if (!mem->va)
3363 		return -ENOMEM;
3364 	return 0;
3365 }
3366 
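/**
 * beiscsi_create_wrb_rings() - Create a WRB queue per connection
 * @phba: ptr to priv structure
 * @phwi_context: HWI context holding the WRBQ info
 * @phwi_ctrlr: HWI controller holding the per-connection WRB contexts
 *
 * Carve the HWI_MEM_WRB region into rings of wrbs_per_cxn WRBs each,
 * spread the connections across the supported ULPs, and issue a WRBQ
 * create command for every connection.
 */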
3367 static int
3368 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
3369 			 struct hwi_context_memory *phwi_context,
3370 			 struct hwi_controller *phwi_ctrlr)
3371 {
3372 	unsigned int num_wrb_rings;
3373 	u64 pa_addr_lo;
3374 	unsigned int idx, num, i, ulp_num;
3375 	struct mem_array *pwrb_arr;
3376 	void *wrb_vaddr;
3377 	struct be_dma_mem sgl;
3378 	struct be_mem_descriptor *mem_descr;
3379 	struct hwi_wrb_context *pwrb_context;
3380 	int status;
3381 	uint8_t ulp_count = 0, ulp_base_num = 0;
3382 	uint16_t cid_count_ulp[BEISCSI_ULP_COUNT] = { 0 };
3383 
3384 	idx = 0;
3385 	mem_descr = phba->init_mem;
3386 	mem_descr += HWI_MEM_WRB;
3387 	pwrb_arr = kmalloc_array(phba->params.cxns_per_ctrl,
3388 				 sizeof(*pwrb_arr), GFP_KERNEL);
3389 	if (!pwrb_arr) {
3390 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3391 			    "BM_%d : Memory alloc failed in create wrb ring.\n");
3392 		return -ENOMEM;
3393 	}
3394 	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3395 	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
3396 	num_wrb_rings = mem_descr->mem_array[idx].size /
3397 		(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
3398 
3399 	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
3400 		if (num_wrb_rings) {
3401 			pwrb_arr[num].virtual_address = wrb_vaddr;
3402 			pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
3403 			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3404 					    sizeof(struct iscsi_wrb);
3405 			wrb_vaddr += pwrb_arr[num].size;
3406 			pa_addr_lo += pwrb_arr[num].size;
3407 			num_wrb_rings--;
3408 		} else {
3409 			idx++;
3410 			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
3411 			pa_addr_lo = mem_descr->mem_array[idx].
3412 					bus_address.u.a64.address;
3413 			num_wrb_rings = mem_descr->mem_array[idx].size /
3414 					(phba->params.wrbs_per_cxn *
3415 					sizeof(struct iscsi_wrb));
3416 			pwrb_arr[num].virtual_address = wrb_vaddr;
3417 			pwrb_arr[num].bus_address.u.a64.address =
3418 						pa_addr_lo;
3419 			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
3420 						 sizeof(struct iscsi_wrb);
3421 			wrb_vaddr += pwrb_arr[num].size;
3422 			pa_addr_lo += pwrb_arr[num].size;
3423 			num_wrb_rings--;
3424 		}
3425 	}
3426 
3427 	/* Get the ULP Count */
3428 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3429 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3430 			ulp_count++;
3431 			ulp_base_num = ulp_num;
3432 			cid_count_ulp[ulp_num] =
3433 				BEISCSI_GET_CID_COUNT(phba, ulp_num);
3434 		}
3435 
3436 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3437 		if (ulp_count > 1) {
3438 			ulp_base_num = (ulp_base_num + 1) % BEISCSI_ULP_COUNT;
3439 
3440 			if (!cid_count_ulp[ulp_base_num])
3441 				ulp_base_num = (ulp_base_num + 1) %
3442 						BEISCSI_ULP_COUNT;
3443 
3444 			cid_count_ulp[ulp_base_num]--;
3445 		}
3446 
3448 		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
3449 		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
3450 					    &phwi_context->be_wrbq[i],
3451 					    &phwi_ctrlr->wrb_context[i],
3452 					    ulp_base_num);
3453 		if (status != 0) {
3454 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3455 				    "BM_%d : wrbq create failed\n");
3456 			kfree(pwrb_arr);
3457 			return status;
3458 		}
3459 		pwrb_context = &phwi_ctrlr->wrb_context[i];
3460 		BE_SET_CID_TO_CRI(i, pwrb_context->cid);
3461 	}
3462 	kfree(pwrb_arr);
3463 	return 0;
3464 }
3465 
3466 static void free_wrb_handles(struct beiscsi_hba *phba)
3467 {
3468 	unsigned int index;
3469 	struct hwi_controller *phwi_ctrlr;
3470 	struct hwi_wrb_context *pwrb_context;
3471 
3472 	phwi_ctrlr = phba->phwi_ctrlr;
3473 	for (index = 0; index < phba->params.cxns_per_ctrl; index++) {
3474 		pwrb_context = &phwi_ctrlr->wrb_context[index];
3475 		kfree(pwrb_context->pwrb_handle_base);
3476 		kfree(pwrb_context->pwrb_handle_basestd);
3477 	}
3478 }
3479 
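/**
 * be_mcc_queues_destroy() - Tear down the MCC queue and its CQ
 * @phba: ptr to priv structure
 *
 * Before destroying the queues, settle every outstanding MCC tag:
 * free the DMA memory of timed-out commands and wake up any process
 * still waiting on a tag so it can fail the operation and clean up.
 */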
3480 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
3481 {
3482 	struct be_ctrl_info *ctrl = &phba->ctrl;
3483 	struct be_dma_mem *ptag_mem;
3484 	struct be_queue_info *q;
3485 	int i, tag;
3486 
3487 	q = &phba->ctrl.mcc_obj.q;
3488 	for (i = 0; i < MAX_MCC_CMD; i++) {
3489 		tag = i + 1;
3490 		if (!test_bit(MCC_TAG_STATE_RUNNING,
3491 			      &ctrl->ptag_state[tag].tag_state))
3492 			continue;
3493 
3494 		if (test_bit(MCC_TAG_STATE_TIMEOUT,
3495 			     &ctrl->ptag_state[tag].tag_state)) {
3496 			ptag_mem = &ctrl->ptag_state[tag].tag_mem_state;
3497 			if (ptag_mem->size) {
3498 				pci_free_consistent(ctrl->pdev,
3499 						    ptag_mem->size,
3500 						    ptag_mem->va,
3501 						    ptag_mem->dma);
3502 				ptag_mem->size = 0;
3503 			}
3504 			continue;
3505 		}
3506 		/*
3507 		 * If MCC is still active and waiting then wake up the process.
3508 		 * We are here only because the port is going offline. The
3509 		 * process sees that BEISCSI_HBA_ONLINE is cleared, returns
3510 		 * -EIO for the operation, and cleans up the allocated memory.
3511 		 */
3512 		if (waitqueue_active(&ctrl->mcc_wait[tag])) {
3513 			ctrl->mcc_tag_status[tag] = MCC_STATUS_FAILED;
3514 			ctrl->mcc_tag_status[tag] |= CQE_VALID_MASK;
3515 			wake_up_interruptible(&ctrl->mcc_wait[tag]);
3516 			/*
3517 			 * Control tag info gets reinitialized in enable
3518 			 * so wait for the process to clear running state.
3519 			 */
3520 			while (test_bit(MCC_TAG_STATE_RUNNING,
3521 					&ctrl->ptag_state[tag].tag_state))
3522 				schedule_timeout_uninterruptible(HZ);
3523 		}
3524 		/*
3525 		 * For MCC with tag_states MCC_TAG_STATE_ASYNC and
3526 		 * MCC_TAG_STATE_IGNORE nothing needs to be done.
3527 		 */
3528 	}
3529 	if (q->created) {
3530 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
3531 		be_queue_free(phba, q);
3532 	}
3533 
3534 	q = &phba->ctrl.mcc_obj.cq;
3535 	if (q->created) {
3536 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3537 		be_queue_free(phba, q);
3538 	}
3539 }
3540 
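/**
 * be_mcc_queues_create() - Create the MCC queue and its CQ
 * @phba: ptr to priv structure
 * @phwi_context: HWI context holding the EQs
 *
 * The MCC CQ is bound to the extra EQ when MSIx is enabled, else to EQ0.
 */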
3541 static int be_mcc_queues_create(struct beiscsi_hba *phba,
3542 				struct hwi_context_memory *phwi_context)
3543 {
3544 	struct be_queue_info *q, *cq;
3545 	struct be_ctrl_info *ctrl = &phba->ctrl;
3546 
3547 	/* Alloc MCC compl queue */
3548 	cq = &phba->ctrl.mcc_obj.cq;
3549 	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
3550 			sizeof(struct be_mcc_compl)))
3551 		goto err;
3552 	/* Ask BE to create MCC compl queue */
3553 	if (phba->msix_enabled) {
3554 		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
3555 					 [phba->num_cpus].q, false, true, 0))
3556 			goto mcc_cq_free;
3557 	} else {
3558 		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
3559 					  false, true, 0))
3560 			goto mcc_cq_free;
3561 	}
3562 
3563 	/* Alloc MCC queue */
3564 	q = &phba->ctrl.mcc_obj.q;
3565 	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
3566 		goto mcc_cq_destroy;
3567 
3568 	/* Ask BE to create MCC queue */
3569 	if (beiscsi_cmd_mccq_create(phba, q, cq))
3570 		goto mcc_q_free;
3571 
3572 	return 0;
3573 
3574 mcc_q_free:
3575 	be_queue_free(phba, q);
3576 mcc_cq_destroy:
3577 	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
3578 mcc_cq_free:
3579 	be_queue_free(phba, cq);
3580 err:
3581 	return -ENOMEM;
3582 }
3583 
3584 /**
3585  * find_num_cpus() - Get the number of online CPUs
3586  * @phba: ptr to priv structure
3587  *
3588  * CPU count is used for creating EQ.
3589  */
3590 static void find_num_cpus(struct beiscsi_hba *phba)
3591 {
3592 	int  num_cpus = 0;
3593 
3594 	num_cpus = num_online_cpus();
3595 
3596 	switch (phba->generation) {
3597 	case BE_GEN2:
3598 	case BE_GEN3:
3599 		phba->num_cpus = (num_cpus > BEISCSI_MAX_NUM_CPUS) ?
3600 				  BEISCSI_MAX_NUM_CPUS : num_cpus;
3601 		break;
3602 	case BE_GEN4:
3603 		/*
3604 		 * If eqid_count == 1, fall back to
3605 		 * the INTx mechanism.
3606 		 */
3607 		if (phba->fw_config.eqid_count == 1) {
3608 			enable_msix = 0;
3609 			phba->num_cpus = 1;
3610 			return;
3611 		}
3612 
3613 		phba->num_cpus =
3614 			(num_cpus > (phba->fw_config.eqid_count - 1)) ?
3615 			(phba->fw_config.eqid_count - 1) : num_cpus;
3616 		break;
3617 	default:
3618 		phba->num_cpus = 1;
3619 	}
3620 }
3621 
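/**
 * hwi_purge_eq() - Consume stale EQ entries
 * @phba: ptr to priv structure
 *
 * Walk every EQ, invalidate entries still marked valid and acknowledge
 * them through the EQ doorbell.
 */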
3622 static void hwi_purge_eq(struct beiscsi_hba *phba)
3623 {
3624 	struct hwi_controller *phwi_ctrlr;
3625 	struct hwi_context_memory *phwi_context;
3626 	struct be_queue_info *eq;
3627 	struct be_eq_entry *eqe = NULL;
3628 	int i, eq_msix;
3629 	unsigned int num_processed;
3630 
3631 	if (beiscsi_hba_in_error(phba))
3632 		return;
3633 
3634 	phwi_ctrlr = phba->phwi_ctrlr;
3635 	phwi_context = phwi_ctrlr->phwi_ctxt;
3636 	if (phba->msix_enabled)
3637 		eq_msix = 1;
3638 	else
3639 		eq_msix = 0;
3640 
3641 	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3642 		eq = &phwi_context->be_eq[i].q;
3643 		eqe = queue_tail_node(eq);
3644 		num_processed = 0;
3645 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3646 					& EQE_VALID_MASK) {
3647 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3648 			queue_tail_inc(eq);
3649 			eqe = queue_tail_node(eq);
3650 			num_processed++;
3651 		}
3652 
3653 		if (num_processed)
3654 			hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
3655 	}
3656 }
3657 
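/**
 * hwi_cleanup_port() - Release all FW resources owned by the port
 * @phba: ptr to priv structure
 *
 * Tear down in roughly the reverse order of hwi_init_port: session
 * cleanup per ULP, WRBQs, default PDU queues, posted SGL pages, CQs,
 * MCC queues and EQs, followed by a function reset and the final
 * special WRB telling FW that the driver is unloading.
 */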
3658 static void hwi_cleanup_port(struct beiscsi_hba *phba)
3659 {
3660 	struct be_queue_info *q;
3661 	struct be_ctrl_info *ctrl = &phba->ctrl;
3662 	struct hwi_controller *phwi_ctrlr;
3663 	struct hwi_context_memory *phwi_context;
3664 	int i, eq_for_mcc, ulp_num;
3665 
3666 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3667 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3668 			beiscsi_cmd_iscsi_cleanup(phba, ulp_num);
3669 
3670 	/*
3671 	 * Purge all EQ entries that may have been left out. This is to
3672 	 * work around a problem seen occasionally where the driver gets an
3673 	 * interrupt with the EQ entry bit set after stopping the controller.
3674 	 */
3675 	hwi_purge_eq(phba);
3676 
3677 	phwi_ctrlr = phba->phwi_ctrlr;
3678 	phwi_context = phwi_ctrlr->phwi_ctxt;
3679 
3680 	be_cmd_iscsi_remove_template_hdr(ctrl);
3681 
3682 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3683 		q = &phwi_context->be_wrbq[i];
3684 		if (q->created)
3685 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
3686 	}
3687 	kfree(phwi_context->be_wrbq);
3688 	free_wrb_handles(phba);
3689 
3690 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3691 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3692 
3693 			q = &phwi_context->be_def_hdrq[ulp_num];
3694 			if (q->created)
3695 				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3696 
3697 			q = &phwi_context->be_def_dataq[ulp_num];
3698 			if (q->created)
3699 				beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
3700 		}
3701 	}
3702 
3703 	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
3704 
3705 	for (i = 0; i < (phba->num_cpus); i++) {
3706 		q = &phwi_context->be_cq[i];
3707 		if (q->created) {
3708 			be_queue_free(phba, q);
3709 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
3710 		}
3711 	}
3712 
3713 	be_mcc_queues_destroy(phba);
3714 	if (phba->msix_enabled)
3715 		eq_for_mcc = 1;
3716 	else
3717 		eq_for_mcc = 0;
3718 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
3719 		q = &phwi_context->be_eq[i].q;
3720 		if (q->created) {
3721 			be_queue_free(phba, q);
3722 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
3723 		}
3724 	}
3725 	/* this ensures complete FW cleanup */
3726 	beiscsi_cmd_function_reset(phba);
3727 	/* last communication, indicate driver is unloading */
3728 	beiscsi_cmd_special_wrb(&phba->ctrl, 0);
3729 }
3730 
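/**
 * hwi_init_port() - Create all FW queues and post port resources
 * @phba: ptr to priv structure
 *
 * Create EQs, MCC queues and CQs, the default PDU header/data rings for
 * each supported ULP, post SGL pages and template headers, create the
 * WRB rings, and build the CID to async CRI maps. On any failure the
 * port is cleaned up before returning.
 */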
3731 static int hwi_init_port(struct beiscsi_hba *phba)
3732 {
3733 	struct hwi_controller *phwi_ctrlr;
3734 	struct hwi_context_memory *phwi_context;
3735 	unsigned int def_pdu_ring_sz;
3736 	struct be_ctrl_info *ctrl = &phba->ctrl;
3737 	int status, ulp_num;
3738 
3739 	phwi_ctrlr = phba->phwi_ctrlr;
3740 	phwi_context = phwi_ctrlr->phwi_ctxt;
3741 	phwi_context->max_eqd = 128;
3742 	phwi_context->min_eqd = 0;
3743 	phwi_context->cur_eqd = 32;
3744 	/* set port optic state to unknown */
3745 	phba->optic_state = 0xff;
3746 
3747 	status = beiscsi_create_eqs(phba, phwi_context);
3748 	if (status != 0) {
3749 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3750 			    "BM_%d : EQ not created\n");
3751 		goto error;
3752 	}
3753 
3754 	status = be_mcc_queues_create(phba, phwi_context);
3755 	if (status != 0)
3756 		goto error;
3757 
3758 	status = beiscsi_check_supported_fw(ctrl, phba);
3759 	if (status != 0) {
3760 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3761 			    "BM_%d : Unsupported fw version\n");
3762 		goto error;
3763 	}
3764 
3765 	status = beiscsi_create_cqs(phba, phwi_context);
3766 	if (status != 0) {
3767 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3768 			    "BM_%d : CQ not created\n");
3769 		goto error;
3770 	}
3771 
3772 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3773 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3774 			def_pdu_ring_sz =
3775 				BEISCSI_GET_CID_COUNT(phba, ulp_num) *
3776 				sizeof(struct phys_addr);
3777 
3778 			status = beiscsi_create_def_hdr(phba, phwi_context,
3779 							phwi_ctrlr,
3780 							def_pdu_ring_sz,
3781 							ulp_num);
3782 			if (status != 0) {
3783 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3784 					    "BM_%d : Default Header not created for ULP : %d\n",
3785 					    ulp_num);
3786 				goto error;
3787 			}
3788 
3789 			status = beiscsi_create_def_data(phba, phwi_context,
3790 							 phwi_ctrlr,
3791 							 def_pdu_ring_sz,
3792 							 ulp_num);
3793 			if (status != 0) {
3794 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3795 					    "BM_%d : Default Data not created for ULP : %d\n",
3796 					    ulp_num);
3797 				goto error;
3798 			}
3799 			/**
3800 			 * Now that the default PDU rings have been created,
3801 			 * let EP know about it.
3802 			 */
3803 			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_HDR,
3804 						 ulp_num);
3805 			beiscsi_hdq_post_handles(phba, BEISCSI_DEFQ_DATA,
3806 						 ulp_num);
3807 		}
3808 	}
3809 
3810 	status = beiscsi_post_pages(phba);
3811 	if (status != 0) {
3812 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3813 			    "BM_%d : Post SGL Pages Failed\n");
3814 		goto error;
3815 	}
3816 
3817 	status = beiscsi_post_template_hdr(phba);
3818 	if (status != 0) {
3819 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3820 			    "BM_%d : Template HDR Posting for CXN Failed\n");
3821 	}
3822 
3823 	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr);
3824 	if (status != 0) {
3825 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3826 			    "BM_%d : WRB Rings not created\n");
3827 		goto error;
3828 	}
3829 
3830 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
3831 		uint16_t async_arr_idx = 0;
3832 
3833 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported)) {
3834 			uint16_t cri = 0;
3835 			struct hd_async_context *pasync_ctx;
3836 
3837 			pasync_ctx = HWI_GET_ASYNC_PDU_CTX(
3838 				     phwi_ctrlr, ulp_num);
3839 			for (cri = 0; cri <
3840 			     phba->params.cxns_per_ctrl; cri++) {
3841 				if (ulp_num == BEISCSI_GET_ULP_FROM_CRI
3842 					       (phwi_ctrlr, cri))
3843 					pasync_ctx->cid_to_async_cri_map[
3844 					phwi_ctrlr->wrb_context[cri].cid] =
3845 					async_arr_idx++;
3846 			}
3847 		}
3848 	}
3849 
3850 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3851 		    "BM_%d : hwi_init_port success\n");
3852 	return 0;
3853 
3854 error:
3855 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3856 		    "BM_%d : hwi_init_port failed\n");
3857 	hwi_cleanup_port(phba);
3858 	return status;
3859 }
3860 
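/**
 * hwi_init_controller() - Initialize HWI context and bring up the port
 * @phba: ptr to priv structure
 *
 * Locate the hwi_context_memory inside HWI_MEM_ADDN_CONTEXT, set up the
 * WRB handles and async PDU contexts, then call hwi_init_port.
 */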
3861 static int hwi_init_controller(struct beiscsi_hba *phba)
3862 {
3863 	struct hwi_controller *phwi_ctrlr;
3864 
3865 	phwi_ctrlr = phba->phwi_ctrlr;
3866 	if (phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements == 1) {
3867 		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
3868 		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
3869 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3870 			    "BM_%d :  phwi_ctrlr->phwi_ctxt=%p\n",
3871 			    phwi_ctrlr->phwi_ctxt);
3872 	} else {
3873 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3874 			    "BM_%d : HWI_MEM_ADDN_CONTEXT has more "
3875 			    "than one element. Failing to load\n");
3876 		return -ENOMEM;
3877 	}
3878 
3879 	iscsi_init_global_templates(phba);
3880 	if (beiscsi_init_wrb_handle(phba))
3881 		return -ENOMEM;
3882 
3883 	if (hwi_init_async_pdu_ctx(phba)) {
3884 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3885 			    "BM_%d : hwi_init_async_pdu_ctx failed\n");
3886 		return -ENOMEM;
3887 	}
3888 
3889 	if (hwi_init_port(phba) != 0) {
3890 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3891 			    "BM_%d : hwi_init_controller failed\n");
3892 
3893 		return -ENOMEM;
3894 	}
3895 	return 0;
3896 }
3897 
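/**
 * beiscsi_free_mem() - Free all memory described by init_mem
 * @phba: ptr to priv structure
 *
 * Release every DMA region recorded in init_mem along with the
 * descriptor arrays, the WRB contexts and the HWI controller itself.
 */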
3898 static void beiscsi_free_mem(struct beiscsi_hba *phba)
3899 {
3900 	struct be_mem_descriptor *mem_descr;
3901 	int i, j;
3902 
3903 	mem_descr = phba->init_mem;
3904 	i = 0;
3905 	j = 0;
3906 	for (i = 0; i < SE_MEM_MAX; i++) {
3907 		for (j = mem_descr->num_elements; j > 0; j--) {
3908 			pci_free_consistent(phba->pcidev,
3909 			  mem_descr->mem_array[j - 1].size,
3910 			  mem_descr->mem_array[j - 1].virtual_address,
3911 			  (unsigned long)mem_descr->mem_array[j - 1].
3912 			  bus_address.u.a64.address);
3913 		}
3914 
3915 		kfree(mem_descr->mem_array);
3916 		mem_descr++;
3917 	}
3918 	kfree(phba->init_mem);
3919 	kfree(phba->phwi_ctrlr->wrb_context);
3920 	kfree(phba->phwi_ctrlr);
3921 }
3922 
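/**
 * beiscsi_init_sgl_handle() - Build the IO and EH SGL handle pools
 * @phba: ptr to priv structure
 *
 * Carve the HWI_MEM_SGLH region into sgl_handle pools for regular IO
 * and for error handling, and attach each handle to its ICD fragment
 * in the HWI_MEM_SGE region; sgl_index is ulp_icd_start + array index.
 */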
3923 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3924 {
3925 	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3926 	struct sgl_handle *psgl_handle;
3927 	struct iscsi_sge *pfrag;
3928 	unsigned int arr_index, i, idx;
3929 	unsigned int ulp_icd_start, ulp_num = 0;
3930 
3931 	phba->io_sgl_hndl_avbl = 0;
3932 	phba->eh_sgl_hndl_avbl = 0;
3933 
3934 	mem_descr_sglh = phba->init_mem;
3935 	mem_descr_sglh += HWI_MEM_SGLH;
3936 	if (mem_descr_sglh->num_elements == 1) {
3937 		phba->io_sgl_hndl_base = kcalloc(phba->params.ios_per_ctrl,
3938 						 sizeof(struct sgl_handle *),
3939 						 GFP_KERNEL);
3940 		if (!phba->io_sgl_hndl_base) {
3941 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3942 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
3943 			return -ENOMEM;
3944 		}
3945 		phba->eh_sgl_hndl_base = kcalloc(phba->params.icds_per_ctrl -
3946 						 phba->params.ios_per_ctrl,
3947 						 sizeof(struct sgl_handle *),
3948 						 GFP_KERNEL);
3949 		if (!phba->eh_sgl_hndl_base) {
3950 			kfree(phba->io_sgl_hndl_base);
3951 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3952 				    "BM_%d : Mem Alloc Failed. Failing to load\n");
3953 			return -ENOMEM;
3954 		}
3955 	} else {
3956 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
3957 			    "BM_%d : HWI_MEM_SGLH has more than one element. "
3958 			    "Failing to load\n");
3959 		return -ENOMEM;
3960 	}
3961 
3962 	arr_index = 0;
3963 	idx = 0;
3964 	while (idx < mem_descr_sglh->num_elements) {
3965 		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3966 
3967 		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3968 		      sizeof(struct sgl_handle)); i++) {
3969 			if (arr_index < phba->params.ios_per_ctrl) {
3970 				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3971 				phba->io_sgl_hndl_avbl++;
3972 				arr_index++;
3973 			} else {
3974 				phba->eh_sgl_hndl_base[arr_index -
3975 					phba->params.ios_per_ctrl] =
3976 								psgl_handle;
3977 				arr_index++;
3978 				phba->eh_sgl_hndl_avbl++;
3979 			}
3980 			psgl_handle++;
3981 		}
3982 		idx++;
3983 	}
3984 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3985 		    "BM_%d : phba->io_sgl_hndl_avbl=%d "
3986 		    "phba->eh_sgl_hndl_avbl=%d\n",
3987 		    phba->io_sgl_hndl_avbl,
3988 		    phba->eh_sgl_hndl_avbl);
3989 
3990 	mem_descr_sg = phba->init_mem;
3991 	mem_descr_sg += HWI_MEM_SGE;
3992 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
3993 		    "BM_%d : mem_descr_sg->num_elements=%d\n",
3994 		    mem_descr_sg->num_elements);
3995 
3996 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++)
3997 		if (test_bit(ulp_num, &phba->fw_config.ulp_supported))
3998 			break;
3999 
4000 	ulp_icd_start = phba->fw_config.iscsi_icd_start[ulp_num];
4001 
4002 	arr_index = 0;
4003 	idx = 0;
4004 	while (idx < mem_descr_sg->num_elements) {
4005 		pfrag = mem_descr_sg->mem_array[idx].virtual_address;
4006 
4007 		for (i = 0;
4008 		     i < (mem_descr_sg->mem_array[idx].size) /
4009 		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
4010 		     i++) {
4011 			if (arr_index < phba->params.ios_per_ctrl)
4012 				psgl_handle = phba->io_sgl_hndl_base[arr_index];
4013 			else
4014 				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
4015 						phba->params.ios_per_ctrl];
4016 			psgl_handle->pfrag = pfrag;
4017 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
4018 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
4019 			pfrag += phba->params.num_sge_per_io;
4020 			psgl_handle->sgl_index = ulp_icd_start + arr_index++;
4021 		}
4022 		idx++;
4023 	}
4024 	phba->io_sgl_free_index = 0;
4025 	phba->io_sgl_alloc_index = 0;
4026 	phba->eh_sgl_free_index = 0;
4027 	phba->eh_sgl_alloc_index = 0;
4028 	return 0;
4029 }
4030 
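/**
 * hba_setup_cid_tbls() - Allocate and populate the CID tables
 * @phba: ptr to priv structure
 *
 * Allocate a CID array per supported ULP plus the ep_array and
 * conn_table, then fill the CID arrays from the WRB contexts.
 */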
4031 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
4032 {
4033 	int ret;
4034 	uint16_t i, ulp_num;
4035 	struct ulp_cid_info *ptr_cid_info = NULL;
4036 
4037 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4038 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4039 			ptr_cid_info = kzalloc(sizeof(struct ulp_cid_info),
4040 					       GFP_KERNEL);
4041 
4042 			if (!ptr_cid_info) {
4043 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4044 					    "BM_%d : Failed to allocate memory "
4045 					    "for ULP_CID_INFO for ULP : %d\n",
4046 					    ulp_num);
4047 				ret = -ENOMEM;
4048 				goto free_memory;
4049 
4050 			}
4051 
4052 			/* Allocate memory for CID array */
4053 			ptr_cid_info->cid_array =
4054 				kcalloc(BEISCSI_GET_CID_COUNT(phba, ulp_num),
4055 					sizeof(*ptr_cid_info->cid_array),
4056 					GFP_KERNEL);
4057 			if (!ptr_cid_info->cid_array) {
4058 				beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4059 					    "BM_%d : Failed to allocate memory "
4060 					    "for CID_ARRAY for ULP : %d\n",
4061 					    ulp_num);
4062 				kfree(ptr_cid_info);
4063 				ptr_cid_info = NULL;
4064 				ret = -ENOMEM;
4065 
4066 				goto free_memory;
4067 			}
4068 			ptr_cid_info->avlbl_cids = BEISCSI_GET_CID_COUNT(
4069 						   phba, ulp_num);
4070 
4071 			/* Save the cid_info_array ptr */
4072 			phba->cid_array_info[ulp_num] = ptr_cid_info;
4073 		}
4074 	}
4075 	phba->ep_array = kcalloc(phba->params.cxns_per_ctrl,
4076 				 sizeof(struct iscsi_endpoint *), GFP_KERNEL);
4077 	if (!phba->ep_array) {
4078 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4079 			    "BM_%d : Failed to allocate memory in "
4080 			    "hba_setup_cid_tbls\n");
4081 		ret = -ENOMEM;
4082 
4083 		goto free_memory;
4084 	}
4085 
4086 	phba->conn_table = kcalloc(phba->params.cxns_per_ctrl,
4087 				   sizeof(struct beiscsi_conn *), GFP_KERNEL);
4088 	if (!phba->conn_table) {
4089 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4090 			    "BM_%d : Failed to allocate memory in "
4091 			    "hba_setup_cid_tbls\n");
4092 
4093 		kfree(phba->ep_array);
4094 		phba->ep_array = NULL;
4095 		ret = -ENOMEM;
4096 
4097 		goto free_memory;
4098 	}
4099 
4100 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
4101 		ulp_num = phba->phwi_ctrlr->wrb_context[i].ulp_num;
4102 
4103 		ptr_cid_info = phba->cid_array_info[ulp_num];
4104 		ptr_cid_info->cid_array[ptr_cid_info->cid_alloc++] =
4105 			phba->phwi_ctrlr->wrb_context[i].cid;
4106 
4107 	}
4108 
4109 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4110 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4111 			ptr_cid_info = phba->cid_array_info[ulp_num];
4112 
4113 			ptr_cid_info->cid_alloc = 0;
4114 			ptr_cid_info->cid_free = 0;
4115 		}
4116 	}
4117 	return 0;
4118 
4119 free_memory:
4120 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4121 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4122 			ptr_cid_info = phba->cid_array_info[ulp_num];
4123 
4124 			if (ptr_cid_info) {
4125 				kfree(ptr_cid_info->cid_array);
4126 				kfree(ptr_cid_info);
4127 				phba->cid_array_info[ulp_num] = NULL;
4128 			}
4129 		}
4130 	}
4131 
4132 	return ret;
4133 }
4134 
4135 static void hwi_enable_intr(struct beiscsi_hba *phba)
4136 {
4137 	struct be_ctrl_info *ctrl = &phba->ctrl;
4138 	struct hwi_controller *phwi_ctrlr;
4139 	struct hwi_context_memory *phwi_context;
4140 	struct be_queue_info *eq;
4141 	u8 __iomem *addr;
4142 	u32 reg, i;
4143 	u32 enabled;
4144 
4145 	phwi_ctrlr = phba->phwi_ctrlr;
4146 	phwi_context = phwi_ctrlr->phwi_ctxt;
4147 
4148 	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
4149 			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
4150 	reg = ioread32(addr);
4151 
4152 	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4153 	if (!enabled) {
4154 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4155 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4156 			    "BM_%d : reg = 0x%08x addr = %p\n", reg, addr);
4157 		iowrite32(reg, addr);
4158 	}
4159 
4160 	if (!phba->msix_enabled) {
4161 		eq = &phwi_context->be_eq[0].q;
4162 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4163 			    "BM_%d : eq->id=%d\n", eq->id);
4164 
4165 		hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4166 	} else {
4167 		for (i = 0; i <= phba->num_cpus; i++) {
4168 			eq = &phwi_context->be_eq[i].q;
4169 			beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
4170 				    "BM_%d : eq->id=%d\n", eq->id);
4171 			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
4172 		}
4173 	}
4174 }
4175 
4176 static void hwi_disable_intr(struct beiscsi_hba *phba)
4177 {
4178 	struct be_ctrl_info *ctrl = &phba->ctrl;
4179 
4180 	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
4181 	u32 reg = ioread32(addr);
4182 
4183 	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4184 	if (enabled) {
4185 		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
4186 		iowrite32(reg, addr);
4187 	} else
4188 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
4189 			    "BM_%d : In hwi_disable_intr, Already Disabled\n");
4190 }
4191 
4192 static int beiscsi_init_port(struct beiscsi_hba *phba)
4193 {
4194 	int ret;
4195 
4196 	ret = hwi_init_controller(phba);
4197 	if (ret < 0) {
4198 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4199 			    "BM_%d : init controller failed\n");
4200 		return ret;
4201 	}
4202 	ret = beiscsi_init_sgl_handle(phba);
4203 	if (ret < 0) {
4204 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4205 			    "BM_%d : init sgl handles failed\n");
4206 		goto cleanup_port;
4207 	}
4208 
4209 	ret = hba_setup_cid_tbls(phba);
4210 	if (ret < 0) {
4211 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
4212 			    "BM_%d : setup CID table failed\n");
4213 		kfree(phba->io_sgl_hndl_base);
4214 		kfree(phba->eh_sgl_hndl_base);
4215 		goto cleanup_port;
4216 	}
4217 	return ret;
4218 
4219 cleanup_port:
4220 	hwi_cleanup_port(phba);
4221 	return ret;
4222 }
4223 
4224 static void beiscsi_cleanup_port(struct beiscsi_hba *phba)
4225 {
4226 	struct ulp_cid_info *ptr_cid_info = NULL;
4227 	int ulp_num;
4228 
4229 	kfree(phba->io_sgl_hndl_base);
4230 	kfree(phba->eh_sgl_hndl_base);
4231 	kfree(phba->ep_array);
4232 	kfree(phba->conn_table);
4233 
4234 	for (ulp_num = 0; ulp_num < BEISCSI_ULP_COUNT; ulp_num++) {
4235 		if (test_bit(ulp_num, (void *)&phba->fw_config.ulp_supported)) {
4236 			ptr_cid_info = phba->cid_array_info[ulp_num];
4237 
4238 			if (ptr_cid_info) {
4239 				kfree(ptr_cid_info->cid_array);
4240 				kfree(ptr_cid_info);
4241 				phba->cid_array_info[ulp_num] = NULL;
4242 			}
4243 		}
4244 	}
4245 }
4246 
4247 /**
4248  * beiscsi_free_mgmt_task_handles() - Free driver CXN resources
4249  * @beiscsi_conn: ptr to the conn to be cleaned up
4250  * @task: ptr to iscsi_task resource to be freed.
4251  *
4252  * Free driver mgmt resources bound to CXN.
4253  */
4254 void
4255 beiscsi_free_mgmt_task_handles(struct beiscsi_conn *beiscsi_conn,
4256 				struct iscsi_task *task)
4257 {
4258 	struct beiscsi_io_task *io_task;
4259 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4260 	struct hwi_wrb_context *pwrb_context;
4261 	struct hwi_controller *phwi_ctrlr;
4262 	uint16_t cri_index = BE_GET_CRI_FROM_CID(
4263 				beiscsi_conn->beiscsi_conn_cid);
4264 
4265 	phwi_ctrlr = phba->phwi_ctrlr;
4266 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4267 
4268 	io_task = task->dd_data;
4269 
4270 	if (io_task->pwrb_handle) {
4271 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4272 		io_task->pwrb_handle = NULL;
4273 	}
4274 
4275 	if (io_task->psgl_handle) {
4276 		free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4277 		io_task->psgl_handle = NULL;
4278 	}
4279 
4280 	if (io_task->mtask_addr) {
4281 		pci_unmap_single(phba->pcidev,
4282 				 io_task->mtask_addr,
4283 				 io_task->mtask_data_count,
4284 				 PCI_DMA_TODEVICE);
4285 		io_task->mtask_addr = 0;
4286 	}
4287 }
4288 
4289 /**
4290  * beiscsi_cleanup_task() - Free driver resources of the task
4291  * @task: ptr to the iscsi task
4292  */
4294 static void beiscsi_cleanup_task(struct iscsi_task *task)
4295 {
4296 	struct beiscsi_io_task *io_task = task->dd_data;
4297 	struct iscsi_conn *conn = task->conn;
4298 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4299 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4300 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4301 	struct hwi_wrb_context *pwrb_context;
4302 	struct hwi_controller *phwi_ctrlr;
4303 	uint16_t cri_index = BE_GET_CRI_FROM_CID(
4304 			     beiscsi_conn->beiscsi_conn_cid);
4305 
4306 	phwi_ctrlr = phba->phwi_ctrlr;
4307 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4308 
4309 	if (io_task->cmd_bhs) {
4310 		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4311 			      io_task->bhs_pa.u.a64.address);
4312 		io_task->cmd_bhs = NULL;
4313 		task->hdr = NULL;
4314 	}
4315 
4316 	if (task->sc) {
4317 		if (io_task->pwrb_handle) {
4318 			free_wrb_handle(phba, pwrb_context,
4319 					io_task->pwrb_handle);
4320 			io_task->pwrb_handle = NULL;
4321 		}
4322 
4323 		if (io_task->psgl_handle) {
4324 			free_io_sgl_handle(phba, io_task->psgl_handle);
4325 			io_task->psgl_handle = NULL;
4326 		}
4327 
4328 		if (io_task->scsi_cmnd) {
4329 			if (io_task->num_sg)
4330 				scsi_dma_unmap(io_task->scsi_cmnd);
4331 			io_task->scsi_cmnd = NULL;
4332 		}
4333 	} else {
4334 		if (!beiscsi_conn->login_in_progress)
4335 			beiscsi_free_mgmt_task_handles(beiscsi_conn, task);
4336 	}
4337 }
4338 
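/**
 * beiscsi_offload_connection() - Post CONTEXT_UPDATE WRB to offload CXN
 * @beiscsi_conn: ptr to the conn being offloaded
 * @params: negotiated offload parameters
 *
 * Clean up the login task, fill the context-update WRB for the chip
 * family in use, and ring the doorbell to hand the connection to FW.
 */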
4339 void
4340 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
4341 			   struct beiscsi_offload_params *params)
4342 {
4343 	struct wrb_handle *pwrb_handle;
4344 	struct hwi_wrb_context *pwrb_context = NULL;
4345 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4346 	struct iscsi_task *task = beiscsi_conn->task;
4347 	struct iscsi_session *session = task->conn->session;
4348 	u32 doorbell = 0;
4349 
4350 	/*
4351 	 * We can always use 0 here because it is reserved by libiscsi for
4352 	 * login/startup related tasks.
4353 	 */
4354 	beiscsi_conn->login_in_progress = 0;
4355 	spin_lock_bh(&session->back_lock);
4356 	beiscsi_cleanup_task(task);
4357 	spin_unlock_bh(&session->back_lock);
4358 
4359 	pwrb_handle = alloc_wrb_handle(phba, beiscsi_conn->beiscsi_conn_cid,
4360 				       &pwrb_context);
4361 
4362 	/* Check for the adapter family */
4363 	if (is_chip_be2_be3r(phba))
4364 		beiscsi_offload_cxn_v0(params, pwrb_handle,
4365 				       phba->init_mem,
4366 				       pwrb_context);
4367 	else
4368 		beiscsi_offload_cxn_v2(params, pwrb_handle,
4369 				       pwrb_context);
4370 
4371 	be_dws_le_to_cpu(pwrb_handle->pwrb,
4372 			 sizeof(struct iscsi_target_context_update_wrb));
4373 
4374 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4375 	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
4376 			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
4377 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4378 	iowrite32(doorbell, phba->db_va +
4379 		  beiscsi_conn->doorbell_offset);
4380 
4381 	/*
4382 	 * There is no completion for CONTEXT_UPDATE. Completion of the next
4383 	 * WRB posted guarantees that FW has processed and DMA'd it.
4384 	 * Use beiscsi_put_wrb_handle to put it back in the pool, which makes
4385 	 * sure the WRB is zeroed or reused only after wrbs_per_cxn posts.
4386 	 */
4387 	beiscsi_put_wrb_handle(pwrb_context, pwrb_handle,
4388 			       phba->params.wrbs_per_cxn);
4389 	beiscsi_log(phba, KERN_INFO,
4390 		    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4391 		    "BM_%d : put CONTEXT_UPDATE pwrb_handle=%p free_index=0x%x wrb_handles_available=%d\n",
4392 		    pwrb_handle, pwrb_context->free_index,
4393 		    pwrb_context->wrb_handles_available);
4394 }
4395 
4396 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
4397 			      int *index, int *age)
4398 {
4399 	*index = (int)itt;
4400 	if (age)
4401 		*age = conn->session->age;
4402 }
4403 
4404 /**
4405  * beiscsi_alloc_pdu - allocates pdu and related resources
4406  * @task: libiscsi task
4407  * @opcode: opcode of pdu for task
4408  *
4409  * This is called with the session lock held. It will allocate
4410  * the wrb and sgl if needed for the command. And it will prep
4411  * the pdu's itt. beiscsi_parse_pdu will later translate
4412  * the pdu itt to the libiscsi task itt.
4413  */
4414 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
4415 {
4416 	struct beiscsi_io_task *io_task = task->dd_data;
4417 	struct iscsi_conn *conn = task->conn;
4418 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4419 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4420 	struct hwi_wrb_context *pwrb_context;
4421 	struct hwi_controller *phwi_ctrlr;
4422 	itt_t itt;
4423 	uint16_t cri_index = 0;
4424 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
4425 	dma_addr_t paddr;
4426 
4427 	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
4428 					  GFP_ATOMIC, &paddr);
4429 	if (!io_task->cmd_bhs)
4430 		return -ENOMEM;
4431 	io_task->bhs_pa.u.a64.address = paddr;
4432 	io_task->libiscsi_itt = (itt_t)task->itt;
4433 	io_task->conn = beiscsi_conn;
4434 
4435 	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
4436 	task->hdr_max = sizeof(struct be_cmd_bhs);
4437 	io_task->psgl_handle = NULL;
4438 	io_task->pwrb_handle = NULL;
4439 
4440 	if (task->sc) {
4441 		io_task->psgl_handle = alloc_io_sgl_handle(phba);
4442 		if (!io_task->psgl_handle) {
4443 			beiscsi_log(phba, KERN_ERR,
4444 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4445 				    "BM_%d : Alloc of IO_SGL_ICD Failed "
4446 				    "for the CID : %d\n",
4447 				    beiscsi_conn->beiscsi_conn_cid);
4448 			goto free_hndls;
4449 		}
4450 		io_task->pwrb_handle = alloc_wrb_handle(phba,
4451 					beiscsi_conn->beiscsi_conn_cid,
4452 					&io_task->pwrb_context);
4453 		if (!io_task->pwrb_handle) {
4454 			beiscsi_log(phba, KERN_ERR,
4455 				    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4456 				    "BM_%d : Alloc of WRB_HANDLE Failed "
4457 				    "for the CID : %d\n",
4458 				    beiscsi_conn->beiscsi_conn_cid);
4459 			goto free_io_hndls;
4460 		}
4461 	} else {
4462 		io_task->scsi_cmnd = NULL;
4463 		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
4464 			beiscsi_conn->task = task;
4465 			if (!beiscsi_conn->login_in_progress) {
4466 				io_task->psgl_handle = (struct sgl_handle *)
4467 						alloc_mgmt_sgl_handle(phba);
4468 				if (!io_task->psgl_handle) {
4469 					beiscsi_log(phba, KERN_ERR,
4470 						    BEISCSI_LOG_IO |
4471 						    BEISCSI_LOG_CONFIG,
4472 						    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
4473 						    "for the CID : %d\n",
4474 						    beiscsi_conn->
4475 						    beiscsi_conn_cid);
4476 					goto free_hndls;
4477 				}
4478 
4479 				beiscsi_conn->login_in_progress = 1;
4480 				beiscsi_conn->plogin_sgl_handle =
4481 							io_task->psgl_handle;
4482 				io_task->pwrb_handle =
4483 					alloc_wrb_handle(phba,
4484 					beiscsi_conn->beiscsi_conn_cid,
4485 					&io_task->pwrb_context);
4486 				if (!io_task->pwrb_handle) {
4487 					beiscsi_log(phba, KERN_ERR,
4488 						    BEISCSI_LOG_IO |
4489 						    BEISCSI_LOG_CONFIG,
4490 						    "BM_%d : Alloc of WRB_HANDLE Failed "
4491 						    "for the CID : %d\n",
4492 						    beiscsi_conn->
4493 						    beiscsi_conn_cid);
4494 					goto free_mgmt_hndls;
4495 				}
4496 				beiscsi_conn->plogin_wrb_handle =
4497 							io_task->pwrb_handle;
4498 
4499 			} else {
4500 				io_task->psgl_handle =
4501 						beiscsi_conn->plogin_sgl_handle;
4502 				io_task->pwrb_handle =
4503 						beiscsi_conn->plogin_wrb_handle;
4504 			}
4505 		} else {
4506 			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
4507 			if (!io_task->psgl_handle) {
4508 				beiscsi_log(phba, KERN_ERR,
4509 					    BEISCSI_LOG_IO |
4510 					    BEISCSI_LOG_CONFIG,
4511 					    "BM_%d : Alloc of MGMT_SGL_ICD Failed "
4512 					    "for the CID : %d\n",
4513 					    beiscsi_conn->
4514 					    beiscsi_conn_cid);
4515 				goto free_hndls;
4516 			}
4517 			io_task->pwrb_handle =
4518 					alloc_wrb_handle(phba,
4519 					beiscsi_conn->beiscsi_conn_cid,
4520 					&io_task->pwrb_context);
4521 			if (!io_task->pwrb_handle) {
4522 				beiscsi_log(phba, KERN_ERR,
4523 					    BEISCSI_LOG_IO | BEISCSI_LOG_CONFIG,
4524 					    "BM_%d : Alloc of WRB_HANDLE Failed "
4525 					    "for the CID : %d\n",
4526 					    beiscsi_conn->beiscsi_conn_cid);
4527 				goto free_mgmt_hndls;
4528 			}
4529 
4530 		}
4531 	}
4532 	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
4533 				 wrb_index << 16) | (unsigned int)
4534 				(io_task->psgl_handle->sgl_index));
4535 	io_task->pwrb_handle->pio_handle = task;
4536 
4537 	io_task->cmd_bhs->iscsi_hdr.itt = itt;
4538 	return 0;
4539 
4540 free_io_hndls:
4541 	free_io_sgl_handle(phba, io_task->psgl_handle);
4542 	goto free_hndls;
4543 free_mgmt_hndls:
4544 	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
4545 	io_task->psgl_handle = NULL;
4546 free_hndls:
4547 	phwi_ctrlr = phba->phwi_ctrlr;
4548 	cri_index = BE_GET_CRI_FROM_CID(
4549 			beiscsi_conn->beiscsi_conn_cid);
4550 	pwrb_context = &phwi_ctrlr->wrb_context[cri_index];
4551 	if (io_task->pwrb_handle)
4552 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
4553 	io_task->pwrb_handle = NULL;
4554 	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
4555 		      io_task->bhs_pa.u.a64.address);
4556 	io_task->cmd_bhs = NULL;
4557 	return -ENOMEM;
4558 }
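
/**
 * beiscsi_iotask_v2() - Post a SCSI command WRB in the v2 format
 * @task: iscsi task whose WRB is posted
 * @sg: mapped scatterlist of the command
 * @num_sg: number of mapped SG entries
 * @xferlen: total data transfer length
 * @writedir: 1 for a write command, 0 for a read
 *
 * Fill the v2 WRB, link it into the per-connection WRB chain and ring
 * the doorbell to post it.
 */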
4559 static int beiscsi_iotask_v2(struct iscsi_task *task, struct scatterlist *sg,
4560 		       unsigned int num_sg, unsigned int xferlen,
4561 		       unsigned int writedir)
4562 {
4564 	struct beiscsi_io_task *io_task = task->dd_data;
4565 	struct iscsi_conn *conn = task->conn;
4566 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4567 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4568 	struct iscsi_wrb *pwrb = NULL;
4569 	unsigned int doorbell = 0;
4570 
4571 	pwrb = io_task->pwrb_handle->pwrb;
4572 
4573 	io_task->bhs_len = sizeof(struct be_cmd_bhs);
4574 
4575 	if (writedir) {
4576 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4577 			      INI_WR_CMD);
4578 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 1);
4579 	} else {
4580 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, type, pwrb,
4581 			      INI_RD_CMD);
4582 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, dsp, pwrb, 0);
4583 	}
4584 
4585 	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb_v2,
4586 					  type, pwrb);
4587 
4588 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, lun, pwrb,
4589 		      cpu_to_be16(*(unsigned short *)
4590 		      &io_task->cmd_bhs->iscsi_hdr.lun));
4591 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb, xferlen);
4592 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4593 		      io_task->pwrb_handle->wrb_index);
4594 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4595 		      be32_to_cpu(task->cmdsn));
4596 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4597 		      io_task->psgl_handle->sgl_index);
4598 
4599 	hwi_write_sgl_v2(pwrb, sg, num_sg, io_task);
4600 	AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4601 		      io_task->pwrb_handle->wrb_index);
4602 	if (io_task->pwrb_context->plast_wrb)
4603 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
4604 			      io_task->pwrb_context->plast_wrb,
4605 			      io_task->pwrb_handle->wrb_index);
4606 	io_task->pwrb_context->plast_wrb = pwrb;
4607 
4608 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4609 
4610 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4611 	doorbell |= (io_task->pwrb_handle->wrb_index &
4612 		     DB_DEF_PDU_WRB_INDEX_MASK) <<
4613 		     DB_DEF_PDU_WRB_INDEX_SHIFT;
4614 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4615 	iowrite32(doorbell, phba->db_va +
4616 		  beiscsi_conn->doorbell_offset);
4617 	return 0;
4618 }
4619 
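/**
 * beiscsi_iotask() - Post a SCSI command WRB in the original format
 * @task: iscsi task whose WRB is posted
 * @sg: mapped scatterlist of the command
 * @num_sg: number of mapped SG entries
 * @xferlen: total data transfer length
 * @writedir: 1 for a write command, 0 for a read
 */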
4620 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
4621 			  unsigned int num_sg, unsigned int xferlen,
4622 			  unsigned int writedir)
4623 {
4625 	struct beiscsi_io_task *io_task = task->dd_data;
4626 	struct iscsi_conn *conn = task->conn;
4627 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4628 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4629 	struct iscsi_wrb *pwrb = NULL;
4630 	unsigned int doorbell = 0;
4631 
4632 	pwrb = io_task->pwrb_handle->pwrb;
4633 	io_task->bhs_len = sizeof(struct be_cmd_bhs);
4634 
4635 	if (writedir) {
4636 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4637 			      INI_WR_CMD);
4638 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
4639 	} else {
4640 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
4641 			      INI_RD_CMD);
4642 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
4643 	}
4644 
4645 	io_task->wrb_type = AMAP_GET_BITS(struct amap_iscsi_wrb,
4646 					  type, pwrb);
4647 
4648 	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
4649 		      cpu_to_be16(*(unsigned short *)
4650 				  &io_task->cmd_bhs->iscsi_hdr.lun));
4651 	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
4652 	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4653 		      io_task->pwrb_handle->wrb_index);
4654 	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4655 		      be32_to_cpu(task->cmdsn));
4656 	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4657 		      io_task->psgl_handle->sgl_index);
4658 
4659 	hwi_write_sgl(pwrb, sg, num_sg, io_task);
4660 
4661 	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4662 		      io_task->pwrb_handle->wrb_index);
4663 	if (io_task->pwrb_context->plast_wrb)
4664 		AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
4665 			      io_task->pwrb_context->plast_wrb,
4666 			      io_task->pwrb_handle->wrb_index);
4667 	io_task->pwrb_context->plast_wrb = pwrb;
4668 
4669 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
4670 
4671 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
4672 	doorbell |= (io_task->pwrb_handle->wrb_index &
4673 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4674 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4675 
4676 	iowrite32(doorbell, phba->db_va +
4677 		  beiscsi_conn->doorbell_offset);
4678 	return 0;
4679 }
4680 
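/**
 * beiscsi_mtask() - Post a WRB for a management PDU
 * @task: iscsi task carrying a login, nop-out, text, TMF or logout PDU
 *
 * Fill the WRB for the chip family in use, set the WRB type from the
 * iscsi opcode, and ring the WRB doorbell.
 */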
4681 static int beiscsi_mtask(struct iscsi_task *task)
4682 {
4683 	struct beiscsi_io_task *io_task = task->dd_data;
4684 	struct iscsi_conn *conn = task->conn;
4685 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
4686 	struct beiscsi_hba *phba = beiscsi_conn->phba;
4687 	struct iscsi_wrb *pwrb = NULL;
4688 	unsigned int doorbell = 0;
4689 	unsigned int cid;
4690 	unsigned int pwrb_typeoffset = 0;
4691 	int ret = 0;
4692 
4693 	cid = beiscsi_conn->beiscsi_conn_cid;
4694 	pwrb = io_task->pwrb_handle->pwrb;
4695 
4696 	if (is_chip_be2_be3r(phba)) {
4697 		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
4698 			      be32_to_cpu(task->cmdsn));
4699 		AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
4700 			      io_task->pwrb_handle->wrb_index);
4701 		AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
4702 			      io_task->psgl_handle->sgl_index);
4703 		AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
4704 			      task->data_count);
4705 		AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
4706 			      io_task->pwrb_handle->wrb_index);
4707 		if (io_task->pwrb_context->plast_wrb)
4708 			AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb,
4709 				      io_task->pwrb_context->plast_wrb,
4710 				      io_task->pwrb_handle->wrb_index);
4711 		io_task->pwrb_context->plast_wrb = pwrb;
4712 
4713 		pwrb_typeoffset = BE_WRB_TYPE_OFFSET;
4714 	} else {
4715 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, cmdsn_itt, pwrb,
4716 			      be32_to_cpu(task->cmdsn));
4717 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, wrb_idx, pwrb,
4718 			      io_task->pwrb_handle->wrb_index);
4719 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, sgl_idx, pwrb,
4720 			      io_task->psgl_handle->sgl_index);
4721 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, r2t_exp_dtl, pwrb,
4722 			      task->data_count);
4723 		AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb, pwrb,
4724 			      io_task->pwrb_handle->wrb_index);
4725 		if (io_task->pwrb_context->plast_wrb)
4726 			AMAP_SET_BITS(struct amap_iscsi_wrb_v2, ptr2nextwrb,
4727 				      io_task->pwrb_context->plast_wrb,
4728 				      io_task->pwrb_handle->wrb_index);
4729 		io_task->pwrb_context->plast_wrb = pwrb;
4730 
4731 		pwrb_typeoffset = SKH_WRB_TYPE_OFFSET;
4732 	}
4733 
4735 	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
4736 	case ISCSI_OP_LOGIN:
4737 		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
4738 		ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4739 		ret = hwi_write_buffer(pwrb, task);
4740 		break;
4741 	case ISCSI_OP_NOOP_OUT:
4742 		if (task->hdr->ttt != ISCSI_RESERVED_TAG) {
4743 			ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4744 			if (is_chip_be2_be3r(phba))
4745 				AMAP_SET_BITS(struct amap_iscsi_wrb,
4746 					      dmsg, pwrb, 1);
4747 			else
4748 				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4749 					      dmsg, pwrb, 1);
4750 		} else {
4751 			ADAPTER_SET_WRB_TYPE(pwrb, INI_RD_CMD, pwrb_typeoffset);
4752 			if (is_chip_be2_be3r(phba))
4753 				AMAP_SET_BITS(struct amap_iscsi_wrb,
4754 					      dmsg, pwrb, 0);
4755 			else
4756 				AMAP_SET_BITS(struct amap_iscsi_wrb_v2,
4757 					      dmsg, pwrb, 0);
4758 		}
4759 		ret = hwi_write_buffer(pwrb, task);
4760 		break;
4761 	case ISCSI_OP_TEXT:
4762 		ADAPTER_SET_WRB_TYPE(pwrb, TGT_DM_CMD, pwrb_typeoffset);
4763 		ret = hwi_write_buffer(pwrb, task);
4764 		break;
4765 	case ISCSI_OP_SCSI_TMFUNC:
4766 		ADAPTER_SET_WRB_TYPE(pwrb, INI_TMF_CMD, pwrb_typeoffset);
4767 		ret = hwi_write_buffer(pwrb, task);
4768 		break;
4769 	case ISCSI_OP_LOGOUT:
4770 		ADAPTER_SET_WRB_TYPE(pwrb, HWH_TYPE_LOGOUT, pwrb_typeoffset);
4771 		ret = hwi_write_buffer(pwrb, task);
4772 		break;
4773 
4774 	default:
4775 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4776 			    "BM_%d : opcode =%d Not supported\n",
4777 			    task->hdr->opcode & ISCSI_OPCODE_MASK);
4778 
4779 		return -EINVAL;
4780 	}
4781 
4782 	if (ret)
4783 		return ret;
4784 
4785 	/* Set the task type */
4786 	io_task->wrb_type = (is_chip_be2_be3r(phba)) ?
4787 		AMAP_GET_BITS(struct amap_iscsi_wrb, type, pwrb) :
4788 		AMAP_GET_BITS(struct amap_iscsi_wrb_v2, type, pwrb);
4789 
4790 	doorbell |= cid & DB_WRB_POST_CID_MASK;
4791 	doorbell |= (io_task->pwrb_handle->wrb_index &
4792 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
4793 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
4794 	iowrite32(doorbell, phba->db_va +
4795 		  beiscsi_conn->doorbell_offset);
4796 	return 0;
4797 }
4798 
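/**
 * beiscsi_task_xmit() - Transmit path entry point from libiscsi
 * @task: iscsi task to transmit
 *
 * Management tasks go through beiscsi_mtask. For SCSI commands, map
 * the SG list and dispatch through the chip-specific iotask_fn.
 */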
4799 static int beiscsi_task_xmit(struct iscsi_task *task)
4800 {
4801 	struct beiscsi_io_task *io_task = task->dd_data;
4802 	struct scsi_cmnd *sc = task->sc;
4803 	struct beiscsi_hba *phba;
4804 	struct scatterlist *sg;
4805 	int num_sg;
4806 	unsigned int  writedir = 0, xferlen = 0;
4807 
4808 	phba = io_task->conn->phba;
4809 	/**
4810 	 * HBA in error includes BEISCSI_HBA_FW_TIMEOUT. IO path might be
4811 	 * operational if FW still gets heartbeat from EP FW. Is management
4812 	 * path really needed to continue further?
4813 	 */
4814 	if (!beiscsi_hba_is_online(phba))
4815 		return -EIO;
4816 
4817 	if (!io_task->conn->login_in_progress)
4818 		task->hdr->exp_statsn = 0;
4819 
4820 	if (!sc)
4821 		return beiscsi_mtask(task);
4822 
4823 	io_task->scsi_cmnd = sc;
4824 	io_task->num_sg = 0;
4825 	num_sg = scsi_dma_map(sc);
4826 	if (num_sg < 0) {
4827 		beiscsi_log(phba, KERN_ERR,
4828 			    BEISCSI_LOG_IO | BEISCSI_LOG_ISCSI,
4829 			    "BM_%d : scsi_dma_map Failed "
4830 			    "Driver_ITT : 0x%x ITT : 0x%x Xferlen : 0x%x\n",
4831 			    be32_to_cpu(io_task->cmd_bhs->iscsi_hdr.itt),
4832 			    io_task->libiscsi_itt, scsi_bufflen(sc));
4833 
4834 		return num_sg;
4835 	}
4836 	/**
4837 	 * For scsi cmd task, check num_sg before unmapping in cleanup_task.
4838 	 * For management task, cleanup_task checks mtask_addr before unmapping.
4839 	 */
4840 	io_task->num_sg = num_sg;
4841 	xferlen = scsi_bufflen(sc);
4842 	sg = scsi_sglist(sc);
4843 	if (sc->sc_data_direction == DMA_TO_DEVICE)
4844 		writedir = 1;
4845 	else
4846 		writedir = 0;
4847 
4848 	return phba->iotask_fn(task, sg, num_sg, xferlen, writedir);
4849 }
4850 
4851 /**
4852  * beiscsi_bsg_request - handle bsg request from ISCSI transport
4853  * @job: job to handle
4854  */
4855 static int beiscsi_bsg_request(struct bsg_job *job)
4856 {
4857 	struct Scsi_Host *shost;
4858 	struct beiscsi_hba *phba;
4859 	struct iscsi_bsg_request *bsg_req = job->request;
4860 	int rc = -EINVAL;
4861 	unsigned int tag;
4862 	struct be_dma_mem nonemb_cmd;
4863 	struct be_cmd_resp_hdr *resp;
4864 	struct iscsi_bsg_reply *bsg_reply = job->reply;
4865 	unsigned short status, extd_status;
4866 
4867 	shost = iscsi_job_to_shost(job);
4868 	phba = iscsi_host_priv(shost);
4869 
4870 	if (!beiscsi_hba_is_online(phba)) {
4871 		beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_CONFIG,
4872 			    "BM_%d : HBA in error 0x%lx\n", phba->state);
4873 		return -ENXIO;
4874 	}
4875 
4876 	switch (bsg_req->msgcode) {
4877 	case ISCSI_BSG_HST_VENDOR:
4878 		nonemb_cmd.va = pci_alloc_consistent(phba->ctrl.pdev,
4879 					job->request_payload.payload_len,
4880 					&nonemb_cmd.dma);
4881 		if (nonemb_cmd.va == NULL) {
4882 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4883 				    "BM_%d : Failed to allocate memory for "
4884 				    "beiscsi_bsg_request\n");
4885 			return -ENOMEM;
4886 		}
4887 		tag = mgmt_vendor_specific_fw_cmd(&phba->ctrl, phba, job,
4888 						  &nonemb_cmd);
4889 		if (!tag) {
4890 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4891 				    "BM_%d : MBX Tag Allocation Failed\n");
4892 
4893 			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4894 					    nonemb_cmd.va, nonemb_cmd.dma);
4895 			return -EAGAIN;
4896 		}
4897 
4898 		rc = wait_event_interruptible_timeout(
4899 					phba->ctrl.mcc_wait[tag],
4900 					phba->ctrl.mcc_tag_status[tag],
4901 					msecs_to_jiffies(
4902 					BEISCSI_HOST_MBX_TIMEOUT));
4903 
4904 		if (!test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
4905 			clear_bit(MCC_TAG_STATE_RUNNING,
4906 				  &phba->ctrl.ptag_state[tag].tag_state);
4907 			pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4908 					    nonemb_cmd.va, nonemb_cmd.dma);
4909 			return -EIO;
4910 		}
4911 		extd_status = (phba->ctrl.mcc_tag_status[tag] &
4912 			       CQE_STATUS_ADDL_MASK) >> CQE_STATUS_ADDL_SHIFT;
4913 		status = phba->ctrl.mcc_tag_status[tag] & CQE_STATUS_MASK;
4914 		free_mcc_wrb(&phba->ctrl, tag);
4915 		resp = (struct be_cmd_resp_hdr *)nonemb_cmd.va;
4916 		sg_copy_from_buffer(job->reply_payload.sg_list,
4917 				    job->reply_payload.sg_cnt,
4918 				    nonemb_cmd.va, (resp->response_length
4919 				    + sizeof(*resp)));
4920 		bsg_reply->reply_payload_rcv_len = resp->response_length;
4921 		bsg_reply->result = status;
4922 		bsg_job_done(job, bsg_reply->result,
4923 			     bsg_reply->reply_payload_rcv_len);
4924 		pci_free_consistent(phba->ctrl.pdev, nonemb_cmd.size,
4925 				    nonemb_cmd.va, nonemb_cmd.dma);
4926 		if (status || extd_status) {
4927 			beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4928 				    "BM_%d : MBX Cmd Failed"
4929 				    " status = %d extd_status = %d\n",
4930 				    status, extd_status);
4931 
4932 			return -EIO;
4933 		} else {
4934 			rc = 0;
4935 		}
4936 		break;
4937 
4938 	default:
4939 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_CONFIG,
4940 				"BM_%d : Unsupported bsg command: 0x%x\n",
4941 				bsg_req->msgcode);
4942 		break;
4943 	}
4944 
4945 	return rc;
4946 }
4947 
4948 static void beiscsi_hba_attrs_init(struct beiscsi_hba *phba)
4949 {
4950 	/* Set the logging parameter */
4951 	beiscsi_log_enable_init(phba, beiscsi_log_enable);
4952 }
4953 
4954 void beiscsi_start_boot_work(struct beiscsi_hba *phba, unsigned int s_handle)
4955 {
4956 	if (phba->boot_struct.boot_kset)
4957 		return;
4958 
4959 	/* skip if boot work is already in progress */
4960 	if (test_and_set_bit(BEISCSI_HBA_BOOT_WORK, &phba->state))
4961 		return;
4962 
4963 	phba->boot_struct.retry = 3;
4964 	phba->boot_struct.tag = 0;
4965 	phba->boot_struct.s_handle = s_handle;
4966 	phba->boot_struct.action = BEISCSI_BOOT_GET_SHANDLE;
4967 	schedule_work(&phba->boot_work);
4968 }
4969 
4970 static ssize_t beiscsi_show_boot_tgt_info(void *data, int type, char *buf)
4971 {
4972 	struct beiscsi_hba *phba = data;
4973 	struct mgmt_session_info *boot_sess = &phba->boot_struct.boot_sess;
4974 	struct mgmt_conn_info *boot_conn = &boot_sess->conn_list[0];
4975 	char *str = buf;
4976 	int rc = -EPERM;
4977 
4978 	switch (type) {
4979 	case ISCSI_BOOT_TGT_NAME:
4980 		rc = sprintf(buf, "%.*s\n",
4981 			    (int)strlen(boot_sess->target_name),
4982 			    (char *)&boot_sess->target_name);
4983 		break;
4984 	case ISCSI_BOOT_TGT_IP_ADDR:
4985 		if (boot_conn->dest_ipaddr.ip_type == BEISCSI_IP_TYPE_V4)
4986 			rc = sprintf(buf, "%pI4\n",
4987 				(char *)&boot_conn->dest_ipaddr.addr);
4988 		else
4989 			rc = sprintf(str, "%pI6\n",
4990 				(char *)&boot_conn->dest_ipaddr.addr);
4991 		break;
4992 	case ISCSI_BOOT_TGT_PORT:
4993 		rc = sprintf(str, "%d\n", boot_conn->dest_port);
4994 		break;
4995 
4996 	case ISCSI_BOOT_TGT_CHAP_NAME:
4997 		rc = sprintf(str,  "%.*s\n",
4998 			     boot_conn->negotiated_login_options.auth_data.chap.
4999 			     target_chap_name_length,
5000 			     (char *)&boot_conn->negotiated_login_options.
5001 			     auth_data.chap.target_chap_name);
5002 		break;
5003 	case ISCSI_BOOT_TGT_CHAP_SECRET:
5004 		rc = sprintf(str,  "%.*s\n",
5005 			     boot_conn->negotiated_login_options.auth_data.chap.
5006 			     target_secret_length,
5007 			     (char *)&boot_conn->negotiated_login_options.
5008 			     auth_data.chap.target_secret);
5009 		break;
5010 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5011 		rc = sprintf(str,  "%.*s\n",
5012 			     boot_conn->negotiated_login_options.auth_data.chap.
5013 			     intr_chap_name_length,
5014 			     (char *)&boot_conn->negotiated_login_options.
5015 			     auth_data.chap.intr_chap_name);
5016 		break;
5017 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5018 		rc = sprintf(str,  "%.*s\n",
5019 			     boot_conn->negotiated_login_options.auth_data.chap.
5020 			     intr_secret_length,
5021 			     (char *)&boot_conn->negotiated_login_options.
5022 			     auth_data.chap.intr_secret);
5023 		break;
5024 	case ISCSI_BOOT_TGT_FLAGS:
5025 		rc = sprintf(str, "2\n");
5026 		break;
5027 	case ISCSI_BOOT_TGT_NIC_ASSOC:
5028 		rc = sprintf(str, "0\n");
5029 		break;
5030 	}
5031 	return rc;
5032 }
5033 
5034 static ssize_t beiscsi_show_boot_ini_info(void *data, int type, char *buf)
5035 {
5036 	struct beiscsi_hba *phba = data;
5037 	char *str = buf;
5038 	int rc = -EPERM;
5039 
5040 	switch (type) {
5041 	case ISCSI_BOOT_INI_INITIATOR_NAME:
5042 		rc = sprintf(str, "%s\n",
5043 			     phba->boot_struct.boot_sess.initiator_iscsiname);
5044 		break;
5045 	}
5046 	return rc;
5047 }
5048 
5049 static ssize_t beiscsi_show_boot_eth_info(void *data, int type, char *buf)
5050 {
5051 	struct beiscsi_hba *phba = data;
5052 	char *str = buf;
5053 	int rc = -EPERM;
5054 
5055 	switch (type) {
5056 	case ISCSI_BOOT_ETH_FLAGS:
5057 		rc = sprintf(str, "2\n");
5058 		break;
5059 	case ISCSI_BOOT_ETH_INDEX:
5060 		rc = sprintf(str, "0\n");
5061 		break;
5062 	case ISCSI_BOOT_ETH_MAC:
5063 		rc  = beiscsi_get_macaddr(str, phba);
5064 		break;
5065 	}
5066 	return rc;
5067 }
5068 
5069 static umode_t beiscsi_tgt_get_attr_visibility(void *data, int type)
5070 {
5071 	umode_t rc = 0;
5072 
5073 	switch (type) {
5074 	case ISCSI_BOOT_TGT_NAME:
5075 	case ISCSI_BOOT_TGT_IP_ADDR:
5076 	case ISCSI_BOOT_TGT_PORT:
5077 	case ISCSI_BOOT_TGT_CHAP_NAME:
5078 	case ISCSI_BOOT_TGT_CHAP_SECRET:
5079 	case ISCSI_BOOT_TGT_REV_CHAP_NAME:
5080 	case ISCSI_BOOT_TGT_REV_CHAP_SECRET:
5081 	case ISCSI_BOOT_TGT_NIC_ASSOC:
5082 	case ISCSI_BOOT_TGT_FLAGS:
5083 		rc = S_IRUGO;
5084 		break;
5085 	}
5086 	return rc;
5087 }
5088 
5089 static umode_t beiscsi_ini_get_attr_visibility(void *data, int type)
5090 {
5091 	umode_t rc = 0;
5092 
5093 	switch (type) {
5094 	case ISCSI_BOOT_INI_INITIATOR_NAME:
5095 		rc = S_IRUGO;
5096 		break;
5097 	}
5098 	return rc;
5099 }
5100 
5101 static umode_t beiscsi_eth_get_attr_visibility(void *data, int type)
5102 {
5103 	umode_t rc = 0;
5104 
5105 	switch (type) {
5106 	case ISCSI_BOOT_ETH_FLAGS:
5107 	case ISCSI_BOOT_ETH_MAC:
5108 	case ISCSI_BOOT_ETH_INDEX:
5109 		rc = S_IRUGO;
5110 		break;
5111 	}
5112 	return rc;
5113 }
5114 
5115 static void beiscsi_boot_kobj_release(void *data)
5116 {
5117 	struct beiscsi_hba *phba = data;
5118 
5119 	scsi_host_put(phba->shost);
5120 }
5121 
5122 static int beiscsi_boot_create_kset(struct beiscsi_hba *phba)
5123 {
5124 	struct boot_struct *bs = &phba->boot_struct;
5125 	struct iscsi_boot_kobj *boot_kobj;
5126 
5127 	if (bs->boot_kset) {
5128 		__beiscsi_log(phba, KERN_ERR,
5129 			      "BM_%d: boot_kset already created\n");
5130 		return 0;
5131 	}
5132 
5133 	bs->boot_kset = iscsi_boot_create_host_kset(phba->shost->host_no);
5134 	if (!bs->boot_kset) {
5135 		__beiscsi_log(phba, KERN_ERR,
5136 			      "BM_%d: boot_kset alloc failed\n");
5137 		return -ENOMEM;
5138 	}
5139 
5140 	/* take a shost reference because the show functions dereference phba */
5141 	if (!scsi_host_get(phba->shost))
5142 		goto free_kset;
5143 
5144 	boot_kobj = iscsi_boot_create_target(bs->boot_kset, 0, phba,
5145 					     beiscsi_show_boot_tgt_info,
5146 					     beiscsi_tgt_get_attr_visibility,
5147 					     beiscsi_boot_kobj_release);
5148 	if (!boot_kobj)
5149 		goto put_shost;
5150 
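	/* each boot kobj takes its own shost reference, dropped in its release */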
5151 	if (!scsi_host_get(phba->shost))
5152 		goto free_kset;
5153 
5154 	boot_kobj = iscsi_boot_create_initiator(bs->boot_kset, 0, phba,
5155 						beiscsi_show_boot_ini_info,
5156 						beiscsi_ini_get_attr_visibility,
5157 						beiscsi_boot_kobj_release);
5158 	if (!boot_kobj)
5159 		goto put_shost;
5160 
5161 	if (!scsi_host_get(phba->shost))
5162 		goto free_kset;
5163 
5164 	boot_kobj = iscsi_boot_create_ethernet(bs->boot_kset, 0, phba,
5165 					       beiscsi_show_boot_eth_info,
5166 					       beiscsi_eth_get_attr_visibility,
5167 					       beiscsi_boot_kobj_release);
5168 	if (!boot_kobj)
5169 		goto put_shost;
5170 
5171 	return 0;
5172 
5173 put_shost:
5174 	scsi_host_put(phba->shost);
5175 free_kset:
5176 	iscsi_boot_destroy_kset(bs->boot_kset);
5177 	bs->boot_kset = NULL;
5178 	return -ENOMEM;
5179 }
5180 
5181 static void beiscsi_boot_work(struct work_struct *work)
5182 {
5183 	struct beiscsi_hba *phba =
5184 		container_of(work, struct beiscsi_hba, boot_work);
5185 	struct boot_struct *bs = &phba->boot_struct;
5186 	unsigned int tag = 0;
5187 
5188 	if (!beiscsi_hba_is_online(phba))
5189 		return;
5190 
5191 	beiscsi_log(phba, KERN_INFO,
5192 		    BEISCSI_LOG_CONFIG | BEISCSI_LOG_MBOX,
5193 		    "BM_%d : %s action %d\n",
5194 		    __func__, phba->boot_struct.action);
5195 
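	/* boot flow state machine: each action issues an async MCC op identified by tag */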
5196 	switch (phba->boot_struct.action) {
5197 	case BEISCSI_BOOT_REOPEN_SESS:
5198 		tag = beiscsi_boot_reopen_sess(phba);
5199 		break;
5200 	case BEISCSI_BOOT_GET_SHANDLE:
5201 		tag = __beiscsi_boot_get_shandle(phba, 1);
5202 		break;
5203 	case BEISCSI_BOOT_GET_SINFO:
5204 		tag = beiscsi_boot_get_sinfo(phba);
5205 		break;
5206 	case BEISCSI_BOOT_LOGOUT_SESS:
5207 		tag = beiscsi_boot_logout_sess(phba);
5208 		break;
5209 	case BEISCSI_BOOT_CREATE_KSET:
5210 		beiscsi_boot_create_kset(phba);
5211 		/*
5212 		 * Make the updated boot_kset visible to all CPUs
5213 		 * before ending the boot work.
5214 		 */
5215 		mb();
5216 		clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
5217 		return;
5218 	}
5219 	if (!tag) {
5220 		if (bs->retry--)
5221 			schedule_work(&phba->boot_work);
5222 		else
5223 			clear_bit(BEISCSI_HBA_BOOT_WORK, &phba->state);
5224 	}
5225 }
5226 
5227 static void beiscsi_eqd_update_work(struct work_struct *work)
5228 {
5229 	struct hwi_context_memory *phwi_context;
5230 	struct be_set_eqd set_eqd[MAX_CPUS];
5231 	struct hwi_controller *phwi_ctrlr;
5232 	struct be_eq_obj *pbe_eq;
5233 	struct beiscsi_hba *phba;
5234 	unsigned int pps, delta;
5235 	struct be_aic_obj *aic;
5236 	int eqd, i, num = 0;
5237 	unsigned long now;
5238 
5239 	phba = container_of(work, struct beiscsi_hba, eqd_update.work);
5240 	if (!beiscsi_hba_is_online(phba))
5241 		return;
5242 
5243 	phwi_ctrlr = phba->phwi_ctrlr;
5244 	phwi_context = phwi_ctrlr->phwi_ctxt;
5245 
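	/* sample all EQs, including the extra MCC EQ used with MSI-X */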
5246 	for (i = 0; i <= phba->num_cpus; i++) {
5247 		aic = &phba->aic_obj[i];
5248 		pbe_eq = &phwi_context->be_eq[i];
5249 		now = jiffies;
5250 		if (!aic->jiffies || time_before(now, aic->jiffies) ||
5251 		    pbe_eq->cq_count < aic->eq_prev) {
5252 			aic->jiffies = now;
5253 			aic->eq_prev = pbe_eq->cq_count;
5254 			continue;
5255 		}
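		/*
		 * pps is the completion rate per second over the sampling
		 * window; (pps / 1500) << 2 below is the driver's heuristic
		 * mapping from completion rate to EQ delay.
		 */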
5256 		delta = jiffies_to_msecs(now - aic->jiffies);
5257 		pps = (((u32)(pbe_eq->cq_count - aic->eq_prev) * 1000) / delta);
5258 		eqd = (pps / 1500) << 2;
5259 
5260 		if (eqd < 8)
5261 			eqd = 0;
5262 		eqd = min_t(u32, eqd, phwi_context->max_eqd);
5263 		eqd = max_t(u32, eqd, phwi_context->min_eqd);
5264 
5265 		aic->jiffies = now;
5266 		aic->eq_prev = pbe_eq->cq_count;
5267 
5268 		if (eqd != aic->prev_eqd) {
5269 			set_eqd[num].delay_multiplier = (eqd * 65)/100;
5270 			set_eqd[num].eq_id = pbe_eq->q.id;
5271 			aic->prev_eqd = eqd;
5272 			num++;
5273 		}
5274 	}
5275 	if (num)
5276 		/* completion of this is ignored */
5277 		beiscsi_modify_eq_delay(phba, set_eqd, num);
5278 
5279 	schedule_delayed_work(&phba->eqd_update,
5280 			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5281 }
5282 
5283 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
5284 {
5285 	int i, status;
5286 
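	/* request one vector per IO EQ plus one for the MCC EQ */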
5287 	for (i = 0; i <= phba->num_cpus; i++)
5288 		phba->msix_entries[i].entry = i;
5289 
5290 	status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
5291 				       phba->num_cpus + 1, phba->num_cpus + 1);
5292 	if (status > 0)
5293 		phba->msix_enabled = true;
5294 }
5295 
5296 static void beiscsi_hw_tpe_check(unsigned long ptr)
5297 {
5298 	struct beiscsi_hba *phba;
5299 	u32 wait;
5300 
5301 	phba = (struct beiscsi_hba *)ptr;
5302 	/* if not TPE, do nothing */
5303 	if (!beiscsi_detect_tpe(phba))
5304 		return;
5305 
5306 	/* wait default 4000ms before recovering */
5307 	wait = 4000;
5308 	if (phba->ue2rp > BEISCSI_UE_DETECT_INTERVAL)
5309 		wait = phba->ue2rp - BEISCSI_UE_DETECT_INTERVAL;
5310 	queue_delayed_work(phba->wq, &phba->recover_port,
5311 			   msecs_to_jiffies(wait));
5312 }
5313 
5314 static void beiscsi_hw_health_check(unsigned long ptr)
5315 {
5316 	struct beiscsi_hba *phba;
5317 
5318 	phba = (struct beiscsi_hba *)ptr;
5320 	if (beiscsi_detect_ue(phba)) {
5321 		__beiscsi_log(phba, KERN_ERR,
5322 			      "BM_%d : port in error: %lx\n", phba->state);
5323 		/* sessions are no longer valid, so first fail the sessions */
5324 		queue_work(phba->wq, &phba->sess_work);
5325 
5326 		/* detect UER supported */
5327 		if (!test_bit(BEISCSI_HBA_UER_SUPP, &phba->state))
5328 			return;
5329 		/* modify this timer to check TPE */
5330 		phba->hw_check.function = beiscsi_hw_tpe_check;
5331 	}
5332 
5333 	mod_timer(&phba->hw_check,
5334 		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5335 }
5336 
5337 /*
5338  * beiscsi_enable_port()- Enable a previously disabled port.
5339  * Only the port resources freed by beiscsi_disable_port() are reallocated.
5340  * This is called from the HBA error handling path.
5341  *
5342  * @phba: Instance of driver private structure
5343  *
5344  */
5345 static int beiscsi_enable_port(struct beiscsi_hba *phba)
5346 {
5347 	struct hwi_context_memory *phwi_context;
5348 	struct hwi_controller *phwi_ctrlr;
5349 	struct be_eq_obj *pbe_eq;
5350 	int ret, i;
5351 
5352 	if (test_bit(BEISCSI_HBA_ONLINE, &phba->state)) {
5353 		__beiscsi_log(phba, KERN_ERR,
5354 			      "BM_%d : %s : port is online %lx\n",
5355 			      __func__, phba->state);
5356 		return 0;
5357 	}
5358 
5359 	ret = beiscsi_init_sliport(phba);
5360 	if (ret)
5361 		return ret;
5362 
5363 	if (enable_msix)
5364 		find_num_cpus(phba);
5365 	else
5366 		phba->num_cpus = 1;
5367 	if (enable_msix) {
5368 		beiscsi_msix_enable(phba);
5369 		if (!phba->msix_enabled)
5370 			phba->num_cpus = 1;
5371 	}
5372 
5373 	beiscsi_get_params(phba);
5374 	/* Re-enable UER. If a different TPE occurs later, it is recoverable. */
5375 	beiscsi_set_uer_feature(phba);
5376 
5377 	phba->shost->max_id = phba->params.cxns_per_ctrl;
5378 	phba->shost->can_queue = phba->params.ios_per_ctrl;
5379 	ret = beiscsi_init_port(phba);
5380 	if (ret < 0) {
5381 		__beiscsi_log(phba, KERN_ERR,
5382 			      "BM_%d : init port failed\n");
5383 		goto disable_msix;
5384 	}
5385 
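	/* MCC tags are 1-based; tag 0 means no tag could be allocated */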
5386 	for (i = 0; i < MAX_MCC_CMD; i++) {
5387 		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5388 		phba->ctrl.mcc_tag[i] = i + 1;
5389 		phba->ctrl.mcc_tag_status[i + 1] = 0;
5390 		phba->ctrl.mcc_tag_available++;
5391 	}
5392 
5393 	phwi_ctrlr = phba->phwi_ctrlr;
5394 	phwi_context = phwi_ctrlr->phwi_ctxt;
5395 	for (i = 0; i < phba->num_cpus; i++) {
5396 		pbe_eq = &phwi_context->be_eq[i];
5397 		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
5398 	}
5399 
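	/* after the loop, i == num_cpus: with MSI-X this is the MCC EQ */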
5400 	i = (phba->msix_enabled) ? i : 0;
5401 	/* Work item for MCC handling */
5402 	pbe_eq = &phwi_context->be_eq[i];
5403 	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
5404 
5405 	ret = beiscsi_init_irqs(phba);
5406 	if (ret < 0) {
5407 		__beiscsi_log(phba, KERN_ERR,
5408 			      "BM_%d : setup IRQs failed %d\n", ret);
5409 		goto cleanup_port;
5410 	}
5411 	hwi_enable_intr(phba);
5412 	/* port operational: clear all error bits */
5413 	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
5414 	__beiscsi_log(phba, KERN_INFO,
5415 		      "BM_%d : port online: 0x%lx\n", phba->state);
5416 
5417 	/* start hw_check timer and eqd_update work */
5418 	schedule_delayed_work(&phba->eqd_update,
5419 			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5420 
5421 	/*
5422 	 * The timer function is switched at runtime for TPE detection.
5423 	 * Always reinitialize it so the health check runs first.
5424 	 */
5425 	phba->hw_check.function = beiscsi_hw_health_check;
5426 	mod_timer(&phba->hw_check,
5427 		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5428 	return 0;
5429 
5430 cleanup_port:
5431 	for (i = 0; i < phba->num_cpus; i++) {
5432 		pbe_eq = &phwi_context->be_eq[i];
5433 		irq_poll_disable(&pbe_eq->iopoll);
5434 	}
5435 	hwi_cleanup_port(phba);
5436 
5437 disable_msix:
5438 	if (phba->msix_enabled)
5439 		pci_disable_msix(phba->pcidev);
5440 
5441 	return ret;
5442 }
5443 
5444 /*
5445  * beiscsi_disable_port()- Disable the port and clean up driver resources.
5446  * This is called from HBA error handling and driver removal.
5447  * @phba: Instance of driver private structure
5448  * @unload: indicates the driver is unloading
5449  *
5450  * Frees the OS and HW resources held by the driver.
5451  */
5452 static void beiscsi_disable_port(struct beiscsi_hba *phba, int unload)
5453 {
5454 	struct hwi_context_memory *phwi_context;
5455 	struct hwi_controller *phwi_ctrlr;
5456 	struct be_eq_obj *pbe_eq;
5457 	unsigned int i, msix_vec;
5458 
5459 	if (!test_and_clear_bit(BEISCSI_HBA_ONLINE, &phba->state))
5460 		return;
5461 
5462 	phwi_ctrlr = phba->phwi_ctrlr;
5463 	phwi_context = phwi_ctrlr->phwi_ctxt;
5464 	hwi_disable_intr(phba);
5465 	if (phba->msix_enabled) {
5466 		for (i = 0; i <= phba->num_cpus; i++) {
5467 			msix_vec = phba->msix_entries[i].vector;
5468 			free_irq(msix_vec, &phwi_context->be_eq[i]);
5469 			kfree(phba->msi_name[i]);
5470 		}
5471 	} else if (phba->pcidev->irq) {
5472 		free_irq(phba->pcidev->irq, phba);
5473 	}
5474 	pci_disable_msix(phba->pcidev);
5475 
5476 	for (i = 0; i < phba->num_cpus; i++) {
5477 		pbe_eq = &phwi_context->be_eq[i];
5478 		irq_poll_disable(&pbe_eq->iopoll);
5479 	}
5480 	cancel_delayed_work_sync(&phba->eqd_update);
5481 	cancel_work_sync(&phba->boot_work);
5482 	/* WQ might still be running; cancel queued mcc_work if we are not exiting */
5483 	if (!unload && beiscsi_hba_in_error(phba)) {
5484 		pbe_eq = &phwi_context->be_eq[i];
5485 		cancel_work_sync(&pbe_eq->mcc_work);
5486 	}
5487 	hwi_cleanup_port(phba);
5488 	beiscsi_cleanup_port(phba);
5489 }
5490 
5491 static void beiscsi_sess_work(struct work_struct *work)
5492 {
5493 	struct beiscsi_hba *phba;
5494 
5495 	phba = container_of(work, struct beiscsi_hba, sess_work);
5496 	/*
5497 	 * This work gets scheduled only in case of HBA error.
5498 	 * Old sessions are gone so need to be re-established.
5499 	 * iscsi_session_failure needs process context hence this work.
5500 	 */
5501 	iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
5502 }
5503 
5504 static void beiscsi_recover_port(struct work_struct *work)
5505 {
5506 	struct beiscsi_hba *phba;
5507 
5508 	phba = container_of(work, struct beiscsi_hba, recover_port.work);
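	/* bounce the port: disable, then re-enable to recover from the error */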
5509 	beiscsi_disable_port(phba, 0);
5510 	beiscsi_enable_port(phba);
5511 }
5512 
5513 static pci_ers_result_t beiscsi_eeh_err_detected(struct pci_dev *pdev,
5514 		pci_channel_state_t state)
5515 {
5516 	struct beiscsi_hba *phba = NULL;
5517 
5518 	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5519 	set_bit(BEISCSI_HBA_PCI_ERR, &phba->state);
5520 
5521 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5522 		    "BM_%d : EEH error detected\n");
5523 
5524 	/* first stop UE detection when PCI error detected */
5525 	del_timer_sync(&phba->hw_check);
5526 	cancel_delayed_work_sync(&phba->recover_port);
5527 
5528 	/* sessions are no longer valid, so first fail the sessions */
5529 	iscsi_host_for_each_session(phba->shost, beiscsi_session_fail);
5530 	beiscsi_disable_port(phba, 0);
5531 
5532 	if (state == pci_channel_io_perm_failure) {
5533 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5534 			    "BM_%d : EEH : State PERM Failure");
5535 		return PCI_ERS_RESULT_DISCONNECT;
5536 	}
5537 
5538 	pci_disable_device(pdev);
5539 
5540 	/* The error could cause the FW to trigger a flash debug dump.
5541 	 * Resetting the card while the flash dump is in progress
5542 	 * can cause it not to recover; wait for it to finish.
5543 	 * Wait only for the first function, as this is needed only
5544 	 * once per adapter.
5545 	 */
5546 	if (pdev->devfn == 0)
5547 		ssleep(30);
5548 
5549 	return PCI_ERS_RESULT_NEED_RESET;
5550 }
5551 
5552 static pci_ers_result_t beiscsi_eeh_reset(struct pci_dev *pdev)
5553 {
5554 	struct beiscsi_hba *phba = NULL;
5555 	int status = 0;
5556 
5557 	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5558 
5559 	beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5560 		    "BM_%d : EEH Reset\n");
5561 
5562 	status = pci_enable_device(pdev);
5563 	if (status)
5564 		return PCI_ERS_RESULT_DISCONNECT;
5565 
5566 	pci_set_master(pdev);
5567 	pci_set_power_state(pdev, PCI_D0);
5568 	pci_restore_state(pdev);
5569 
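	/* a nonzero return means the FW came back ready after the reset */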
5570 	status = beiscsi_check_fw_rdy(phba);
5571 	if (status) {
5572 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5573 			    "BM_%d : EEH Reset Completed\n");
5574 	} else {
5575 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5576 			    "BM_%d : EEH Reset Completion Failure\n");
5577 		return PCI_ERS_RESULT_DISCONNECT;
5578 	}
5579 
5580 	pci_cleanup_aer_uncorrect_error_status(pdev);
5581 	return PCI_ERS_RESULT_RECOVERED;
5582 }
5583 
5584 static void beiscsi_eeh_resume(struct pci_dev *pdev)
5585 {
5586 	struct beiscsi_hba *phba;
5587 	int ret;
5588 
5589 	phba = (struct beiscsi_hba *)pci_get_drvdata(pdev);
5590 	pci_save_state(pdev);
5591 
5592 	ret = beiscsi_enable_port(phba);
5593 	if (ret)
5594 		__beiscsi_log(phba, KERN_ERR,
5595 			      "BM_%d : AER EEH resume failed\n");
5596 }
5597 
5598 static int beiscsi_dev_probe(struct pci_dev *pcidev,
5599 			     const struct pci_device_id *id)
5600 {
5601 	struct hwi_context_memory *phwi_context;
5602 	struct hwi_controller *phwi_ctrlr;
5603 	struct beiscsi_hba *phba = NULL;
5604 	struct be_eq_obj *pbe_eq;
5605 	unsigned int s_handle;
5606 	char wq_name[20];
5607 	int ret, i;
5608 
5609 	ret = beiscsi_enable_pci(pcidev);
5610 	if (ret < 0) {
5611 		dev_err(&pcidev->dev,
5612 			"beiscsi_dev_probe - Failed to enable pci device\n");
5613 		return ret;
5614 	}
5615 
5616 	phba = beiscsi_hba_alloc(pcidev);
5617 	if (!phba) {
5618 		dev_err(&pcidev->dev,
5619 			"beiscsi_dev_probe - Failed in beiscsi_hba_alloc\n");
5620 		ret = -ENOMEM;
5621 		goto disable_pci;
5622 	}
5623 
5624 	/* Enable EEH reporting */
5625 	ret = pci_enable_pcie_error_reporting(pcidev);
5626 	if (ret)
5627 		beiscsi_log(phba, KERN_WARNING, BEISCSI_LOG_INIT,
5628 			    "BM_%d : PCIe Error Reporting "
5629 			    "Enabling Failed\n");
5630 
5631 	pci_save_state(pcidev);
5632 
5633 	/* Initialize driver configuration parameters */
5634 	beiscsi_hba_attrs_init(phba);
5635 
5636 	phba->mac_addr_set = false;
5637 
5638 	switch (pcidev->device) {
5639 	case BE_DEVICE_ID1:
5640 	case OC_DEVICE_ID1:
5641 	case OC_DEVICE_ID2:
5642 		phba->generation = BE_GEN2;
5643 		phba->iotask_fn = beiscsi_iotask;
5644 		dev_warn(&pcidev->dev,
5645 			 "Obsolete/Unsupported BE2 Adapter Family\n");
5646 		break;
5647 	case BE_DEVICE_ID2:
5648 	case OC_DEVICE_ID3:
5649 		phba->generation = BE_GEN3;
5650 		phba->iotask_fn = beiscsi_iotask;
5651 		break;
5652 	case OC_SKH_ID1:
5653 		phba->generation = BE_GEN4;
5654 		phba->iotask_fn = beiscsi_iotask_v2;
5655 		break;
5656 	default:
5657 		phba->generation = 0;
5658 	}
5659 
5660 	ret = be_ctrl_init(phba, pcidev);
5661 	if (ret) {
5662 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5663 			    "BM_%d : be_ctrl_init failed\n");
5664 		goto hba_free;
5665 	}
5666 
5667 	ret = beiscsi_init_sliport(phba);
5668 	if (ret)
5669 		goto hba_free;
5670 
5671 	spin_lock_init(&phba->io_sgl_lock);
5672 	spin_lock_init(&phba->mgmt_sgl_lock);
5673 	spin_lock_init(&phba->async_pdu_lock);
5674 	ret = beiscsi_get_fw_config(&phba->ctrl, phba);
5675 	if (ret != 0) {
5676 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5677 			    "BM_%d : Error getting fw config\n");
5678 		goto free_port;
5679 	}
5680 	beiscsi_get_port_name(&phba->ctrl, phba);
5681 	beiscsi_get_params(phba);
5682 	beiscsi_set_uer_feature(phba);
5683 
5684 	if (enable_msix)
5685 		find_num_cpus(phba);
5686 	else
5687 		phba->num_cpus = 1;
5688 
5689 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5690 		    "BM_%d : num_cpus = %d\n",
5691 		    phba->num_cpus);
5692 
5693 	if (enable_msix) {
5694 		beiscsi_msix_enable(phba);
5695 		if (!phba->msix_enabled)
5696 			phba->num_cpus = 1;
5697 	}
5698 
5699 	phba->shost->max_id = phba->params.cxns_per_ctrl;
5700 	phba->shost->can_queue = phba->params.ios_per_ctrl;
5701 	ret = beiscsi_get_memory(phba);
5702 	if (ret < 0) {
5703 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5704 			    "BM_%d : alloc host mem failed\n");
5705 		goto free_port;
5706 	}
5707 
5708 	ret = beiscsi_init_port(phba);
5709 	if (ret < 0) {
5710 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5711 			    "BM_%d : init port failed\n");
5712 		beiscsi_free_mem(phba);
5713 		goto free_port;
5714 	}
5715 
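	/* initialize the 1-based MCC tag pool (tag 0 is never a valid tag) */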
5716 	for (i = 0; i < MAX_MCC_CMD; i++) {
5717 		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
5718 		phba->ctrl.mcc_tag[i] = i + 1;
5719 		phba->ctrl.mcc_tag_status[i + 1] = 0;
5720 		phba->ctrl.mcc_tag_available++;
5721 		memset(&phba->ctrl.ptag_state[i].tag_mem_state, 0,
5722 		       sizeof(struct be_dma_mem));
5723 	}
5724 
5725 	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
5726 
5727 	snprintf(wq_name, sizeof(wq_name), "beiscsi_%02x_wq",
5728 		 phba->shost->host_no);
5729 	phba->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, wq_name);
5730 	if (!phba->wq) {
5731 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5732 			    "BM_%d : beiscsi_dev_probe-"
5733 			    "Failed to allocate work queue\n");
5734 		ret = -ENOMEM;
5735 		goto free_twq;
5736 	}
5737 
5738 	INIT_DELAYED_WORK(&phba->eqd_update, beiscsi_eqd_update_work);
5739 
5740 	phwi_ctrlr = phba->phwi_ctrlr;
5741 	phwi_context = phwi_ctrlr->phwi_ctxt;
5742 
5743 	for (i = 0; i < phba->num_cpus; i++) {
5744 		pbe_eq = &phwi_context->be_eq[i];
5745 		irq_poll_init(&pbe_eq->iopoll, be_iopoll_budget, be_iopoll);
5746 	}
5747 
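	/* with MSI-X enabled, i == num_cpus here selects the dedicated MCC EQ */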
5748 	i = (phba->msix_enabled) ? i : 0;
5749 	/* Work item for MCC handling */
5750 	pbe_eq = &phwi_context->be_eq[i];
5751 	INIT_WORK(&pbe_eq->mcc_work, beiscsi_mcc_work);
5752 
5753 	ret = beiscsi_init_irqs(phba);
5754 	if (ret < 0) {
5755 		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
5756 			    "BM_%d : beiscsi_dev_probe-"
5757 			    "Failed to beiscsi_init_irqs\n");
5758 		goto free_blkenbld;
5759 	}
5760 	hwi_enable_intr(phba);
5761 
5762 	ret = iscsi_host_add(phba->shost, &phba->pcidev->dev);
5763 	if (ret)
5764 		goto free_blkenbld;
5765 
5766 	/* set online bit after port is operational */
5767 	set_bit(BEISCSI_HBA_ONLINE, &phba->state);
5768 	__beiscsi_log(phba, KERN_INFO,
5769 		      "BM_%d : port online: 0x%lx\n", phba->state);
5770 
5771 	INIT_WORK(&phba->boot_work, beiscsi_boot_work);
5772 	ret = beiscsi_boot_get_shandle(phba, &s_handle);
5773 	if (ret > 0) {
5774 		beiscsi_start_boot_work(phba, s_handle);
5775 		/*
5776 		 * Set this bit after starting the work so that
5777 		 * probe handles it first.
5778 		 * An ASYNC event can also schedule this work.
5779 		 */
5780 		set_bit(BEISCSI_HBA_BOOT_FOUND, &phba->state);
5781 	}
5782 
5783 	beiscsi_iface_create_default(phba);
5784 	schedule_delayed_work(&phba->eqd_update,
5785 			      msecs_to_jiffies(BEISCSI_EQD_UPDATE_INTERVAL));
5786 
5787 	INIT_WORK(&phba->sess_work, beiscsi_sess_work);
5788 	INIT_DELAYED_WORK(&phba->recover_port, beiscsi_recover_port);
5789 	/*
5790 	 * Start UE detection here. A UE before this point would stall
5791 	 * the probe and eventually fail it.
5792 	 */
5793 	init_timer(&phba->hw_check);
5794 	phba->hw_check.function = beiscsi_hw_health_check;
5795 	phba->hw_check.data = (unsigned long)phba;
5796 	mod_timer(&phba->hw_check,
5797 		  jiffies + msecs_to_jiffies(BEISCSI_UE_DETECT_INTERVAL));
5798 	beiscsi_log(phba, KERN_INFO, BEISCSI_LOG_INIT,
5799 		    "\n\n\n BM_%d : SUCCESS - DRIVER LOADED\n\n\n");
5800 	return 0;
5801 
5802 free_blkenbld:
5803 	destroy_workqueue(phba->wq);
5804 	for (i = 0; i < phba->num_cpus; i++) {
5805 		pbe_eq = &phwi_context->be_eq[i];
5806 		irq_poll_disable(&pbe_eq->iopoll);
5807 	}
5808 free_twq:
5809 	hwi_cleanup_port(phba);
5810 	beiscsi_cleanup_port(phba);
5811 	beiscsi_free_mem(phba);
5812 free_port:
5813 	pci_free_consistent(phba->pcidev,
5814 			    phba->ctrl.mbox_mem_alloced.size,
5815 			    phba->ctrl.mbox_mem_alloced.va,
5816 			   phba->ctrl.mbox_mem_alloced.dma);
5817 	beiscsi_unmap_pci_function(phba);
5818 hba_free:
5819 	if (phba->msix_enabled)
5820 		pci_disable_msix(phba->pcidev);
5821 	pci_dev_put(phba->pcidev);
5822 	iscsi_host_free(phba->shost);
5823 	pci_set_drvdata(pcidev, NULL);
5824 disable_pci:
5825 	pci_release_regions(pcidev);
5826 	pci_disable_device(pcidev);
5827 	return ret;
5828 }
5829 
5830 static void beiscsi_remove(struct pci_dev *pcidev)
5831 {
5832 	struct beiscsi_hba *phba = NULL;
5833 
5834 	phba = pci_get_drvdata(pcidev);
5835 	if (!phba) {
5836 		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
5837 		return;
5838 	}
5839 
5840 	/* first stop UE detection before unloading */
5841 	del_timer_sync(&phba->hw_check);
5842 	cancel_delayed_work_sync(&phba->recover_port);
5843 	cancel_work_sync(&phba->sess_work);
5844 
5845 	beiscsi_iface_destroy_default(phba);
5846 	iscsi_host_remove(phba->shost);
5847 	beiscsi_disable_port(phba, 1);
5848 
5849 	/* destroy the boot kset only after boot_work has been cancelled */
5850 	iscsi_boot_destroy_kset(phba->boot_struct.boot_kset);
5851 
5852 	/* free all resources */
5853 	destroy_workqueue(phba->wq);
5854 	beiscsi_free_mem(phba);
5855 
5856 	/* ctrl uninit */
5857 	beiscsi_unmap_pci_function(phba);
5858 	pci_free_consistent(phba->pcidev,
5859 			    phba->ctrl.mbox_mem_alloced.size,
5860 			    phba->ctrl.mbox_mem_alloced.va,
5861 			    phba->ctrl.mbox_mem_alloced.dma);
5862 
5863 	pci_dev_put(phba->pcidev);
5864 	iscsi_host_free(phba->shost);
5865 	pci_disable_pcie_error_reporting(pcidev);
5866 	pci_set_drvdata(pcidev, NULL);
5867 	pci_release_regions(pcidev);
5868 	pci_disable_device(pcidev);
5869 }
5870 
5872 static struct pci_error_handlers beiscsi_eeh_handlers = {
5873 	.error_detected = beiscsi_eeh_err_detected,
5874 	.slot_reset = beiscsi_eeh_reset,
5875 	.resume = beiscsi_eeh_resume,
5876 };
5877 
5878 struct iscsi_transport beiscsi_iscsi_transport = {
5879 	.owner = THIS_MODULE,
5880 	.name = DRV_NAME,
5881 	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
5882 		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
5883 	.create_session = beiscsi_session_create,
5884 	.destroy_session = beiscsi_session_destroy,
5885 	.create_conn = beiscsi_conn_create,
5886 	.bind_conn = beiscsi_conn_bind,
5887 	.destroy_conn = iscsi_conn_teardown,
5888 	.attr_is_visible = beiscsi_attr_is_visible,
5889 	.set_iface_param = beiscsi_iface_set_param,
5890 	.get_iface_param = beiscsi_iface_get_param,
5891 	.set_param = beiscsi_set_param,
5892 	.get_conn_param = iscsi_conn_get_param,
5893 	.get_session_param = iscsi_session_get_param,
5894 	.get_host_param = beiscsi_get_host_param,
5895 	.start_conn = beiscsi_conn_start,
5896 	.stop_conn = iscsi_conn_stop,
5897 	.send_pdu = iscsi_conn_send_pdu,
5898 	.xmit_task = beiscsi_task_xmit,
5899 	.cleanup_task = beiscsi_cleanup_task,
5900 	.alloc_pdu = beiscsi_alloc_pdu,
5901 	.parse_pdu_itt = beiscsi_parse_pdu,
5902 	.get_stats = beiscsi_conn_get_stats,
5903 	.get_ep_param = beiscsi_ep_get_param,
5904 	.ep_connect = beiscsi_ep_connect,
5905 	.ep_poll = beiscsi_ep_poll,
5906 	.ep_disconnect = beiscsi_ep_disconnect,
5907 	.session_recovery_timedout = iscsi_session_recovery_timedout,
5908 	.bsg_request = beiscsi_bsg_request,
5909 };
5910 
5911 static struct pci_driver beiscsi_pci_driver = {
5912 	.name = DRV_NAME,
5913 	.probe = beiscsi_dev_probe,
5914 	.remove = beiscsi_remove,
5915 	.id_table = beiscsi_pci_id_table,
5916 	.err_handler = &beiscsi_eeh_handlers
5917 };
5918 
5919 static int __init beiscsi_module_init(void)
5920 {
5921 	int ret;
5922 
5923 	beiscsi_scsi_transport =
5924 			iscsi_register_transport(&beiscsi_iscsi_transport);
5925 	if (!beiscsi_scsi_transport) {
5926 		printk(KERN_ERR
5927 		       "beiscsi_module_init - Unable to  register beiscsi transport.\n");
5928 		return -ENOMEM;
5929 	}
5930 	printk(KERN_INFO "In beiscsi_module_init, tt=%p\n",
5931 	       &beiscsi_iscsi_transport);
5932 
5933 	ret = pci_register_driver(&beiscsi_pci_driver);
5934 	if (ret) {
5935 		printk(KERN_ERR
5936 		       "beiscsi_module_init - Unable to  register beiscsi pci driver.\n");
5937 		goto unregister_iscsi_transport;
5938 	}
5939 	return 0;
5940 
5941 unregister_iscsi_transport:
5942 	iscsi_unregister_transport(&beiscsi_iscsi_transport);
5943 	return ret;
5944 }
5945 
5946 static void __exit beiscsi_module_exit(void)
5947 {
5948 	pci_unregister_driver(&beiscsi_pci_driver);
5949 	iscsi_unregister_transport(&beiscsi_iscsi_transport);
5950 }
5951 
5952 module_init(beiscsi_module_init);
5953 module_exit(beiscsi_module_exit);
5954