xref: /linux/drivers/scsi/be2iscsi/be_main.c (revision b3b77c8caef1750ebeea1054e39e358550ea9f55)
1 /**
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Written by: Jayamohan Kallickal (jayamohank@serverengines.com)
11  *
12  * Contact Information:
13  * linux-drivers@serverengines.com
14  *
15  *  ServerEngines
16  * 209 N. Fair Oaks Ave
17  * Sunnyvale, CA 94085
18  *
19  */
20 #include <linux/reboot.h>
21 #include <linux/delay.h>
22 #include <linux/slab.h>
23 #include <linux/interrupt.h>
24 #include <linux/blkdev.h>
25 #include <linux/pci.h>
26 #include <linux/string.h>
27 #include <linux/kernel.h>
28 #include <linux/semaphore.h>
29 
30 #include <scsi/libiscsi.h>
31 #include <scsi/scsi_transport_iscsi.h>
32 #include <scsi/scsi_transport.h>
33 #include <scsi/scsi_cmnd.h>
34 #include <scsi/scsi_device.h>
35 #include <scsi/scsi_host.h>
36 #include <scsi/scsi.h>
37 #include "be_main.h"
38 #include "be_iscsi.h"
39 #include "be_mgmt.h"
40 
41 static unsigned int be_iopoll_budget = 10;
42 static unsigned int be_max_phys_size = 64;
43 static unsigned int enable_msix = 1;
44 
46 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
47 MODULE_AUTHOR("ServerEngines Corporation");
48 MODULE_LICENSE("GPL");
module_param(be_iopoll_budget, uint, 0);
module_param(enable_msix, uint, 0);
module_param(be_max_phys_size, uint, S_IRUGO);
MODULE_PARM_DESC(be_max_phys_size, "Maximum Size (In Kilobytes) of physically"
				   " contiguous memory that can be allocated."
				   " Range is 16 - 128");
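/*
 * Example usage (illustrative values, not a recommendation):
 *	modprobe be2iscsi be_max_phys_size=64 enable_msix=1
 * caps physically contiguous allocations at 64KB and leaves MSI-X on.
 */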
55 
56 static int beiscsi_slave_configure(struct scsi_device *sdev)
57 {
58 	blk_queue_max_segment_size(sdev->request_queue, 65536);
59 	return 0;
60 }
61 
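/**
 * beiscsi_eh_abort - eh_abort_handler for a single command
 * @sc: SCSI command to be aborted
 *
 * Builds a one-entry invalidate table for the task's ICD, asks the
 * adapter to invalidate it via mgmt_invalidate_icds(), waits for the
 * MCC completion and then lets libiscsi finish the abort.
 */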
62 static int beiscsi_eh_abort(struct scsi_cmnd *sc)
63 {
64 	struct iscsi_cls_session *cls_session;
65 	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
66 	struct beiscsi_io_task *aborted_io_task;
67 	struct iscsi_conn *conn;
68 	struct beiscsi_conn *beiscsi_conn;
69 	struct beiscsi_hba *phba;
70 	struct iscsi_session *session;
71 	struct invalidate_command_table *inv_tbl;
72 	unsigned int cid, tag, num_invalidate;
73 
74 	cls_session = starget_to_session(scsi_target(sc->device));
75 	session = cls_session->dd_data;
76 
77 	spin_lock_bh(&session->lock);
78 	if (!aborted_task || !aborted_task->sc) {
79 		/* we raced */
80 		spin_unlock_bh(&session->lock);
81 		return SUCCESS;
82 	}
83 
84 	aborted_io_task = aborted_task->dd_data;
85 	if (!aborted_io_task->scsi_cmnd) {
86 		/* raced or invalid command */
87 		spin_unlock_bh(&session->lock);
88 		return SUCCESS;
89 	}
90 	spin_unlock_bh(&session->lock);
91 	conn = aborted_task->conn;
92 	beiscsi_conn = conn->dd_data;
93 	phba = beiscsi_conn->phba;
94 
95 	/* invalidate iocb */
96 	cid = beiscsi_conn->beiscsi_conn_cid;
97 	inv_tbl = phba->inv_tbl;
98 	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
99 	inv_tbl->cid = cid;
100 	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
101 	num_invalidate = 1;
102 	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
103 	if (!tag) {
104 		shost_printk(KERN_WARNING, phba->shost,
105 			     "mgmt_invalidate_icds could not be"
106 			     " submitted\n");
107 		return FAILED;
108 	} else {
109 		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
110 					 phba->ctrl.mcc_numtag[tag]);
111 		free_mcc_tag(&phba->ctrl, tag);
112 	}
113 
114 	return iscsi_eh_abort(sc);
115 }
116 
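/**
 * beiscsi_eh_device_reset - eh_device_reset_handler (LUN reset)
 * @sc: SCSI command that triggered the reset
 *
 * Collects the ICDs of every active task on the lead connection that
 * addresses the same LUN, invalidates them in one MCC request and
 * then hands off to iscsi_eh_device_reset().
 */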
117 static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
118 {
119 	struct iscsi_task *abrt_task;
120 	struct beiscsi_io_task *abrt_io_task;
121 	struct iscsi_conn *conn;
122 	struct beiscsi_conn *beiscsi_conn;
123 	struct beiscsi_hba *phba;
124 	struct iscsi_session *session;
125 	struct iscsi_cls_session *cls_session;
126 	struct invalidate_command_table *inv_tbl;
127 	unsigned int cid, tag, i, num_invalidate;
128 	int rc = FAILED;
129 
130 	/* invalidate iocbs */
131 	cls_session = starget_to_session(scsi_target(sc->device));
132 	session = cls_session->dd_data;
133 	spin_lock_bh(&session->lock);
134 	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
135 		goto unlock;
136 
137 	conn = session->leadconn;
138 	beiscsi_conn = conn->dd_data;
139 	phba = beiscsi_conn->phba;
140 	cid = beiscsi_conn->beiscsi_conn_cid;
141 	inv_tbl = phba->inv_tbl;
142 	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
143 	num_invalidate = 0;
144 	for (i = 0; i < conn->session->cmds_max; i++) {
145 		abrt_task = conn->session->cmds[i];
146 		abrt_io_task = abrt_task->dd_data;
147 		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
148 			continue;
149 
		if (sc->device->lun != abrt_task->sc->device->lun)
151 			continue;
152 
153 		inv_tbl->cid = cid;
154 		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
155 		num_invalidate++;
156 		inv_tbl++;
157 	}
158 	spin_unlock_bh(&session->lock);
159 	inv_tbl = phba->inv_tbl;
160 
161 	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
162 	if (!tag) {
163 		shost_printk(KERN_WARNING, phba->shost,
164 			     "mgmt_invalidate_icds could not be"
165 			     " submitted\n");
166 		return FAILED;
167 	} else {
168 		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
169 					 phba->ctrl.mcc_numtag[tag]);
170 		free_mcc_tag(&phba->ctrl, tag);
171 	}
172 
173 	return iscsi_eh_device_reset(sc);
174 unlock:
175 	spin_unlock_bh(&session->lock);
176 	return rc;
177 }
178 
179 /*------------------- PCI Driver operations and data ----------------- */
180 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
181 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
182 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
183 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
184 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
185 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
186 	{ 0 }
187 };
188 MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
189 
190 static struct scsi_host_template beiscsi_sht = {
191 	.module = THIS_MODULE,
	.name = "ServerEngines 10GbE open-iscsi Initiator Driver",
193 	.proc_name = DRV_NAME,
194 	.queuecommand = iscsi_queuecommand,
195 	.change_queue_depth = iscsi_change_queue_depth,
196 	.slave_configure = beiscsi_slave_configure,
197 	.target_alloc = iscsi_target_alloc,
198 	.eh_abort_handler = beiscsi_eh_abort,
199 	.eh_device_reset_handler = beiscsi_eh_device_reset,
200 	.eh_target_reset_handler = iscsi_eh_session_reset,
201 	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
202 	.can_queue = BE2_IO_DEPTH,
203 	.this_id = -1,
204 	.max_sectors = BEISCSI_MAX_SECTORS,
205 	.cmd_per_lun = BEISCSI_CMD_PER_LUN,
206 	.use_clustering = ENABLE_CLUSTERING,
207 };
208 
209 static struct scsi_transport_template *beiscsi_scsi_transport;
210 
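/**
 * beiscsi_hba_alloc - allocate and register the iSCSI host
 * @pcidev: PCI function backing this host
 *
 * Allocates a Scsi_Host with a beiscsi_hba private area, fills in the
 * SCSI limits and adds it to the midlayer.  Returns NULL on failure.
 */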
211 static struct beiscsi_hba *beiscsi_hba_alloc(struct pci_dev *pcidev)
212 {
213 	struct beiscsi_hba *phba;
214 	struct Scsi_Host *shost;
215 
216 	shost = iscsi_host_alloc(&beiscsi_sht, sizeof(*phba), 0);
217 	if (!shost) {
		dev_err(&pcidev->dev,
			"beiscsi_hba_alloc - iscsi_host_alloc failed\n");
220 		return NULL;
221 	}
222 	shost->dma_boundary = pcidev->dma_mask;
223 	shost->max_id = BE2_MAX_SESSIONS;
224 	shost->max_channel = 0;
225 	shost->max_cmd_len = BEISCSI_MAX_CMD_LEN;
226 	shost->max_lun = BEISCSI_NUM_MAX_LUN;
227 	shost->transportt = beiscsi_scsi_transport;
228 	phba = iscsi_host_priv(shost);
229 	memset(phba, 0, sizeof(*phba));
230 	phba->shost = shost;
231 	phba->pcidev = pci_dev_get(pcidev);
232 	pci_set_drvdata(pcidev, phba);
233 
234 	if (iscsi_host_add(shost, &phba->pcidev->dev))
235 		goto free_devices;
236 	return phba;
237 
238 free_devices:
239 	pci_dev_put(phba->pcidev);
240 	iscsi_host_free(phba->shost);
241 	return NULL;
242 }
243 
244 static void beiscsi_unmap_pci_function(struct beiscsi_hba *phba)
245 {
246 	if (phba->csr_va) {
247 		iounmap(phba->csr_va);
248 		phba->csr_va = NULL;
249 	}
250 	if (phba->db_va) {
251 		iounmap(phba->db_va);
252 		phba->db_va = NULL;
253 	}
254 	if (phba->pci_va) {
255 		iounmap(phba->pci_va);
256 		phba->pci_va = NULL;
257 	}
258 }
259 
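/**
 * beiscsi_map_pci_bars - ioremap the controller BARs
 * @phba: driver instance
 * @pcidev: PCI function to map
 *
 * BAR2 carries the CSRs and BAR4 the doorbells; the PCI config shadow
 * sits in BAR1 on BE gen2 controllers and in BAR0 otherwise.
 */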
260 static int beiscsi_map_pci_bars(struct beiscsi_hba *phba,
261 				struct pci_dev *pcidev)
262 {
263 	u8 __iomem *addr;
264 	int pcicfg_reg;
265 
266 	addr = ioremap_nocache(pci_resource_start(pcidev, 2),
267 			       pci_resource_len(pcidev, 2));
268 	if (addr == NULL)
269 		return -ENOMEM;
270 	phba->ctrl.csr = addr;
271 	phba->csr_va = addr;
272 	phba->csr_pa.u.a64.address = pci_resource_start(pcidev, 2);
273 
274 	addr = ioremap_nocache(pci_resource_start(pcidev, 4), 128 * 1024);
275 	if (addr == NULL)
276 		goto pci_map_err;
277 	phba->ctrl.db = addr;
278 	phba->db_va = addr;
279 	phba->db_pa.u.a64.address =  pci_resource_start(pcidev, 4);
280 
281 	if (phba->generation == BE_GEN2)
282 		pcicfg_reg = 1;
283 	else
284 		pcicfg_reg = 0;
285 
286 	addr = ioremap_nocache(pci_resource_start(pcidev, pcicfg_reg),
287 			       pci_resource_len(pcidev, pcicfg_reg));
288 
289 	if (addr == NULL)
290 		goto pci_map_err;
291 	phba->ctrl.pcicfg = addr;
292 	phba->pci_va = addr;
293 	phba->pci_pa.u.a64.address = pci_resource_start(pcidev, pcicfg_reg);
294 	return 0;
295 
296 pci_map_err:
297 	beiscsi_unmap_pci_function(phba);
298 	return -ENOMEM;
299 }
300 
301 static int beiscsi_enable_pci(struct pci_dev *pcidev)
302 {
303 	int ret;
304 
305 	ret = pci_enable_device(pcidev);
306 	if (ret) {
		dev_err(&pcidev->dev,
			"beiscsi_enable_pci - enable device failed\n");
309 		return ret;
310 	}
311 
312 	pci_set_master(pcidev);
313 	if (pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(64))) {
314 		ret = pci_set_consistent_dma_mask(pcidev, DMA_BIT_MASK(32));
315 		if (ret) {
316 			dev_err(&pcidev->dev, "Could not set PCI DMA Mask\n");
317 			pci_disable_device(pcidev);
318 			return ret;
319 		}
320 	}
321 	return 0;
322 }
323 
324 static int be_ctrl_init(struct beiscsi_hba *phba, struct pci_dev *pdev)
325 {
326 	struct be_ctrl_info *ctrl = &phba->ctrl;
327 	struct be_dma_mem *mbox_mem_alloc = &ctrl->mbox_mem_alloced;
328 	struct be_dma_mem *mbox_mem_align = &ctrl->mbox_mem;
329 	int status = 0;
330 
331 	ctrl->pdev = pdev;
332 	status = beiscsi_map_pci_bars(phba, pdev);
333 	if (status)
334 		return status;
335 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
336 	mbox_mem_alloc->va = pci_alloc_consistent(pdev,
337 						  mbox_mem_alloc->size,
338 						  &mbox_mem_alloc->dma);
339 	if (!mbox_mem_alloc->va) {
340 		beiscsi_unmap_pci_function(phba);
341 		status = -ENOMEM;
342 		return status;
343 	}
344 
345 	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
346 	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
347 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
348 	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
349 	spin_lock_init(&ctrl->mbox_lock);
350 	spin_lock_init(&phba->ctrl.mcc_lock);
351 	spin_lock_init(&phba->ctrl.mcc_cq_lock);
352 
353 	return status;
354 }
355 
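/**
 * beiscsi_get_params - derive driver limits from the firmware config
 * @phba: driver instance
 *
 * ios_per_ctrl is the ICD count minus the entries reserved for
 * connections, TMFs and NOP-Out requests; EQ and CQ depths are
 * rounded up to a multiple of 512, with the EQ held at a minimum of
 * 1024 entries.
 */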
356 static void beiscsi_get_params(struct beiscsi_hba *phba)
357 {
358 	phba->params.ios_per_ctrl = (phba->fw_config.iscsi_icd_count
359 				    - (phba->fw_config.iscsi_cid_count
360 				    + BE2_TMFS
361 				    + BE2_NOPOUT_REQ));
362 	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
363 	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
365 	phba->params.num_sge_per_io = BE2_SGE;
366 	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
367 	phba->params.defpdu_data_sz = BE2_DEFPDU_DATA_SZ;
368 	phba->params.eq_timer = 64;
369 	phba->params.num_eq_entries =
370 	    (((BE2_CMDS_PER_CXN * 2 + phba->fw_config.iscsi_cid_count * 2
371 				    + BE2_TMFS) / 512) + 1) * 512;
372 	phba->params.num_eq_entries = (phba->params.num_eq_entries < 1024)
373 				? 1024 : phba->params.num_eq_entries;
374 	SE_DEBUG(DBG_LVL_8, "phba->params.num_eq_entries=%d \n",
375 			     phba->params.num_eq_entries);
376 	phba->params.num_cq_entries =
377 	    (((BE2_CMDS_PER_CXN * 2 +  phba->fw_config.iscsi_cid_count * 2
378 				    + BE2_TMFS) / 512) + 1) * 512;
379 	phba->params.wrbs_per_cxn = 256;
380 }
381 
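/**
 * hwi_ring_eq_db - ring an event queue doorbell
 *
 * Packs the ring id, clear-interrupt, re-arm and event flags plus the
 * number of popped entries into one doorbell word and writes it at
 * DB_EQ_OFFSET.
 */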
382 static void hwi_ring_eq_db(struct beiscsi_hba *phba,
383 			   unsigned int id, unsigned int clr_interrupt,
384 			   unsigned int num_processed,
385 			   unsigned char rearm, unsigned char event)
386 {
387 	u32 val = 0;
388 	val |= id & DB_EQ_RING_ID_MASK;
389 	if (rearm)
390 		val |= 1 << DB_EQ_REARM_SHIFT;
391 	if (clr_interrupt)
392 		val |= 1 << DB_EQ_CLR_SHIFT;
393 	if (event)
394 		val |= 1 << DB_EQ_EVNT_SHIFT;
395 	val |= num_processed << DB_EQ_NUM_POPPED_SHIFT;
396 	iowrite32(val, phba->db_va + DB_EQ_OFFSET);
397 }
398 
/**
 * be_isr_mcc - interrupt handler for the MCC event queue (MSI-X)
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj that services MCC events
 */
404 static irqreturn_t be_isr_mcc(int irq, void *dev_id)
405 {
406 	struct beiscsi_hba *phba;
407 	struct be_eq_entry *eqe = NULL;
408 	struct be_queue_info *eq;
409 	struct be_queue_info *mcc;
410 	unsigned int num_eq_processed;
411 	struct be_eq_obj *pbe_eq;
412 	unsigned long flags;
413 
414 	pbe_eq = dev_id;
415 	eq = &pbe_eq->q;
416 	phba =  pbe_eq->phba;
417 	mcc = &phba->ctrl.mcc_obj.cq;
418 	eqe = queue_tail_node(eq);
419 	if (!eqe)
420 		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
421 
422 	num_eq_processed = 0;
423 
424 	while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
425 				& EQE_VALID_MASK) {
426 		if (((eqe->dw[offsetof(struct amap_eq_entry,
427 		     resource_id) / 32] &
428 		     EQE_RESID_MASK) >> 16) == mcc->id) {
429 			spin_lock_irqsave(&phba->isr_lock, flags);
430 			phba->todo_mcc_cq = 1;
431 			spin_unlock_irqrestore(&phba->isr_lock, flags);
432 		}
433 		AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
434 		queue_tail_inc(eq);
435 		eqe = queue_tail_node(eq);
436 		num_eq_processed++;
437 	}
438 	if (phba->todo_mcc_cq)
439 		queue_work(phba->wq, &phba->work_cqs);
440 	if (num_eq_processed)
441 		hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 1, 1);
442 
443 	return IRQ_HANDLED;
444 }
445 
/**
 * be_isr_msix - interrupt handler for an I/O event queue (MSI-X)
 * @irq: Not used
 * @dev_id: Pointer to the be_eq_obj that raised the interrupt
 */
451 static irqreturn_t be_isr_msix(int irq, void *dev_id)
452 {
453 	struct beiscsi_hba *phba;
454 	struct be_eq_entry *eqe = NULL;
455 	struct be_queue_info *eq;
456 	struct be_queue_info *cq;
457 	unsigned int num_eq_processed;
458 	struct be_eq_obj *pbe_eq;
459 	unsigned long flags;
460 
461 	pbe_eq = dev_id;
462 	eq = &pbe_eq->q;
463 	cq = pbe_eq->cq;
464 	eqe = queue_tail_node(eq);
465 	if (!eqe)
466 		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
467 
468 	phba = pbe_eq->phba;
469 	num_eq_processed = 0;
470 	if (blk_iopoll_enabled) {
471 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
472 					& EQE_VALID_MASK) {
473 			if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
474 				blk_iopoll_sched(&pbe_eq->iopoll);
475 
476 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
477 			queue_tail_inc(eq);
478 			eqe = queue_tail_node(eq);
479 			num_eq_processed++;
480 		}
481 		if (num_eq_processed)
482 			hwi_ring_eq_db(phba, eq->id, 1,	num_eq_processed, 0, 1);
483 
484 		return IRQ_HANDLED;
485 	} else {
486 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
487 						& EQE_VALID_MASK) {
488 			spin_lock_irqsave(&phba->isr_lock, flags);
489 			phba->todo_cq = 1;
490 			spin_unlock_irqrestore(&phba->isr_lock, flags);
491 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
492 			queue_tail_inc(eq);
493 			eqe = queue_tail_node(eq);
494 			num_eq_processed++;
495 		}
496 		if (phba->todo_cq)
497 			queue_work(phba->wq, &phba->work_cqs);
498 
499 		if (num_eq_processed)
500 			hwi_ring_eq_db(phba, eq->id, 1, num_eq_processed, 1, 1);
501 
502 		return IRQ_HANDLED;
503 	}
504 }
505 
/**
 * be_isr - interrupt handler for INTx/MSI (non MSI-X) mode
 * @irq: Not used
 * @dev_id: Pointer to host adapter structure
 */
511 static irqreturn_t be_isr(int irq, void *dev_id)
512 {
513 	struct beiscsi_hba *phba;
514 	struct hwi_controller *phwi_ctrlr;
515 	struct hwi_context_memory *phwi_context;
516 	struct be_eq_entry *eqe = NULL;
517 	struct be_queue_info *eq;
518 	struct be_queue_info *cq;
519 	struct be_queue_info *mcc;
520 	unsigned long flags, index;
521 	unsigned int num_mcceq_processed, num_ioeq_processed;
522 	struct be_ctrl_info *ctrl;
523 	struct be_eq_obj *pbe_eq;
524 	int isr;
525 
526 	phba = dev_id;
	ctrl = &phba->ctrl;
528 	isr = ioread32(ctrl->csr + CEV_ISR0_OFFSET +
529 		       (PCI_FUNC(ctrl->pdev->devfn) * CEV_ISR_SIZE));
530 	if (!isr)
531 		return IRQ_NONE;
532 
533 	phwi_ctrlr = phba->phwi_ctrlr;
534 	phwi_context = phwi_ctrlr->phwi_ctxt;
535 	pbe_eq = &phwi_context->be_eq[0];
536 
537 	eq = &phwi_context->be_eq[0].q;
538 	mcc = &phba->ctrl.mcc_obj.cq;
539 	index = 0;
540 	eqe = queue_tail_node(eq);
541 	if (!eqe)
542 		SE_DEBUG(DBG_LVL_1, "eqe is NULL\n");
543 
544 	num_ioeq_processed = 0;
545 	num_mcceq_processed = 0;
546 	if (blk_iopoll_enabled) {
547 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
548 					& EQE_VALID_MASK) {
549 			if (((eqe->dw[offsetof(struct amap_eq_entry,
550 			     resource_id) / 32] &
551 			     EQE_RESID_MASK) >> 16) == mcc->id) {
552 				spin_lock_irqsave(&phba->isr_lock, flags);
553 				phba->todo_mcc_cq = 1;
554 				spin_unlock_irqrestore(&phba->isr_lock, flags);
555 				num_mcceq_processed++;
556 			} else {
557 				if (!blk_iopoll_sched_prep(&pbe_eq->iopoll))
558 					blk_iopoll_sched(&pbe_eq->iopoll);
559 				num_ioeq_processed++;
560 			}
561 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
562 			queue_tail_inc(eq);
563 			eqe = queue_tail_node(eq);
564 		}
565 		if (num_ioeq_processed || num_mcceq_processed) {
566 			if (phba->todo_mcc_cq)
567 				queue_work(phba->wq, &phba->work_cqs);
568 
			if ((num_mcceq_processed) && (!num_ioeq_processed))
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 1, 1);
			else
				hwi_ring_eq_db(phba, eq->id, 0,
					       (num_ioeq_processed +
						num_mcceq_processed), 0, 1);
577 
578 			return IRQ_HANDLED;
579 		} else
580 			return IRQ_NONE;
581 	} else {
582 		cq = &phwi_context->be_cq[0];
583 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
584 						& EQE_VALID_MASK) {
585 
586 			if (((eqe->dw[offsetof(struct amap_eq_entry,
587 			     resource_id) / 32] &
588 			     EQE_RESID_MASK) >> 16) != cq->id) {
589 				spin_lock_irqsave(&phba->isr_lock, flags);
590 				phba->todo_mcc_cq = 1;
591 				spin_unlock_irqrestore(&phba->isr_lock, flags);
592 			} else {
593 				spin_lock_irqsave(&phba->isr_lock, flags);
594 				phba->todo_cq = 1;
595 				spin_unlock_irqrestore(&phba->isr_lock, flags);
596 			}
597 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
598 			queue_tail_inc(eq);
599 			eqe = queue_tail_node(eq);
600 			num_ioeq_processed++;
601 		}
602 		if (phba->todo_cq || phba->todo_mcc_cq)
603 			queue_work(phba->wq, &phba->work_cqs);
604 
605 		if (num_ioeq_processed) {
606 			hwi_ring_eq_db(phba, eq->id, 0,
607 				       num_ioeq_processed, 1, 1);
608 			return IRQ_HANDLED;
609 		} else
610 			return IRQ_NONE;
611 	}
612 }
613 
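/**
 * beiscsi_init_irqs - register the interrupt handlers
 * @phba: driver instance
 *
 * In MSI-X mode one vector per CPU services an I/O event queue and a
 * final vector services the MCC queue; otherwise a single shared
 * INTx/MSI handler covers everything.
 */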
614 static int beiscsi_init_irqs(struct beiscsi_hba *phba)
615 {
616 	struct pci_dev *pcidev = phba->pcidev;
617 	struct hwi_controller *phwi_ctrlr;
618 	struct hwi_context_memory *phwi_context;
619 	int ret, msix_vec, i = 0;
620 	char desc[32];
621 
622 	phwi_ctrlr = phba->phwi_ctrlr;
623 	phwi_context = phwi_ctrlr->phwi_ctxt;
624 
	if (phba->msix_enabled) {
		for (i = 0; i < phba->num_cpus; i++) {
			sprintf(desc, "beiscsi_msix_%04x", i);
			msix_vec = phba->msix_entries[i].vector;
			ret = request_irq(msix_vec, be_isr_msix, 0, desc,
					  &phwi_context->be_eq[i]);
			if (ret) {
				shost_printk(KERN_ERR, phba->shost,
					     "beiscsi_init_irqs - Failed to"
					     " register msix for i = %d\n", i);
				goto free_msix_irqs;
			}
		}
		msix_vec = phba->msix_entries[i].vector;
		ret = request_irq(msix_vec, be_isr_mcc, 0, "beiscsi_msix_mcc",
				  &phwi_context->be_eq[i]);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
				     "Failed to register beiscsi_msix_mcc\n");
			goto free_msix_irqs;
		}
	} else {
		ret = request_irq(pcidev->irq, be_isr, IRQF_SHARED,
				  "beiscsi", phba);
		if (ret) {
			shost_printk(KERN_ERR, phba->shost, "beiscsi_init_irqs-"
				     "Failed to register irq\n");
			return ret;
		}
	}
	return 0;

free_msix_irqs:
	while (--i >= 0)
		free_irq(phba->msix_entries[i].vector,
			 &phwi_context->be_eq[i]);
	return ret;
}
646 
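/**
 * hwi_ring_cq_db - ring a completion queue doorbell
 *
 * Packs the CQ ring id, the re-arm flag and the number of popped
 * entries into one doorbell word; the event argument is unused here.
 */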
647 static void hwi_ring_cq_db(struct beiscsi_hba *phba,
648 			   unsigned int id, unsigned int num_processed,
649 			   unsigned char rearm, unsigned char event)
650 {
651 	u32 val = 0;
652 	val |= id & DB_CQ_RING_ID_MASK;
653 	if (rearm)
654 		val |= 1 << DB_CQ_REARM_SHIFT;
655 	val |= num_processed << DB_CQ_NUM_POPPED_SHIFT;
656 	iowrite32(val, phba->db_va + DB_CQ_OFFSET);
657 }
658 
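/*
 * Hand an unsolicited PDU to libiscsi.  NOP-In payloads are dropped,
 * login/text responses get their ITT rewritten to the libiscsi one,
 * and unrecognized opcodes are rejected before __iscsi_complete_pdu()
 * runs under the session lock.
 */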
659 static unsigned int
660 beiscsi_process_async_pdu(struct beiscsi_conn *beiscsi_conn,
661 			  struct beiscsi_hba *phba,
662 			  unsigned short cid,
663 			  struct pdu_base *ppdu,
664 			  unsigned long pdu_len,
665 			  void *pbuffer, unsigned long buf_len)
666 {
667 	struct iscsi_conn *conn = beiscsi_conn->conn;
668 	struct iscsi_session *session = conn->session;
669 	struct iscsi_task *task;
670 	struct beiscsi_io_task *io_task;
671 	struct iscsi_hdr *login_hdr;
672 
673 	switch (ppdu->dw[offsetof(struct amap_pdu_base, opcode) / 32] &
674 						PDUBASE_OPCODE_MASK) {
675 	case ISCSI_OP_NOOP_IN:
676 		pbuffer = NULL;
677 		buf_len = 0;
678 		break;
679 	case ISCSI_OP_ASYNC_EVENT:
680 		break;
681 	case ISCSI_OP_REJECT:
682 		WARN_ON(!pbuffer);
		WARN_ON(buf_len != 48);
684 		SE_DEBUG(DBG_LVL_1, "In ISCSI_OP_REJECT\n");
685 		break;
686 	case ISCSI_OP_LOGIN_RSP:
687 	case ISCSI_OP_TEXT_RSP:
688 		task = conn->login_task;
689 		io_task = task->dd_data;
690 		login_hdr = (struct iscsi_hdr *)ppdu;
691 		login_hdr->itt = io_task->libiscsi_itt;
692 		break;
693 	default:
694 		shost_printk(KERN_WARNING, phba->shost,
695 			     "Unrecognized opcode 0x%x in async msg \n",
696 			     (ppdu->
697 			     dw[offsetof(struct amap_pdu_base, opcode) / 32]
698 						& PDUBASE_OPCODE_MASK));
699 		return 1;
700 	}
701 
702 	spin_lock_bh(&session->lock);
703 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)ppdu, pbuffer, buf_len);
704 	spin_unlock_bh(&session->lock);
705 	return 0;
706 }
707 
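/*
 * The I/O SGL pool is a circular array: the alloc index walks forward
 * and wraps at ios_per_ctrl; NULL is returned once no handles remain.
 */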
708 static struct sgl_handle *alloc_io_sgl_handle(struct beiscsi_hba *phba)
709 {
710 	struct sgl_handle *psgl_handle;
711 
712 	if (phba->io_sgl_hndl_avbl) {
713 		SE_DEBUG(DBG_LVL_8,
714 			 "In alloc_io_sgl_handle,io_sgl_alloc_index=%d \n",
715 			 phba->io_sgl_alloc_index);
716 		psgl_handle = phba->io_sgl_hndl_base[phba->
717 						io_sgl_alloc_index];
718 		phba->io_sgl_hndl_base[phba->io_sgl_alloc_index] = NULL;
719 		phba->io_sgl_hndl_avbl--;
720 		if (phba->io_sgl_alloc_index == (phba->params.
721 						 ios_per_ctrl - 1))
722 			phba->io_sgl_alloc_index = 0;
723 		else
724 			phba->io_sgl_alloc_index++;
725 	} else
726 		psgl_handle = NULL;
727 	return psgl_handle;
728 }
729 
730 static void
731 free_io_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
732 {
	SE_DEBUG(DBG_LVL_8, "In free_io_sgl_handle, io_sgl_free_index=%d\n",
		 phba->io_sgl_free_index);
735 	if (phba->io_sgl_hndl_base[phba->io_sgl_free_index]) {
736 		/*
737 		 * this can happen if clean_task is called on a task that
738 		 * failed in xmit_task or alloc_pdu.
739 		 */
740 		 SE_DEBUG(DBG_LVL_8,
741 			 "Double Free in IO SGL io_sgl_free_index=%d,"
742 			 "value there=%p \n", phba->io_sgl_free_index,
743 			 phba->io_sgl_hndl_base[phba->io_sgl_free_index]);
744 		return;
745 	}
746 	phba->io_sgl_hndl_base[phba->io_sgl_free_index] = psgl_handle;
747 	phba->io_sgl_hndl_avbl++;
748 	if (phba->io_sgl_free_index == (phba->params.ios_per_ctrl - 1))
749 		phba->io_sgl_free_index = 0;
750 	else
751 		phba->io_sgl_free_index++;
752 }
753 
754 /**
755  * alloc_wrb_handle - To allocate a wrb handle
756  * @phba: The hba pointer
757  * @cid: The cid to use for allocation
758  *
759  * This happens under session_lock until submission to chip
760  */
761 struct wrb_handle *alloc_wrb_handle(struct beiscsi_hba *phba, unsigned int cid)
762 {
763 	struct hwi_wrb_context *pwrb_context;
764 	struct hwi_controller *phwi_ctrlr;
765 	struct wrb_handle *pwrb_handle, *pwrb_handle_tmp;
766 
767 	phwi_ctrlr = phba->phwi_ctrlr;
768 	pwrb_context = &phwi_ctrlr->wrb_context[cid];
769 	if (pwrb_context->wrb_handles_available >= 2) {
770 		pwrb_handle = pwrb_context->pwrb_handle_base[
771 					    pwrb_context->alloc_index];
772 		pwrb_context->wrb_handles_available--;
773 		if (pwrb_context->alloc_index ==
774 						(phba->params.wrbs_per_cxn - 1))
775 			pwrb_context->alloc_index = 0;
776 		else
777 			pwrb_context->alloc_index++;
778 		pwrb_handle_tmp = pwrb_context->pwrb_handle_base[
779 						pwrb_context->alloc_index];
780 		pwrb_handle->nxt_wrb_index = pwrb_handle_tmp->wrb_index;
781 	} else
782 		pwrb_handle = NULL;
783 	return pwrb_handle;
784 }
785 
786 /**
787  * free_wrb_handle - To free the wrb handle back to pool
788  * @phba: The hba pointer
789  * @pwrb_context: The context to free from
790  * @pwrb_handle: The wrb_handle to free
791  *
792  * This happens under session_lock until submission to chip
793  */
794 static void
795 free_wrb_handle(struct beiscsi_hba *phba, struct hwi_wrb_context *pwrb_context,
796 		struct wrb_handle *pwrb_handle)
797 {
798 	pwrb_context->pwrb_handle_base[pwrb_context->free_index] = pwrb_handle;
799 	pwrb_context->wrb_handles_available++;
800 	if (pwrb_context->free_index == (phba->params.wrbs_per_cxn - 1))
801 		pwrb_context->free_index = 0;
802 	else
803 		pwrb_context->free_index++;
804 
805 	SE_DEBUG(DBG_LVL_8,
806 		 "FREE WRB: pwrb_handle=%p free_index=0x%x"
807 		 "wrb_handles_available=%d \n",
808 		 pwrb_handle, pwrb_context->free_index,
809 		 pwrb_context->wrb_handles_available);
810 }
811 
812 static struct sgl_handle *alloc_mgmt_sgl_handle(struct beiscsi_hba *phba)
813 {
814 	struct sgl_handle *psgl_handle;
815 
816 	if (phba->eh_sgl_hndl_avbl) {
817 		psgl_handle = phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index];
818 		phba->eh_sgl_hndl_base[phba->eh_sgl_alloc_index] = NULL;
819 		SE_DEBUG(DBG_LVL_8, "mgmt_sgl_alloc_index=%d=0x%x \n",
820 			 phba->eh_sgl_alloc_index, phba->eh_sgl_alloc_index);
821 		phba->eh_sgl_hndl_avbl--;
822 		if (phba->eh_sgl_alloc_index ==
823 		    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl -
824 		     1))
825 			phba->eh_sgl_alloc_index = 0;
826 		else
827 			phba->eh_sgl_alloc_index++;
828 	} else
829 		psgl_handle = NULL;
830 	return psgl_handle;
831 }
832 
833 void
834 free_mgmt_sgl_handle(struct beiscsi_hba *phba, struct sgl_handle *psgl_handle)
835 {
	SE_DEBUG(DBG_LVL_8, "In free_mgmt_sgl_handle, eh_sgl_free_index=%d\n",
		 phba->eh_sgl_free_index);
839 	if (phba->eh_sgl_hndl_base[phba->eh_sgl_free_index]) {
840 		/*
841 		 * this can happen if clean_task is called on a task that
842 		 * failed in xmit_task or alloc_pdu.
843 		 */
844 		SE_DEBUG(DBG_LVL_8,
845 			 "Double Free in eh SGL ,eh_sgl_free_index=%d \n",
846 			 phba->eh_sgl_free_index);
847 		return;
848 	}
849 	phba->eh_sgl_hndl_base[phba->eh_sgl_free_index] = psgl_handle;
850 	phba->eh_sgl_hndl_avbl++;
851 	if (phba->eh_sgl_free_index ==
852 	    (phba->params.icds_per_ctrl - phba->params.ios_per_ctrl - 1))
853 		phba->eh_sgl_free_index = 0;
854 	else
855 		phba->eh_sgl_free_index++;
856 }
857 
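/*
 * Complete a SCSI command from a solicited CQE: decode the response,
 * status, flags and residual counts from the CQE dwords, copy sense
 * data on CHECK CONDITION and finish via iscsi_complete_scsi_task().
 */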
858 static void
859 be_complete_io(struct beiscsi_conn *beiscsi_conn,
860 	       struct iscsi_task *task, struct sol_cqe *psol)
861 {
862 	struct beiscsi_io_task *io_task = task->dd_data;
863 	struct be_status_bhs *sts_bhs =
864 				(struct be_status_bhs *)io_task->cmd_bhs;
865 	struct iscsi_conn *conn = beiscsi_conn->conn;
866 	unsigned int sense_len;
867 	unsigned char *sense;
868 	u32 resid = 0, exp_cmdsn, max_cmdsn;
869 	u8 rsp, status, flags;
870 
871 	exp_cmdsn = (psol->
872 			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
873 			& SOL_EXP_CMD_SN_MASK);
874 	max_cmdsn = ((psol->
875 			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
876 			& SOL_EXP_CMD_SN_MASK) +
877 			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
878 				/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
879 	rsp = ((psol->dw[offsetof(struct amap_sol_cqe, i_resp) / 32]
880 						& SOL_RESP_MASK) >> 16);
881 	status = ((psol->dw[offsetof(struct amap_sol_cqe, i_sts) / 32]
882 						& SOL_STS_MASK) >> 8);
883 	flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
884 					& SOL_FLAGS_MASK) >> 24) | 0x80;
885 
886 	task->sc->result = (DID_OK << 16) | status;
887 	if (rsp != ISCSI_STATUS_CMD_COMPLETED) {
888 		task->sc->result = DID_ERROR << 16;
889 		goto unmap;
890 	}
891 
892 	/* bidi not initially supported */
893 	if (flags & (ISCSI_FLAG_CMD_UNDERFLOW | ISCSI_FLAG_CMD_OVERFLOW)) {
894 		resid = (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) /
895 				32] & SOL_RES_CNT_MASK);
896 
897 		if (!status && (flags & ISCSI_FLAG_CMD_OVERFLOW))
898 			task->sc->result = DID_ERROR << 16;
899 
900 		if (flags & ISCSI_FLAG_CMD_UNDERFLOW) {
901 			scsi_set_resid(task->sc, resid);
902 			if (!status && (scsi_bufflen(task->sc) - resid <
903 			    task->sc->underflow))
904 				task->sc->result = DID_ERROR << 16;
905 		}
906 	}
907 
908 	if (status == SAM_STAT_CHECK_CONDITION) {
909 		unsigned short *slen = (unsigned short *)sts_bhs->sense_info;
910 		sense = sts_bhs->sense_info + sizeof(unsigned short);
		sense_len = be16_to_cpu(*slen);
912 		memcpy(task->sc->sense_buffer, sense,
913 		       min_t(u16, sense_len, SCSI_SENSE_BUFFERSIZE));
914 	}
915 
916 	if (io_task->cmd_bhs->iscsi_hdr.flags & ISCSI_FLAG_CMD_READ) {
917 		if (psol->dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
918 							& SOL_RES_CNT_MASK)
919 			 conn->rxdata_octets += (psol->
920 			     dw[offsetof(struct amap_sol_cqe, i_res_cnt) / 32]
921 			     & SOL_RES_CNT_MASK);
922 	}
923 unmap:
924 	scsi_dma_unmap(io_task->scsi_cmnd);
925 	iscsi_complete_scsi_task(task, exp_cmdsn, max_cmdsn);
926 }
927 
928 static void
929 be_complete_logout(struct beiscsi_conn *beiscsi_conn,
930 		   struct iscsi_task *task, struct sol_cqe *psol)
931 {
932 	struct iscsi_logout_rsp *hdr;
933 	struct beiscsi_io_task *io_task = task->dd_data;
934 	struct iscsi_conn *conn = beiscsi_conn->conn;
935 
936 	hdr = (struct iscsi_logout_rsp *)task->hdr;
937 	hdr->opcode = ISCSI_OP_LOGOUT_RSP;
938 	hdr->t2wait = 5;
939 	hdr->t2retain = 0;
940 	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
941 					& SOL_FLAGS_MASK) >> 24) | 0x80;
942 	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
943 					32] & SOL_RESP_MASK);
944 	hdr->exp_cmdsn = cpu_to_be32(psol->
945 			dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
946 					& SOL_EXP_CMD_SN_MASK);
947 	hdr->max_cmdsn = be32_to_cpu((psol->
948 			 dw[offsetof(struct amap_sol_cqe, i_exp_cmd_sn) / 32]
949 					& SOL_EXP_CMD_SN_MASK) +
950 			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
951 					/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
952 	hdr->dlength[0] = 0;
953 	hdr->dlength[1] = 0;
954 	hdr->dlength[2] = 0;
955 	hdr->hlength = 0;
956 	hdr->itt = io_task->libiscsi_itt;
957 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
958 }
959 
960 static void
961 be_complete_tmf(struct beiscsi_conn *beiscsi_conn,
962 		struct iscsi_task *task, struct sol_cqe *psol)
963 {
964 	struct iscsi_tm_rsp *hdr;
965 	struct iscsi_conn *conn = beiscsi_conn->conn;
966 	struct beiscsi_io_task *io_task = task->dd_data;
967 
968 	hdr = (struct iscsi_tm_rsp *)task->hdr;
969 	hdr->opcode = ISCSI_OP_SCSI_TMFUNC_RSP;
970 	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
971 					& SOL_FLAGS_MASK) >> 24) | 0x80;
972 	hdr->response = (psol->dw[offsetof(struct amap_sol_cqe, i_resp) /
973 					32] & SOL_RESP_MASK);
974 	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
975 				    i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
976 	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
977 			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
978 			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
979 			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
980 	hdr->itt = io_task->libiscsi_itt;
981 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
982 }
983 
984 static void
985 hwi_complete_drvr_msgs(struct beiscsi_conn *beiscsi_conn,
986 		       struct beiscsi_hba *phba, struct sol_cqe *psol)
987 {
988 	struct hwi_wrb_context *pwrb_context;
989 	struct wrb_handle *pwrb_handle = NULL;
990 	struct hwi_controller *phwi_ctrlr;
991 	struct iscsi_task *task;
992 	struct beiscsi_io_task *io_task;
993 	struct iscsi_conn *conn = beiscsi_conn->conn;
994 	struct iscsi_session *session = conn->session;
995 
996 	phwi_ctrlr = phba->phwi_ctrlr;
997 	pwrb_context = &phwi_ctrlr->wrb_context[((psol->
998 				dw[offsetof(struct amap_sol_cqe, cid) / 32] &
999 				SOL_CID_MASK) >> 6) -
1000 				phba->fw_config.iscsi_cid_start];
1001 	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1002 				dw[offsetof(struct amap_sol_cqe, wrb_index) /
1003 				32] & SOL_WRB_INDEX_MASK) >> 16)];
1004 	task = pwrb_handle->pio_handle;
1005 
1006 	io_task = task->dd_data;
1007 	spin_lock(&phba->mgmt_sgl_lock);
1008 	free_mgmt_sgl_handle(phba, io_task->psgl_handle);
1009 	spin_unlock(&phba->mgmt_sgl_lock);
1010 	spin_lock_bh(&session->lock);
1011 	free_wrb_handle(phba, pwrb_context, pwrb_handle);
1012 	spin_unlock_bh(&session->lock);
1013 }
1014 
1015 static void
1016 be_complete_nopin_resp(struct beiscsi_conn *beiscsi_conn,
1017 		       struct iscsi_task *task, struct sol_cqe *psol)
1018 {
1019 	struct iscsi_nopin *hdr;
1020 	struct iscsi_conn *conn = beiscsi_conn->conn;
1021 	struct beiscsi_io_task *io_task = task->dd_data;
1022 
1023 	hdr = (struct iscsi_nopin *)task->hdr;
1024 	hdr->flags = ((psol->dw[offsetof(struct amap_sol_cqe, i_flags) / 32]
1025 			& SOL_FLAGS_MASK) >> 24) | 0x80;
1026 	hdr->exp_cmdsn = cpu_to_be32(psol->dw[offsetof(struct amap_sol_cqe,
1027 				     i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK);
1028 	hdr->max_cmdsn = be32_to_cpu((psol->dw[offsetof(struct amap_sol_cqe,
1029 			i_exp_cmd_sn) / 32] & SOL_EXP_CMD_SN_MASK) +
1030 			((psol->dw[offsetof(struct amap_sol_cqe, i_cmd_wnd)
1031 			/ 32] & SOL_CMD_WND_MASK) >> 24) - 1);
1032 	hdr->opcode = ISCSI_OP_NOOP_IN;
1033 	hdr->itt = io_task->libiscsi_itt;
1034 	__iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
1035 }
1036 
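/*
 * Dispatch a solicited completion by WRB type: the iscsi_task is
 * recovered from the WRB handle named in the CQE and routed to the
 * I/O, logout, TMF or NOP-In completion helper under the session
 * lock.
 */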
1037 static void hwi_complete_cmd(struct beiscsi_conn *beiscsi_conn,
1038 			     struct beiscsi_hba *phba, struct sol_cqe *psol)
1039 {
1040 	struct hwi_wrb_context *pwrb_context;
1041 	struct wrb_handle *pwrb_handle;
1042 	struct iscsi_wrb *pwrb = NULL;
1043 	struct hwi_controller *phwi_ctrlr;
1044 	struct iscsi_task *task;
1045 	unsigned int type;
1046 	struct iscsi_conn *conn = beiscsi_conn->conn;
1047 	struct iscsi_session *session = conn->session;
1048 
1049 	phwi_ctrlr = phba->phwi_ctrlr;
1050 	pwrb_context = &phwi_ctrlr->wrb_context[((psol->dw[offsetof
1051 				(struct amap_sol_cqe, cid) / 32]
1052 				& SOL_CID_MASK) >> 6) -
1053 				phba->fw_config.iscsi_cid_start];
1054 	pwrb_handle = pwrb_context->pwrb_handle_basestd[((psol->
1055 				dw[offsetof(struct amap_sol_cqe, wrb_index) /
1056 				32] & SOL_WRB_INDEX_MASK) >> 16)];
1057 	task = pwrb_handle->pio_handle;
1058 	pwrb = pwrb_handle->pwrb;
1059 	type = (pwrb->dw[offsetof(struct amap_iscsi_wrb, type) / 32] &
1060 				 WRB_TYPE_MASK) >> 28;
1061 
1062 	spin_lock_bh(&session->lock);
1063 	switch (type) {
1064 	case HWH_TYPE_IO:
1065 	case HWH_TYPE_IO_RD:
1066 		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
1067 		     ISCSI_OP_NOOP_OUT)
1068 			be_complete_nopin_resp(beiscsi_conn, task, psol);
1069 		else
1070 			be_complete_io(beiscsi_conn, task, psol);
1071 		break;
1072 
1073 	case HWH_TYPE_LOGOUT:
1074 		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
1075 			be_complete_logout(beiscsi_conn, task, psol);
1076 		else
1077 			be_complete_tmf(beiscsi_conn, task, psol);
1078 
1079 		break;
1080 
1081 	case HWH_TYPE_LOGIN:
1082 		SE_DEBUG(DBG_LVL_1,
1083 			 "\t\t No HWH_TYPE_LOGIN Expected in hwi_complete_cmd"
1084 			 "- Solicited path \n");
1085 		break;
1086 
1087 	case HWH_TYPE_NOP:
1088 		be_complete_nopin_resp(beiscsi_conn, task, psol);
1089 		break;
1090 
1091 	default:
1092 		shost_printk(KERN_WARNING, phba->shost,
				"In hwi_complete_cmd, unknown type = %d "
				"wrb_index 0x%x CID 0x%x\n", type,
				((psol->dw[offsetof(struct amap_sol_cqe,
				wrb_index) / 32] & SOL_WRB_INDEX_MASK) >> 16),
1097 				((psol->dw[offsetof(struct amap_sol_cqe,
1098 				cid) / 32] & SOL_CID_MASK) >> 6));
1099 		break;
1100 	}
1101 
1102 	spin_unlock_bh(&session->lock);
1103 }
1104 
1105 static struct list_head *hwi_get_async_busy_list(struct hwi_async_pdu_context
1106 					  *pasync_ctx, unsigned int is_header,
1107 					  unsigned int host_write_ptr)
1108 {
1109 	if (is_header)
1110 		return &pasync_ctx->async_entry[host_write_ptr].
1111 		    header_busy_list;
1112 	else
1113 		return &pasync_ctx->async_entry[host_write_ptr].data_busy_list;
1114 }
1115 
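/*
 * Match a default PDU CQE to its buffer handle: rebuild the buffer's
 * physical address from the CQE, derive the index within the header
 * or data ring, then walk the busy list for that CQ index to find
 * the owning handle.
 */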
1116 static struct async_pdu_handle *
1117 hwi_get_async_handle(struct beiscsi_hba *phba,
1118 		     struct beiscsi_conn *beiscsi_conn,
1119 		     struct hwi_async_pdu_context *pasync_ctx,
1120 		     struct i_t_dpdu_cqe *pdpdu_cqe, unsigned int *pcq_index)
1121 {
1122 	struct be_bus_address phys_addr;
1123 	struct list_head *pbusy_list;
1124 	struct async_pdu_handle *pasync_handle = NULL;
1125 	int buffer_len = 0;
1126 	unsigned char buffer_index = -1;
1127 	unsigned char is_header = 0;
1128 
1129 	phys_addr.u.a32.address_lo =
1130 	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_lo) / 32] -
1131 	    ((pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1132 						& PDUCQE_DPL_MASK) >> 16);
1133 	phys_addr.u.a32.address_hi =
1134 	    pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, db_addr_hi) / 32];
1135 
1136 	phys_addr.u.a64.address =
1137 			*((unsigned long long *)(&phys_addr.u.a64.address));
1138 
1139 	switch (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe, code) / 32]
1140 			& PDUCQE_CODE_MASK) {
1141 	case UNSOL_HDR_NOTIFY:
1142 		is_header = 1;
1143 
1144 		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 1,
1145 			(pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1146 			index) / 32] & PDUCQE_INDEX_MASK));
1147 
1148 		buffer_len = (unsigned int)(phys_addr.u.a64.address -
1149 				pasync_ctx->async_header.pa_base.u.a64.address);
1150 
1151 		buffer_index = buffer_len /
1152 				pasync_ctx->async_header.buffer_size;
1153 
1154 		break;
1155 	case UNSOL_DATA_NOTIFY:
1156 		pbusy_list = hwi_get_async_busy_list(pasync_ctx, 0, (pdpdu_cqe->
1157 					dw[offsetof(struct amap_i_t_dpdu_cqe,
1158 					index) / 32] & PDUCQE_INDEX_MASK));
1159 		buffer_len = (unsigned long)(phys_addr.u.a64.address -
1160 					pasync_ctx->async_data.pa_base.u.
1161 					a64.address);
1162 		buffer_index = buffer_len / pasync_ctx->async_data.buffer_size;
1163 		break;
1164 	default:
1165 		pbusy_list = NULL;
1166 		shost_printk(KERN_WARNING, phba->shost,
1167 			"Unexpected code=%d \n",
1168 			 pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1169 					code) / 32] & PDUCQE_CODE_MASK);
1170 		return NULL;
1171 	}
1172 
	WARN_ON(buffer_index > pasync_ctx->async_data.num_entries);
1174 	WARN_ON(list_empty(pbusy_list));
1175 	list_for_each_entry(pasync_handle, pbusy_list, link) {
1176 		WARN_ON(pasync_handle->consumed);
1177 		if (pasync_handle->index == buffer_index)
1178 			break;
1179 	}
1180 
1181 	WARN_ON(!pasync_handle);
1182 
1183 	pasync_handle->cri = (unsigned short)beiscsi_conn->beiscsi_conn_cid -
1184 					     phba->fw_config.iscsi_cid_start;
1185 	pasync_handle->is_header = is_header;
1186 	pasync_handle->buffer_len = ((pdpdu_cqe->
1187 			dw[offsetof(struct amap_i_t_dpdu_cqe, dpl) / 32]
1188 			& PDUCQE_DPL_MASK) >> 16);
1189 
1190 	*pcq_index = (pdpdu_cqe->dw[offsetof(struct amap_i_t_dpdu_cqe,
1191 			index) / 32] & PDUCQE_INDEX_MASK);
1192 	return pasync_handle;
1193 }
1194 
1195 static unsigned int
1196 hwi_update_async_writables(struct hwi_async_pdu_context *pasync_ctx,
1197 			   unsigned int is_header, unsigned int cq_index)
1198 {
1199 	struct list_head *pbusy_list;
1200 	struct async_pdu_handle *pasync_handle;
1201 	unsigned int num_entries, writables = 0;
1202 	unsigned int *pep_read_ptr, *pwritables;
1203 
1205 	if (is_header) {
1206 		pep_read_ptr = &pasync_ctx->async_header.ep_read_ptr;
1207 		pwritables = &pasync_ctx->async_header.writables;
1208 		num_entries = pasync_ctx->async_header.num_entries;
1209 	} else {
1210 		pep_read_ptr = &pasync_ctx->async_data.ep_read_ptr;
1211 		pwritables = &pasync_ctx->async_data.writables;
1212 		num_entries = pasync_ctx->async_data.num_entries;
1213 	}
1214 
1215 	while ((*pep_read_ptr) != cq_index) {
1216 		(*pep_read_ptr)++;
1217 		*pep_read_ptr = (*pep_read_ptr) % num_entries;
1218 
1219 		pbusy_list = hwi_get_async_busy_list(pasync_ctx, is_header,
1220 						     *pep_read_ptr);
1221 		if (writables == 0)
1222 			WARN_ON(list_empty(pbusy_list));
1223 
1224 		if (!list_empty(pbusy_list)) {
1225 			pasync_handle = list_entry(pbusy_list->next,
1226 						   struct async_pdu_handle,
1227 						   link);
1228 			WARN_ON(!pasync_handle);
1229 			pasync_handle->consumed = 1;
1230 		}
1231 
1232 		writables++;
1233 	}
1234 
1235 	if (!writables) {
1236 		SE_DEBUG(DBG_LVL_1,
1237 			 "Duplicate notification received - index 0x%x!!\n",
1238 			 cq_index);
1239 		WARN_ON(1);
1240 	}
1241 
1242 	*pwritables = *pwritables + writables;
1243 	return 0;
1244 }
1245 
1246 static unsigned int hwi_free_async_msg(struct beiscsi_hba *phba,
1247 				       unsigned int cri)
1248 {
1249 	struct hwi_controller *phwi_ctrlr;
1250 	struct hwi_async_pdu_context *pasync_ctx;
1251 	struct async_pdu_handle *pasync_handle, *tmp_handle;
1252 	struct list_head *plist;
1253 	unsigned int i = 0;
1254 
1255 	phwi_ctrlr = phba->phwi_ctrlr;
1256 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1257 
1258 	plist  = &pasync_ctx->async_entry[cri].wait_queue.list;
1259 
1260 	list_for_each_entry_safe(pasync_handle, tmp_handle, plist, link) {
1261 		list_del(&pasync_handle->link);
1262 
1263 		if (i == 0) {
1264 			list_add_tail(&pasync_handle->link,
1265 				      &pasync_ctx->async_header.free_list);
1266 			pasync_ctx->async_header.free_entries++;
1267 			i++;
1268 		} else {
1269 			list_add_tail(&pasync_handle->link,
1270 				      &pasync_ctx->async_data.free_list);
1271 			pasync_ctx->async_data.free_entries++;
1272 			i++;
1273 		}
1274 	}
1275 
1276 	INIT_LIST_HEAD(&pasync_ctx->async_entry[cri].wait_queue.list);
1277 	pasync_ctx->async_entry[cri].wait_queue.hdr_received = 0;
1278 	pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1279 	return 0;
1280 }
1281 
1282 static struct phys_addr *
1283 hwi_get_ring_address(struct hwi_async_pdu_context *pasync_ctx,
1284 		     unsigned int is_header, unsigned int host_write_ptr)
1285 {
1286 	struct phys_addr *pasync_sge = NULL;
1287 
1288 	if (is_header)
1289 		pasync_sge = pasync_ctx->async_header.ring_base;
1290 	else
1291 		pasync_sge = pasync_ctx->async_data.ring_base;
1292 
1293 	return pasync_sge + host_write_ptr;
1294 }
1295 
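/*
 * Replenish a default PDU ring: free handles are written back into
 * the ring SGEs and moved to the busy lists in multiples of eight,
 * then the RXULP doorbell is rung with the number of entries posted.
 */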
1296 static void hwi_post_async_buffers(struct beiscsi_hba *phba,
1297 				   unsigned int is_header)
1298 {
1299 	struct hwi_controller *phwi_ctrlr;
1300 	struct hwi_async_pdu_context *pasync_ctx;
1301 	struct async_pdu_handle *pasync_handle;
1302 	struct list_head *pfree_link, *pbusy_list;
1303 	struct phys_addr *pasync_sge;
1304 	unsigned int ring_id, num_entries;
1305 	unsigned int host_write_num;
1306 	unsigned int writables;
1307 	unsigned int i = 0;
1308 	u32 doorbell = 0;
1309 
1310 	phwi_ctrlr = phba->phwi_ctrlr;
1311 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1312 
1313 	if (is_header) {
1314 		num_entries = pasync_ctx->async_header.num_entries;
1315 		writables = min(pasync_ctx->async_header.writables,
1316 				pasync_ctx->async_header.free_entries);
1317 		pfree_link = pasync_ctx->async_header.free_list.next;
1318 		host_write_num = pasync_ctx->async_header.host_write_ptr;
1319 		ring_id = phwi_ctrlr->default_pdu_hdr.id;
1320 	} else {
1321 		num_entries = pasync_ctx->async_data.num_entries;
1322 		writables = min(pasync_ctx->async_data.writables,
1323 				pasync_ctx->async_data.free_entries);
1324 		pfree_link = pasync_ctx->async_data.free_list.next;
1325 		host_write_num = pasync_ctx->async_data.host_write_ptr;
1326 		ring_id = phwi_ctrlr->default_pdu_data.id;
1327 	}
1328 
1329 	writables = (writables / 8) * 8;
1330 	if (writables) {
1331 		for (i = 0; i < writables; i++) {
1332 			pbusy_list =
1333 			    hwi_get_async_busy_list(pasync_ctx, is_header,
1334 						    host_write_num);
1335 			pasync_handle =
1336 			    list_entry(pfree_link, struct async_pdu_handle,
1337 								link);
1338 			WARN_ON(!pasync_handle);
1339 			pasync_handle->consumed = 0;
1340 
1341 			pfree_link = pfree_link->next;
1342 
1343 			pasync_sge = hwi_get_ring_address(pasync_ctx,
1344 						is_header, host_write_num);
1345 
1346 			pasync_sge->hi = pasync_handle->pa.u.a32.address_lo;
1347 			pasync_sge->lo = pasync_handle->pa.u.a32.address_hi;
1348 
1349 			list_move(&pasync_handle->link, pbusy_list);
1350 
1351 			host_write_num++;
1352 			host_write_num = host_write_num % num_entries;
1353 		}
1354 
1355 		if (is_header) {
1356 			pasync_ctx->async_header.host_write_ptr =
1357 							host_write_num;
1358 			pasync_ctx->async_header.free_entries -= writables;
1359 			pasync_ctx->async_header.writables -= writables;
1360 			pasync_ctx->async_header.busy_entries += writables;
1361 		} else {
1362 			pasync_ctx->async_data.host_write_ptr = host_write_num;
1363 			pasync_ctx->async_data.free_entries -= writables;
1364 			pasync_ctx->async_data.writables -= writables;
1365 			pasync_ctx->async_data.busy_entries += writables;
1366 		}
1367 
1368 		doorbell |= ring_id & DB_DEF_PDU_RING_ID_MASK;
1369 		doorbell |= 1 << DB_DEF_PDU_REARM_SHIFT;
1370 		doorbell |= 0 << DB_DEF_PDU_EVENT_SHIFT;
1371 		doorbell |= (writables & DB_DEF_PDU_CQPROC_MASK)
1372 					<< DB_DEF_PDU_CQPROC_SHIFT;
1373 
1374 		iowrite32(doorbell, phba->db_va + DB_RXULP0_OFFSET);
1375 	}
1376 }
1377 
1378 static void hwi_flush_default_pdu_buffer(struct beiscsi_hba *phba,
1379 					 struct beiscsi_conn *beiscsi_conn,
1380 					 struct i_t_dpdu_cqe *pdpdu_cqe)
1381 {
1382 	struct hwi_controller *phwi_ctrlr;
1383 	struct hwi_async_pdu_context *pasync_ctx;
1384 	struct async_pdu_handle *pasync_handle = NULL;
1385 	unsigned int cq_index = -1;
1386 
1387 	phwi_ctrlr = phba->phwi_ctrlr;
1388 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1389 
1390 	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1391 					     pdpdu_cqe, &cq_index);
1392 	BUG_ON(pasync_handle->is_header != 0);
1393 	if (pasync_handle->consumed == 0)
1394 		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1395 					   cq_index);
1396 
1397 	hwi_free_async_msg(phba, pasync_handle->cri);
1398 	hwi_post_async_buffers(phba, pasync_handle->is_header);
1399 }
1400 
1401 static unsigned int
1402 hwi_fwd_async_msg(struct beiscsi_conn *beiscsi_conn,
1403 		  struct beiscsi_hba *phba,
1404 		  struct hwi_async_pdu_context *pasync_ctx, unsigned short cri)
1405 {
1406 	struct list_head *plist;
1407 	struct async_pdu_handle *pasync_handle;
1408 	void *phdr = NULL;
1409 	unsigned int hdr_len = 0, buf_len = 0;
1410 	unsigned int status, index = 0, offset = 0;
1411 	void *pfirst_buffer = NULL;
1412 	unsigned int num_buf = 0;
1413 
1414 	plist = &pasync_ctx->async_entry[cri].wait_queue.list;
1415 
1416 	list_for_each_entry(pasync_handle, plist, link) {
1417 		if (index == 0) {
1418 			phdr = pasync_handle->pbuffer;
1419 			hdr_len = pasync_handle->buffer_len;
1420 		} else {
1421 			buf_len = pasync_handle->buffer_len;
1422 			if (!num_buf) {
1423 				pfirst_buffer = pasync_handle->pbuffer;
1424 				num_buf++;
1425 			}
1426 			memcpy(pfirst_buffer + offset,
1427 			       pasync_handle->pbuffer, buf_len);
			offset += buf_len;
1429 		}
1430 		index++;
1431 	}
1432 
1433 	status = beiscsi_process_async_pdu(beiscsi_conn, phba,
1434 					   (beiscsi_conn->beiscsi_conn_cid -
1435 					    phba->fw_config.iscsi_cid_start),
1436 					    phdr, hdr_len, pfirst_buffer,
1437 					    buf_len);
1438 
1439 	if (status == 0)
1440 		hwi_free_async_msg(phba, cri);
1441 	return 0;
1442 }
1443 
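/*
 * Accumulate header and data buffers for an unsolicited PDU.  The
 * header records how many payload bytes to expect; data arrivals are
 * queued until that count is met, then the whole message is forwarded
 * through hwi_fwd_async_msg().
 */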
1444 static unsigned int
1445 hwi_gather_async_pdu(struct beiscsi_conn *beiscsi_conn,
1446 		     struct beiscsi_hba *phba,
1447 		     struct async_pdu_handle *pasync_handle)
1448 {
1449 	struct hwi_async_pdu_context *pasync_ctx;
1450 	struct hwi_controller *phwi_ctrlr;
1451 	unsigned int bytes_needed = 0, status = 0;
1452 	unsigned short cri = pasync_handle->cri;
1453 	struct pdu_base *ppdu;
1454 
1455 	phwi_ctrlr = phba->phwi_ctrlr;
1456 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1457 
1458 	list_del(&pasync_handle->link);
1459 	if (pasync_handle->is_header) {
1460 		pasync_ctx->async_header.busy_entries--;
1461 		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1462 			hwi_free_async_msg(phba, cri);
1463 			BUG();
1464 		}
1465 
1466 		pasync_ctx->async_entry[cri].wait_queue.bytes_received = 0;
1467 		pasync_ctx->async_entry[cri].wait_queue.hdr_received = 1;
1468 		pasync_ctx->async_entry[cri].wait_queue.hdr_len =
1469 				(unsigned short)pasync_handle->buffer_len;
1470 		list_add_tail(&pasync_handle->link,
1471 			      &pasync_ctx->async_entry[cri].wait_queue.list);
1472 
1473 		ppdu = pasync_handle->pbuffer;
1474 		bytes_needed = ((((ppdu->dw[offsetof(struct amap_pdu_base,
1475 			data_len_hi) / 32] & PDUBASE_DATALENHI_MASK) << 8) &
1476 			0xFFFF0000) | ((be16_to_cpu((ppdu->
1477 			dw[offsetof(struct amap_pdu_base, data_len_lo) / 32]
1478 			& PDUBASE_DATALENLO_MASK) >> 16)) & 0x0000FFFF));
1479 
1480 		if (status == 0) {
1481 			pasync_ctx->async_entry[cri].wait_queue.bytes_needed =
1482 			    bytes_needed;
1483 
1484 			if (bytes_needed == 0)
1485 				status = hwi_fwd_async_msg(beiscsi_conn, phba,
1486 							   pasync_ctx, cri);
1487 		}
1488 	} else {
1489 		pasync_ctx->async_data.busy_entries--;
1490 		if (pasync_ctx->async_entry[cri].wait_queue.hdr_received) {
1491 			list_add_tail(&pasync_handle->link,
1492 				      &pasync_ctx->async_entry[cri].wait_queue.
1493 				      list);
1494 			pasync_ctx->async_entry[cri].wait_queue.
1495 				bytes_received +=
1496 				(unsigned short)pasync_handle->buffer_len;
1497 
1498 			if (pasync_ctx->async_entry[cri].wait_queue.
1499 			    bytes_received >=
1500 			    pasync_ctx->async_entry[cri].wait_queue.
1501 			    bytes_needed)
1502 				status = hwi_fwd_async_msg(beiscsi_conn, phba,
1503 							   pasync_ctx, cri);
1504 		}
1505 	}
1506 	return status;
1507 }
1508 
1509 static void hwi_process_default_pdu_ring(struct beiscsi_conn *beiscsi_conn,
1510 					 struct beiscsi_hba *phba,
1511 					 struct i_t_dpdu_cqe *pdpdu_cqe)
1512 {
1513 	struct hwi_controller *phwi_ctrlr;
1514 	struct hwi_async_pdu_context *pasync_ctx;
1515 	struct async_pdu_handle *pasync_handle = NULL;
1516 	unsigned int cq_index = -1;
1517 
1518 	phwi_ctrlr = phba->phwi_ctrlr;
1519 	pasync_ctx = HWI_GET_ASYNC_PDU_CTX(phwi_ctrlr);
1520 	pasync_handle = hwi_get_async_handle(phba, beiscsi_conn, pasync_ctx,
1521 					     pdpdu_cqe, &cq_index);
1522 
1523 	if (pasync_handle->consumed == 0)
1524 		hwi_update_async_writables(pasync_ctx, pasync_handle->is_header,
1525 					   cq_index);
1526 	hwi_gather_async_pdu(beiscsi_conn, phba, pasync_handle);
1527 	hwi_post_async_buffers(phba, pasync_handle->is_header);
1528 }
1529 
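/*
 * Drain the MCC completion queue: async completions (such as link
 * state events) are handled in place, command completions wake their
 * waiters.  The doorbell is rung every 32 entries and re-armed once
 * the queue is empty.
 */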
static void beiscsi_process_mcc_isr(struct beiscsi_hba *phba)
1531 {
1532 	struct be_queue_info *mcc_cq;
1533 	struct  be_mcc_compl *mcc_compl;
1534 	unsigned int num_processed = 0;
1535 
1536 	mcc_cq = &phba->ctrl.mcc_obj.cq;
1537 	mcc_compl = queue_tail_node(mcc_cq);
1538 	mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1539 	while (mcc_compl->flags & CQE_FLAGS_VALID_MASK) {
1540 
1541 		if (num_processed >= 32) {
1542 			hwi_ring_cq_db(phba, mcc_cq->id,
1543 					num_processed, 0, 0);
1544 			num_processed = 0;
1545 		}
1546 		if (mcc_compl->flags & CQE_FLAGS_ASYNC_MASK) {
1547 			/* Interpret flags as an async trailer */
1548 			if (is_link_state_evt(mcc_compl->flags))
1549 				/* Interpret compl as a async link evt */
1550 				beiscsi_async_link_state_process(phba,
1551 				(struct be_async_event_link_state *) mcc_compl);
1552 			else
1553 				SE_DEBUG(DBG_LVL_1,
1554 					" Unsupported Async Event, flags"
1555 					" = 0x%08x \n", mcc_compl->flags);
1556 		} else if (mcc_compl->flags & CQE_FLAGS_COMPLETED_MASK) {
1557 			be_mcc_compl_process_isr(&phba->ctrl, mcc_compl);
1558 			atomic_dec(&phba->ctrl.mcc_obj.q.used);
1559 		}
1560 
1561 		mcc_compl->flags = 0;
1562 		queue_tail_inc(mcc_cq);
1563 		mcc_compl = queue_tail_node(mcc_cq);
1564 		mcc_compl->flags = le32_to_cpu(mcc_compl->flags);
1565 		num_processed++;
1566 	}
1567 
1568 	if (num_processed > 0)
1569 		hwi_ring_cq_db(phba, mcc_cq->id, num_processed, 1, 0);
1570 
1571 }
1572 
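/*
 * Service one I/O completion queue: solicited completions, driver
 * messages and unsolicited PDU notifications are dispatched, while CQ
 * error codes are either logged or escalate to a connection failure.
 * Returns the total number of CQEs consumed.
 */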
1573 static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
1574 {
1575 	struct be_queue_info *cq;
1576 	struct sol_cqe *sol;
1577 	struct dmsg_cqe *dmsg;
1578 	unsigned int num_processed = 0;
1579 	unsigned int tot_nump = 0;
1580 	struct beiscsi_conn *beiscsi_conn;
1581 	struct beiscsi_endpoint *beiscsi_ep;
1582 	struct iscsi_endpoint *ep;
1583 	struct beiscsi_hba *phba;
1584 
1585 	cq = pbe_eq->cq;
1586 	sol = queue_tail_node(cq);
1587 	phba = pbe_eq->phba;
1588 
1589 	while (sol->dw[offsetof(struct amap_sol_cqe, valid) / 32] &
1590 	       CQE_VALID_MASK) {
1591 		be_dws_le_to_cpu(sol, sizeof(struct sol_cqe));
1592 
1593 		ep = phba->ep_array[(u32) ((sol->
1594 				   dw[offsetof(struct amap_sol_cqe, cid) / 32] &
1595 				   SOL_CID_MASK) >> 6) -
1596 				   phba->fw_config.iscsi_cid_start];
1597 
1598 		beiscsi_ep = ep->dd_data;
1599 		beiscsi_conn = beiscsi_ep->conn;
1600 
1601 		if (num_processed >= 32) {
1602 			hwi_ring_cq_db(phba, cq->id,
1603 					num_processed, 0, 0);
1604 			tot_nump += num_processed;
1605 			num_processed = 0;
1606 		}
1607 
1608 		switch ((u32) sol->dw[offsetof(struct amap_sol_cqe, code) /
1609 			32] & CQE_CODE_MASK) {
1610 		case SOL_CMD_COMPLETE:
1611 			hwi_complete_cmd(beiscsi_conn, phba, sol);
1612 			break;
1613 		case DRIVERMSG_NOTIFY:
1614 			SE_DEBUG(DBG_LVL_8, "Received DRIVERMSG_NOTIFY \n");
1615 			dmsg = (struct dmsg_cqe *)sol;
1616 			hwi_complete_drvr_msgs(beiscsi_conn, phba, sol);
1617 			break;
1618 		case UNSOL_HDR_NOTIFY:
			SE_DEBUG(DBG_LVL_8, "Received UNSOL_HDR_NOTIFY\n");
1620 			hwi_process_default_pdu_ring(beiscsi_conn, phba,
1621 					     (struct i_t_dpdu_cqe *)sol);
1622 			break;
1623 		case UNSOL_DATA_NOTIFY:
1624 			SE_DEBUG(DBG_LVL_8, "Received UNSOL_DATA_NOTIFY\n");
1625 			hwi_process_default_pdu_ring(beiscsi_conn, phba,
1626 					     (struct i_t_dpdu_cqe *)sol);
1627 			break;
1628 		case CXN_INVALIDATE_INDEX_NOTIFY:
1629 		case CMD_INVALIDATED_NOTIFY:
1630 		case CXN_INVALIDATE_NOTIFY:
1631 			SE_DEBUG(DBG_LVL_1,
				 "Ignoring CQ Error notification for cmd/cxn"
				 " invalidate\n");
1634 			break;
1635 		case SOL_CMD_KILLED_DATA_DIGEST_ERR:
1636 		case CMD_KILLED_INVALID_STATSN_RCVD:
1637 		case CMD_KILLED_INVALID_R2T_RCVD:
1638 		case CMD_CXN_KILLED_LUN_INVALID:
1639 		case CMD_CXN_KILLED_ICD_INVALID:
1640 		case CMD_CXN_KILLED_ITT_INVALID:
1641 		case CMD_CXN_KILLED_SEQ_OUTOFORDER:
1642 		case CMD_CXN_KILLED_INVALID_DATASN_RCVD:
1643 			SE_DEBUG(DBG_LVL_1,
1644 				 "CQ Error notification for cmd.. "
1645 				 "code %d cid 0x%x\n",
1646 				 sol->dw[offsetof(struct amap_sol_cqe, code) /
1647 				 32] & CQE_CODE_MASK,
1648 				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1649 				 32] & SOL_CID_MASK));
1650 			break;
1651 		case UNSOL_DATA_DIGEST_ERROR_NOTIFY:
1652 			SE_DEBUG(DBG_LVL_1,
1653 				 "Digest error on def pdu ring, dropping..\n");
1654 			hwi_flush_default_pdu_buffer(phba, beiscsi_conn,
1655 					     (struct i_t_dpdu_cqe *) sol);
1656 			break;
1657 		case CXN_KILLED_PDU_SIZE_EXCEEDS_DSL:
1658 		case CXN_KILLED_BURST_LEN_MISMATCH:
1659 		case CXN_KILLED_AHS_RCVD:
1660 		case CXN_KILLED_HDR_DIGEST_ERR:
1661 		case CXN_KILLED_UNKNOWN_HDR:
1662 		case CXN_KILLED_STALE_ITT_TTT_RCVD:
1663 		case CXN_KILLED_INVALID_ITT_TTT_RCVD:
1664 		case CXN_KILLED_TIMED_OUT:
1665 		case CXN_KILLED_FIN_RCVD:
1666 		case CXN_KILLED_BAD_UNSOL_PDU_RCVD:
1667 		case CXN_KILLED_BAD_WRB_INDEX_ERROR:
1668 		case CXN_KILLED_OVER_RUN_RESIDUAL:
1669 		case CXN_KILLED_UNDER_RUN_RESIDUAL:
1670 		case CXN_KILLED_CMND_DATA_NOT_ON_SAME_CONN:
1671 			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset CID "
1672 				 "0x%x...\n",
1673 				 sol->dw[offsetof(struct amap_sol_cqe, code) /
1674 				 32] & CQE_CODE_MASK,
1675 				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1676 				 32] & CQE_CID_MASK));
1677 			iscsi_conn_failure(beiscsi_conn->conn,
1678 					   ISCSI_ERR_CONN_FAILED);
1679 			break;
1680 		case CXN_KILLED_RST_SENT:
1681 		case CXN_KILLED_RST_RCVD:
			SE_DEBUG(DBG_LVL_1, "CQ Error %d, reset "
				 "received/sent on CID 0x%x...\n",
1684 				 sol->dw[offsetof(struct amap_sol_cqe, code) /
1685 				 32] & CQE_CODE_MASK,
1686 				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1687 				 32] & CQE_CID_MASK));
1688 			iscsi_conn_failure(beiscsi_conn->conn,
1689 					   ISCSI_ERR_CONN_FAILED);
1690 			break;
1691 		default:
			SE_DEBUG(DBG_LVL_1, "CQ Error: invalid code %d "
1693 				 "received on CID 0x%x...\n",
1694 				 sol->dw[offsetof(struct amap_sol_cqe, code) /
1695 				 32] & CQE_CODE_MASK,
1696 				 (sol->dw[offsetof(struct amap_sol_cqe, cid) /
1697 				 32] & CQE_CID_MASK));
1698 			break;
1699 		}
1700 
1701 		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
1702 		queue_tail_inc(cq);
1703 		sol = queue_tail_node(cq);
1704 		num_processed++;
1705 	}
1706 
1707 	if (num_processed > 0) {
1708 		tot_nump += num_processed;
1709 		hwi_ring_cq_db(phba, cq->id, num_processed, 1, 0);
1710 	}
1711 	return tot_nump;
1712 }
1713 
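/**
 * beiscsi_process_all_cqs - workqueue handler for deferred CQ work
 * @work: work item embedded in the beiscsi_hba
 *
 * Processes whatever the interrupt path flagged as pending: the MCC
 * completion queue, the iSCSI completion queue, or both. With MSIX
 * enabled the MCC events arrive on the extra EQ at index num_cpus;
 * otherwise everything shares be_eq[0].
 */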
1714 void beiscsi_process_all_cqs(struct work_struct *work)
1715 {
1716 	unsigned long flags;
1717 	struct hwi_controller *phwi_ctrlr;
1718 	struct hwi_context_memory *phwi_context;
1719 	struct be_eq_obj *pbe_eq;
1720 	struct beiscsi_hba *phba =
1721 	    container_of(work, struct beiscsi_hba, work_cqs);
1722 
1723 	phwi_ctrlr = phba->phwi_ctrlr;
1724 	phwi_context = phwi_ctrlr->phwi_ctxt;
1725 	if (phba->msix_enabled)
1726 		pbe_eq = &phwi_context->be_eq[phba->num_cpus];
1727 	else
1728 		pbe_eq = &phwi_context->be_eq[0];
1729 
1730 	if (phba->todo_mcc_cq) {
1731 		spin_lock_irqsave(&phba->isr_lock, flags);
1732 		phba->todo_mcc_cq = 0;
1733 		spin_unlock_irqrestore(&phba->isr_lock, flags);
1734 		beiscsi_process_mcc_isr(phba);
1735 	}
1736 
1737 	if (phba->todo_cq) {
1738 		spin_lock_irqsave(&phba->isr_lock, flags);
1739 		phba->todo_cq = 0;
1740 		spin_unlock_irqrestore(&phba->isr_lock, flags);
1741 		beiscsi_process_cq(pbe_eq);
1742 	}
1743 }
1744 
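/**
 * be_iopoll - blk_iopoll callback for iSCSI completions
 * @iop: iopoll context embedded in the EQ object
 * @budget: maximum number of completions to take in this pass
 *
 * Drains the CQ and, once fewer entries than the budget were consumed,
 * completes the poll cycle and re-arms the event queue interrupt.
 */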
1745 static int be_iopoll(struct blk_iopoll *iop, int budget)
1746 {
	unsigned int ret;
1748 	struct beiscsi_hba *phba;
1749 	struct be_eq_obj *pbe_eq;
1750 
1751 	pbe_eq = container_of(iop, struct be_eq_obj, iopoll);
1752 	ret = beiscsi_process_cq(pbe_eq);
1753 	if (ret < budget) {
1754 		phba = pbe_eq->phba;
1755 		blk_iopoll_complete(iop);
1756 		SE_DEBUG(DBG_LVL_8, "rearm pbe_eq->q.id =%d\n", pbe_eq->q.id);
1757 		hwi_ring_eq_db(phba, pbe_eq->q.id, 0, 0, 1, 1);
1758 	}
1759 	return ret;
1760 }
1761 
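/**
 * hwi_write_sgl - program the WRB and SGL for a scatter-gather I/O
 * @pwrb: work request block to fill in
 * @sg: DMA-mapped scatterlist of the command
 * @num_sg: number of scatterlist entries
 * @io_task: task owning the BHS and the SGL fragment
 *
 * The first two data fragments are described inline in the WRB; the
 * full list is then written to the task's SGL starting at its third
 * entry, with the first SGL entry always describing the iSCSI basic
 * header segment.
 */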
1762 static void
1763 hwi_write_sgl(struct iscsi_wrb *pwrb, struct scatterlist *sg,
1764 	      unsigned int num_sg, struct beiscsi_io_task *io_task)
1765 {
1766 	struct iscsi_sge *psgl;
1767 	unsigned short sg_len, index;
1768 	unsigned int sge_len = 0;
1769 	unsigned long long addr;
1770 	struct scatterlist *l_sg;
1771 	unsigned int offset;
1772 
1773 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1774 				      io_task->bhs_pa.u.a32.address_lo);
1775 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1776 				      io_task->bhs_pa.u.a32.address_hi);
1777 
1778 	l_sg = sg;
1779 	for (index = 0; (index < num_sg) && (index < 2); index++,
1780 							 sg = sg_next(sg)) {
1781 		if (index == 0) {
1782 			sg_len = sg_dma_len(sg);
1783 			addr = (u64) sg_dma_address(sg);
1784 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1785 							(addr & 0xFFFFFFFF));
1786 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1787 							(addr >> 32));
1788 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1789 							sg_len);
1790 			sge_len = sg_len;
1791 		} else {
1792 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_r2t_offset,
1793 							pwrb, sge_len);
1794 			sg_len = sg_dma_len(sg);
1795 			addr = (u64) sg_dma_address(sg);
1796 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_lo, pwrb,
1797 							(addr & 0xFFFFFFFF));
1798 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_addr_hi, pwrb,
1799 							(addr >> 32));
1800 			AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_len, pwrb,
1801 							sg_len);
1802 		}
1803 	}
1804 	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1805 	memset(psgl, 0, sizeof(*psgl) * BE2_SGE);
1806 
1807 	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len - 2);
1808 
1809 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1810 			io_task->bhs_pa.u.a32.address_hi);
1811 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1812 			io_task->bhs_pa.u.a32.address_lo);
1813 
1814 	if (num_sg == 1) {
1815 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1816 								1);
1817 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1818 								0);
1819 	} else if (num_sg == 2) {
1820 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1821 								0);
1822 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1823 								1);
1824 	} else {
1825 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb,
1826 								0);
1827 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge1_last, pwrb,
1828 								0);
1829 	}
1830 	sg = l_sg;
1831 	psgl++;
1832 	psgl++;
1833 	offset = 0;
1834 	for (index = 0; index < num_sg; index++, sg = sg_next(sg), psgl++) {
1835 		sg_len = sg_dma_len(sg);
1836 		addr = (u64) sg_dma_address(sg);
1837 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1838 						(addr & 0xFFFFFFFF));
1839 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1840 						(addr >> 32));
1841 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, sg_len);
1842 		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, offset);
1843 		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1844 		offset += sg_len;
1845 	}
1846 	psgl--;
1847 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1848 }
1849 
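/**
 * hwi_write_buffer - program the WRB and SGL for a non-I/O PDU
 * @pwrb: work request block to fill in
 * @task: libiscsi task carrying the optional immediate-data buffer
 *
 * Used for login and other control-type PDUs. Any immediate data is
 * DMA-mapped and described both inline in the WRB and in the task SGL.
 */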
1850 static void hwi_write_buffer(struct iscsi_wrb *pwrb, struct iscsi_task *task)
1851 {
1852 	struct iscsi_sge *psgl;
1853 	unsigned long long addr;
1854 	struct beiscsi_io_task *io_task = task->dd_data;
1855 	struct beiscsi_conn *beiscsi_conn = io_task->conn;
1856 	struct beiscsi_hba *phba = beiscsi_conn->phba;
1857 
1858 	io_task->bhs_len = sizeof(struct be_nonio_bhs) - 2;
1859 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_lo, pwrb,
1860 				io_task->bhs_pa.u.a32.address_lo);
1861 	AMAP_SET_BITS(struct amap_iscsi_wrb, iscsi_bhs_addr_hi, pwrb,
1862 				io_task->bhs_pa.u.a32.address_hi);
1863 
1864 	if (task->data) {
1865 		if (task->data_count) {
1866 			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
			addr = (u64) pci_map_single(phba->pcidev,
						    task->data,
						    task->data_count,
						    PCI_DMA_TODEVICE);
1870 		} else {
1871 			AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1872 			addr = 0;
1873 		}
1874 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_lo, pwrb,
1875 						(addr & 0xFFFFFFFF));
1876 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_addr_hi, pwrb,
1877 						(addr >> 32));
1878 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_len, pwrb,
1879 						task->data_count);
1880 
1881 		AMAP_SET_BITS(struct amap_iscsi_wrb, sge0_last, pwrb, 1);
1882 	} else {
1883 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
1884 		addr = 0;
1885 	}
1886 
1887 	psgl = (struct iscsi_sge *)io_task->psgl_handle->pfrag;
1888 
1889 	AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, io_task->bhs_len);
1890 
1891 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
1892 		      io_task->bhs_pa.u.a32.address_hi);
1893 	AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
1894 		      io_task->bhs_pa.u.a32.address_lo);
1895 	if (task->data) {
1896 		psgl++;
1897 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl, 0);
1898 		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl, 0);
1899 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0);
1900 		AMAP_SET_BITS(struct amap_iscsi_sge, sge_offset, psgl, 0);
1901 		AMAP_SET_BITS(struct amap_iscsi_sge, rsvd0, psgl, 0);
1902 		AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 0);
1903 
		psgl++;
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, psgl,
			      (addr & 0xFFFFFFFF));
		AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, psgl,
			      (addr >> 32));
1911 		AMAP_SET_BITS(struct amap_iscsi_sge, len, psgl, 0x106);
1912 	}
1913 	AMAP_SET_BITS(struct amap_iscsi_sge, last_sge, psgl, 1);
1914 }
1915 
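/**
 * beiscsi_find_mem_req - compute per-region memory requirements
 * @phba: adapter instance
 *
 * Fills phba->mem_req[] with the size in bytes needed for each memory
 * region (WRBs, WRB handles, SGL handles, SGEs, async PDU buffers,
 * rings and handles), derived from the controller parameters.
 */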
1916 static void beiscsi_find_mem_req(struct beiscsi_hba *phba)
1917 {
1918 	unsigned int num_cq_pages, num_async_pdu_buf_pages;
1919 	unsigned int num_async_pdu_data_pages, wrb_sz_per_cxn;
1920 	unsigned int num_async_pdu_buf_sgl_pages, num_async_pdu_data_sgl_pages;
1921 
	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
				      sizeof(struct sol_cqe));
	num_async_pdu_buf_pages =
			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
				       phba->params.defpdu_hdr_sz);
	num_async_pdu_buf_sgl_pages =
			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
				       sizeof(struct phys_addr));
	num_async_pdu_data_pages =
			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
				       phba->params.defpdu_data_sz);
	num_async_pdu_data_sgl_pages =
			PAGES_REQUIRED(phba->params.asyncpdus_per_ctrl *
				       sizeof(struct phys_addr));
1936 
1937 	phba->params.hwi_ws_sz = sizeof(struct hwi_controller);
1938 
1939 	phba->mem_req[ISCSI_MEM_GLOBAL_HEADER] = 2 *
1940 						 BE_ISCSI_PDU_HEADER_SIZE;
1941 	phba->mem_req[HWI_MEM_ADDN_CONTEXT] =
1942 					    sizeof(struct hwi_context_memory);
1943 
1945 	phba->mem_req[HWI_MEM_WRB] = sizeof(struct iscsi_wrb)
1946 	    * (phba->params.wrbs_per_cxn)
1947 	    * phba->params.cxns_per_ctrl;
1948 	wrb_sz_per_cxn =  sizeof(struct wrb_handle) *
1949 				 (phba->params.wrbs_per_cxn);
1950 	phba->mem_req[HWI_MEM_WRBH] = roundup_pow_of_two((wrb_sz_per_cxn) *
1951 				phba->params.cxns_per_ctrl);
1952 
1953 	phba->mem_req[HWI_MEM_SGLH] = sizeof(struct sgl_handle) *
1954 		phba->params.icds_per_ctrl;
1955 	phba->mem_req[HWI_MEM_SGE] = sizeof(struct iscsi_sge) *
1956 		phba->params.num_sge_per_io * phba->params.icds_per_ctrl;
1957 
1958 	phba->mem_req[HWI_MEM_ASYNC_HEADER_BUF] =
1959 		num_async_pdu_buf_pages * PAGE_SIZE;
1960 	phba->mem_req[HWI_MEM_ASYNC_DATA_BUF] =
1961 		num_async_pdu_data_pages * PAGE_SIZE;
1962 	phba->mem_req[HWI_MEM_ASYNC_HEADER_RING] =
1963 		num_async_pdu_buf_sgl_pages * PAGE_SIZE;
1964 	phba->mem_req[HWI_MEM_ASYNC_DATA_RING] =
1965 		num_async_pdu_data_sgl_pages * PAGE_SIZE;
1966 	phba->mem_req[HWI_MEM_ASYNC_HEADER_HANDLE] =
1967 		phba->params.asyncpdus_per_ctrl *
1968 		sizeof(struct async_pdu_handle);
1969 	phba->mem_req[HWI_MEM_ASYNC_DATA_HANDLE] =
1970 		phba->params.asyncpdus_per_ctrl *
1971 		sizeof(struct async_pdu_handle);
1972 	phba->mem_req[HWI_MEM_ASYNC_PDU_CONTEXT] =
1973 		sizeof(struct hwi_async_pdu_context) +
1974 		(phba->params.cxns_per_ctrl * sizeof(struct hwi_async_entry));
1975 }
1976 
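/**
 * beiscsi_alloc_mem - allocate the driver's DMA memory regions
 * @phba: adapter instance
 *
 * Each region sized in phba->mem_req[] is carved out of up to
 * BEISCSI_MAX_FRAGS_INIT coherent DMA chunks. When a chunk allocation
 * fails, the chunk size is rounded down to a power of two (or halved)
 * and retried until BE_MIN_MEM_SIZE is reached. On failure everything
 * allocated so far is released.
 */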
1977 static int beiscsi_alloc_mem(struct beiscsi_hba *phba)
1978 {
1979 	struct be_mem_descriptor *mem_descr;
1980 	dma_addr_t bus_add;
1981 	struct mem_array *mem_arr, *mem_arr_orig;
1982 	unsigned int i, j, alloc_size, curr_alloc_size;
1983 
1984 	phba->phwi_ctrlr = kmalloc(phba->params.hwi_ws_sz, GFP_KERNEL);
1985 	if (!phba->phwi_ctrlr)
1986 		return -ENOMEM;
1987 
1988 	phba->init_mem = kcalloc(SE_MEM_MAX, sizeof(*mem_descr),
1989 				 GFP_KERNEL);
1990 	if (!phba->init_mem) {
1991 		kfree(phba->phwi_ctrlr);
1992 		return -ENOMEM;
1993 	}
1994 
1995 	mem_arr_orig = kmalloc(sizeof(*mem_arr_orig) * BEISCSI_MAX_FRAGS_INIT,
1996 			       GFP_KERNEL);
1997 	if (!mem_arr_orig) {
1998 		kfree(phba->init_mem);
1999 		kfree(phba->phwi_ctrlr);
2000 		return -ENOMEM;
2001 	}
2002 
2003 	mem_descr = phba->init_mem;
2004 	for (i = 0; i < SE_MEM_MAX; i++) {
2005 		j = 0;
2006 		mem_arr = mem_arr_orig;
2007 		alloc_size = phba->mem_req[i];
2008 		memset(mem_arr, 0, sizeof(struct mem_array) *
2009 		       BEISCSI_MAX_FRAGS_INIT);
2010 		curr_alloc_size = min(be_max_phys_size * 1024, alloc_size);
2011 		do {
2012 			mem_arr->virtual_address = pci_alloc_consistent(
2013 							phba->pcidev,
2014 							curr_alloc_size,
2015 							&bus_add);
2016 			if (!mem_arr->virtual_address) {
2017 				if (curr_alloc_size <= BE_MIN_MEM_SIZE)
2018 					goto free_mem;
2019 				if (curr_alloc_size -
2020 					rounddown_pow_of_two(curr_alloc_size))
2021 					curr_alloc_size = rounddown_pow_of_two
2022 							     (curr_alloc_size);
2023 				else
2024 					curr_alloc_size = curr_alloc_size / 2;
2025 			} else {
2026 				mem_arr->bus_address.u.
2027 				    a64.address = (__u64) bus_add;
2028 				mem_arr->size = curr_alloc_size;
2029 				alloc_size -= curr_alloc_size;
2030 				curr_alloc_size = min(be_max_phys_size *
2031 						      1024, alloc_size);
2032 				j++;
2033 				mem_arr++;
2034 			}
2035 		} while (alloc_size);
2036 		mem_descr->num_elements = j;
2037 		mem_descr->size_in_bytes = phba->mem_req[i];
2038 		mem_descr->mem_array = kmalloc(sizeof(*mem_arr) * j,
2039 					       GFP_KERNEL);
2040 		if (!mem_descr->mem_array)
2041 			goto free_mem;
2042 
2043 		memcpy(mem_descr->mem_array, mem_arr_orig,
2044 		       sizeof(struct mem_array) * j);
2045 		mem_descr++;
2046 	}
2047 	kfree(mem_arr_orig);
2048 	return 0;
free_mem:
	/*
	 * The chunks of the descriptor that failed are still recorded
	 * only in mem_arr_orig; free those first, then unwind the
	 * descriptors that were fully set up.
	 */
	for (; j > 0; j--)
		pci_free_consistent(phba->pcidev,
				    mem_arr_orig[j - 1].size,
				    mem_arr_orig[j - 1].virtual_address,
				    mem_arr_orig[j - 1].
				    bus_address.u.a64.address);
	while (i) {
		i--;
		mem_descr--;
		for (j = mem_descr->num_elements; j > 0; j--)
			pci_free_consistent(phba->pcidev,
					    mem_descr->mem_array[j - 1].size,
					    mem_descr->mem_array[j - 1].
					    virtual_address,
					    mem_descr->mem_array[j - 1].
					    bus_address.u.a64.address);
		kfree(mem_descr->mem_array);
	}
2066 	kfree(mem_arr_orig);
2067 	kfree(phba->init_mem);
2068 	kfree(phba->phwi_ctrlr);
2069 	return -ENOMEM;
2070 }
2071 
2072 static int beiscsi_get_memory(struct beiscsi_hba *phba)
2073 {
2074 	beiscsi_find_mem_req(phba);
2075 	return beiscsi_alloc_mem(phba);
2076 }
2077 
2078 static void iscsi_init_global_templates(struct beiscsi_hba *phba)
2079 {
2080 	struct pdu_data_out *pdata_out;
2081 	struct pdu_nop_out *pnop_out;
2082 	struct be_mem_descriptor *mem_descr;
2083 
2084 	mem_descr = phba->init_mem;
2085 	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
2086 	pdata_out =
2087 	    (struct pdu_data_out *)mem_descr->mem_array[0].virtual_address;
2088 	memset(pdata_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2089 
2090 	AMAP_SET_BITS(struct amap_pdu_data_out, opcode, pdata_out,
2091 		      IIOC_SCSI_DATA);
2092 
2093 	pnop_out =
2094 	    (struct pdu_nop_out *)((unsigned char *)mem_descr->mem_array[0].
2095 				   virtual_address + BE_ISCSI_PDU_HEADER_SIZE);
2096 
2097 	memset(pnop_out, 0, BE_ISCSI_PDU_HEADER_SIZE);
2098 	AMAP_SET_BITS(struct amap_pdu_nop_out, ttt, pnop_out, 0xFFFFFFFF);
2099 	AMAP_SET_BITS(struct amap_pdu_nop_out, f_bit, pnop_out, 1);
2100 	AMAP_SET_BITS(struct amap_pdu_nop_out, i_bit, pnop_out, 0);
2101 }
2102 
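/**
 * beiscsi_init_wrb_handle - bind WRB handles to WRB ring memory
 * @phba: adapter instance
 *
 * Builds the per-connection WRB handle arrays out of the HWI_MEM_WRBH
 * region, then points each handle at its iscsi_wrb slot inside the
 * HWI_MEM_WRB region. Only the even wrb_context slots are populated,
 * mirroring the CID numbering, which advances in steps of two.
 */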
2103 static void beiscsi_init_wrb_handle(struct beiscsi_hba *phba)
2104 {
2105 	struct be_mem_descriptor *mem_descr_wrbh, *mem_descr_wrb;
2106 	struct wrb_handle *pwrb_handle;
2107 	struct hwi_controller *phwi_ctrlr;
2108 	struct hwi_wrb_context *pwrb_context;
2109 	struct iscsi_wrb *pwrb;
2110 	unsigned int num_cxn_wrbh;
2111 	unsigned int num_cxn_wrb, j, idx, index;
2112 
2113 	mem_descr_wrbh = phba->init_mem;
2114 	mem_descr_wrbh += HWI_MEM_WRBH;
2115 
2116 	mem_descr_wrb = phba->init_mem;
2117 	mem_descr_wrb += HWI_MEM_WRB;
2118 
2119 	idx = 0;
2120 	pwrb_handle = mem_descr_wrbh->mem_array[idx].virtual_address;
2121 	num_cxn_wrbh = ((mem_descr_wrbh->mem_array[idx].size) /
2122 			((sizeof(struct wrb_handle)) *
2123 			 phba->params.wrbs_per_cxn));
2124 	phwi_ctrlr = phba->phwi_ctrlr;
2125 
2126 	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2127 		pwrb_context = &phwi_ctrlr->wrb_context[index];
2128 		pwrb_context->pwrb_handle_base =
2129 				kzalloc(sizeof(struct wrb_handle *) *
2130 					phba->params.wrbs_per_cxn, GFP_KERNEL);
2131 		pwrb_context->pwrb_handle_basestd =
2132 				kzalloc(sizeof(struct wrb_handle *) *
2133 					phba->params.wrbs_per_cxn, GFP_KERNEL);
2134 		if (num_cxn_wrbh) {
2135 			pwrb_context->alloc_index = 0;
2136 			pwrb_context->wrb_handles_available = 0;
2137 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2138 				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2139 				pwrb_context->pwrb_handle_basestd[j] =
2140 								pwrb_handle;
2141 				pwrb_context->wrb_handles_available++;
2142 				pwrb_handle->wrb_index = j;
2143 				pwrb_handle++;
2144 			}
2145 			pwrb_context->free_index = 0;
2146 			num_cxn_wrbh--;
2147 		} else {
2148 			idx++;
2149 			pwrb_handle =
2150 			    mem_descr_wrbh->mem_array[idx].virtual_address;
2151 			num_cxn_wrbh =
2152 			    ((mem_descr_wrbh->mem_array[idx].size) /
2153 			     ((sizeof(struct wrb_handle)) *
2154 			      phba->params.wrbs_per_cxn));
			pwrb_context->alloc_index = 0;
			pwrb_context->wrb_handles_available = 0;
			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2157 				pwrb_context->pwrb_handle_base[j] = pwrb_handle;
2158 				pwrb_context->pwrb_handle_basestd[j] =
2159 				    pwrb_handle;
2160 				pwrb_context->wrb_handles_available++;
2161 				pwrb_handle->wrb_index = j;
2162 				pwrb_handle++;
2163 			}
2164 			pwrb_context->free_index = 0;
2165 			num_cxn_wrbh--;
2166 		}
2167 	}
2168 	idx = 0;
2169 	pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2170 	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2171 		      ((sizeof(struct iscsi_wrb) *
2172 			phba->params.wrbs_per_cxn));
2173 	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2174 		pwrb_context = &phwi_ctrlr->wrb_context[index];
2175 		if (num_cxn_wrb) {
2176 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2177 				pwrb_handle = pwrb_context->pwrb_handle_base[j];
2178 				pwrb_handle->pwrb = pwrb;
2179 				pwrb++;
2180 			}
2181 			num_cxn_wrb--;
2182 		} else {
2183 			idx++;
2184 			pwrb = mem_descr_wrb->mem_array[idx].virtual_address;
2185 			num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
2186 				      ((sizeof(struct iscsi_wrb) *
2187 					phba->params.wrbs_per_cxn));
2188 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
2189 				pwrb_handle = pwrb_context->pwrb_handle_base[j];
2190 				pwrb_handle->pwrb = pwrb;
2191 				pwrb++;
2192 			}
2193 			num_cxn_wrb--;
2194 		}
2195 	}
2196 }
2197 
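/**
 * hwi_init_async_pdu_ctx - set up the default PDU context
 * @phba: adapter instance
 *
 * Lays the async PDU context over its reserved memory region and wires
 * up the header and data sides: buffer pools, rings and per-buffer
 * handles, with every handle initially placed on its free list.
 */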
2198 static void hwi_init_async_pdu_ctx(struct beiscsi_hba *phba)
2199 {
2200 	struct hwi_controller *phwi_ctrlr;
2201 	struct hba_parameters *p = &phba->params;
2202 	struct hwi_async_pdu_context *pasync_ctx;
2203 	struct async_pdu_handle *pasync_header_h, *pasync_data_h;
2204 	unsigned int index;
2205 	struct be_mem_descriptor *mem_descr;
2206 
2207 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2208 	mem_descr += HWI_MEM_ASYNC_PDU_CONTEXT;
2209 
2210 	phwi_ctrlr = phba->phwi_ctrlr;
2211 	phwi_ctrlr->phwi_ctxt->pasync_ctx = (struct hwi_async_pdu_context *)
2212 				mem_descr->mem_array[0].virtual_address;
2213 	pasync_ctx = phwi_ctrlr->phwi_ctxt->pasync_ctx;
2214 	memset(pasync_ctx, 0, sizeof(*pasync_ctx));
2215 
2216 	pasync_ctx->async_header.num_entries = p->asyncpdus_per_ctrl;
2217 	pasync_ctx->async_header.buffer_size = p->defpdu_hdr_sz;
2218 	pasync_ctx->async_data.buffer_size = p->defpdu_data_sz;
2219 	pasync_ctx->async_data.num_entries = p->asyncpdus_per_ctrl;
2220 
2221 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2222 	mem_descr += HWI_MEM_ASYNC_HEADER_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_BUF "
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else {
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	}
2230 
2231 	pasync_ctx->async_header.va_base =
2232 			mem_descr->mem_array[0].virtual_address;
2233 
2234 	pasync_ctx->async_header.pa_base.u.a64.address =
2235 			mem_descr->mem_array[0].bus_address.u.a64.address;
2236 
2237 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2238 	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_RING "
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else {
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	}
2246 	pasync_ctx->async_header.ring_base =
2247 			mem_descr->mem_array[0].virtual_address;
2248 
2249 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2250 	mem_descr += HWI_MEM_ASYNC_HEADER_HANDLE;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_HEADER_HANDLE "
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else {
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	}
2258 
2259 	pasync_ctx->async_header.handle_base =
2260 			mem_descr->mem_array[0].virtual_address;
2261 	pasync_ctx->async_header.writables = 0;
2262 	INIT_LIST_HEAD(&pasync_ctx->async_header.free_list);
2263 
2264 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2265 	mem_descr += HWI_MEM_ASYNC_DATA_BUF;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_BUF "
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else {
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	}
2273 	pasync_ctx->async_data.va_base =
2274 			mem_descr->mem_array[0].virtual_address;
2275 	pasync_ctx->async_data.pa_base.u.a64.address =
2276 			mem_descr->mem_array[0].bus_address.u.a64.address;
2277 
2278 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2279 	mem_descr += HWI_MEM_ASYNC_DATA_RING;
	if (mem_descr->mem_array[0].virtual_address) {
		SE_DEBUG(DBG_LVL_8,
			 "hwi_init_async_pdu_ctx HWI_MEM_ASYNC_DATA_RING "
			 "va=%p\n", mem_descr->mem_array[0].virtual_address);
	} else {
		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
	}
2287 
2288 	pasync_ctx->async_data.ring_base =
2289 			mem_descr->mem_array[0].virtual_address;
2290 
2291 	mem_descr = (struct be_mem_descriptor *)phba->init_mem;
2292 	mem_descr += HWI_MEM_ASYNC_DATA_HANDLE;
2293 	if (!mem_descr->mem_array[0].virtual_address)
2294 		shost_printk(KERN_WARNING, phba->shost,
			     "No Virtual address\n");
2296 
2297 	pasync_ctx->async_data.handle_base =
2298 			mem_descr->mem_array[0].virtual_address;
2299 	pasync_ctx->async_data.writables = 0;
2300 	INIT_LIST_HEAD(&pasync_ctx->async_data.free_list);
2301 
2302 	pasync_header_h =
2303 		(struct async_pdu_handle *)pasync_ctx->async_header.handle_base;
2304 	pasync_data_h =
2305 		(struct async_pdu_handle *)pasync_ctx->async_data.handle_base;
2306 
2307 	for (index = 0; index < p->asyncpdus_per_ctrl; index++) {
2308 		pasync_header_h->cri = -1;
2309 		pasync_header_h->index = (char)index;
2310 		INIT_LIST_HEAD(&pasync_header_h->link);
2311 		pasync_header_h->pbuffer =
2312 			(void *)((unsigned long)
2313 			(pasync_ctx->async_header.va_base) +
2314 			(p->defpdu_hdr_sz * index));
2315 
2316 		pasync_header_h->pa.u.a64.address =
2317 			pasync_ctx->async_header.pa_base.u.a64.address +
2318 			(p->defpdu_hdr_sz * index);
2319 
2320 		list_add_tail(&pasync_header_h->link,
2321 				&pasync_ctx->async_header.free_list);
2322 		pasync_header_h++;
2323 		pasync_ctx->async_header.free_entries++;
2324 		pasync_ctx->async_header.writables++;
2325 
2326 		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].wait_queue.list);
2327 		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].
2328 			       header_busy_list);
2329 		pasync_data_h->cri = -1;
2330 		pasync_data_h->index = (char)index;
2331 		INIT_LIST_HEAD(&pasync_data_h->link);
2332 		pasync_data_h->pbuffer =
2333 			(void *)((unsigned long)
2334 			(pasync_ctx->async_data.va_base) +
2335 			(p->defpdu_data_sz * index));
2336 
2337 		pasync_data_h->pa.u.a64.address =
2338 		    pasync_ctx->async_data.pa_base.u.a64.address +
2339 		    (p->defpdu_data_sz * index);
2340 
2341 		list_add_tail(&pasync_data_h->link,
2342 			      &pasync_ctx->async_data.free_list);
2343 		pasync_data_h++;
2344 		pasync_ctx->async_data.free_entries++;
2345 		pasync_ctx->async_data.writables++;
2346 
2347 		INIT_LIST_HEAD(&pasync_ctx->async_entry[index].data_busy_list);
2348 	}
2349 
2350 	pasync_ctx->async_header.host_write_ptr = 0;
2351 	pasync_ctx->async_header.ep_read_ptr = -1;
2352 	pasync_ctx->async_data.host_write_ptr = 0;
2353 	pasync_ctx->async_data.ep_read_ptr = -1;
2354 }
2355 
2356 static int
2357 be_sgl_create_contiguous(void *virtual_address,
2358 			 u64 physical_address, u32 length,
2359 			 struct be_dma_mem *sgl)
2360 {
2361 	WARN_ON(!virtual_address);
2362 	WARN_ON(!physical_address);
	WARN_ON(!length);
2364 	WARN_ON(!sgl);
2365 
2366 	sgl->va = virtual_address;
2367 	sgl->dma = physical_address;
2368 	sgl->size = length;
2369 
2370 	return 0;
2371 }
2372 
2373 static void be_sgl_destroy_contiguous(struct be_dma_mem *sgl)
2374 {
2375 	memset(sgl, 0, sizeof(*sgl));
2376 }
2377 
2378 static void
2379 hwi_build_be_sgl_arr(struct beiscsi_hba *phba,
2380 		     struct mem_array *pmem, struct be_dma_mem *sgl)
2381 {
2382 	if (sgl->va)
2383 		be_sgl_destroy_contiguous(sgl);
2384 
2385 	be_sgl_create_contiguous(pmem->virtual_address,
2386 				 pmem->bus_address.u.a64.address,
2387 				 pmem->size, sgl);
2388 }
2389 
2390 static void
2391 hwi_build_be_sgl_by_offset(struct beiscsi_hba *phba,
2392 			   struct mem_array *pmem, struct be_dma_mem *sgl)
2393 {
2394 	if (sgl->va)
2395 		be_sgl_destroy_contiguous(sgl);
2396 
2397 	be_sgl_create_contiguous((unsigned char *)pmem->virtual_address,
2398 				 pmem->bus_address.u.a64.address,
2399 				 pmem->size, sgl);
2400 }
2401 
2402 static int be_fill_queue(struct be_queue_info *q,
2403 		u16 len, u16 entry_size, void *vaddress)
2404 {
2405 	struct be_dma_mem *mem = &q->dma_mem;
2406 
2407 	memset(q, 0, sizeof(*q));
2408 	q->len = len;
2409 	q->entry_size = entry_size;
2410 	mem->size = len * entry_size;
2411 	mem->va = vaddress;
2412 	if (!mem->va)
2413 		return -ENOMEM;
2414 	memset(mem->va, 0, mem->size);
2415 	return 0;
2416 }
2417 
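/**
 * beiscsi_create_eqs - allocate and create the event queues
 * @phba: adapter instance
 * @phwi_context: hardware context holding the EQ objects
 *
 * Creates one EQ per CPU, plus an extra EQ for MCC events when MSIX
 * is enabled. On failure any EQ memory already allocated is freed.
 */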
2418 static int beiscsi_create_eqs(struct beiscsi_hba *phba,
2419 			     struct hwi_context_memory *phwi_context)
2420 {
2421 	unsigned int i, num_eq_pages;
2422 	int ret, eq_for_mcc;
2423 	struct be_queue_info *eq;
2424 	struct be_dma_mem *mem;
2425 	void *eq_vaddress;
2426 	dma_addr_t paddr;
2427 
	num_eq_pages = PAGES_REQUIRED(phba->params.num_eq_entries *
				      sizeof(struct be_eq_entry));
2430 
2431 	if (phba->msix_enabled)
2432 		eq_for_mcc = 1;
2433 	else
2434 		eq_for_mcc = 0;
2435 	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2436 		eq = &phwi_context->be_eq[i].q;
2437 		mem = &eq->dma_mem;
2438 		phwi_context->be_eq[i].phba = phba;
2439 		eq_vaddress = pci_alloc_consistent(phba->pcidev,
2440 						     num_eq_pages * PAGE_SIZE,
2441 						     &paddr);
		if (!eq_vaddress) {
			ret = -ENOMEM;
			goto create_eq_error;
		}
2444 
2445 		mem->va = eq_vaddress;
2446 		ret = be_fill_queue(eq, phba->params.num_eq_entries,
2447 				    sizeof(struct be_eq_entry), eq_vaddress);
2448 		if (ret) {
2449 			shost_printk(KERN_ERR, phba->shost,
				     "be_fill_queue Failed for EQ\n");
2451 			goto create_eq_error;
2452 		}
2453 
2454 		mem->dma = paddr;
2455 		ret = beiscsi_cmd_eq_create(&phba->ctrl, eq,
2456 					    phwi_context->cur_eqd);
2457 		if (ret) {
2458 			shost_printk(KERN_ERR, phba->shost,
				     "beiscsi_cmd_eq_create "
				     "Failed for EQ\n");
2461 			goto create_eq_error;
2462 		}
2463 		SE_DEBUG(DBG_LVL_8, "eqid = %d\n", phwi_context->be_eq[i].q.id);
2464 	}
2465 	return 0;
2466 create_eq_error:
	for (i = 0; i < (phba->num_cpus + eq_for_mcc); i++) {
2468 		eq = &phwi_context->be_eq[i].q;
2469 		mem = &eq->dma_mem;
2470 		if (mem->va)
2471 			pci_free_consistent(phba->pcidev, num_eq_pages
2472 					    * PAGE_SIZE,
2473 					    mem->va, mem->dma);
2474 	}
2475 	return ret;
2476 }
2477 
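/**
 * beiscsi_create_cqs - allocate and create the iSCSI completion queues
 * @phba: adapter instance
 * @phwi_context: hardware context holding the CQ objects
 *
 * Creates one CQ per CPU and binds each CQ to its event queue.
 */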
2478 static int beiscsi_create_cqs(struct beiscsi_hba *phba,
2479 			     struct hwi_context_memory *phwi_context)
2480 {
2481 	unsigned int i, num_cq_pages;
2482 	int ret;
2483 	struct be_queue_info *cq, *eq;
2484 	struct be_dma_mem *mem;
2485 	struct be_eq_obj *pbe_eq;
2486 	void *cq_vaddress;
2487 	dma_addr_t paddr;
2488 
	num_cq_pages = PAGES_REQUIRED(phba->params.num_cq_entries *
				      sizeof(struct sol_cqe));
2491 
2492 	for (i = 0; i < phba->num_cpus; i++) {
2493 		cq = &phwi_context->be_cq[i];
2494 		eq = &phwi_context->be_eq[i].q;
2495 		pbe_eq = &phwi_context->be_eq[i];
2496 		pbe_eq->cq = cq;
2497 		pbe_eq->phba = phba;
2498 		mem = &cq->dma_mem;
2499 		cq_vaddress = pci_alloc_consistent(phba->pcidev,
2500 						     num_cq_pages * PAGE_SIZE,
2501 						     &paddr);
		if (!cq_vaddress) {
			ret = -ENOMEM;
			goto create_cq_error;
		}
2504 		ret = be_fill_queue(cq, phba->params.num_cq_entries,
2505 				    sizeof(struct sol_cqe), cq_vaddress);
2506 		if (ret) {
2507 			shost_printk(KERN_ERR, phba->shost,
				     "be_fill_queue Failed for ISCSI CQ\n");
2509 			goto create_cq_error;
2510 		}
2511 
2512 		mem->dma = paddr;
2513 		ret = beiscsi_cmd_cq_create(&phba->ctrl, cq, eq, false,
2514 					    false, 0);
2515 		if (ret) {
2516 			shost_printk(KERN_ERR, phba->shost,
				     "beiscsi_cmd_cq_create "
				     "Failed for ISCSI CQ\n");
2519 			goto create_cq_error;
2520 		}
2521 		SE_DEBUG(DBG_LVL_8, "iscsi cq_id is %d for eq_id %d\n",
2522 						 cq->id, eq->id);
2523 		SE_DEBUG(DBG_LVL_8, "ISCSI CQ CREATED\n");
2524 	}
2525 	return 0;
2526 
2527 create_cq_error:
2528 	for (i = 0; i < phba->num_cpus; i++) {
2529 		cq = &phwi_context->be_cq[i];
2530 		mem = &cq->dma_mem;
2531 		if (mem->va)
2532 			pci_free_consistent(phba->pcidev, num_cq_pages
2533 					    * PAGE_SIZE,
2534 					    mem->va, mem->dma);
2535 	}
2536 	return ret;
2538 }
2539 
2540 static int
2541 beiscsi_create_def_hdr(struct beiscsi_hba *phba,
2542 		       struct hwi_context_memory *phwi_context,
2543 		       struct hwi_controller *phwi_ctrlr,
2544 		       unsigned int def_pdu_ring_sz)
2545 {
2546 	unsigned int idx;
2547 	int ret;
2548 	struct be_queue_info *dq, *cq;
2549 	struct be_dma_mem *mem;
2550 	struct be_mem_descriptor *mem_descr;
2551 	void *dq_vaddress;
2552 
2553 	idx = 0;
2554 	dq = &phwi_context->be_def_hdrq;
2555 	cq = &phwi_context->be_cq[0];
2556 	mem = &dq->dma_mem;
2557 	mem_descr = phba->init_mem;
2558 	mem_descr += HWI_MEM_ASYNC_HEADER_RING;
2559 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2560 	ret = be_fill_queue(dq, mem_descr->mem_array[0].size /
2561 			    sizeof(struct phys_addr),
2562 			    sizeof(struct phys_addr), dq_vaddress);
2563 	if (ret) {
2564 		shost_printk(KERN_ERR, phba->shost,
2565 			     "be_fill_queue Failed for DEF PDU HDR\n");
2566 		return ret;
2567 	}
2568 	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2569 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dq,
2570 					      def_pdu_ring_sz,
2571 					      phba->params.defpdu_hdr_sz);
2572 	if (ret) {
2573 		shost_printk(KERN_ERR, phba->shost,
2574 			     "be_cmd_create_default_pdu_queue Failed DEFHDR\n");
2575 		return ret;
2576 	}
2577 	phwi_ctrlr->default_pdu_hdr.id = phwi_context->be_def_hdrq.id;
2578 	SE_DEBUG(DBG_LVL_8, "iscsi def pdu id is %d\n",
2579 		 phwi_context->be_def_hdrq.id);
2580 	hwi_post_async_buffers(phba, 1);
2581 	return 0;
2582 }
2583 
2584 static int
2585 beiscsi_create_def_data(struct beiscsi_hba *phba,
2586 			struct hwi_context_memory *phwi_context,
2587 			struct hwi_controller *phwi_ctrlr,
2588 			unsigned int def_pdu_ring_sz)
2589 {
2590 	unsigned int idx;
2591 	int ret;
2592 	struct be_queue_info *dataq, *cq;
2593 	struct be_dma_mem *mem;
2594 	struct be_mem_descriptor *mem_descr;
2595 	void *dq_vaddress;
2596 
2597 	idx = 0;
2598 	dataq = &phwi_context->be_def_dataq;
2599 	cq = &phwi_context->be_cq[0];
2600 	mem = &dataq->dma_mem;
2601 	mem_descr = phba->init_mem;
2602 	mem_descr += HWI_MEM_ASYNC_DATA_RING;
2603 	dq_vaddress = mem_descr->mem_array[idx].virtual_address;
2604 	ret = be_fill_queue(dataq, mem_descr->mem_array[0].size /
2605 			    sizeof(struct phys_addr),
2606 			    sizeof(struct phys_addr), dq_vaddress);
2607 	if (ret) {
2608 		shost_printk(KERN_ERR, phba->shost,
2609 			     "be_fill_queue Failed for DEF PDU DATA\n");
2610 		return ret;
2611 	}
2612 	mem->dma = mem_descr->mem_array[idx].bus_address.u.a64.address;
2613 	ret = be_cmd_create_default_pdu_queue(&phba->ctrl, cq, dataq,
2614 					      def_pdu_ring_sz,
2615 					      phba->params.defpdu_data_sz);
2616 	if (ret) {
2617 		shost_printk(KERN_ERR, phba->shost,
2618 			     "be_cmd_create_default_pdu_queue Failed"
2619 			     " for DEF PDU DATA\n");
2620 		return ret;
2621 	}
2622 	phwi_ctrlr->default_pdu_data.id = phwi_context->be_def_dataq.id;
2623 	SE_DEBUG(DBG_LVL_8, "iscsi def data id is %d\n",
2624 		 phwi_context->be_def_dataq.id);
2625 	hwi_post_async_buffers(phba, 0);
	SE_DEBUG(DBG_LVL_8, "DEFAULT PDU DATA RING CREATED\n");
2627 	return 0;
2628 }
2629 
2630 static int
2631 beiscsi_post_pages(struct beiscsi_hba *phba)
2632 {
2633 	struct be_mem_descriptor *mem_descr;
2634 	struct mem_array *pm_arr;
2635 	unsigned int page_offset, i;
2636 	struct be_dma_mem sgl;
2637 	int status;
2638 
2639 	mem_descr = phba->init_mem;
2640 	mem_descr += HWI_MEM_SGE;
2641 	pm_arr = mem_descr->mem_array;
2642 
2643 	page_offset = (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io *
2644 			phba->fw_config.iscsi_icd_start) / PAGE_SIZE;
2645 	for (i = 0; i < mem_descr->num_elements; i++) {
2646 		hwi_build_be_sgl_arr(phba, pm_arr, &sgl);
2647 		status = be_cmd_iscsi_post_sgl_pages(&phba->ctrl, &sgl,
2648 						page_offset,
2649 						(pm_arr->size / PAGE_SIZE));
2650 		page_offset += pm_arr->size / PAGE_SIZE;
2651 		if (status != 0) {
2652 			shost_printk(KERN_ERR, phba->shost,
2653 				     "post sgl failed.\n");
2654 			return status;
2655 		}
2656 		pm_arr++;
2657 	}
	SE_DEBUG(DBG_LVL_8, "POSTED PAGES\n");
2659 	return 0;
2660 }
2661 
2662 static void be_queue_free(struct beiscsi_hba *phba, struct be_queue_info *q)
2663 {
2664 	struct be_dma_mem *mem = &q->dma_mem;
2665 	if (mem->va)
2666 		pci_free_consistent(phba->pcidev, mem->size,
2667 			mem->va, mem->dma);
2668 }
2669 
2670 static int be_queue_alloc(struct beiscsi_hba *phba, struct be_queue_info *q,
2671 		u16 len, u16 entry_size)
2672 {
2673 	struct be_dma_mem *mem = &q->dma_mem;
2674 
2675 	memset(q, 0, sizeof(*q));
2676 	q->len = len;
2677 	q->entry_size = entry_size;
2678 	mem->size = len * entry_size;
2679 	mem->va = pci_alloc_consistent(phba->pcidev, mem->size, &mem->dma);
2680 	if (!mem->va)
		return -ENOMEM;
2682 	memset(mem->va, 0, mem->size);
2683 	return 0;
2684 }
2685 
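/**
 * beiscsi_create_wrb_rings - create one WRB queue per connection
 * @phba: adapter instance
 * @phwi_context: hardware context holding the WRB queue objects
 * @phwi_ctrlr: controller whose wrb_context table records the CIDs
 *
 * Splits the HWI_MEM_WRB region into per-connection rings and asks
 * the adapter to create a WRB queue over each of them.
 */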
2686 static int
2687 beiscsi_create_wrb_rings(struct beiscsi_hba *phba,
2688 			 struct hwi_context_memory *phwi_context,
2689 			 struct hwi_controller *phwi_ctrlr)
2690 {
2691 	unsigned int wrb_mem_index, offset, size, num_wrb_rings;
2692 	u64 pa_addr_lo;
2693 	unsigned int idx, num, i;
2694 	struct mem_array *pwrb_arr;
2695 	void *wrb_vaddr;
2696 	struct be_dma_mem sgl;
2697 	struct be_mem_descriptor *mem_descr;
2698 	int status;
2699 
2700 	idx = 0;
2701 	mem_descr = phba->init_mem;
2702 	mem_descr += HWI_MEM_WRB;
2703 	pwrb_arr = kmalloc(sizeof(*pwrb_arr) * phba->params.cxns_per_ctrl,
2704 			   GFP_KERNEL);
2705 	if (!pwrb_arr) {
2706 		shost_printk(KERN_ERR, phba->shost,
2707 			     "Memory alloc failed in create wrb ring.\n");
2708 		return -ENOMEM;
2709 	}
2710 	wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
2711 	pa_addr_lo = mem_descr->mem_array[idx].bus_address.u.a64.address;
2712 	num_wrb_rings = mem_descr->mem_array[idx].size /
2713 		(phba->params.wrbs_per_cxn * sizeof(struct iscsi_wrb));
2714 
2715 	for (num = 0; num < phba->params.cxns_per_ctrl; num++) {
2716 		if (num_wrb_rings) {
2717 			pwrb_arr[num].virtual_address = wrb_vaddr;
2718 			pwrb_arr[num].bus_address.u.a64.address	= pa_addr_lo;
2719 			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2720 					    sizeof(struct iscsi_wrb);
2721 			wrb_vaddr += pwrb_arr[num].size;
2722 			pa_addr_lo += pwrb_arr[num].size;
2723 			num_wrb_rings--;
2724 		} else {
2725 			idx++;
2726 			wrb_vaddr = mem_descr->mem_array[idx].virtual_address;
			pa_addr_lo = mem_descr->mem_array[idx].
					bus_address.u.a64.address;
			num_wrb_rings = mem_descr->mem_array[idx].size /
					(phba->params.wrbs_per_cxn *
					sizeof(struct iscsi_wrb));
			pwrb_arr[num].virtual_address = wrb_vaddr;
			pwrb_arr[num].bus_address.u.a64.address = pa_addr_lo;
2735 			pwrb_arr[num].size = phba->params.wrbs_per_cxn *
2736 						 sizeof(struct iscsi_wrb);
2737 			wrb_vaddr += pwrb_arr[num].size;
2738 			pa_addr_lo   += pwrb_arr[num].size;
2739 			num_wrb_rings--;
2740 		}
2741 	}
2742 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2743 		wrb_mem_index = 0;
2744 		offset = 0;
2745 		size = 0;
2746 
2747 		hwi_build_be_sgl_by_offset(phba, &pwrb_arr[i], &sgl);
2748 		status = be_cmd_wrbq_create(&phba->ctrl, &sgl,
2749 					    &phwi_context->be_wrbq[i]);
		if (status != 0) {
			shost_printk(KERN_ERR, phba->shost,
				     "wrbq create failed.\n");
			kfree(pwrb_arr);
			return status;
		}
		phwi_ctrlr->wrb_context[i * 2].cid =
						phwi_context->be_wrbq[i].id;
2757 	}
2758 	kfree(pwrb_arr);
2759 	return 0;
2760 }
2761 
2762 static void free_wrb_handles(struct beiscsi_hba *phba)
2763 {
2764 	unsigned int index;
2765 	struct hwi_controller *phwi_ctrlr;
2766 	struct hwi_wrb_context *pwrb_context;
2767 
2768 	phwi_ctrlr = phba->phwi_ctrlr;
2769 	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
2770 		pwrb_context = &phwi_ctrlr->wrb_context[index];
2771 		kfree(pwrb_context->pwrb_handle_base);
2772 		kfree(pwrb_context->pwrb_handle_basestd);
2773 	}
2774 }
2775 
2776 static void be_mcc_queues_destroy(struct beiscsi_hba *phba)
2777 {
2778 	struct be_queue_info *q;
2779 	struct be_ctrl_info *ctrl = &phba->ctrl;
2780 
2781 	q = &phba->ctrl.mcc_obj.q;
2782 	if (q->created)
2783 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_MCCQ);
2784 	be_queue_free(phba, q);
2785 
2786 	q = &phba->ctrl.mcc_obj.cq;
2787 	if (q->created)
2788 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2789 	be_queue_free(phba, q);
2790 }
2791 
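/**
 * hwi_cleanup - tear down all hardware queues
 * @phba: adapter instance
 *
 * Destroys the WRB queues, default PDU queues, posted SGL pages, CQs,
 * EQs and finally the MCC queues.
 */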
2792 static void hwi_cleanup(struct beiscsi_hba *phba)
2793 {
2794 	struct be_queue_info *q;
2795 	struct be_ctrl_info *ctrl = &phba->ctrl;
2796 	struct hwi_controller *phwi_ctrlr;
2797 	struct hwi_context_memory *phwi_context;
2798 	int i, eq_num;
2799 
2800 	phwi_ctrlr = phba->phwi_ctrlr;
2801 	phwi_context = phwi_ctrlr->phwi_ctxt;
2802 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
2803 		q = &phwi_context->be_wrbq[i];
2804 		if (q->created)
2805 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_WRBQ);
2806 	}
2807 	free_wrb_handles(phba);
2808 
2809 	q = &phwi_context->be_def_hdrq;
2810 	if (q->created)
2811 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2812 
2813 	q = &phwi_context->be_def_dataq;
2814 	if (q->created)
2815 		beiscsi_cmd_q_destroy(ctrl, q, QTYPE_DPDUQ);
2816 
2817 	beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);
2818 
2819 	for (i = 0; i < (phba->num_cpus); i++) {
2820 		q = &phwi_context->be_cq[i];
2821 		if (q->created)
2822 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_CQ);
2823 	}
2824 	if (phba->msix_enabled)
2825 		eq_num = 1;
2826 	else
2827 		eq_num = 0;
2828 	for (i = 0; i < (phba->num_cpus + eq_num); i++) {
2829 		q = &phwi_context->be_eq[i].q;
2830 		if (q->created)
2831 			beiscsi_cmd_q_destroy(ctrl, q, QTYPE_EQ);
2832 	}
2833 	be_mcc_queues_destroy(phba);
2834 }
2835 
2836 static int be_mcc_queues_create(struct beiscsi_hba *phba,
2837 				struct hwi_context_memory *phwi_context)
2838 {
2839 	struct be_queue_info *q, *cq;
2840 	struct be_ctrl_info *ctrl = &phba->ctrl;
2841 
	/* Alloc MCC compl queue */
	cq = &phba->ctrl.mcc_obj.cq;
	if (be_queue_alloc(phba, cq, MCC_CQ_LEN,
			sizeof(struct be_mcc_compl)))
		goto err;
	/* Ask BE to create MCC compl queue */
	if (phba->msix_enabled) {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq
					 [phba->num_cpus].q, false, true, 0))
			goto mcc_cq_free;
	} else {
		if (beiscsi_cmd_cq_create(ctrl, cq, &phwi_context->be_eq[0].q,
					  false, true, 0))
			goto mcc_cq_free;
2856 	}
2857 
2858 	/* Alloc MCC queue */
2859 	q = &phba->ctrl.mcc_obj.q;
2860 	if (be_queue_alloc(phba, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2861 		goto mcc_cq_destroy;
2862 
2863 	/* Ask BE to create MCC queue */
2864 	if (beiscsi_cmd_mccq_create(phba, q, cq))
2865 		goto mcc_q_free;
2866 
2867 	return 0;
2868 
2869 mcc_q_free:
2870 	be_queue_free(phba, q);
2871 mcc_cq_destroy:
2872 	beiscsi_cmd_q_destroy(ctrl, cq, QTYPE_CQ);
2873 mcc_cq_free:
2874 	be_queue_free(phba, cq);
2875 err:
	return -ENOMEM;
2877 }
2878 
static int find_num_cpus(void)
{
	int num_cpus;

	num_cpus = num_online_cpus();
	if (num_cpus >= MAX_CPUS)
		num_cpus = MAX_CPUS - 1;

	SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", num_cpus);
	return num_cpus;
}
2890 
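/**
 * hwi_init_port - bring up the adapter's queue infrastructure
 * @phba: adapter instance
 *
 * Initializes the firmware, then creates the EQs, MCC queues, CQs,
 * default PDU header and data rings, posts the SGL pages and creates
 * the WRB rings. Any failure unwinds through hwi_cleanup().
 */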
2891 static int hwi_init_port(struct beiscsi_hba *phba)
2892 {
2893 	struct hwi_controller *phwi_ctrlr;
2894 	struct hwi_context_memory *phwi_context;
2895 	unsigned int def_pdu_ring_sz;
2896 	struct be_ctrl_info *ctrl = &phba->ctrl;
2897 	int status;
2898 
2899 	def_pdu_ring_sz =
2900 		phba->params.asyncpdus_per_ctrl * sizeof(struct phys_addr);
2901 	phwi_ctrlr = phba->phwi_ctrlr;
2902 	phwi_context = phwi_ctrlr->phwi_ctxt;
2903 	phwi_context->max_eqd = 0;
2904 	phwi_context->min_eqd = 0;
2905 	phwi_context->cur_eqd = 64;
2906 	be_cmd_fw_initialize(&phba->ctrl);
2907 
2908 	status = beiscsi_create_eqs(phba, phwi_context);
2909 	if (status != 0) {
		shost_printk(KERN_ERR, phba->shost, "EQ not created\n");
2911 		goto error;
2912 	}
2913 
2914 	status = be_mcc_queues_create(phba, phwi_context);
2915 	if (status != 0)
2916 		goto error;
2917 
2918 	status = mgmt_check_supported_fw(ctrl, phba);
2919 	if (status != 0) {
2920 		shost_printk(KERN_ERR, phba->shost,
			     "Unsupported fw version\n");
2922 		goto error;
2923 	}
2924 
2925 	status = beiscsi_create_cqs(phba, phwi_context);
2926 	if (status != 0) {
2927 		shost_printk(KERN_ERR, phba->shost, "CQ not created\n");
2928 		goto error;
2929 	}
2930 
2931 	status = beiscsi_create_def_hdr(phba, phwi_context, phwi_ctrlr,
2932 					def_pdu_ring_sz);
2933 	if (status != 0) {
2934 		shost_printk(KERN_ERR, phba->shost,
2935 			     "Default Header not created\n");
2936 		goto error;
2937 	}
2938 
2939 	status = beiscsi_create_def_data(phba, phwi_context,
2940 					 phwi_ctrlr, def_pdu_ring_sz);
2941 	if (status != 0) {
2942 		shost_printk(KERN_ERR, phba->shost,
2943 			     "Default Data not created\n");
2944 		goto error;
2945 	}
2946 
2947 	status = beiscsi_post_pages(phba);
2948 	if (status != 0) {
2949 		shost_printk(KERN_ERR, phba->shost, "Post SGL Pages Failed\n");
2950 		goto error;
2951 	}
2952 
2953 	status = beiscsi_create_wrb_rings(phba,	phwi_context, phwi_ctrlr);
2954 	if (status != 0) {
2955 		shost_printk(KERN_ERR, phba->shost,
2956 			     "WRB Rings not created\n");
2957 		goto error;
2958 	}
2959 
2960 	SE_DEBUG(DBG_LVL_8, "hwi_init_port success\n");
2961 	return 0;
2962 
	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed\n");
2964 	shost_printk(KERN_ERR, phba->shost, "hwi_init_port failed");
2965 	hwi_cleanup(phba);
2966 	return -ENOMEM;
2967 }
2968 
2969 static int hwi_init_controller(struct beiscsi_hba *phba)
2970 {
2971 	struct hwi_controller *phwi_ctrlr;
2972 
2973 	phwi_ctrlr = phba->phwi_ctrlr;
	if (phba->init_mem[HWI_MEM_ADDN_CONTEXT].num_elements == 1) {
		phwi_ctrlr->phwi_ctxt = (struct hwi_context_memory *)phba->
		    init_mem[HWI_MEM_ADDN_CONTEXT].mem_array[0].virtual_address;
		SE_DEBUG(DBG_LVL_8, "phwi_ctrlr->phwi_ctxt=%p\n",
			 phwi_ctrlr->phwi_ctxt);
	} else {
		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_ADDN_CONTEXT is more than one element. "
			     "Failing to load\n");
2983 		return -ENOMEM;
2984 	}
2985 
2986 	iscsi_init_global_templates(phba);
2987 	beiscsi_init_wrb_handle(phba);
2988 	hwi_init_async_pdu_ctx(phba);
2989 	if (hwi_init_port(phba) != 0) {
2990 		shost_printk(KERN_ERR, phba->shost,
2991 			     "hwi_init_controller failed\n");
2992 		return -ENOMEM;
2993 	}
2994 	return 0;
2995 }
2996 
2997 static void beiscsi_free_mem(struct beiscsi_hba *phba)
2998 {
2999 	struct be_mem_descriptor *mem_descr;
3000 	int i, j;
3001 
3002 	mem_descr = phba->init_mem;
3005 	for (i = 0; i < SE_MEM_MAX; i++) {
3006 		for (j = mem_descr->num_elements; j > 0; j--) {
3007 			pci_free_consistent(phba->pcidev,
3008 			  mem_descr->mem_array[j - 1].size,
3009 			  mem_descr->mem_array[j - 1].virtual_address,
3010 			  mem_descr->mem_array[j - 1].bus_address.
3011 				u.a64.address);
3012 		}
3013 		kfree(mem_descr->mem_array);
3014 		mem_descr++;
3015 	}
3016 	kfree(phba->init_mem);
3017 	kfree(phba->phwi_ctrlr);
3018 }
3019 
3020 static int beiscsi_init_controller(struct beiscsi_hba *phba)
3021 {
3022 	int ret = -ENOMEM;
3023 
3024 	ret = beiscsi_get_memory(phba);
3025 	if (ret < 0) {
		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
			     "Failed in beiscsi_get_memory\n");
3028 		return ret;
3029 	}
3030 
3031 	ret = hwi_init_controller(phba);
3032 	if (ret)
3033 		goto free_init;
	SE_DEBUG(DBG_LVL_8, "Return success from beiscsi_init_controller\n");
3035 	return 0;
3036 
3037 free_init:
3038 	beiscsi_free_mem(phba);
3039 	return -ENOMEM;
3040 }
3041 
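/**
 * beiscsi_init_sgl_handle - build the I/O and eh SGL handle pools
 * @phba: adapter instance
 *
 * Carves sgl_handle structures out of the HWI_MEM_SGLH region and
 * assigns each one a fragment of the HWI_MEM_SGE region together with
 * its ICD index. The first ios_per_ctrl handles serve normal I/O; the
 * remaining handles form the eh pool.
 */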
3042 static int beiscsi_init_sgl_handle(struct beiscsi_hba *phba)
3043 {
3044 	struct be_mem_descriptor *mem_descr_sglh, *mem_descr_sg;
3045 	struct sgl_handle *psgl_handle;
3046 	struct iscsi_sge *pfrag;
3047 	unsigned int arr_index, i, idx;
3048 
3049 	phba->io_sgl_hndl_avbl = 0;
3050 	phba->eh_sgl_hndl_avbl = 0;
3051 
3052 	mem_descr_sglh = phba->init_mem;
3053 	mem_descr_sglh += HWI_MEM_SGLH;
	if (mem_descr_sglh->num_elements == 1) {
3055 		phba->io_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3056 						 phba->params.ios_per_ctrl,
3057 						 GFP_KERNEL);
3058 		if (!phba->io_sgl_hndl_base) {
3059 			shost_printk(KERN_ERR, phba->shost,
3060 				     "Mem Alloc Failed. Failing to load\n");
3061 			return -ENOMEM;
3062 		}
3063 		phba->eh_sgl_hndl_base = kzalloc(sizeof(struct sgl_handle *) *
3064 						 (phba->params.icds_per_ctrl -
3065 						 phba->params.ios_per_ctrl),
3066 						 GFP_KERNEL);
3067 		if (!phba->eh_sgl_hndl_base) {
3068 			kfree(phba->io_sgl_hndl_base);
3069 			shost_printk(KERN_ERR, phba->shost,
3070 				     "Mem Alloc Failed. Failing to load\n");
3071 			return -ENOMEM;
3072 		}
3073 	} else {
3074 		shost_printk(KERN_ERR, phba->shost,
			     "HWI_MEM_SGLH is more than one element. "
			     "Failing to load\n");
3077 		return -ENOMEM;
3078 	}
3079 
3080 	arr_index = 0;
3081 	idx = 0;
3082 	while (idx < mem_descr_sglh->num_elements) {
3083 		psgl_handle = mem_descr_sglh->mem_array[idx].virtual_address;
3084 
3085 		for (i = 0; i < (mem_descr_sglh->mem_array[idx].size /
3086 		      sizeof(struct sgl_handle)); i++) {
3087 			if (arr_index < phba->params.ios_per_ctrl) {
3088 				phba->io_sgl_hndl_base[arr_index] = psgl_handle;
3089 				phba->io_sgl_hndl_avbl++;
3090 				arr_index++;
3091 			} else {
3092 				phba->eh_sgl_hndl_base[arr_index -
3093 					phba->params.ios_per_ctrl] =
3094 								psgl_handle;
3095 				arr_index++;
3096 				phba->eh_sgl_hndl_avbl++;
3097 			}
3098 			psgl_handle++;
3099 		}
3100 		idx++;
3101 	}
3102 	SE_DEBUG(DBG_LVL_8,
		 "phba->io_sgl_hndl_avbl=%d "
		 "phba->eh_sgl_hndl_avbl=%d\n",
3105 		 phba->io_sgl_hndl_avbl,
3106 		 phba->eh_sgl_hndl_avbl);
3107 	mem_descr_sg = phba->init_mem;
3108 	mem_descr_sg += HWI_MEM_SGE;
	SE_DEBUG(DBG_LVL_8, "mem_descr_sg->num_elements=%d\n",
3110 		 mem_descr_sg->num_elements);
3111 	arr_index = 0;
3112 	idx = 0;
3113 	while (idx < mem_descr_sg->num_elements) {
3114 		pfrag = mem_descr_sg->mem_array[idx].virtual_address;
3115 
3116 		for (i = 0;
3117 		     i < (mem_descr_sg->mem_array[idx].size) /
3118 		     (sizeof(struct iscsi_sge) * phba->params.num_sge_per_io);
3119 		     i++) {
3120 			if (arr_index < phba->params.ios_per_ctrl)
3121 				psgl_handle = phba->io_sgl_hndl_base[arr_index];
3122 			else
3123 				psgl_handle = phba->eh_sgl_hndl_base[arr_index -
3124 						phba->params.ios_per_ctrl];
3125 			psgl_handle->pfrag = pfrag;
3126 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_hi, pfrag, 0);
3127 			AMAP_SET_BITS(struct amap_iscsi_sge, addr_lo, pfrag, 0);
3128 			pfrag += phba->params.num_sge_per_io;
3129 			psgl_handle->sgl_index =
3130 				phba->fw_config.iscsi_icd_start + arr_index++;
3131 		}
3132 		idx++;
3133 	}
3134 	phba->io_sgl_free_index = 0;
3135 	phba->io_sgl_alloc_index = 0;
3136 	phba->eh_sgl_free_index = 0;
3137 	phba->eh_sgl_alloc_index = 0;
3138 	return 0;
3139 }
3140 
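/**
 * hba_setup_cid_tbls - allocate the CID and endpoint lookup tables
 * @phba: adapter instance
 *
 * Fills cid_array with the usable CIDs, spaced two apart starting at
 * the firmware's iscsi_cid_start, and sizes ep_array to span the full
 * CID range.
 */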
3141 static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
3142 {
3143 	int i, new_cid;
3144 
3145 	phba->cid_array = kzalloc(sizeof(void *) * phba->params.cxns_per_ctrl,
3146 				  GFP_KERNEL);
3147 	if (!phba->cid_array) {
3148 		shost_printk(KERN_ERR, phba->shost,
3149 			     "Failed to allocate memory in "
3150 			     "hba_setup_cid_tbls\n");
3151 		return -ENOMEM;
3152 	}
3153 	phba->ep_array = kzalloc(sizeof(struct iscsi_endpoint *) *
3154 				 phba->params.cxns_per_ctrl * 2, GFP_KERNEL);
3155 	if (!phba->ep_array) {
3156 		shost_printk(KERN_ERR, phba->shost,
3157 			     "Failed to allocate memory in "
			     "hba_setup_cid_tbls\n");
3159 		kfree(phba->cid_array);
3160 		return -ENOMEM;
3161 	}
3162 	new_cid = phba->fw_config.iscsi_cid_start;
3163 	for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
3164 		phba->cid_array[i] = new_cid;
3165 		new_cid += 2;
3166 	}
3167 	phba->avlbl_cids = phba->params.cxns_per_ctrl;
3168 	return 0;
3169 }
3170 
3171 static unsigned char hwi_enable_intr(struct beiscsi_hba *phba)
3172 {
3173 	struct be_ctrl_info *ctrl = &phba->ctrl;
3174 	struct hwi_controller *phwi_ctrlr;
3175 	struct hwi_context_memory *phwi_context;
3176 	struct be_queue_info *eq;
3177 	u8 __iomem *addr;
3178 	u32 reg, i;
3179 	u32 enabled;
3180 
3181 	phwi_ctrlr = phba->phwi_ctrlr;
3182 	phwi_context = phwi_ctrlr->phwi_ctxt;
3183 
3184 	addr = (u8 __iomem *) ((u8 __iomem *) ctrl->pcicfg +
3185 			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET);
3186 	reg = ioread32(addr);
	SE_DEBUG(DBG_LVL_8, "reg = 0x%08x\n", reg);
3188 
3189 	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
3190 	if (!enabled) {
3191 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		SE_DEBUG(DBG_LVL_8, "reg = 0x%08x addr=%p\n", reg, addr);
3193 		iowrite32(reg, addr);
3194 		if (!phba->msix_enabled) {
3195 			eq = &phwi_context->be_eq[0].q;
			SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3197 			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3198 		} else {
3199 			for (i = 0; i <= phba->num_cpus; i++) {
3200 				eq = &phwi_context->be_eq[i].q;
				SE_DEBUG(DBG_LVL_8, "eq->id=%d\n", eq->id);
3202 				hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
3203 			}
3204 		}
3205 	}
3206 	return true;
3207 }
3208 
3209 static void hwi_disable_intr(struct beiscsi_hba *phba)
3210 {
	struct be_ctrl_info *ctrl = &phba->ctrl;
	u8 __iomem *addr = ctrl->pcicfg + PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET;
	u32 reg = ioread32(addr);
	u32 enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

	if (enabled) {
		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
		iowrite32(reg, addr);
	} else {
		shost_printk(KERN_WARNING, phba->shost,
			     "In hwi_disable_intr, Already Disabled\n");
	}
3223 }
3224 
3225 static int beiscsi_init_port(struct beiscsi_hba *phba)
3226 {
3227 	int ret;
3228 
3229 	ret = beiscsi_init_controller(phba);
3230 	if (ret < 0) {
3231 		shost_printk(KERN_ERR, phba->shost,
			     "beiscsi_dev_probe - Failed in "
			     "beiscsi_init_controller\n");
3234 		return ret;
3235 	}
3236 	ret = beiscsi_init_sgl_handle(phba);
3237 	if (ret < 0) {
3238 		shost_printk(KERN_ERR, phba->shost,
			     "beiscsi_dev_probe - Failed in "
			     "beiscsi_init_sgl_handle\n");
3241 		goto do_cleanup_ctrlr;
3242 	}
3243 
3244 	if (hba_setup_cid_tbls(phba)) {
3245 		shost_printk(KERN_ERR, phba->shost,
3246 			     "Failed in hba_setup_cid_tbls\n");
3247 		kfree(phba->io_sgl_hndl_base);
3248 		kfree(phba->eh_sgl_hndl_base);
3249 		goto do_cleanup_ctrlr;
3250 	}
3251 
3252 	return ret;
3253 
3254 do_cleanup_ctrlr:
3255 	hwi_cleanup(phba);
3256 	return ret;
3257 }
3258 
3259 static void hwi_purge_eq(struct beiscsi_hba *phba)
3260 {
3261 	struct hwi_controller *phwi_ctrlr;
3262 	struct hwi_context_memory *phwi_context;
3263 	struct be_queue_info *eq;
3264 	struct be_eq_entry *eqe = NULL;
3265 	int i, eq_msix;
3266 	unsigned int num_processed;
3267 
3268 	phwi_ctrlr = phba->phwi_ctrlr;
3269 	phwi_context = phwi_ctrlr->phwi_ctxt;
3270 	if (phba->msix_enabled)
3271 		eq_msix = 1;
3272 	else
3273 		eq_msix = 0;
3274 
3275 	for (i = 0; i < (phba->num_cpus + eq_msix); i++) {
3276 		eq = &phwi_context->be_eq[i].q;
3277 		eqe = queue_tail_node(eq);
3278 		num_processed = 0;
3279 		while (eqe->dw[offsetof(struct amap_eq_entry, valid) / 32]
3280 					& EQE_VALID_MASK) {
3281 			AMAP_SET_BITS(struct amap_eq_entry, valid, eqe, 0);
3282 			queue_tail_inc(eq);
3283 			eqe = queue_tail_node(eq);
3284 			num_processed++;
3285 		}
3286 
3287 		if (num_processed)
3288 			hwi_ring_eq_db(phba, eq->id, 1,	num_processed, 1, 1);
3289 	}
3290 }
3291 
3292 static void beiscsi_clean_port(struct beiscsi_hba *phba)
3293 {
3294 	unsigned char mgmt_status;
3295 
3296 	mgmt_status = mgmt_epfw_cleanup(phba, CMD_CONNECTION_CHUTE_0);
3297 	if (mgmt_status)
3298 		shost_printk(KERN_WARNING, phba->shost,
			     "mgmt_epfw_cleanup FAILED\n");
3300 
3301 	hwi_purge_eq(phba);
3302 	hwi_cleanup(phba);
3303 	kfree(phba->io_sgl_hndl_base);
3304 	kfree(phba->eh_sgl_hndl_base);
3305 	kfree(phba->cid_array);
3306 	kfree(phba->ep_array);
3307 }
3308 
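/**
 * beiscsi_offload_connection - push negotiated parameters to the adapter
 * @beiscsi_conn: connection being moved to offload mode
 * @params: login-negotiated parameters in firmware layout
 *
 * Builds a context-update WRB carrying the burst lengths, digest, R2T
 * and ExpStatSN settings for this connection and rings the TX doorbell
 * to hand it to the adapter.
 */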
3309 void
3310 beiscsi_offload_connection(struct beiscsi_conn *beiscsi_conn,
3311 			   struct beiscsi_offload_params *params)
3312 {
3313 	struct wrb_handle *pwrb_handle;
3314 	struct iscsi_target_context_update_wrb *pwrb = NULL;
3315 	struct be_mem_descriptor *mem_descr;
3316 	struct beiscsi_hba *phba = beiscsi_conn->phba;
3317 	u32 doorbell = 0;
3318 
3319 	/*
3320 	 * We can always use 0 here because it is reserved by libiscsi for
3321 	 * login/startup related tasks.
3322 	 */
3323 	pwrb_handle = alloc_wrb_handle(phba, (beiscsi_conn->beiscsi_conn_cid -
3324 				       phba->fw_config.iscsi_cid_start));
3325 	pwrb = (struct iscsi_target_context_update_wrb *)pwrb_handle->pwrb;
3326 	memset(pwrb, 0, sizeof(*pwrb));
3327 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3328 		      max_burst_length, pwrb, params->dw[offsetof
3329 		      (struct amap_beiscsi_offload_params,
3330 		      max_burst_length) / 32]);
3331 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3332 		      max_send_data_segment_length, pwrb,
3333 		      params->dw[offsetof(struct amap_beiscsi_offload_params,
3334 		      max_send_data_segment_length) / 32]);
3335 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3336 		      first_burst_length,
3337 		      pwrb,
3338 		      params->dw[offsetof(struct amap_beiscsi_offload_params,
3339 		      first_burst_length) / 32]);
3340 
3341 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, erl, pwrb,
3342 		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3343 		      erl) / 32] & OFFLD_PARAMS_ERL));
3344 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, dde, pwrb,
3345 		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3346 		      dde) / 32] & OFFLD_PARAMS_DDE) >> 2);
3347 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, hde, pwrb,
3348 		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3349 		      hde) / 32] & OFFLD_PARAMS_HDE) >> 3);
3350 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ir2t, pwrb,
3351 		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3352 		      ir2t) / 32] & OFFLD_PARAMS_IR2T) >> 4);
3353 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, imd, pwrb,
3354 		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3355 		       imd) / 32] & OFFLD_PARAMS_IMD) >> 5);
3356 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, stat_sn,
3357 		      pwrb,
3358 		      (params->dw[offsetof(struct amap_beiscsi_offload_params,
3359 		      exp_statsn) / 32] + 1));
3360 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, type, pwrb,
3361 		      0x7);
3362 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, wrb_idx,
3363 		      pwrb, pwrb_handle->wrb_index);
3364 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, ptr2nextwrb,
3365 		      pwrb, pwrb_handle->nxt_wrb_index);
3366 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3367 			session_state, pwrb, 0);
3368 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, compltonack,
3369 		      pwrb, 1);
3370 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, notpredblq,
3371 		      pwrb, 0);
3372 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb, mode, pwrb,
3373 		      0);
3374 
3375 	mem_descr = phba->init_mem;
3376 	mem_descr += ISCSI_MEM_GLOBAL_HEADER;
3377 
3378 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3379 			pad_buffer_addr_hi, pwrb,
3380 		      mem_descr->mem_array[0].bus_address.u.a32.address_hi);
3381 	AMAP_SET_BITS(struct amap_iscsi_target_context_update_wrb,
3382 			pad_buffer_addr_lo, pwrb,
3383 		      mem_descr->mem_array[0].bus_address.u.a32.address_lo);
3384 
3385 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_target_context_update_wrb));
3386 
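	/*
	 * Post exactly one WRB: the doorbell word encodes the connection
	 * CID, the WRB index within that connection's ring, and the
	 * number of WRBs posted (1).
	 */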
3387 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3388 	doorbell |= (pwrb_handle->wrb_index & DB_DEF_PDU_WRB_INDEX_MASK)
3389 			     << DB_DEF_PDU_WRB_INDEX_SHIFT;
3390 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3391 
3392 	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3393 }
3394 
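/* Recover the task-table index (and session age) from a pdu itt. */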
3395 static void beiscsi_parse_pdu(struct iscsi_conn *conn, itt_t itt,
3396 			      int *index, int *age)
3397 {
3398 	*index = (int)itt;
3399 	if (age)
3400 		*age = conn->session->age;
3401 }
3402 
3403 /**
3404  * beiscsi_alloc_pdu - allocates pdu and related resources
3405  * @task: libiscsi task
3406  * @opcode: opcode of pdu for task
3407  *
3408  * This is called with the session lock held. It will allocate
3409  * the wrb and sgl if needed for the command. And it will prep
3410  * the pdu's itt. beiscsi_parse_pdu will later translate
3411  * the pdu itt to the libiscsi task itt.
3412  */
3413 static int beiscsi_alloc_pdu(struct iscsi_task *task, uint8_t opcode)
3414 {
3415 	struct beiscsi_io_task *io_task = task->dd_data;
3416 	struct iscsi_conn *conn = task->conn;
3417 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3418 	struct beiscsi_hba *phba = beiscsi_conn->phba;
3419 	struct hwi_wrb_context *pwrb_context;
3420 	struct hwi_controller *phwi_ctrlr;
3421 	itt_t itt;
3422 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3423 	dma_addr_t paddr;
3424 
3425 	io_task->cmd_bhs = pci_pool_alloc(beiscsi_sess->bhs_pool,
3426 					  GFP_KERNEL, &paddr);
3427 	if (!io_task->cmd_bhs)
3428 		return -ENOMEM;
3429 	io_task->bhs_pa.u.a64.address = paddr;
3430 	io_task->libiscsi_itt = (itt_t)task->itt;
3431 	io_task->pwrb_handle = alloc_wrb_handle(phba,
3432 						beiscsi_conn->beiscsi_conn_cid -
3433 						phba->fw_config.iscsi_cid_start
3434 						);
3435 	io_task->conn = beiscsi_conn;
3436 
3437 	task->hdr = (struct iscsi_hdr *)&io_task->cmd_bhs->iscsi_hdr;
3438 	task->hdr_max = sizeof(struct be_cmd_bhs);
3439 
3440 	if (task->sc) {
3441 		spin_lock(&phba->io_sgl_lock);
3442 		io_task->psgl_handle = alloc_io_sgl_handle(phba);
3443 		spin_unlock(&phba->io_sgl_lock);
3444 		if (!io_task->psgl_handle)
3445 			goto free_hndls;
3446 	} else {
3447 		io_task->scsi_cmnd = NULL;
3448 		if ((opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN) {
3449 			if (!beiscsi_conn->login_in_progress) {
3450 				spin_lock(&phba->mgmt_sgl_lock);
3451 				io_task->psgl_handle =
3452 						alloc_mgmt_sgl_handle(phba);
3453 				spin_unlock(&phba->mgmt_sgl_lock);
3454 				if (!io_task->psgl_handle)
3455 					goto free_hndls;
3456 
3457 				beiscsi_conn->login_in_progress = 1;
3458 				beiscsi_conn->plogin_sgl_handle =
3459 							io_task->psgl_handle;
3460 			} else {
3461 				io_task->psgl_handle =
3462 						beiscsi_conn->plogin_sgl_handle;
3463 			}
3464 		} else {
3465 			spin_lock(&phba->mgmt_sgl_lock);
3466 			io_task->psgl_handle = alloc_mgmt_sgl_handle(phba);
3467 			spin_unlock(&phba->mgmt_sgl_lock);
3468 			if (!io_task->psgl_handle)
3469 				goto free_hndls;
3470 		}
3471 	}
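	/*
	 * Pack the hardware indices into the wire itt: WRB index in the
	 * upper 16 bits, SGL/ICD index in the lower 16 (e.g. wrb_index 5
	 * with sgl_index 9 packs to 0x00050009 before the byte swap); the
	 * completion path maps the itt back to the libiscsi task.
	 */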
3472 	itt = (itt_t) cpu_to_be32(((unsigned int)io_task->pwrb_handle->
3473 				 wrb_index << 16) | (unsigned int)
3474 				(io_task->psgl_handle->sgl_index));
3475 	io_task->pwrb_handle->pio_handle = task;
3476 
3477 	io_task->cmd_bhs->iscsi_hdr.itt = itt;
3478 	return 0;
3479 
3480 free_hndls:
3481 	phwi_ctrlr = phba->phwi_ctrlr;
3482 	pwrb_context = &phwi_ctrlr->wrb_context[
3483 			beiscsi_conn->beiscsi_conn_cid -
3484 			phba->fw_config.iscsi_cid_start];
3485 	free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3486 	io_task->pwrb_handle = NULL;
3487 	pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3488 		      io_task->bhs_pa.u.a64.address);
3489 	SE_DEBUG(DBG_LVL_1, "Alloc of SGL_ICD Failed\n");
3490 	return -ENOMEM;
3491 }
3492 
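/**
 * beiscsi_cleanup_task - free per-task resources
 * @task: libiscsi task being torn down
 *
 * Returns the task's WRB handle and BHS buffer to their pools, then
 * frees the I/O or management SGL handle. Login tasks keep their
 * management SGL handle, which is shared for the whole login exchange.
 */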
3493 static void beiscsi_cleanup_task(struct iscsi_task *task)
3494 {
3495 	struct beiscsi_io_task *io_task = task->dd_data;
3496 	struct iscsi_conn *conn = task->conn;
3497 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3498 	struct beiscsi_hba *phba = beiscsi_conn->phba;
3499 	struct beiscsi_session *beiscsi_sess = beiscsi_conn->beiscsi_sess;
3500 	struct hwi_wrb_context *pwrb_context;
3501 	struct hwi_controller *phwi_ctrlr;
3502 
3503 	phwi_ctrlr = phba->phwi_ctrlr;
3504 	pwrb_context = &phwi_ctrlr->wrb_context[beiscsi_conn->beiscsi_conn_cid
3505 			- phba->fw_config.iscsi_cid_start];
3506 	if (io_task->pwrb_handle) {
3507 		free_wrb_handle(phba, pwrb_context, io_task->pwrb_handle);
3508 		io_task->pwrb_handle = NULL;
3509 	}
3510 
3511 	if (io_task->cmd_bhs) {
3512 		pci_pool_free(beiscsi_sess->bhs_pool, io_task->cmd_bhs,
3513 			      io_task->bhs_pa.u.a64.address);
3514 	}
3515 
3516 	if (task->sc) {
3517 		if (io_task->psgl_handle) {
3518 			spin_lock(&phba->io_sgl_lock);
3519 			free_io_sgl_handle(phba, io_task->psgl_handle);
3520 			spin_unlock(&phba->io_sgl_lock);
3521 			io_task->psgl_handle = NULL;
3522 		}
3523 	} else {
3524 		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGIN)
3525 			return;
3526 		if (io_task->psgl_handle) {
3527 			spin_lock(&phba->mgmt_sgl_lock);
3528 			free_mgmt_sgl_handle(phba, io_task->psgl_handle);
3529 			spin_unlock(&phba->mgmt_sgl_lock);
3530 			io_task->psgl_handle = NULL;
3531 		}
3532 	}
3533 }
3534 
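/**
 * beiscsi_iotask - post a SCSI command WRB to the adapter
 * @task: libiscsi task carrying the SCSI command
 * @sg: DMA-mapped scatterlist for the data buffer
 * @num_sg: number of entries in @sg
 * @xferlen: total transfer length in bytes
 * @writedir: non-zero for a write (DATA-OUT) command
 *
 * Fills in the task's WRB with LUN, transfer length, CmdSN and the SGL,
 * pre-formats an immediate DATA-OUT header for writes, and rings the TX
 * doorbell to hand the WRB to the firmware.
 */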
3535 static int beiscsi_iotask(struct iscsi_task *task, struct scatterlist *sg,
3536 			  unsigned int num_sg, unsigned int xferlen,
3537 			  unsigned int writedir)
3538 {
3540 	struct beiscsi_io_task *io_task = task->dd_data;
3541 	struct iscsi_conn *conn = task->conn;
3542 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3543 	struct beiscsi_hba *phba = beiscsi_conn->phba;
3544 	struct iscsi_wrb *pwrb = NULL;
3545 	unsigned int doorbell = 0;
3546 
3547 	pwrb = io_task->pwrb_handle->pwrb;
3548 	io_task->cmd_bhs->iscsi_hdr.exp_statsn = 0;
3549 	io_task->bhs_len = sizeof(struct be_cmd_bhs);
3550 
3551 	if (writedir) {
3552 		memset(&io_task->cmd_bhs->iscsi_data_pdu, 0, 48);
3553 		AMAP_SET_BITS(struct amap_pdu_data_out, itt,
3554 			      &io_task->cmd_bhs->iscsi_data_pdu,
3555 			      (unsigned int)io_task->cmd_bhs->iscsi_hdr.itt);
3556 		AMAP_SET_BITS(struct amap_pdu_data_out, opcode,
3557 			      &io_task->cmd_bhs->iscsi_data_pdu,
3558 			      ISCSI_OPCODE_SCSI_DATA_OUT);
3559 		AMAP_SET_BITS(struct amap_pdu_data_out, final_bit,
3560 			      &io_task->cmd_bhs->iscsi_data_pdu, 1);
3561 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3562 			      INI_WR_CMD);
3563 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 1);
3564 	} else {
3565 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3566 			      INI_RD_CMD);
3567 		AMAP_SET_BITS(struct amap_iscsi_wrb, dsp, pwrb, 0);
3568 	}
3569 	memcpy(&io_task->cmd_bhs->iscsi_data_pdu.
3570 	       dw[offsetof(struct amap_pdu_data_out, lun) / 32],
3571 	       io_task->cmd_bhs->iscsi_hdr.lun, sizeof(struct scsi_lun));
3572 
3573 	AMAP_SET_BITS(struct amap_iscsi_wrb, lun, pwrb,
3574 		      cpu_to_be16((unsigned short)io_task->cmd_bhs->iscsi_hdr.
3575 				  lun[0]));
3576 	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb, xferlen);
3577 	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3578 		      io_task->pwrb_handle->wrb_index);
3579 	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3580 		      be32_to_cpu(task->cmdsn));
3581 	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3582 		      io_task->psgl_handle->sgl_index);
3583 
3584 	hwi_write_sgl(pwrb, sg, num_sg, io_task);
3585 
3586 	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3587 		      io_task->pwrb_handle->nxt_wrb_index);
3588 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3589 
3590 	doorbell |= beiscsi_conn->beiscsi_conn_cid & DB_WRB_POST_CID_MASK;
3591 	doorbell |= (io_task->pwrb_handle->wrb_index &
3592 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3593 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3594 
3595 	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3596 	return 0;
3597 }
3598 
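/**
 * beiscsi_mtask - post a management PDU to the adapter
 * @task: libiscsi task (login, nop-out, text, TMF or logout)
 *
 * Sets the WRB type from the PDU opcode, attaches the task's data
 * buffer and rings the TX doorbell; unsupported opcodes are rejected
 * with -EINVAL.
 */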
3599 static int beiscsi_mtask(struct iscsi_task *task)
3600 {
3601 	struct beiscsi_io_task *io_task = task->dd_data;
3602 	struct iscsi_conn *conn = task->conn;
3603 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
3604 	struct beiscsi_hba *phba = beiscsi_conn->phba;
3605 	struct iscsi_wrb *pwrb = NULL;
3606 	unsigned int doorbell = 0;
3607 	unsigned int cid;
3608 
3609 	cid = beiscsi_conn->beiscsi_conn_cid;
3610 	pwrb = io_task->pwrb_handle->pwrb;
3611 	memset(pwrb, 0, sizeof(*pwrb));
3612 	AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb,
3613 		      be32_to_cpu(task->cmdsn));
3614 	AMAP_SET_BITS(struct amap_iscsi_wrb, wrb_idx, pwrb,
3615 		      io_task->pwrb_handle->wrb_index);
3616 	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
3617 		      io_task->psgl_handle->sgl_index);
3618 
3619 	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
3620 	case ISCSI_OP_LOGIN:
3621 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3622 			      TGT_DM_CMD);
3623 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3624 		AMAP_SET_BITS(struct amap_iscsi_wrb, cmdsn_itt, pwrb, 1);
3625 		hwi_write_buffer(pwrb, task);
3626 		break;
3627 	case ISCSI_OP_NOOP_OUT:
3628 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3629 			      INI_RD_CMD);
3630 		if (task->hdr->ttt == ISCSI_RESERVED_TAG)
3631 			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3632 		else
3633 			AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 1);
3634 		hwi_write_buffer(pwrb, task);
3635 		break;
3636 	case ISCSI_OP_TEXT:
3637 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3638 			      TGT_DM_CMD);
3639 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3640 		hwi_write_buffer(pwrb, task);
3641 		break;
3642 	case ISCSI_OP_SCSI_TMFUNC:
3643 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3644 			      INI_TMF_CMD);
3645 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3646 		hwi_write_buffer(pwrb, task);
3647 		break;
3648 	case ISCSI_OP_LOGOUT:
3649 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
3650 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
3651 			      HWH_TYPE_LOGOUT);
3652 		hwi_write_buffer(pwrb, task);
3653 		break;
3654 
3655 	default:
3656 		SE_DEBUG(DBG_LVL_1, "opcode=%d Not supported\n",
3657 			 task->hdr->opcode & ISCSI_OPCODE_MASK);
3658 		return -EINVAL;
3659 	}
3660 
3661 	AMAP_SET_BITS(struct amap_iscsi_wrb, r2t_exp_dtl, pwrb,
3662 		      task->data_count);
3663 	AMAP_SET_BITS(struct amap_iscsi_wrb, ptr2nextwrb, pwrb,
3664 		      io_task->pwrb_handle->nxt_wrb_index);
3665 	be_dws_le_to_cpu(pwrb, sizeof(struct iscsi_wrb));
3666 
3667 	doorbell |= cid & DB_WRB_POST_CID_MASK;
3668 	doorbell |= (io_task->pwrb_handle->wrb_index &
3669 		     DB_DEF_PDU_WRB_INDEX_MASK) << DB_DEF_PDU_WRB_INDEX_SHIFT;
3670 	doorbell |= 1 << DB_DEF_PDU_NUM_POSTED_SHIFT;
3671 	iowrite32(doorbell, phba->db_va + DB_TXULP0_OFFSET);
3672 	return 0;
3673 }
3674 
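/**
 * beiscsi_task_xmit - libiscsi transmit entry point
 * @task: libiscsi task to transmit
 *
 * Management PDUs (tasks without a SCSI command) are routed through
 * beiscsi_mtask(); SCSI commands are DMA-mapped and handed to
 * beiscsi_iotask() with their scatterlist, length and data direction.
 */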
3675 static int beiscsi_task_xmit(struct iscsi_task *task)
3676 {
3677 	struct beiscsi_io_task *io_task = task->dd_data;
3678 	struct scsi_cmnd *sc = task->sc;
3679 	struct scatterlist *sg;
3680 	int num_sg;
3681 	unsigned int  writedir = 0, xferlen = 0;
3682 
3683 	if (!sc)
3684 		return beiscsi_mtask(task);
3685 
3686 	io_task->scsi_cmnd = sc;
3687 	num_sg = scsi_dma_map(sc);
3688 	if (num_sg < 0) {
3689 		SE_DEBUG(DBG_LVL_1, "scsi_dma_map Failed\n");
3690 		return num_sg;
3691 	}
3692 	SE_DEBUG(DBG_LVL_4, "xferlen=0x%08x scmd=%p num_sg=%d sernum=%lu\n",
3693 		  (scsi_bufflen(sc)), sc, num_sg, sc->serial_number);
3694 	xferlen = scsi_bufflen(sc);
3695 	sg = scsi_sglist(sc);
3696 	if (sc->sc_data_direction == DMA_TO_DEVICE) {
3697 		writedir = 1;
3698 		SE_DEBUG(DBG_LVL_4, "task->imm_count=0x%08x\n",
3699 			 task->imm_count);
3700 	} else
3701 		writedir = 0;
3702 	return beiscsi_iotask(task, sg, num_sg, xferlen, writedir);
3703 }
3704 
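/**
 * beiscsi_remove - PCI remove / driver detach handler
 * @pcidev: PCI device being removed
 *
 * Disables adapter interrupts, releases the MSI-X or legacy IRQs, stops
 * the work queue and iopoll instances, cleans up the port and frees the
 * mailbox DMA memory before unregistering and freeing the SCSI host.
 */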
3705 static void beiscsi_remove(struct pci_dev *pcidev)
3706 {
3707 	struct beiscsi_hba *phba = NULL;
3708 	struct hwi_controller *phwi_ctrlr;
3709 	struct hwi_context_memory *phwi_context;
3710 	struct be_eq_obj *pbe_eq;
3711 	unsigned int i, msix_vec;
3712 
3713 	phba = (struct beiscsi_hba *)pci_get_drvdata(pcidev);
3714 	if (!phba) {
3715 		dev_err(&pcidev->dev, "beiscsi_remove called with no phba\n");
3716 		return;
3717 	}
3718 
3719 	phwi_ctrlr = phba->phwi_ctrlr;
3720 	phwi_context = phwi_ctrlr->phwi_ctxt;
3721 	hwi_disable_intr(phba);
3722 	if (phba->msix_enabled) {
3723 		for (i = 0; i <= phba->num_cpus; i++) {
3724 			msix_vec = phba->msix_entries[i].vector;
3725 			free_irq(msix_vec, &phwi_context->be_eq[i]);
3726 		}
3727 	} else
3728 		if (phba->pcidev->irq)
3729 			free_irq(phba->pcidev->irq, phba);
3730 	pci_disable_msix(phba->pcidev);
3731 	destroy_workqueue(phba->wq);
3732 	if (blk_iopoll_enabled)
3733 		for (i = 0; i < phba->num_cpus; i++) {
3734 			pbe_eq = &phwi_context->be_eq[i];
3735 			blk_iopoll_disable(&pbe_eq->iopoll);
3736 		}
3737 
3738 	beiscsi_clean_port(phba);
3739 	beiscsi_free_mem(phba);
3740 	beiscsi_unmap_pci_function(phba);
3741 	pci_free_consistent(phba->pcidev,
3742 			    phba->ctrl.mbox_mem_alloced.size,
3743 			    phba->ctrl.mbox_mem_alloced.va,
3744 			    phba->ctrl.mbox_mem_alloced.dma);
3745 	iscsi_host_remove(phba->shost);
3746 	pci_dev_put(phba->pcidev);
3747 	iscsi_host_free(phba->shost);
3748 }
3749 
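/**
 * beiscsi_msix_enable - try to switch the adapter to MSI-X
 * @phba: device-private HBA structure
 *
 * Requests num_cpus + 1 vectors (one event queue per CPU plus one for
 * management completions); on failure the driver keeps using INTx.
 */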
3750 static void beiscsi_msix_enable(struct beiscsi_hba *phba)
3751 {
3752 	int i, status;
3753 
3754 	for (i = 0; i <= phba->num_cpus; i++)
3755 		phba->msix_entries[i].entry = i;
3756 
3757 	status = pci_enable_msix(phba->pcidev, phba->msix_entries,
3758 				 (phba->num_cpus + 1));
3759 	if (!status)
3760 		phba->msix_enabled = true;
3761 
3762 	return;
3763 }
3764 
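/**
 * beiscsi_dev_probe - PCI probe entry point
 * @pcidev: PCI device that matched beiscsi_pci_id_table
 * @id: matching id table entry
 *
 * Enables the PCI device, allocates the HBA, detects the chip
 * generation, sets up MSI-X, firmware configuration, queues, the
 * per-host work queue and iopoll, and finally enables interrupts.
 * Each failure unwinds the earlier steps through the labels below.
 */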
3765 static int __devinit beiscsi_dev_probe(struct pci_dev *pcidev,
3766 				const struct pci_device_id *id)
3767 {
3768 	struct beiscsi_hba *phba = NULL;
3769 	struct hwi_controller *phwi_ctrlr;
3770 	struct hwi_context_memory *phwi_context;
3771 	struct be_eq_obj *pbe_eq;
3772 	int ret, msix_vec, num_cpus, i;
3773 
3774 	ret = beiscsi_enable_pci(pcidev);
3775 	if (ret < 0) {
3776 		dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3777 			"Failed to enable pci device\n");
3778 		return ret;
3779 	}
3780 
3781 	phba = beiscsi_hba_alloc(pcidev);
3782 	if (!phba) {
3783 		dev_err(&pcidev->dev, "beiscsi_dev_probe - "
3784 			"Failed in beiscsi_hba_alloc\n");
3785 		goto disable_pci;
3786 	}
3787 
3788 	switch (pcidev->device) {
3789 	case BE_DEVICE_ID1:
3790 	case OC_DEVICE_ID1:
3791 	case OC_DEVICE_ID2:
3792 		phba->generation = BE_GEN2;
3793 		break;
3794 	case BE_DEVICE_ID2:
3795 	case OC_DEVICE_ID3:
3796 		phba->generation = BE_GEN3;
3797 		break;
3798 	default:
3799 		phba->generation = 0;
3800 	}
3801 
3802 	if (enable_msix)
3803 		num_cpus = find_num_cpus();
3804 	else
3805 		num_cpus = 1;
3806 	phba->num_cpus = num_cpus;
3807 	SE_DEBUG(DBG_LVL_8, "num_cpus = %d\n", phba->num_cpus);
3808 
3809 	if (enable_msix)
3810 		beiscsi_msix_enable(phba);
3811 	ret = be_ctrl_init(phba, pcidev);
3812 	if (ret) {
3813 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3814 			     "Failed in be_ctrl_init\n");
3815 		goto hba_free;
3816 	}
3817 
3818 	spin_lock_init(&phba->io_sgl_lock);
3819 	spin_lock_init(&phba->mgmt_sgl_lock);
3820 	spin_lock_init(&phba->isr_lock);
3821 	ret = mgmt_get_fw_config(&phba->ctrl, phba);
3822 	if (ret != 0) {
3823 		shost_printk(KERN_ERR, phba->shost,
3824 			     "Error getting fw config\n");
3825 		goto free_port;
3826 	}
3827 	phba->shost->max_id = phba->fw_config.iscsi_cid_count;
3828 	beiscsi_get_params(phba);
3829 	phba->shost->can_queue = phba->params.ios_per_ctrl;
3830 	ret = beiscsi_init_port(phba);
3831 	if (ret < 0) {
3832 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3833 			     "Failed in beiscsi_init_port\n");
3834 		goto free_port;
3835 	}
3836 
3837 	for (i = 0; i < MAX_MCC_CMD ; i++) {
3838 		init_waitqueue_head(&phba->ctrl.mcc_wait[i + 1]);
3839 		phba->ctrl.mcc_tag[i] = i + 1;
3840 		phba->ctrl.mcc_numtag[i + 1] = 0;
3841 		phba->ctrl.mcc_tag_available++;
3842 	}
3843 
3844 	phba->ctrl.mcc_alloc_index = phba->ctrl.mcc_free_index = 0;
3845 
3846 	snprintf(phba->wq_name, sizeof(phba->wq_name), "beiscsi_q_irq%u",
3847 		 phba->shost->host_no);
3848 	phba->wq = create_workqueue(phba->wq_name);
3849 	if (!phba->wq) {
3850 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3851 			     "Failed to allocate work queue\n");
3852 		goto free_twq;
3853 	}
3854 
3855 	INIT_WORK(&phba->work_cqs, beiscsi_process_all_cqs);
3856 
3857 	phwi_ctrlr = phba->phwi_ctrlr;
3858 	phwi_context = phwi_ctrlr->phwi_ctxt;
3859 	if (blk_iopoll_enabled) {
3860 		for (i = 0; i < phba->num_cpus; i++) {
3861 			pbe_eq = &phwi_context->be_eq[i];
3862 			blk_iopoll_init(&pbe_eq->iopoll, be_iopoll_budget,
3863 					be_iopoll);
3864 			blk_iopoll_enable(&pbe_eq->iopoll);
3865 		}
3866 	}
3867 	ret = beiscsi_init_irqs(phba);
3868 	if (ret < 0) {
3869 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3870 			     "Failed in beiscsi_init_irqs\n");
3871 		goto free_blkenbld;
3872 	}
3873 	ret = hwi_enable_intr(phba);
3874 	if (ret < 0) {
3875 		shost_printk(KERN_ERR, phba->shost, "beiscsi_dev_probe - "
3876 			     "Failed in hwi_enable_intr\n");
3877 		goto free_ctrlr;
3878 	}
3879 	SE_DEBUG(DBG_LVL_8, "\n\n\n SUCCESS - DRIVER LOADED\n\n\n");
3880 	return 0;
3881 
3882 free_ctrlr:
3883 	if (phba->msix_enabled) {
3884 		for (i = 0; i <= phba->num_cpus; i++) {
3885 			msix_vec = phba->msix_entries[i].vector;
3886 			free_irq(msix_vec, &phwi_context->be_eq[i]);
3887 		}
3888 	} else
3889 		if (phba->pcidev->irq)
3890 			free_irq(phba->pcidev->irq, phba);
3891 	pci_disable_msix(phba->pcidev);
3892 free_blkenbld:
3893 	destroy_workqueue(phba->wq);
3894 	if (blk_iopoll_enabled)
3895 		for (i = 0; i < phba->num_cpus; i++) {
3896 			pbe_eq = &phwi_context->be_eq[i];
3897 			blk_iopoll_disable(&pbe_eq->iopoll);
3898 		}
3899 free_twq:
3900 	beiscsi_clean_port(phba);
3901 	beiscsi_free_mem(phba);
3902 free_port:
3903 	pci_free_consistent(phba->pcidev,
3904 			    phba->ctrl.mbox_mem_alloced.size,
3905 			    phba->ctrl.mbox_mem_alloced.va,
3906 			    phba->ctrl.mbox_mem_alloced.dma);
3907 	beiscsi_unmap_pci_function(phba);
3908 hba_free:
3909 	iscsi_host_remove(phba->shost);
3910 	pci_dev_put(phba->pcidev);
3911 	iscsi_host_free(phba->shost);
3912 disable_pci:
3913 	pci_disable_device(pcidev);
3914 	return ret;
3915 }
3916 
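/*
 * Entry points exported to the iSCSI transport class. Generic session
 * and connection management is delegated to libiscsi; the data path
 * (alloc_pdu/xmit_task/cleanup_task) and endpoint handling are
 * implemented above.
 */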
3917 struct iscsi_transport beiscsi_iscsi_transport = {
3918 	.owner = THIS_MODULE,
3919 	.name = DRV_NAME,
3920 	.caps = CAP_RECOVERY_L0 | CAP_HDRDGST | CAP_TEXT_NEGO |
3921 		CAP_MULTI_R2T | CAP_DATADGST | CAP_DATA_PATH_OFFLOAD,
3922 	.param_mask = ISCSI_MAX_RECV_DLENGTH |
3923 		ISCSI_MAX_XMIT_DLENGTH |
3924 		ISCSI_HDRDGST_EN |
3925 		ISCSI_DATADGST_EN |
3926 		ISCSI_INITIAL_R2T_EN |
3927 		ISCSI_MAX_R2T |
3928 		ISCSI_IMM_DATA_EN |
3929 		ISCSI_FIRST_BURST |
3930 		ISCSI_MAX_BURST |
3931 		ISCSI_PDU_INORDER_EN |
3932 		ISCSI_DATASEQ_INORDER_EN |
3933 		ISCSI_ERL |
3934 		ISCSI_CONN_PORT |
3935 		ISCSI_CONN_ADDRESS |
3936 		ISCSI_EXP_STATSN |
3937 		ISCSI_PERSISTENT_PORT |
3938 		ISCSI_PERSISTENT_ADDRESS |
3939 		ISCSI_TARGET_NAME | ISCSI_TPGT |
3940 		ISCSI_USERNAME | ISCSI_PASSWORD |
3941 		ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
3942 		ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
3943 		ISCSI_LU_RESET_TMO |
3944 		ISCSI_PING_TMO | ISCSI_RECV_TMO |
3945 		ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
3946 	.host_param_mask = ISCSI_HOST_HWADDRESS | ISCSI_HOST_IPADDRESS |
3947 				ISCSI_HOST_INITIATOR_NAME,
3948 	.create_session = beiscsi_session_create,
3949 	.destroy_session = beiscsi_session_destroy,
3950 	.create_conn = beiscsi_conn_create,
3951 	.bind_conn = beiscsi_conn_bind,
3952 	.destroy_conn = iscsi_conn_teardown,
3953 	.set_param = beiscsi_set_param,
3954 	.get_conn_param = beiscsi_conn_get_param,
3955 	.get_session_param = iscsi_session_get_param,
3956 	.get_host_param = beiscsi_get_host_param,
3957 	.start_conn = beiscsi_conn_start,
3958 	.stop_conn = beiscsi_conn_stop,
3959 	.send_pdu = iscsi_conn_send_pdu,
3960 	.xmit_task = beiscsi_task_xmit,
3961 	.cleanup_task = beiscsi_cleanup_task,
3962 	.alloc_pdu = beiscsi_alloc_pdu,
3963 	.parse_pdu_itt = beiscsi_parse_pdu,
3964 	.get_stats = beiscsi_conn_get_stats,
3965 	.ep_connect = beiscsi_ep_connect,
3966 	.ep_poll = beiscsi_ep_poll,
3967 	.ep_disconnect = beiscsi_ep_disconnect,
3968 	.session_recovery_timedout = iscsi_session_recovery_timedout,
3969 };
3970 
3971 static struct pci_driver beiscsi_pci_driver = {
3972 	.name = DRV_NAME,
3973 	.probe = beiscsi_dev_probe,
3974 	.remove = beiscsi_remove,
3975 	.id_table = beiscsi_pci_id_table
3976 };
3977 
3978 
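/**
 * beiscsi_module_init - module entry point
 *
 * Registers the iSCSI transport first and then the PCI driver; if PCI
 * registration fails, the transport is unregistered again so nothing is
 * left behind.
 */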
3979 static int __init beiscsi_module_init(void)
3980 {
3981 	int ret;
3982 
3983 	beiscsi_scsi_transport =
3984 			iscsi_register_transport(&beiscsi_iscsi_transport);
3985 	if (!beiscsi_scsi_transport) {
3986 		SE_DEBUG(DBG_LVL_1,
3987 			 "beiscsi_module_init - Unable to register beiscsi "
3988 			 "transport.\n");
3989 		return -ENOMEM;
3990 	}
3991 	SE_DEBUG(DBG_LVL_8, "In beiscsi_module_init, tt=%p\n",
3992 		 &beiscsi_iscsi_transport);
3993 
3994 	ret = pci_register_driver(&beiscsi_pci_driver);
3995 	if (ret) {
3996 		SE_DEBUG(DBG_LVL_1,
3997 			 "beiscsi_module_init - Unable to register "
3998 			 "beiscsi pci driver.\n");
3999 		goto unregister_iscsi_transport;
4000 	}
4001 	return 0;
4002 
4003 unregister_iscsi_transport:
4004 	iscsi_unregister_transport(&beiscsi_iscsi_transport);
4005 	return ret;
4006 }
4007 
4008 static void __exit beiscsi_module_exit(void)
4009 {
4010 	pci_unregister_driver(&beiscsi_pci_driver);
4011 	iscsi_unregister_transport(&beiscsi_iscsi_transport);
4012 }
4013 
4014 module_init(beiscsi_module_init);
4015 module_exit(beiscsi_module_exit);
4016