/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2007 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"

/*
 * Macro to log: Mailbox command x%x cannot issue Data.
 * This allows multiple uses of message 0311 without perturbing the
 * log message utility.
 */
#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
			lpfc_printf_log(phba, \
				KERN_INFO, \
				LOG_MBOX | LOG_SLI, \
				"(%d):0311 Mailbox command x%x cannot " \
				"issue Data: x%x x%x x%x\n", \
				pmbox->vport ? pmbox->vport->vpi : 0, \
				pmbox->mb.mbxCommand,		\
				phba->pport->port_state,	\
				psli->sli_flag,	\
				flag)

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/* SLI-2/SLI-3 provide different sized iocbs.  Given a pointer
 * to the start of the ring, and the slot number of the
 * desired iocb entry, calc a pointer to that entry.
 */
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->cmdringaddr) +
			   pring->cmdidx * phba->iocb_cmd_size);
}

static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->rspringaddr) +
			   pring->rspidx * phba->iocb_rsp_size);
}

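/*
 * Lock-free worker: remove and return the first iocbq on the HBA's
 * iocb free list, or NULL if the list is empty.  Caller must hold
 * hbalock.
 */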
static struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	return iocbq;
}

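/*
 * Allocate an iocbq from the free list, taking and releasing hbalock
 * around the lock-free worker above.
 */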
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

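/*
 * Lock-free worker: clear the volatile part of an iocbq and return it
 * to the free list.  Caller must hold hbalock.
 */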
void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

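/*
 * Locking wrapper around __lpfc_sli_release_iocbq().
 */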
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

/*
 * Translate the iocb command to an iocb command type used to decide the final
 * disposition of each completed IOCB.
 */
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_GET_RPI_CN:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_GET_RPI_CR:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_FCP_TSEND_CX:
	case CMD_FCP_TRSP_CX:
	case CMD_FCP_TRECEIVE_CX:
	case CMD_FCP_AUTO_TRSP_CX:
	case CMD_ADAPTER_MSG:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_FCP_TSEND64_CX:
	case CMD_FCP_TRSP64_CX:
	case CMD_FCP_TRECEIVE64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
	case CMD_IOCB_RCV_SEQ64_CX:
	case CMD_IOCB_RCV_ELS64_CX:
	case CMD_IOCB_RCV_CONT64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}

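/*
 * Issue a CONFIG_RING mailbox command for each ring.  Returns 0 on
 * success, -ENOMEM if no mailbox memory is available, and -ENXIO if
 * any CONFIG_RING command fails (the HBA is then marked in error).
 */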
static int
lpfc_sli_ring_map(struct lpfc_hba *phba)
{
	struct lpfc_sli *psli = &phba->sli;
	LPFC_MBOXQ_t *pmb;
	MAILBOX_t *pmbox;
	int i, rc, ret = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb)
		return -ENOMEM;
	pmbox = &pmb->mb;
	phba->link_state = LPFC_INIT_MBX_CMDS;
	for (i = 0; i < psli->num_rings; i++) {
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"0446 Adapter failed to init (%d), "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					rc, pmbox->mbxCommand,
					pmbox->mbxStatus, i);
			phba->link_state = LPFC_HBA_ERROR;
			ret = -ENXIO;
			break;
		}
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return ret;
}

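/*
 * Queue a command iocb on the ring's txcmplq to await its completion.
 * For ELS commands (other than aborts/closes) the vport's ELS timeout
 * timer is also (re)armed.
 */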
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			struct lpfc_iocbq *piocb)
{
	list_add_tail(&piocb->list, &pring->txcmplq);
	pring->txcmplq_cnt++;
	if (unlikely(pring->ringno == LPFC_ELS_RING) &&
	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
		if (!piocb->vport)
			BUG();
		else
			mod_timer(&piocb->vport->els_tmofunc,
				  jiffies + HZ * (phba->fc_ratov << 1));
	}

	return 0;
}

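/*
 * Remove and return the first iocb queued on the ring's txq, or NULL
 * if the txq is empty.
 */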
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *cmd_iocb;

	list_remove_head(&pring->txq, cmd_iocb, struct lpfc_iocbq, list);
	if (cmd_iocb != NULL)
		pring->txq_cnt--;
	return cmd_iocb;
}

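/*
 * Return a pointer to the next free command slot in the ring, or NULL
 * if the ring is full.  A get index reported by the port beyond the
 * ring size is treated as a fatal adapter error.
 */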
static IOCB_t *
lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
		&phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	   (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->link_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * the worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;

			/* hbalock should already be held */
			if (phba->work_wait)
				lpfc_worker_wake_up(phba);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	return lpfc_cmd_iocb(phba, pring);
}

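/*
 * Assign the next free iotag to @iocbq and record it in the
 * iocbq_lookup array, growing the array if needed.  Returns the iotag,
 * or 0 if allocation fails.
 */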
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_iocbq **new_arr;
	struct lpfc_iocbq **old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(&phba->hbalock);
	iotag = psli->last_iotag;
	if (++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(&phba->hbalock);
		iocbq->iotag = iotag;
		return iotag;
	} else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		spin_unlock_irq(&phba->hbalock);
		new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			spin_lock_irq(&phba->hbalock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly improbable case */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if (++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(&phba->hbalock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(&phba->hbalock);
				return 0;
			}
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag + 1) *
					sizeof (struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(&phba->hbalock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	} else
		spin_unlock_irq(&phba->hbalock);

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
			psli->last_iotag);

	return 0;
}

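/*
 * Copy @nextiocb into the ring slot @iocb, bump the put index and tell
 * the HBA about it.  Commands with a completion routine are parked on
 * the txcmplq; the rest are returned to the free list immediately.
 */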
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	if (pring->ringno == LPFC_ELS_RING) {
		lpfc_debugfs_slow_ring_trc(phba,
			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
			*(((uint32_t *) &nextiocb->iocb) + 4),
			*(((uint32_t *) &nextiocb->iocb) + 6),
			*(((uint32_t *) &nextiocb->iocb) + 7));
	}

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		__lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
}

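/*
 * The ring is full: ask the HBA to raise an attention condition when
 * command entries become available again.
 */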
static void
lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}

static void
lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	wmb();
	writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */
}

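/*
 * Drain the ring's txq into free command slots, then update the chip
 * attention register so the HBA processes the new work.
 */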
static void
lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    lpfc_is_link_up(phba) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA) &&
	    !(pring->flag & LPFC_STOP_IOCB_MBX)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}

/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
static void
lpfc_sli_turn_on_ring(struct lpfc_hba *phba, int ringno)
{
	struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[ringno] :
		&phba->slim2p->mbx.us.s2.port[ringno];
	unsigned long iflags;

	/* If the ring is active, flag it */
	spin_lock_irqsave(&phba->hbalock, iflags);
	if (phba->sli.ring[ringno].cmdringaddr) {
		if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
			phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
			/*
			 * Force update of the local copy of cmdGetInx
			 */
			phba->sli.ring[ringno].local_getidx
				= le32_to_cpu(pgp->cmdGetInx);
			lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
		}
	}
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

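/*
 * Return a pointer to the next free slot in host buffer queue @hbqno,
 * or NULL if the HBQ is full.  A bogus get index from the port is
 * treated as a fatal adapter error.
 */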
struct lpfc_hbq_entry *
lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
{
	struct hbq_s *hbqp = &phba->hbqs[hbqno];

	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
		hbqp->next_hbqPutIdx = 0;

	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
		uint32_t raw_index = phba->hbq_get[hbqno];
		uint32_t getidx = le32_to_cpu(raw_index);

		hbqp->local_hbqGetIdx = getidx;

		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
			lpfc_printf_log(phba, KERN_ERR,
					LOG_SLI | LOG_VPORT,
					"1802 HBQ %d: local_hbqGetIdx %u "
					"is greater than hbqp->entry_count %u\n",
					hbqno, hbqp->local_hbqGetIdx,
					hbqp->entry_count);

			phba->link_state = LPFC_HBA_ERROR;
			return NULL;
		}

		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
			return NULL;
	}

	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
			hbqp->hbqPutIdx;
}

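/*
 * Free every buffer currently posted to any HBQ.
 */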
void
lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
{
	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
	struct hbq_dmabuf *hbq_buf;
	int i, hbq_count;

	hbq_count = lpfc_sli_hbq_count();
	/* Return all memory used by all HBQs */
	for (i = 0; i < hbq_count; ++i) {
		list_for_each_entry_safe(dmabuf, next_dmabuf,
				&phba->hbqs[i].hbq_buffer_list, list) {
			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
			list_del(&hbq_buf->dbuf.list);
			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
		}
	}
}

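/*
 * Post @hbq_buf to the firmware by filling the next free entry of HBQ
 * @hbqno and updating the put index in SLIM.  Returns the HBQ entry
 * used, or NULL if the queue is full.
 */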
static struct lpfc_hbq_entry *
lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
			 struct hbq_dmabuf *hbq_buf)
{
	struct lpfc_hbq_entry *hbqe;
	dma_addr_t physaddr = hbq_buf->dbuf.phys;

	/* Get next HBQ entry slot to use */
	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
	if (hbqe) {
		struct hbq_s *hbqp = &phba->hbqs[hbqno];

		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
		hbqe->bde.tus.f.bdeSize = hbq_buf->size;
		hbqe->bde.tus.f.bdeFlags = 0;
		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
		/* Sync SLIM */
		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
		/* flush */
		readl(phba->hbq_put + hbqno);
		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
	}
	return hbqe;
}

static struct lpfc_hbq_init lpfc_els_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_ELS_RING),
	.buffer_count = 0,
	.init_count = 20,
	.add_count = 5,
};

static struct lpfc_hbq_init lpfc_extra_hbq = {
	.rn = 1,
	.entry_count = 200,
	.mask_count = 0,
	.profile = 0,
	.ring_mask = (1 << LPFC_EXTRA_RING),
	.buffer_count = 0,
	.init_count = 0,
	.add_count = 5,
};

struct lpfc_hbq_init *lpfc_hbq_defs[] = {
	&lpfc_els_hbq,
	&lpfc_extra_hbq,
};

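/*
 * Allocate up to @count buffers and post them to HBQ @hbqno.  Returns
 * 0 on success, 1 if a buffer allocation fails.
 */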
static int
lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
{
	uint32_t i, start, end;
	struct hbq_dmabuf *hbq_buffer;

	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
		return 0;

	start = lpfc_hbq_defs[hbqno]->buffer_count;
	end = count + lpfc_hbq_defs[hbqno]->buffer_count;
	if (end > lpfc_hbq_defs[hbqno]->entry_count)
		end = lpfc_hbq_defs[hbqno]->entry_count;

	/* Populate HBQ entries */
	for (i = start; i < end; i++) {
		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
		if (!hbq_buffer)
			return 1;
		hbq_buffer->tag = (i | (hbqno << 16));
		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			lpfc_hbq_defs[hbqno]->buffer_count++;
		else
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
	return 0;
}

int
lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->add_count);
}

int
lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
{
	return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
					 lpfc_hbq_defs[qno]->init_count);
}

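/*
 * Look up the HBQ buffer associated with @tag (HBQ number in the upper
 * 16 bits, buffer index in the lower 16).  Returns NULL on a bad tag.
 */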
struct hbq_dmabuf *
lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
{
	struct lpfc_dmabuf *d_buf;
	struct hbq_dmabuf *hbq_buf;
	uint32_t hbqno;

	hbqno = tag >> 16;
	/* >= guards against indexing one past the end of phba->hbqs */
	if (hbqno >= LPFC_MAX_HBQS)
		return NULL;

	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
		if (hbq_buf->tag == tag)
			return hbq_buf;
	}
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
			"1803 Bad hbq tag. Data: x%x x%x\n",
			tag, lpfc_hbq_defs[tag >> 16]->buffer_count);
	return NULL;
}

void
lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
{
	uint32_t hbqno;

	if (hbq_buffer) {
		hbqno = hbq_buffer->tag >> 16;
		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
	}
}

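/*
 * Validate a mailbox command.  Known commands are echoed back;
 * anything else maps to MBX_SHUTDOWN, which the caller treats as a
 * fatal error.
 */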
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_LOAD_SM:
	case MBX_READ_NV:
	case MBX_WRITE_NV:
	case MBX_RUN_BIU_DIAG:
	case MBX_INIT_LINK:
	case MBX_DOWN_LINK:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_RESET_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_SPARM:
	case MBX_READ_STATUS:
	case MBX_READ_RPI:
	case MBX_READ_XRI:
	case MBX_READ_REV:
	case MBX_READ_LNK_STAT:
	case MBX_REG_LOGIN:
	case MBX_UNREG_LOGIN:
	case MBX_READ_LA:
	case MBX_CLEAR_LA:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_RUN_DIAGS:
	case MBX_RESTART:
	case MBX_UPDATE_CFG:
	case MBX_DOWN_LOAD:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_SET_MASK:
	case MBX_SET_SLIM:
	case MBX_UNREG_D_ID:
	case MBX_KILL_BOARD:
	case MBX_CONFIG_FARP:
	case MBX_BEACON:
	case MBX_LOAD_AREA:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_READ_RPI64:
	case MBX_REG_LOGIN64:
	case MBX_READ_LA64:
	case MBX_FLASH_WR_ULA:
	case MBX_SET_DEBUG:
	case MBX_LOAD_EXP_ROM:
	case MBX_REG_VPI:
	case MBX_UNREG_VPI:
	case MBX_HEARTBEAT:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}

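/*
 * Completion handler for mailbox commands issued with MBX_SLEEP: mark
 * the command done and wake up the waiting thread, if any.
 */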
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
{
	wait_queue_head_t *pdone_q;
	unsigned long drvr_flag;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
	spin_lock_irqsave(&phba->hbalock, drvr_flag);
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
	return;
}

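/*
 * Default mailbox completion handler: free the DMA buffer and the
 * mailbox.  A successful REG_LOGIN completing after its node has gone
 * away triggers an UNREG_LOGIN to release the RPI.
 */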
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_dmabuf *mp;
	uint16_t rpi;
	int rc;

	mp = (struct lpfc_dmabuf *) (pmb->context1);

	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}

	/*
	 * If a REG_LOGIN succeeded after the node was destroyed or the
	 * node is in re-discovery, the driver needs to clean up the RPI.
	 */
	if (!(phba->pport->load_flag & FC_UNLOADING) &&
	    pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
	    !pmb->mb.mbxStatus) {

		rpi = pmb->mb.un.varWords[0];
		lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
		if (rc != MBX_NOT_FINISHED)
			return;
	}

	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}

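/*
 * Process the mailbox completion queue: splice it off under the lock,
 * then run each completed command through sanity checks, an optional
 * retry for MBXERR_NO_RESOURCES, and its completion handler.
 */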
int
lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
{
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	int rc;
	LIST_HEAD(cmplq);

	phba->sli.slistat.mbox_event++;

	/* Get all completed mailbox buffers into the cmplq */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
	spin_unlock_irq(&phba->hbalock);

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	do {
		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
		if (pmb == NULL)
			break;

		pmbox = &pmb->mb;

		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
			if (pmb->vport) {
				lpfc_debugfs_disc_trc(pmb->vport,
					LPFC_DISC_TRC_MBOX_VPORT,
					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
			else {
				lpfc_debugfs_disc_trc(phba->pport,
					LPFC_DISC_TRC_MBOX,
					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
					(uint32_t)pmbox->mbxCommand,
					pmbox->un.varWords[0],
					pmbox->un.varWords[1]);
			}
		}

		/*
		 * It is a fatal error if an unknown mbox command completes.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
					"(%d):0323 Unknown Mailbox command "
					"%x Cmpl\n",
					pmb->vport ? pmb->vport->vpi : 0,
					pmbox->mbxCommand);
			phba->link_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			continue;
		}

		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba, KERN_INFO,
						LOG_MBOX | LOG_SLI,
						"(%d):0305 Mbox cmd cmpl "
						"error - RETRYing Data: x%x "
						"x%x x%x x%x\n",
						pmb->vport ? pmb->vport->vpi : 0,
						pmbox->mbxCommand,
						pmbox->mbxStatus,
						pmbox->un.varWords[0],
						/* vport may be NULL here, as
						 * the other traces guard */
						pmb->vport ?
						    pmb->vport->port_state :
						    LPFC_VPORT_UNKNOWN);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				spin_lock_irq(&phba->hbalock);
				phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irq(&phba->hbalock);
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc == MBX_SUCCESS)
					continue;
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
				"(%d):0307 Mailbox cmd x%x Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				pmb->vport ? pmb->vport->vpi : 0,
				pmbox->mbxCommand,
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl)
			pmb->mbox_cmpl(phba, pmb);
	} while (1);
	return 0;
}

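/*
 * Detach the buffer that received data for @tag from its HBQ and
 * replace it: the DMA mapping of a newly allocated buffer is swapped
 * in and reposted to the firmware, and the buffer holding the received
 * data is returned.  If no new buffer is available the original is
 * returned without a replacement being posted.
 */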
static struct lpfc_dmabuf *
lpfc_sli_replace_hbqbuff(struct lpfc_hba *phba, uint32_t tag)
{
	struct hbq_dmabuf *hbq_entry, *new_hbq_entry;
	uint32_t hbqno;
	void *virt;		/* virtual address ptr */
	dma_addr_t phys;	/* mapped address */

	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
	if (hbq_entry == NULL)
		return NULL;
	list_del(&hbq_entry->dbuf.list);

	hbqno = tag >> 16;
	new_hbq_entry = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
	if (new_hbq_entry == NULL)
		return &hbq_entry->dbuf;
	new_hbq_entry->tag = -1;
	phys = new_hbq_entry->dbuf.phys;
	virt = new_hbq_entry->dbuf.virt;
	new_hbq_entry->dbuf.phys = hbq_entry->dbuf.phys;
	new_hbq_entry->dbuf.virt = hbq_entry->dbuf.virt;
	hbq_entry->dbuf.phys = phys;
	hbq_entry->dbuf.virt = virt;
	lpfc_sli_free_hbq(phba, hbq_entry);
	return &new_hbq_entry->dbuf;
}

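/*
 * Handle an unsolicited iocb: derive the FC R_CTL/TYPE of the received
 * sequence, swap out any HBQ buffers it references, and dispatch it to
 * the ring's matching unsolicited-event handler.
 */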
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	uint32_t match, i;

	match = 0;
	irsp = &(saveq->iocb);
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
	    || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)
	    || (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)
	    || (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX)) {
		Rctl = FC_ELS_REQ;
		Type = FC_ELS_DATA;
	} else {
		w5p = (WORD5 *) &(saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
			Rctl = FC_ELS_REQ;
			Type = FC_ELS_DATA;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		if (irsp->ulpBdeCount != 0)
			saveq->context2 = lpfc_sli_replace_hbqbuff(phba,
						irsp->un.ulpWord[3]);
		if (irsp->ulpBdeCount == 2)
			saveq->context3 = lpfc_sli_replace_hbqbuff(phba,
						irsp->unsli3.sli3Words[7]);
	}

	/* Unsolicited responses */
	if (pring->prt[0].profile) {
		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
									saveq);
		match = 1;
	} else {
		/* We must search, based on rctl / type
		   for the right routine */
		for (i = 0; i < pring->num_mask; i++) {
			if ((pring->prt[i].rctl == Rctl)
			    && (pring->prt[i].type == Type)) {
				if (pring->prt[i].lpfc_sli_rcv_unsol_event)
					(pring->prt[i].lpfc_sli_rcv_unsol_event)
							(phba, pring, saveq);
				match = 1;
				break;
			}
		}
	}
	if (match == 0) {
		/* Ring <ringno> handler: unexpected
		   Rctl <Rctl> Type <Type> received */
		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				pring->ringno, Rctl, Type);
	}
	return 1;
}

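/*
 * Use the response iocb's iotag to find the originating command iocb,
 * removing it from the txcmplq.  Returns NULL for an out-of-range
 * iotag.
 */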
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
		      struct lpfc_sli_ring *pring,
		      struct lpfc_iocbq *prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		list_del_init(&cmd_iocb->list);
		pring->txcmplq_cnt--;
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0317 iotag x%x is out of "
			"range: max iotag x%x wd0 x%x\n",
			iotag, phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}

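/*
 * Complete a solicited iocb: look up the command by iotag and invoke
 * its completion routine, mapping driver-initiated ELS aborts to a
 * LOCAL_REJECT/SLI_ABORTED status first.
 */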
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(&phba->hbalock, iflag);
	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
	spin_unlock_irqrestore(&phba->hbalock, iflag);

	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * Post all ELS completions to the worker thread.
			 * All others are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				if (cmdiocbp->iocb_flag & LPFC_DRIVER_ABORTED) {
					cmdiocbp->iocb_flag &=
						~LPFC_DRIVER_ABORTED;
					saveq->iocb.ulpStatus =
						IOSTAT_LOCAL_REJECT;
					saveq->iocb.un.ulpWord[4] =
						IOERR_SLI_ABORTED;
				}
			}
			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
		} else
			lpfc_sli_release_iocbq(phba, cmdiocbp);
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>.  cmdiocbp is NULL here, so log against the
			 * hba instead of dereferencing it for a vport.
			 */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0322 Ring %d handler: "
					"unexpected completion IoTag x%x "
					"Data: x%x x%x x%x x%x\n",
					pring->ringno,
					saveq->iocb.ulpIoTag,
					saveq->iocb.ulpStatus,
					saveq->iocb.un.ulpWord[4],
					saveq->iocb.ulpCommand,
					saveq->iocb.ulpContext);
		}
	}

	return rc;
}

static void
lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
		&phba->slim2p->mbx.us.s2.port[pring->ringno];
	/*
	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
	 * rsp ring <portRspMax>
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"0312 Ring %d handler: portRspPut %d "
			"is bigger than rsp ring %d\n",
			pring->ringno, le32_to_cpu(pgp->rspPutInx),
			pring->numRiocb);

	phba->link_state = LPFC_HBA_ERROR;

	/*
	 * All error attention handlers are posted to
	 * the worker thread
	 */
	phba->work_ha |= HA_ERATT;
	phba->work_hs = HS_FFER3;

	/* hbalock should already be held */
	if (phba->work_wait)
		lpfc_worker_wake_up(phba);

	return;
}

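/*
 * Poll the FCP ring for response iocbs and complete them.  Used when
 * interrupt-driven FCP ring handling is disabled via cfg_poll.
 */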
void lpfc_sli_poll_fcp_ring(struct lpfc_hba *phba)
{
	struct lpfc_sli      *psli  = &phba->sli;
	struct lpfc_sli_ring *pring = &psli->ring[LPFC_FCP_RING];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	struct lpfc_pgp *pgp;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int type;
	uint32_t rsp_cmpl = 0;
	uint32_t ha_copy;
	unsigned long iflags;

	pring->stats.iocb_event++;

	pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
		&phba->slim2p->mbx.us.s2.port[pring->ringno];

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		return;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		entry = lpfc_resp_iocb(phba, pring);
		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbq.iocb;
		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0326 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0314 IOCB cmd 0x%x "
						"processed. Skipping "
						"completion\n",
						irsp->ulpCommand);
				break;
			}

			spin_lock_irqsave(&phba->hbalock, iflags);
			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			spin_unlock_irqrestore(&phba->hbalock, iflags);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
			}
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0321 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	ha_copy = readl(phba->HAregaddr);
	ha_copy >>= (LPFC_FCP_RING * 4);

	if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}
	if ((ha_copy & HA_R0CE_RSP) &&
	    (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		spin_lock_irqsave(&phba->hbalock, iflags);
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

		spin_unlock_irqrestore(&phba->hbalock, iflags);
	}

	return;
}

/*
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 */
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
		&phba->slim2p->mbx.us.s2.port[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(&phba->hbalock, iflag);
		return 1;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = lpfc_resp_iocb(phba, pring);
		phba->last_completion_time = jiffies;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      phba->iocb_rsp_size);
		INIT_LIST_HEAD(&(rspiocbq.list));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
				(irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				lpfc_adjust_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"0336 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
						"0333 IOCB cmd 0x%x"
						" processed. Skipping"
						" completion\n",
						irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
							      &rspiocbq);
				} else {
					spin_unlock_irqrestore(&phba->hbalock,
							       iflag);
					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
							      &rspiocbq);
					spin_lock_irqsave(&phba->hbalock,
							  iflag);
				}
			}
			break;
		case LPFC_UNSOL_IOCB:
			spin_unlock_irqrestore(&phba->hbalock, iflag);
			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
			spin_lock_irqsave(&phba->hbalock, iflag);
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"0334 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						type, irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}

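/*
 * Handle ring events for the slow (ELS) path: copy response entries
 * into a continuation list until ulpLe marks the last entry, then
 * dispatch the assembled command to the appropriate completion
 * handler.
 */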
int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
				struct lpfc_sli_ring *pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = (phba->sli_rev == 3) ?
		&phba->slim2p->mbx.us.s3_pgp.port[pring->ringno] :
		&phba->slim2p->mbx.us.s2.port[pring->ringno];
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	struct lpfc_iocbq *next_iocb;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *saveq;
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	uint32_t status, free_saveq;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger
		 * than rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"0303 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				pring->ringno, portRspPut, portRspMax);

		phba->link_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(&phba->hbalock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return 1;
	}

	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = lpfc_resp_iocb(phba, pring);

		phba->last_completion_time = jiffies;
		rspiocbp = __lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __FUNCTION__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
				      phba->iocb_rsp_size);
		irsp = &rspiocbp->iocb;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		if (pring->ringno == LPFC_ELS_RING) {
			lpfc_debugfs_slow_ring_trc(phba,
			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
				*(((uint32_t *) irsp) + 4),
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);

		if (list_empty(&(pring->iocb_continueq))) {
			list_add(&rspiocbp->list, &(pring->iocb_continueq));
		} else {
			list_add_tail(&rspiocbp->list,
				      &(pring->iocb_continueq));
		}

		pring->iocb_continueq_cnt++;
		if (irsp->ulpLe) {
			/*
			 * By default, the driver expects to free all resources
			 * associated with this iocb completion.
			 */
			free_saveq = 1;
			saveq = list_get_first(&pring->iocb_continueq,
					       struct lpfc_iocbq, list);
			irsp = &(saveq->iocb);
			list_del_init(&pring->iocb_continueq);
			pring->iocb_continueq_cnt = 0;

			pring->stats.iocb_rsp++;

			/*
			 * If resource errors reported from HBA, reduce
			 * queuedepths of the SCSI device.
			 */
			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
			     (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
				spin_unlock_irqrestore(&phba->hbalock, iflag);
				lpfc_adjust_queue_depth(phba);
				spin_lock_irqsave(&phba->hbalock, iflag);
			}

			if (irsp->ulpStatus) {
				/* Rsp ring <ringno> error: IOCB */
				lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
						"0328 Rsp Ring %d error: "
						"IOCB Data: "
						"x%x x%x x%x x%x "
						"x%x x%x x%x x%x "
						"x%x x%x x%x x%x "
						"x%x x%x x%x x%x\n",
						pring->ringno,
						irsp->un.ulpWord[0],
						irsp->un.ulpWord[1],
						irsp->un.ulpWord[2],
						irsp->un.ulpWord[3],
						irsp->un.ulpWord[4],
						irsp->un.ulpWord[5],
						*(((uint32_t *) irsp) + 6),
						*(((uint32_t *) irsp) + 7),
						*(((uint32_t *) irsp) + 8),
						*(((uint32_t *) irsp) + 9),
						*(((uint32_t *) irsp) + 10),
						*(((uint32_t *) irsp) + 11),
						*(((uint32_t *) irsp) + 12),
						*(((uint32_t *) irsp) + 13),
						*(((uint32_t *) irsp) + 14),
						*(((uint32_t *) irsp) + 15));
			}

			/*
			 * Fetch the IOCB command type and call the correct
			 * completion routine.  Solicited and Unsolicited
			 * IOCBs on the ELS ring get freed back to the
			 * lpfc_iocb_list by the discovery kernel thread.
			 */
			iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
			type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
			if (type == LPFC_SOL_IOCB) {
				spin_unlock_irqrestore(&phba->hbalock,
						       iflag);
				rc = lpfc_sli_process_sol_iocb(phba, pring,
							       saveq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			} else if (type == LPFC_UNSOL_IOCB) {
				spin_unlock_irqrestore(&phba->hbalock,
						       iflag);
				rc = lpfc_sli_process_unsol_iocb(phba, pring,
								 saveq);
				spin_lock_irqsave(&phba->hbalock, iflag);
			} else if (type == LPFC_ABORT_IOCB) {
				if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
				    ((cmdiocbp =
				      lpfc_sli_iocbq_lookup(phba, pring,
							    saveq)))) {
					/* Call the specified completion
					   routine */
					if (cmdiocbp->iocb_cmpl) {
						spin_unlock_irqrestore(
						       &phba->hbalock,
						       iflag);
						(cmdiocbp->iocb_cmpl) (phba,
							     cmdiocbp, saveq);
						spin_lock_irqsave(
							  &phba->hbalock,
							  iflag);
					} else
						__lpfc_sli_release_iocbq(phba,
								      cmdiocbp);
				}
			} else if (type == LPFC_UNKNOWN_IOCB) {
				if (irsp->ulpCommand == CMD_ADAPTER_MSG) {

					char adaptermsg[LPFC_MAX_ADPTMSG];

					memset(adaptermsg, 0,
					       LPFC_MAX_ADPTMSG);
					memcpy(&adaptermsg[0], (uint8_t *) irsp,
					       MAX_MSG_DATA);
					dev_warn(&((phba->pcidev)->dev),
						 "lpfc%d: %s",
						 phba->brd_no, adaptermsg);
				} else {
					/* Unknown IOCB command */
					lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
							"0335 Unknown IOCB "
							"command Data: x%x "
							"x%x x%x x%x\n",
							irsp->ulpCommand,
							irsp->ulpStatus,
							irsp->ulpIoTag,
							irsp->ulpContext);
				}
			}

			if (free_saveq) {
				list_for_each_entry_safe(rspiocbp, next_iocb,
							 &saveq->list, list) {
					list_del(&rspiocbp->list);
					__lpfc_sli_release_iocbq(phba,
								 rspiocbp);
				}
				__lpfc_sli_release_iocbq(phba, saveq);
			}
			rspiocbp = NULL;
		}

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(&phba->hbalock, iflag);
	return rc;
}

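/*
 * Fail every command queued on the ring: iocbs still on the txq are
 * completed with LOCAL_REJECT/SLI_ABORTED, and ABTS is issued for
 * everything outstanding on the txcmplq.
 */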
void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	LIST_HEAD(completions);
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *cmd = NULL;

	if (pring->ringno == LPFC_ELS_RING)
		lpfc_fabric_abort_hba(phba);

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	spin_lock_irq(&phba->hbalock);
	list_splice_init(&pring->txq, &completions);
	pring->txq_cnt = 0;

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
		lpfc_sli_issue_abort_iotag(phba, pring, iocb);

	spin_unlock_irq(&phba->hbalock);

	while (!list_empty(&completions)) {
		iocb = list_get_first(&completions, struct lpfc_iocbq, list);
		cmd = &iocb->iocb;
		list_del_init(&iocb->list);

		if (!iocb->iocb_cmpl)
			lpfc_sli_release_iocbq(phba, iocb);
		else {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			(iocb->iocb_cmpl) (phba, iocb, iocb);
		}
	}
}

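/*
 * Wait for the HBA to become ready, polling the host status register
 * with increasing delays and restarting the board once along the way.
 * Returns 0 when @mask is set, nonzero on error or timeout.
 */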
1785 int
1786 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
1787 {
1788 	uint32_t status;
1789 	int i = 0;
1790 	int retval = 0;
1791 
1792 	/* Read the HBA Host Status Register */
1793 	status = readl(phba->HSregaddr);
1794 
1795 	/*
1796 	 * Check status register every 100ms for 5 retries, then every
1797 	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
1798 	 * every 2.5 sec for 4.
1799 	 * Break our of the loop if errors occurred during init.
1800 	 */
1801 	while (((status & mask) != mask) &&
1802 	       !(status & HS_FFERM) &&
1803 	       i++ < 20) {
1804 
1805 		if (i <= 5)
1806 			msleep(10);
1807 		else if (i <= 10)
1808 			msleep(500);
1809 		else
1810 			msleep(2500);
1811 
1812 		if (i == 15) {
1813 			/* Do post */
1814 			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
1815 			lpfc_sli_brdrestart(phba);
1816 		}
1817 		/* Read the HBA Host Status Register */
1818 		status = readl(phba->HSregaddr);
1819 	}
1820 
1821 	/* Check to see if any errors occurred during init */
1822 	if ((status & HS_FFERM) || (i >= 20)) {
1823 		phba->link_state = LPFC_HBA_ERROR;
1824 		retval = 1;
1825 	}
1826 
1827 	return retval;
1828 }
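
/* Illustrative usage (a sketch, not driver code): a caller bringing the
 * adapter up would typically wait for both the firmware-ready and
 * mailbox-ready bits before issuing mailbox commands:
 *
 *	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY))
 *		return -EIO;
 *
 * A nonzero return means the board never became ready and link_state
 * has already been set to LPFC_HBA_ERROR.
 */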
1829 
1830 #define BARRIER_TEST_PATTERN (0xdeadbeef)
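
/* lpfc_reset_barrier() below writes this pattern into the second word
 * of SLIM and then polls for the chip to echo back its bitwise
 * complement, which acknowledges that DMA activity has been suspended:
 *
 *	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
 *	...
 *	for (i = 0;
 *	     readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
 *		mdelay(1);
 */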
1831 
1832 void lpfc_reset_barrier(struct lpfc_hba *phba)
1833 {
1834 	uint32_t __iomem *resp_buf;
1835 	uint32_t __iomem *mbox_buf;
1836 	volatile uint32_t mbox;
1837 	uint32_t hc_copy;
1838 	int  i;
1839 	uint8_t hdrtype;
1840 
1841 	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
1842 	if (hdrtype != 0x80 ||
1843 	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
1844 	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
1845 		return;
1846 
1847 	/*
1848 	 * Tell the other part of the chip to suspend temporarily all
1849 	 * its DMA activity.
1850 	 */
1851 	resp_buf = phba->MBslimaddr;
1852 
1853 	/* Disable the error attention */
1854 	hc_copy = readl(phba->HCregaddr);
1855 	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1856 	readl(phba->HCregaddr); /* flush */
1857 	phba->link_flag |= LS_IGNORE_ERATT;
1858 
1859 	if (readl(phba->HAregaddr) & HA_ERATT) {
1860 		/* Clear Chip error bit */
1861 		writel(HA_ERATT, phba->HAregaddr);
1862 		phba->pport->stopped = 1;
1863 	}
1864 
1865 	mbox = 0;
1866 	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
1867 	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1868 
1869 	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
1870 	mbox_buf = phba->MBslimaddr;
1871 	writel(mbox, mbox_buf);
1872 
1873 	for (i = 0;
1874 	     readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
1875 		mdelay(1);
1876 
1877 	if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1878 		if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
1879 		    phba->pport->stopped)
1880 			goto restore_hc;
1881 		else
1882 			goto clear_errat;
1883 	}
1884 
1885 	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
1886 	for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
1887 		mdelay(1);
1888 
1889 clear_errat:
1890 
1891 	while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
1892 		mdelay(1);
1893 
1894 	if (readl(phba->HAregaddr) & HA_ERATT) {
1895 		writel(HA_ERATT, phba->HAregaddr);
1896 		phba->pport->stopped = 1;
1897 	}
1898 
1899 restore_hc:
1900 	phba->link_flag &= ~LS_IGNORE_ERATT;
1901 	writel(hc_copy, phba->HCregaddr);
1902 	readl(phba->HCregaddr); /* flush */
1903 }
1904 
1905 int
1906 lpfc_sli_brdkill(struct lpfc_hba *phba)
1907 {
1908 	struct lpfc_sli *psli;
1909 	LPFC_MBOXQ_t *pmb;
1910 	uint32_t status;
1911 	uint32_t ha_copy;
1912 	int retval;
1913 	int i = 0;
1914 
1915 	psli = &phba->sli;
1916 
1917 	/* Kill HBA */
1918 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1919 			"0329 Kill HBA Data: x%x x%x\n",
1920 			phba->pport->port_state, psli->sli_flag);
1921 
1922 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1923 	if (!pmb)
1924 		return 1;
1925 
1926 	/* Disable the error attention */
1927 	spin_lock_irq(&phba->hbalock);
1928 	status = readl(phba->HCregaddr);
1929 	status &= ~HC_ERINT_ENA;
1930 	writel(status, phba->HCregaddr);
1931 	readl(phba->HCregaddr); /* flush */
1932 	phba->link_flag |= LS_IGNORE_ERATT;
1933 	spin_unlock_irq(&phba->hbalock);
1934 
1935 	lpfc_kill_board(phba, pmb);
1936 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1937 	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1938 
1939 	if (retval != MBX_SUCCESS) {
1940 		if (retval != MBX_BUSY)
1941 			mempool_free(pmb, phba->mbox_mem_pool);
1942 		spin_lock_irq(&phba->hbalock);
1943 		phba->link_flag &= ~LS_IGNORE_ERATT;
1944 		spin_unlock_irq(&phba->hbalock);
1945 		return 1;
1946 	}
1947 
1948 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1949 
1950 	mempool_free(pmb, phba->mbox_mem_pool);
1951 
1952 	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
1953 	 * attention every 100ms for 3 seconds. If we don't get ERATT after
1954 	 * 3 seconds we still set HBA_ERROR state because the status of the
1955 	 * board is now undefined.
1956 	 */
1957 	ha_copy = readl(phba->HAregaddr);
1958 
1959 	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
1960 		mdelay(100);
1961 		ha_copy = readl(phba->HAregaddr);
1962 	}
1963 
1964 	del_timer_sync(&psli->mbox_tmo);
1965 	if (ha_copy & HA_ERATT) {
1966 		writel(HA_ERATT, phba->HAregaddr);
1967 		phba->pport->stopped = 1;
1968 	}
1969 	spin_lock_irq(&phba->hbalock);
1970 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1971 	phba->link_flag &= ~LS_IGNORE_ERATT;
1972 	spin_unlock_irq(&phba->hbalock);
1973 
1974 	psli->mbox_active = NULL;
1975 	lpfc_hba_down_post(phba);
1976 	phba->link_state = LPFC_HBA_ERROR;
1977 
1978 	return ha_copy & HA_ERATT ? 0 : 1;
1979 }
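
/* Usage note (an interpretation of the return convention above): a zero
 * return means the expected error attention was raised and the board is
 * confirmed dead; nonzero means the kill could not be confirmed and the
 * board state is undefined.  Either way lpfc_sli_brdkill() has left
 * link_state at LPFC_HBA_ERROR, so callers treat the HBA as errored.
 */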
1980 
1981 int
1982 lpfc_sli_brdreset(struct lpfc_hba *phba)
1983 {
1984 	struct lpfc_sli *psli;
1985 	struct lpfc_sli_ring *pring;
1986 	uint16_t cfg_value;
1987 	int i;
1988 
1989 	psli = &phba->sli;
1990 
1991 	/* Reset HBA */
1992 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1993 			"0325 Reset HBA Data: x%x x%x\n",
1994 			phba->pport->port_state, psli->sli_flag);
1995 
1996 	/* perform board reset */
1997 	phba->fc_eventTag = 0;
1998 	phba->pport->fc_myDID = 0;
1999 	phba->pport->fc_prevDID = 0;
2000 
2001 	/* Turn off parity checking and serr during the physical reset */
2002 	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
2003 	pci_write_config_word(phba->pcidev, PCI_COMMAND,
2004 			      (cfg_value &
2005 			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
2006 
2007 	psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
2008 	/* Now toggle INITFF bit in the Host Control Register */
2009 	writel(HC_INITFF, phba->HCregaddr);
2010 	mdelay(1);
2011 	readl(phba->HCregaddr); /* flush */
2012 	writel(0, phba->HCregaddr);
2013 	readl(phba->HCregaddr); /* flush */
2014 
2015 	/* Restore PCI cmd register */
2016 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
2017 
2018 	/* Initialize relevant SLI info */
2019 	for (i = 0; i < psli->num_rings; i++) {
2020 		pring = &psli->ring[i];
2021 		pring->flag = 0;
2022 		pring->rspidx = 0;
2023 		pring->next_cmdidx  = 0;
2024 		pring->local_getidx = 0;
2025 		pring->cmdidx = 0;
2026 		pring->missbufcnt = 0;
2027 	}
2028 
2029 	phba->link_state = LPFC_WARM_START;
2030 	return 0;
2031 }
2032 
2033 int
2034 lpfc_sli_brdrestart(struct lpfc_hba *phba)
2035 {
2036 	MAILBOX_t *mb;
2037 	struct lpfc_sli *psli;
2038 	uint16_t skip_post;
2039 	volatile uint32_t word0;
2040 	void __iomem *to_slim;
2041 
2042 	spin_lock_irq(&phba->hbalock);
2043 
2044 	psli = &phba->sli;
2045 
2046 	/* Restart HBA */
2047 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
2048 			"0337 Restart HBA Data: x%x x%x\n",
2049 			phba->pport->port_state, psli->sli_flag);
2050 
2051 	word0 = 0;
2052 	mb = (MAILBOX_t *) &word0;
2053 	mb->mbxCommand = MBX_RESTART;
2054 	mb->mbxHc = 1;
2055 
2056 	lpfc_reset_barrier(phba);
2057 
2058 	to_slim = phba->MBslimaddr;
2059 	writel(*(uint32_t *) mb, to_slim);
2060 	readl(to_slim); /* flush */
2061 
2062 	/* Only skip post after fc_ffinit is completed */
2063 	if (phba->pport->port_state) {
2064 		skip_post = 1;
2065 		word0 = 1;	/* This is really setting up word1 */
2066 	} else {
2067 		skip_post = 0;
2068 		word0 = 0;	/* This is really setting up word1 */
2069 	}
2070 	to_slim = phba->MBslimaddr + sizeof (uint32_t);
2071 	writel(*(uint32_t *) mb, to_slim);
2072 	readl(to_slim); /* flush */
2073 
2074 	lpfc_sli_brdreset(phba);
2075 	phba->pport->stopped = 0;
2076 	phba->link_state = LPFC_INIT_START;
2077 
2078 	spin_unlock_irq(&phba->hbalock);
2079 
2080 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
2081 	psli->stats_start = get_seconds();
2082 
2083 	if (skip_post)
2084 		mdelay(100);
2085 	else
2086 		mdelay(2000);
2087 
2088 	lpfc_hba_down_post(phba);
2089 
2090 	return 0;
2091 }
2092 
2093 static int
2094 lpfc_sli_chipset_init(struct lpfc_hba *phba)
2095 {
2096 	uint32_t status, i = 0;
2097 
2098 	/* Read the HBA Host Status Register */
2099 	status = readl(phba->HSregaddr);
2100 
2101 	/* Check status register to see what current state is */
2102 	i = 0;
2103 	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
2104 
2105 		/* Check every 10ms for the first 5 retries, then every 500ms
2106 		 * for 5, then every 2.5 sec for 5; at retry 15 reset the board
2107 		 * and keep checking every 2.5 sec for 4 more.
2108 		 */
2109 		if (i++ >= 20) {
2110 			/* Adapter failed to init, timeout, status reg
2111 			   <status> */
2112 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2113 					"0436 Adapter failed to init, "
2114 					"timeout, status reg x%x\n", status);
2115 			phba->link_state = LPFC_HBA_ERROR;
2116 			return -ETIMEDOUT;
2117 		}
2118 
2119 		/* Check to see if any errors occurred during init */
2120 		if (status & HS_FFERM) {
2121 			/* ERROR: During chipset initialization */
2122 			/* Adapter failed to init, chipset, status reg
2123 			   <status> */
2124 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2125 					"0437 Adapter failed to init, "
2126 					"chipset, status reg x%x\n", status);
2127 			phba->link_state = LPFC_HBA_ERROR;
2128 			return -EIO;
2129 		}
2130 
2131 		if (i <= 5) {
2132 			msleep(10);
2133 		} else if (i <= 10) {
2134 			msleep(500);
2135 		} else {
2136 			msleep(2500);
2137 		}
2138 
2139 		if (i == 15) {
2140 			/* Do post */
2141 			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2142 			lpfc_sli_brdrestart(phba);
2143 		}
2144 		/* Read the HBA Host Status Register */
2145 		status = readl(phba->HSregaddr);
2146 	}
2147 
2148 	/* Check to see if any errors occurred during init */
2149 	if (status & HS_FFERM) {
2150 		/* ERROR: During chipset initialization */
2151 		/* Adapter failed to init, chipset, status reg <status> */
2152 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2153 				"0438 Adapter failed to init, chipset, "
2154 				"status reg x%x\n", status);
2155 		phba->link_state = LPFC_HBA_ERROR;
2156 		return -EIO;
2157 	}
2158 
2159 	/* Clear all interrupt enable conditions */
2160 	writel(0, phba->HCregaddr);
2161 	readl(phba->HCregaddr); /* flush */
2162 
2163 	/* setup host attn register */
2164 	writel(0xffffffff, phba->HAregaddr);
2165 	readl(phba->HAregaddr); /* flush */
2166 	return 0;
2167 }
2168 
2169 int
2170 lpfc_sli_hbq_count(void)
2171 {
2172 	return ARRAY_SIZE(lpfc_hbq_defs);
2173 }
2174 
2175 static int
2176 lpfc_sli_hbq_entry_count(void)
2177 {
2178 	int  hbq_count = lpfc_sli_hbq_count();
2179 	int  count = 0;
2180 	int  i;
2181 
2182 	for (i = 0; i < hbq_count; ++i)
2183 		count += lpfc_hbq_defs[i]->entry_count;
2184 	return count;
2185 }
2186 
2187 int
2188 lpfc_sli_hbq_size(void)
2189 {
2190 	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
2191 }
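
/* Worked example (entry counts here are hypothetical): if lpfc_hbq_defs
 * describes two HBQs of 256 and 128 entries, lpfc_sli_hbq_size() returns
 * (256 + 128) * sizeof(struct lpfc_hbq_entry) -- the number of bytes the
 * caller must reserve to hold every host buffer queue entry.
 */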
2192 
2193 static int
2194 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
2195 {
2196 	int  hbq_count = lpfc_sli_hbq_count();
2197 	LPFC_MBOXQ_t *pmb;
2198 	MAILBOX_t *pmbox;
2199 	uint32_t hbqno;
2200 	uint32_t hbq_entry_index;
2201 
2202 	/* Get a Mailbox buffer to set up mailbox
2203 	 * commands for HBA initialization.
2204 	 */
2205 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2206 
2207 	if (!pmb)
2208 		return -ENOMEM;
2209 
2210 	pmbox = &pmb->mb;
2211 
2212 	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
2213 	phba->link_state = LPFC_INIT_MBX_CMDS;
2214 
2215 	hbq_entry_index = 0;
2216 	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2217 		phba->hbqs[hbqno].next_hbqPutIdx = 0;
2218 		phba->hbqs[hbqno].hbqPutIdx      = 0;
2219 		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
2220 		phba->hbqs[hbqno].entry_count =
2221 			lpfc_hbq_defs[hbqno]->entry_count;
2222 		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
2223 			hbq_entry_index, pmb);
2224 		hbq_entry_index += phba->hbqs[hbqno].entry_count;
2225 
2226 		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
2227 			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
2228 			   mbxStatus <status>, ring <num> */
2229 
2230 			lpfc_printf_log(phba, KERN_ERR,
2231 					LOG_SLI | LOG_VPORT,
2232 					"1805 Adapter failed to init. "
2233 					"Data: x%x x%x x%x\n",
2234 					pmbox->mbxCommand,
2235 					pmbox->mbxStatus, hbqno);
2236 
2237 			phba->link_state = LPFC_HBA_ERROR;
2238 			mempool_free(pmb, phba->mbox_mem_pool);
2239 			return -ENXIO;
2240 		}
2241 	}
2242 	phba->hbq_count = hbq_count;
2243 
2244 	mempool_free(pmb, phba->mbox_mem_pool);
2245 
2246 	/* Initially populate or replenish the HBQs */
2247 	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
2248 		if (lpfc_sli_hbqbuf_init_hbqs(phba, hbqno))
2249 			return -ENOMEM;
2250 	}
2251 	return 0;
2252 }
2253 
2254 static int
2255 lpfc_do_config_port(struct lpfc_hba *phba, int sli_mode)
2256 {
2257 	LPFC_MBOXQ_t *pmb;
2258 	uint32_t resetcount = 0, rc = 0, done = 0;
2259 
2260 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2261 	if (!pmb) {
2262 		phba->link_state = LPFC_HBA_ERROR;
2263 		return -ENOMEM;
2264 	}
2265 
2266 	phba->sli_rev = sli_mode;
2267 	while (resetcount < 2 && !done) {
2268 		spin_lock_irq(&phba->hbalock);
2269 		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2270 		spin_unlock_irq(&phba->hbalock);
2271 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
2272 		lpfc_sli_brdrestart(phba);
2273 		msleep(2500);
2274 		rc = lpfc_sli_chipset_init(phba);
2275 		if (rc)
2276 			break;
2277 
2278 		spin_lock_irq(&phba->hbalock);
2279 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2280 		spin_unlock_irq(&phba->hbalock);
2281 		resetcount++;
2282 
2283 		/* Call pre CONFIG_PORT mailbox command initialization.  A
2284 		 * value of 0 means the call was successful.  Any other
2285 		 * value of 0 means the call was successful.  Any nonzero
2286 		 * value is a failure, but if ERESTART is returned,
2287 		 */
2288 		rc = lpfc_config_port_prep(phba);
2289 		if (rc == -ERESTART) {
2290 			phba->link_state = LPFC_LINK_UNKNOWN;
2291 			continue;
2292 		} else if (rc) {
2293 			break;
2294 		}
2295 
2296 		phba->link_state = LPFC_INIT_MBX_CMDS;
2297 		lpfc_config_port(phba, pmb);
2298 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
2299 		if (rc != MBX_SUCCESS) {
2300 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2301 				"0442 Adapter failed to init, mbxCmd x%x "
2302 				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
2303 				pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
2304 			spin_lock_irq(&phba->hbalock);
2305 			phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
2306 			spin_unlock_irq(&phba->hbalock);
2307 			rc = -ENXIO;
2308 		} else {
2309 			done = 1;
2310 			phba->max_vpi = (phba->max_vpi &&
2311 					 pmb->mb.un.varCfgPort.gmv) != 0
2312 				? pmb->mb.un.varCfgPort.max_vpi
2313 				: 0;
2314 		}
2315 	}
2316 
2317 	if (!done) {
2318 		rc = -EINVAL;
2319 		goto do_prep_failed;
2320 	}
2321 
2322 	if ((pmb->mb.un.varCfgPort.sli_mode == 3) &&
2323 		(!pmb->mb.un.varCfgPort.cMA)) {
2324 		rc = -ENXIO;
2325 		goto do_prep_failed;
2326 	}
2327 	return rc;
2328 
2329 do_prep_failed:
2330 	mempool_free(pmb, phba->mbox_mem_pool);
2331 	return rc;
2332 }
2333 
2334 int
2335 lpfc_sli_hba_setup(struct lpfc_hba *phba)
2336 {
2337 	uint32_t rc;
2338 	int  mode = 3;
2339 
2340 	switch (lpfc_sli_mode) {
2341 	case 2:
2342 		if (phba->cfg_enable_npiv) {
2343 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2344 				"1824 NPIV enabled: Override lpfc_sli_mode "
2345 				"parameter (%d) to auto (0).\n",
2346 				lpfc_sli_mode);
2347 			break;
2348 		}
2349 		mode = 2;
2350 		break;
2351 	case 0:
2352 	case 3:
2353 		break;
2354 	default:
2355 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2356 				"1819 Unrecognized lpfc_sli_mode "
2357 				"parameter: %d.\n", lpfc_sli_mode);
2358 
2359 		break;
2360 	}
2361 
2362 	rc = lpfc_do_config_port(phba, mode);
2363 	if (rc && lpfc_sli_mode == 3)
2364 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_VPORT,
2365 				"1820 Unable to select SLI-3.  "
2366 				"Not supported by adapter.\n");
2367 	if (rc && mode != 2)
2368 		rc = lpfc_do_config_port(phba, 2);
2369 	if (rc)
2370 		goto lpfc_sli_hba_setup_error;
2371 
2372 	if (phba->sli_rev == 3) {
2373 		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
2374 		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
2375 		phba->sli3_options |= LPFC_SLI3_ENABLED;
2376 		phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
2377 
2378 	} else {
2379 		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
2380 		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
2381 		phba->sli3_options = 0;
2382 	}
2383 
2384 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2385 			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
2386 			phba->sli_rev, phba->max_vpi);
2387 	rc = lpfc_sli_ring_map(phba);
2388 
2389 	if (rc)
2390 		goto lpfc_sli_hba_setup_error;
2391 
2392 	/* Init HBQs */
2393 
2394 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2395 		rc = lpfc_sli_hbq_setup(phba);
2396 		if (rc)
2397 			goto lpfc_sli_hba_setup_error;
2398 	}
2399 
2400 	phba->sli.sli_flag |= LPFC_PROCESS_LA;
2401 
2402 	rc = lpfc_config_port_post(phba);
2403 	if (rc)
2404 		goto lpfc_sli_hba_setup_error;
2405 
2406 	return rc;
2407 
2408 lpfc_sli_hba_setup_error:
2409 	phba->link_state = LPFC_HBA_ERROR;
2410 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
2411 			"0445 Firmware initialization failed\n");
2412 	return rc;
2413 }
2414 
2415 /*! lpfc_mbox_timeout
2416  *
2417  * \param ptr The struct lpfc_hba pointer, cast to unsigned long by the
2418  *            timer code.
2421  * \return
2422  *   void
2423  *
2424  * \b Description:
2425  *
2426  * This routine handles mailbox timeout events at timer interrupt context.
2427  */
2428 void
2429 lpfc_mbox_timeout(unsigned long ptr)
2430 {
2431 	struct lpfc_hba  *phba = (struct lpfc_hba *) ptr;
2432 	unsigned long iflag;
2433 	uint32_t tmo_posted;
2434 
2435 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
2436 	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
2437 	if (!tmo_posted)
2438 		phba->pport->work_port_events |= WORKER_MBOX_TMO;
2439 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
2440 
2441 	if (!tmo_posted) {
2442 		spin_lock_irqsave(&phba->hbalock, iflag);
2443 		if (phba->work_wait)
2444 			lpfc_worker_wake_up(phba);
2445 		spin_unlock_irqrestore(&phba->hbalock, iflag);
2446 	}
2447 }
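
/* The timer that fires lpfc_mbox_timeout() is armed per command when a
 * non-polled mailbox is issued (see lpfc_sli_issue_mbox() below):
 *
 *	mod_timer(&psli->mbox_tmo, (jiffies +
 *		       (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
 *
 * so the handler runs only when a command outlives its own timeout, and
 * it merely posts WORKER_MBOX_TMO and wakes the worker thread; the heavy
 * lifting is done by lpfc_mbox_timeout_handler() in process context.
 */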
2448 
2449 void
2450 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2451 {
2452 	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
2453 	MAILBOX_t *mb = &pmbox->mb;
2454 	struct lpfc_sli *psli = &phba->sli;
2455 	struct lpfc_sli_ring *pring;
2456 
2457 	if (!(phba->pport->work_port_events & WORKER_MBOX_TMO)) {
2458 		return;
2459 	}
2460 
2461 	/* Mbox cmd <mbxCommand> timeout */
2462 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2463 			"0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
2464 			mb->mbxCommand,
2465 			phba->pport->port_state,
2466 			phba->sli.sli_flag,
2467 			phba->sli.mbox_active);
2468 
2469 	/* Setting state unknown so lpfc_sli_abort_iocb_ring
2470 	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
2471 	 * it to fail all outstanding SCSI IO.
2472 	 */
2473 	spin_lock_irq(&phba->pport->work_port_lock);
2474 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
2475 	spin_unlock_irq(&phba->pport->work_port_lock);
2476 	spin_lock_irq(&phba->hbalock);
2477 	phba->link_state = LPFC_LINK_UNKNOWN;
2478 	phba->pport->fc_flag |= FC_ESTABLISH_LINK;
2479 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
2480 	spin_unlock_irq(&phba->hbalock);
2481 
2482 	pring = &psli->ring[psli->fcp_ring];
2483 	lpfc_sli_abort_iocb_ring(phba, pring);
2484 
2485 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
2486 			"0316 Resetting board due to mailbox timeout\n");
2487 	/*
2488 	 * lpfc_offline calls lpfc_sli_hba_down which will clean up
2489 	 * on oustanding mailbox commands.
2490 	 * any outstanding mailbox commands.
2491 	lpfc_offline_prep(phba);
2492 	lpfc_offline(phba);
2493 	lpfc_sli_brdrestart(phba);
2494 	if (lpfc_online(phba) == 0)		/* Initialize the HBA */
2495 		mod_timer(&phba->fc_estabtmo, jiffies + HZ * 60);
2496 	lpfc_unblock_mgmt_io(phba);
2497 	return;
2498 }
2499 
2500 int
2501 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
2502 {
2503 	MAILBOX_t *mb;
2504 	struct lpfc_sli *psli = &phba->sli;
2505 	uint32_t status, evtctr;
2506 	uint32_t ha_copy;
2507 	int i;
2508 	unsigned long drvr_flag = 0;
2509 	volatile uint32_t word0, ldata;
2510 	void __iomem *to_slim;
2511 
2512 	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
2513 		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
2514 		if (!pmbox->vport) {
2515 			lpfc_printf_log(phba, KERN_ERR,
2516 					LOG_MBOX | LOG_VPORT,
2517 					"1806 Mbox x%x failed. No vport\n",
2518 					pmbox->mb.mbxCommand);
2519 			dump_stack();
2520 			return MBXERR_ERROR;
2521 		}
2522 	}
2523 
2525 	/* If the PCI channel is in offline state, do not post mbox. */
2526 	if (unlikely(pci_channel_offline(phba->pcidev)))
2527 		return MBX_NOT_FINISHED;
2528 
2529 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
2532 
2533 	mb = &pmbox->mb;
2534 	status = MBX_SUCCESS;
2535 
2536 	if (phba->link_state == LPFC_HBA_ERROR) {
2537 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2538 
2539 		/* Mbox command <mbxCommand> cannot issue */
2540 		LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2541 		return MBX_NOT_FINISHED;
2542 	}
2543 
2544 	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2545 	    !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2546 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2547 		LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2548 		return MBX_NOT_FINISHED;
2549 	}
2550 
2551 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2552 		/* Polling for a mailbox command when another is already active
2553 		 * is not allowed in SLI. Also, the driver must have established
2554 		 * SLI2 mode to queue and process multiple mbox commands.
2555 		 */
2556 
2557 		if (flag & MBX_POLL) {
2558 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2559 
2560 			/* Mbox command <mbxCommand> cannot issue */
2561 			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2562 			return MBX_NOT_FINISHED;
2563 		}
2564 
2565 		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2566 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2567 			/* Mbox command <mbxCommand> cannot issue */
2568 			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2569 			return MBX_NOT_FINISHED;
2570 		}
2571 
2572 		/* Handle STOP IOCB processing flag. This is only meaningful
2573 		 * if we are not polling for mbox completion.
2574 		 */
2575 		if (flag & MBX_STOP_IOCB) {
2576 			flag &= ~MBX_STOP_IOCB;
2577 			/* Now flag each ring */
2578 			for (i = 0; i < psli->num_rings; i++) {
2579 				/* If the ring is active, flag it */
2580 				if (psli->ring[i].cmdringaddr) {
2581 					psli->ring[i].flag |=
2582 					    LPFC_STOP_IOCB_MBX;
2583 				}
2584 			}
2585 		}
2586 
2587 		/* Another mailbox command is still being processed, queue this
2588 		 * command to be processed later.
2589 		 */
2590 		lpfc_mbox_put(phba, pmbox);
2591 
2592 		/* Mbox cmd issue - BUSY */
2593 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2594 				"(%d):0308 Mbox cmd issue - BUSY Data: "
2595 				"x%x x%x x%x x%x\n",
2596 				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
2597 				mb->mbxCommand, phba->pport->port_state,
2598 				psli->sli_flag, flag);
2599 
2600 		psli->slistat.mbox_busy++;
2601 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2602 
2603 		if (pmbox->vport) {
2604 			lpfc_debugfs_disc_trc(pmbox->vport,
2605 				LPFC_DISC_TRC_MBOX_VPORT,
2606 				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
2607 				(uint32_t)mb->mbxCommand,
2608 				mb->un.varWords[0], mb->un.varWords[1]);
2609 		}
2610 		else {
2611 			lpfc_debugfs_disc_trc(phba->pport,
2612 				LPFC_DISC_TRC_MBOX,
2613 				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
2614 				(uint32_t)mb->mbxCommand,
2615 				mb->un.varWords[0], mb->un.varWords[1]);
2616 		}
2617 
2618 		return MBX_BUSY;
2619 	}
2620 
2621 	/* Handle STOP IOCB processing flag. This is only meaningful
2622 	 * if we are not polling for mbox completion.
2623 	 */
2624 	if (flag & MBX_STOP_IOCB) {
2625 		flag &= ~MBX_STOP_IOCB;
2626 		if (flag == MBX_NOWAIT) {
2627 			/* Now flag each ring */
2628 			for (i = 0; i < psli->num_rings; i++) {
2629 				/* If the ring is active, flag it */
2630 				if (psli->ring[i].cmdringaddr) {
2631 					psli->ring[i].flag |=
2632 					    LPFC_STOP_IOCB_MBX;
2633 				}
2634 			}
2635 		}
2636 	}
2637 
2638 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2639 
2640 	/* If we are not polling, we MUST be in SLI2 mode */
2641 	if (flag != MBX_POLL) {
2642 		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2643 		    (mb->mbxCommand != MBX_KILL_BOARD)) {
2644 			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2645 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2646 			/* Mbox command <mbxCommand> cannot issue */
2647 			LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
2648 			return MBX_NOT_FINISHED;
2649 		}
2650 		/* timeout active mbox command */
2651 		mod_timer(&psli->mbox_tmo, (jiffies +
2652 			       (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
2653 	}
2654 
2655 	/* Mailbox cmd <cmd> issue */
2656 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
2657 			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
2658 			"x%x\n",
2659 			pmbox->vport ? pmbox->vport->vpi : 0,
2660 			mb->mbxCommand, phba->pport->port_state,
2661 			psli->sli_flag, flag);
2662 
2663 	if (mb->mbxCommand != MBX_HEARTBEAT) {
2664 		if (pmbox->vport) {
2665 			lpfc_debugfs_disc_trc(pmbox->vport,
2666 				LPFC_DISC_TRC_MBOX_VPORT,
2667 				"MBOX Send vport: cmd:x%x mb:x%x x%x",
2668 				(uint32_t)mb->mbxCommand,
2669 				mb->un.varWords[0], mb->un.varWords[1]);
2670 		}
2671 		else {
2672 			lpfc_debugfs_disc_trc(phba->pport,
2673 				LPFC_DISC_TRC_MBOX,
2674 				"MBOX Send:       cmd:x%x mb:x%x x%x",
2675 				(uint32_t)mb->mbxCommand,
2676 				mb->un.varWords[0], mb->un.varWords[1]);
2677 		}
2678 	}
2679 
2680 	psli->slistat.mbox_cmd++;
2681 	evtctr = psli->slistat.mbox_event;
2682 
2683 	/* next set own bit for the adapter and copy over command word */
2684 	mb->mbxOwner = OWN_CHIP;
2685 
2686 	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2687 		/* First copy command data to host SLIM area */
2688 		lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
2689 	} else {
2690 		if (mb->mbxCommand == MBX_CONFIG_PORT) {
2691 			/* copy command data into host mbox for cmpl */
2692 			lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2693 					      MAILBOX_CMD_SIZE);
2694 		}
2695 
2696 		/* First copy mbox command data to HBA SLIM, skip past first
2697 		   word */
2698 		to_slim = phba->MBslimaddr + sizeof (uint32_t);
2699 		lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
2700 			    MAILBOX_CMD_SIZE - sizeof (uint32_t));
2701 
2702 		/* Next copy over first word, with mbxOwner set */
2703 		ldata = *((volatile uint32_t *)mb);
2704 		to_slim = phba->MBslimaddr;
2705 		writel(ldata, to_slim);
2706 		readl(to_slim); /* flush */
2707 
2708 		if (mb->mbxCommand == MBX_CONFIG_PORT) {
2709 			/* switch over to host mailbox */
2710 			psli->sli_flag |= LPFC_SLI2_ACTIVE;
2711 		}
2712 	}
2713 
2714 	wmb();
2715 	/* interrupt board to do it right away */
2716 	writel(CA_MBATT, phba->CAregaddr);
2717 	readl(phba->CAregaddr); /* flush */
2718 
2719 	switch (flag) {
2720 	case MBX_NOWAIT:
2721 		/* Don't wait for it to finish, just return */
2722 		psli->mbox_active = pmbox;
2723 		break;
2724 
2725 	case MBX_POLL:
2726 		psli->mbox_active = NULL;
2727 		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2728 			/* First read mbox status word */
2729 			word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
2730 			word0 = le32_to_cpu(word0);
2731 		} else {
2732 			/* First read mbox status word */
2733 			word0 = readl(phba->MBslimaddr);
2734 		}
2735 
2736 		/* Read the HBA Host Attention Register */
2737 		ha_copy = readl(phba->HAregaddr);
2738 
2739 		i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2740 		i *= 1000; /* Convert to ms */
2741 
2742 		/* Wait for command to complete */
2743 		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2744 		       (!(ha_copy & HA_MBATT) &&
2745 			(phba->link_state > LPFC_WARM_START))) {
2746 			if (i-- <= 0) {
2747 				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2748 				spin_unlock_irqrestore(&phba->hbalock,
2749 						       drvr_flag);
2750 				return MBX_NOT_FINISHED;
2751 			}
2752 
2753 			/* Check if we took a mbox interrupt while we were
2754 			   polling */
2755 			if (((word0 & OWN_CHIP) != OWN_CHIP)
2756 			    && (evtctr != psli->slistat.mbox_event))
2757 				break;
2758 
2759 			spin_unlock_irqrestore(&phba->hbalock,
2760 					       drvr_flag);
2761 
2762 			msleep(1);
2763 
2764 			spin_lock_irqsave(&phba->hbalock, drvr_flag);
2765 
2766 			if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2767 				/* First copy command data */
2768 				word0 = *((volatile uint32_t *)
2769 						&phba->slim2p->mbx);
2770 				word0 = le32_to_cpu(word0);
2771 				if (mb->mbxCommand == MBX_CONFIG_PORT) {
2772 					MAILBOX_t *slimmb;
2773 					volatile uint32_t slimword0;
2774 					/* Check real SLIM for any errors */
2775 					slimword0 = readl(phba->MBslimaddr);
2776 					slimmb = (MAILBOX_t *) &slimword0;
2777 					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2778 					    && slimmb->mbxStatus) {
2779 						psli->sli_flag &=
2780 						    ~LPFC_SLI2_ACTIVE;
2781 						word0 = slimword0;
2782 					}
2783 				}
2784 			} else {
2785 				/* First copy command data */
2786 				word0 = readl(phba->MBslimaddr);
2787 			}
2788 			/* Read the HBA Host Attention Register */
2789 			ha_copy = readl(phba->HAregaddr);
2790 		}
2791 
2792 		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2793 			/* copy results back to user */
2794 			lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2795 					      MAILBOX_CMD_SIZE);
2796 		} else {
2797 			/* First copy command data */
2798 			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2799 							MAILBOX_CMD_SIZE);
2800 			if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2801 				pmbox->context2) {
2802 				lpfc_memcpy_from_slim((void *)pmbox->context2,
2803 				      phba->MBslimaddr + DMP_RSP_OFFSET,
2804 						      mb->un.varDmp.word_cnt);
2805 			}
2806 		}
2807 
2808 		writel(HA_MBATT, phba->HAregaddr);
2809 		readl(phba->HAregaddr); /* flush */
2810 
2811 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2812 		status = mb->mbxStatus;
2813 	}
2814 
2815 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2816 	return status;
2817 }
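
/* Illustrative caller patterns (sketches, not driver code).  Polled
 * issue, as used during initialization:
 *
 *	lpfc_config_port(phba, pmb);
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	if (rc != MBX_SUCCESS)
 *		... handle the failure and free pmb ...
 *
 * Non-blocking issue, where the completion handler owns the cleanup:
 *
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc != MBX_SUCCESS && rc != MBX_BUSY)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *
 * MBX_BUSY means the command was queued behind the active mailbox and
 * will be issued later, so the buffer must not be freed by the caller.
 */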
2818 
2819 /*
2820  * Caller needs to hold lock.
2821  */
2822 static void
2823 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2824 		    struct lpfc_iocbq *piocb)
2825 {
2826 	/* Insert the caller's iocb in the txq tail for later processing. */
2827 	list_add_tail(&piocb->list, &pring->txq);
2828 	pring->txq_cnt++;
2829 }
2830 
2831 static struct lpfc_iocbq *
2832 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2833 		   struct lpfc_iocbq **piocb)
2834 {
2835 	struct lpfc_iocbq * nextiocb;
2836 
2837 	nextiocb = lpfc_sli_ringtx_get(phba, pring);
2838 	if (!nextiocb) {
2839 		nextiocb = *piocb;
2840 		*piocb = NULL;
2841 	}
2842 
2843 	return nextiocb;
2844 }
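
/* Note: lpfc_sli_next_iocb() drains the ring's txq before consuming the
 * caller's iocb, which preserves issue order -- a command that was queued
 * earlier because the ring was full is always submitted ahead of the new
 * one.
 */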
2845 
2846 /*
2847  * Lockless version of lpfc_sli_issue_iocb.
2848  */
2849 int
2850 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2851 		    struct lpfc_iocbq *piocb, uint32_t flag)
2852 {
2853 	struct lpfc_iocbq *nextiocb;
2854 	IOCB_t *iocb;
2855 
2856 	if (piocb->iocb_cmpl && (!piocb->vport) &&
2857 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
2858 	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
2859 		lpfc_printf_log(phba, KERN_ERR,
2860 				LOG_SLI | LOG_VPORT,
2861 				"1807 IOCB x%x failed. No vport\n",
2862 				piocb->iocb.ulpCommand);
2863 		dump_stack();
2864 		return IOCB_ERROR;
2865 	}
2866 
2868 	/* If the PCI channel is in offline state, do not post iocbs. */
2869 	if (unlikely(pci_channel_offline(phba->pcidev)))
2870 		return IOCB_ERROR;
2871 
2872 	/*
2873 	 * We should never get an IOCB if we are in a < LINK_DOWN state
2874 	 */
2875 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
2876 		return IOCB_ERROR;
2877 
2878 	/*
2879 	 * Check to see if we are blocking IOCB processing because of an
2880 	 * outstanding mbox command.
2881 	 */
2882 	if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2883 		goto iocb_busy;
2884 
2885 	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
2886 		/*
2887 		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
2888 		 * can be issued if the link is not up.
2889 		 */
2890 		switch (piocb->iocb.ulpCommand) {
2891 		case CMD_QUE_RING_BUF_CN:
2892 		case CMD_QUE_RING_BUF64_CN:
2893 			/*
2894 			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2895 			 * completion, iocb_cmpl MUST be NULL.
2896 			 */
2897 			if (piocb->iocb_cmpl)
2898 				piocb->iocb_cmpl = NULL;
2899 			/*FALLTHROUGH*/
2900 		case CMD_CREATE_XRI_CR:
2901 		case CMD_CLOSE_XRI_CN:
2902 		case CMD_CLOSE_XRI_CX:
2903 			break;
2904 		default:
2905 			goto iocb_busy;
2906 		}
2907 
2908 	/*
2909 	 * For FCP commands, we must be in a state where we can process link
2910 	 * attention events.
2911 	 */
2912 	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2913 			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
2914 		goto iocb_busy;
2915 	}
2916 
2917 	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2918 	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2919 		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2920 
2921 	if (iocb)
2922 		lpfc_sli_update_ring(phba, pring);
2923 	else
2924 		lpfc_sli_update_full_ring(phba, pring);
2925 
2926 	if (!piocb)
2927 		return IOCB_SUCCESS;
2928 
2929 	goto out_busy;
2930 
2931  iocb_busy:
2932 	pring->stats.iocb_cmd_delay++;
2933 
2934  out_busy:
2935 
2936 	if (!(flag & SLI_IOCB_RET_IOCB)) {
2937 		__lpfc_sli_ringtx_put(phba, pring, piocb);
2938 		return IOCB_SUCCESS;
2939 	}
2940 
2941 	return IOCB_BUSY;
2942 }
2943 
2944 
2945 int
2946 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2947 		    struct lpfc_iocbq *piocb, uint32_t flag)
2948 {
2949 	unsigned long iflags;
2950 	int rc;
2951 
2952 	spin_lock_irqsave(&phba->hbalock, iflags);
2953 	rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
2954 	spin_unlock_irqrestore(&phba->hbalock, iflags);
2955 
2956 	return rc;
2957 }
2958 
2959 static int
2960 lpfc_extra_ring_setup( struct lpfc_hba *phba)
2961 {
2962 	struct lpfc_sli *psli;
2963 	struct lpfc_sli_ring *pring;
2964 
2965 	psli = &phba->sli;
2966 
2967 	/* Adjust cmd/rsp ring iocb entries more evenly */
2968 
2969 	/* Take some away from the FCP ring */
2970 	pring = &psli->ring[psli->fcp_ring];
2971 	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2972 	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2973 	pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2974 	pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2975 
2976 	/* and give them to the extra ring */
2977 	pring = &psli->ring[psli->extra_ring];
2978 
2979 	pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2980 	pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2981 	pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2982 	pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2983 
2984 	/* Setup default profile for this ring */
2985 	pring->iotag_max = 4096;
2986 	pring->num_mask = 1;
2987 	pring->prt[0].profile = 0;      /* Mask 0 */
2988 	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
2989 	pring->prt[0].type = phba->cfg_multi_ring_type;
2990 	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2991 	return 0;
2992 }
2993 
2994 int
2995 lpfc_sli_setup(struct lpfc_hba *phba)
2996 {
2997 	int i, totiocbsize = 0;
2998 	struct lpfc_sli *psli = &phba->sli;
2999 	struct lpfc_sli_ring *pring;
3000 
3001 	psli->num_rings = MAX_CONFIGURED_RINGS;
3002 	psli->sli_flag = 0;
3003 	psli->fcp_ring = LPFC_FCP_RING;
3004 	psli->next_ring = LPFC_FCP_NEXT_RING;
3005 	psli->extra_ring = LPFC_EXTRA_RING;
3006 
3007 	psli->iocbq_lookup = NULL;
3008 	psli->iocbq_lookup_len = 0;
3009 	psli->last_iotag = 0;
3010 
3011 	for (i = 0; i < psli->num_rings; i++) {
3012 		pring = &psli->ring[i];
3013 		switch (i) {
3014 		case LPFC_FCP_RING:	/* ring 0 - FCP */
3015 			/* numCiocb and numRiocb are used in config_port */
3016 			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
3017 			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
3018 			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
3019 			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
3020 			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
3021 			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
3022 			pring->sizeCiocb = (phba->sli_rev == 3) ?
3023 							SLI3_IOCB_CMD_SIZE :
3024 							SLI2_IOCB_CMD_SIZE;
3025 			pring->sizeRiocb = (phba->sli_rev == 3) ?
3026 							SLI3_IOCB_RSP_SIZE :
3027 							SLI2_IOCB_RSP_SIZE;
3028 			pring->iotag_ctr = 0;
3029 			pring->iotag_max =
3030 			    (phba->cfg_hba_queue_depth * 2);
3031 			pring->fast_iotag = pring->iotag_max;
3032 			pring->num_mask = 0;
3033 			break;
3034 		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
3035 			/* numCiocb and numRiocb are used in config_port */
3036 			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
3037 			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
3038 			pring->sizeCiocb = (phba->sli_rev == 3) ?
3039 							SLI3_IOCB_CMD_SIZE :
3040 							SLI2_IOCB_CMD_SIZE;
3041 			pring->sizeRiocb = (phba->sli_rev == 3) ?
3042 							SLI3_IOCB_RSP_SIZE :
3043 							SLI2_IOCB_RSP_SIZE;
3044 			pring->iotag_max = phba->cfg_hba_queue_depth;
3045 			pring->num_mask = 0;
3046 			break;
3047 		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
3048 			/* numCiocb and numRiocb are used in config_port */
3049 			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
3050 			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
3051 			pring->sizeCiocb = (phba->sli_rev == 3) ?
3052 							SLI3_IOCB_CMD_SIZE :
3053 							SLI2_IOCB_CMD_SIZE;
3054 			pring->sizeRiocb = (phba->sli_rev == 3) ?
3055 							SLI3_IOCB_RSP_SIZE :
3056 							SLI2_IOCB_RSP_SIZE;
3057 			pring->fast_iotag = 0;
3058 			pring->iotag_ctr = 0;
3059 			pring->iotag_max = 4096;
3060 			pring->num_mask = 4;
3061 			pring->prt[0].profile = 0;	/* Mask 0 */
3062 			pring->prt[0].rctl = FC_ELS_REQ;
3063 			pring->prt[0].type = FC_ELS_DATA;
3064 			pring->prt[0].lpfc_sli_rcv_unsol_event =
3065 			    lpfc_els_unsol_event;
3066 			pring->prt[1].profile = 0;	/* Mask 1 */
3067 			pring->prt[1].rctl = FC_ELS_RSP;
3068 			pring->prt[1].type = FC_ELS_DATA;
3069 			pring->prt[1].lpfc_sli_rcv_unsol_event =
3070 			    lpfc_els_unsol_event;
3071 			pring->prt[2].profile = 0;	/* Mask 2 */
3072 			/* NameServer Inquiry */
3073 			pring->prt[2].rctl = FC_UNSOL_CTL;
3074 			/* NameServer */
3075 			pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
3076 			pring->prt[2].lpfc_sli_rcv_unsol_event =
3077 			    lpfc_ct_unsol_event;
3078 			pring->prt[3].profile = 0;	/* Mask 3 */
3079 			/* NameServer response */
3080 			pring->prt[3].rctl = FC_SOL_CTL;
3081 			/* NameServer */
3082 			pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
3083 			pring->prt[3].lpfc_sli_rcv_unsol_event =
3084 			    lpfc_ct_unsol_event;
3085 			break;
3086 		}
3087 		totiocbsize += (pring->numCiocb * pring->sizeCiocb) +
3088 				(pring->numRiocb * pring->sizeRiocb);
3089 	}
3090 	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
3091 		/* Too many cmd / rsp ring entries in SLI2 SLIM */
3092 		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
3093 		       "SLI2 SLIM Data: x%x x%lx\n",
3094 		       phba->brd_no, totiocbsize,
3095 		       (unsigned long) MAX_SLIM_IOCB_SIZE);
3096 	}
3097 	if (phba->cfg_multi_ring_support == 2)
3098 		lpfc_extra_ring_setup(phba);
3099 
3100 	return 0;
3101 }
3102 
3103 int
3104 lpfc_sli_queue_setup(struct lpfc_hba *phba)
3105 {
3106 	struct lpfc_sli *psli;
3107 	struct lpfc_sli_ring *pring;
3108 	int i;
3109 
3110 	psli = &phba->sli;
3111 	spin_lock_irq(&phba->hbalock);
3112 	INIT_LIST_HEAD(&psli->mboxq);
3113 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
3114 	/* Initialize list headers for txq and txcmplq as doubly linked lists */
3115 	for (i = 0; i < psli->num_rings; i++) {
3116 		pring = &psli->ring[i];
3117 		pring->ringno = i;
3118 		pring->next_cmdidx  = 0;
3119 		pring->local_getidx = 0;
3120 		pring->cmdidx = 0;
3121 		INIT_LIST_HEAD(&pring->txq);
3122 		INIT_LIST_HEAD(&pring->txcmplq);
3123 		INIT_LIST_HEAD(&pring->iocb_continueq);
3124 		INIT_LIST_HEAD(&pring->postbufq);
3125 	}
3126 	spin_unlock_irq(&phba->hbalock);
3127 	return 1;
3128 }
3129 
3130 int
3131 lpfc_sli_host_down(struct lpfc_vport *vport)
3132 {
3133 	LIST_HEAD(completions);
3134 	struct lpfc_hba *phba = vport->phba;
3135 	struct lpfc_sli *psli = &phba->sli;
3136 	struct lpfc_sli_ring *pring;
3137 	struct lpfc_iocbq *iocb, *next_iocb;
3138 	int i;
3139 	unsigned long flags = 0;
3140 	uint16_t prev_pring_flag;
3141 
3142 	lpfc_cleanup_discovery_resources(vport);
3143 
3144 	spin_lock_irqsave(&phba->hbalock, flags);
3145 	for (i = 0; i < psli->num_rings; i++) {
3146 		pring = &psli->ring[i];
3147 		prev_pring_flag = pring->flag;
3148 		if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3149 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
3150 		/*
3151 		 * Error everything on the txq since these iocbs have not been
3152 		 * given to the FW yet.
3153 		 */
3154 		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
3155 			if (iocb->vport != vport)
3156 				continue;
3157 			list_move_tail(&iocb->list, &completions);
3158 			pring->txq_cnt--;
3159 		}
3160 
3161 		/* Next issue ABTS for everything on the txcmplq */
3162 		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq,
3163 									list) {
3164 			if (iocb->vport != vport)
3165 				continue;
3166 			lpfc_sli_issue_abort_iotag(phba, pring, iocb);
3167 		}
3168 
3169 		pring->flag = prev_pring_flag;
3170 	}
3171 
3172 	spin_unlock_irqrestore(&phba->hbalock, flags);
3173 
3174 	while (!list_empty(&completions)) {
3175 		list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3176 
3177 		if (!iocb->iocb_cmpl)
3178 			lpfc_sli_release_iocbq(phba, iocb);
3179 		else {
3180 			iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3181 			iocb->iocb.un.ulpWord[4] = IOERR_SLI_DOWN;
3182 			(iocb->iocb_cmpl) (phba, iocb, iocb);
3183 		}
3184 	}
3185 	return 1;
3186 }
3187 
3188 int
3189 lpfc_sli_hba_down(struct lpfc_hba *phba)
3190 {
3191 	LIST_HEAD(completions);
3192 	struct lpfc_sli *psli = &phba->sli;
3193 	struct lpfc_sli_ring *pring;
3194 	LPFC_MBOXQ_t *pmb;
3195 	struct lpfc_iocbq *iocb;
3196 	IOCB_t *cmd = NULL;
3197 	int i;
3198 	unsigned long flags = 0;
3199 
3200 	lpfc_hba_down_prep(phba);
3201 
3202 	lpfc_fabric_abort_hba(phba);
3203 
3204 	spin_lock_irqsave(&phba->hbalock, flags);
3205 	for (i = 0; i < psli->num_rings; i++) {
3206 		pring = &psli->ring[i];
3207 		if (pring->ringno == LPFC_ELS_RING) /* Only slow rings */
3208 			pring->flag |= LPFC_DEFERRED_RING_EVENT;
3209 
3210 		/*
3211 		 * Error everything on the txq since these iocbs have not been
3212 		 * given to the FW yet.
3213 		 */
3214 		list_splice_init(&pring->txq, &completions);
3215 		pring->txq_cnt = 0;
3216 
3217 	}
3218 	spin_unlock_irqrestore(&phba->hbalock, flags);
3219 
3220 	while (!list_empty(&completions)) {
3221 		list_remove_head(&completions, iocb, struct lpfc_iocbq, list);
3222 		cmd = &iocb->iocb;
3223 
3224 		if (!iocb->iocb_cmpl)
3225 			lpfc_sli_release_iocbq(phba, iocb);
3226 		else {
3227 			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
3228 			cmd->un.ulpWord[4] = IOERR_SLI_DOWN;
3229 			(iocb->iocb_cmpl) (phba, iocb, iocb);
3230 		}
3231 	}
3232 
3233 	/* Return any active mbox cmds */
3234 	del_timer_sync(&psli->mbox_tmo);
3235 	spin_lock_irqsave(&phba->hbalock, flags);
3236 
3237 	spin_lock(&phba->pport->work_port_lock);
3238 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3239 	spin_unlock(&phba->pport->work_port_lock);
3240 
3241 	if (psli->mbox_active) {
3242 		list_add_tail(&psli->mbox_active->list, &completions);
3243 		psli->mbox_active = NULL;
3244 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3245 	}
3246 
3247 	/* Return any pending or completed mbox cmds */
3248 	list_splice_init(&phba->sli.mboxq, &completions);
3249 	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
3250 	INIT_LIST_HEAD(&psli->mboxq);
3251 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
3252 
3253 	spin_unlock_irqrestore(&phba->hbalock, flags);
3254 
3255 	while (!list_empty(&completions)) {
3256 		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
3257 		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3258 		if (pmb->mbox_cmpl) {
3259 			pmb->mbox_cmpl(phba,pmb);
3260 		}
3261 	}
3262 	return 1;
3263 }
3264 
3265 void
3266 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
3267 {
3268 	uint32_t *src = srcp;
3269 	uint32_t *dest = destp;
3270 	uint32_t ldata;
3271 	int i;
3272 
3273 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
3274 		ldata = *src;
3275 		ldata = le32_to_cpu(ldata);
3276 		*dest = ldata;
3277 		src++;
3278 		dest++;
3279 	}
3280 }
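
/* Note: cnt is a byte count and is expected to be a multiple of
 * sizeof(uint32_t); each 32-bit word is converted from little endian to
 * host order as it is copied.  A typical use is mirroring a completed
 * mailbox out of host SLIM:
 *
 *	lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb, MAILBOX_CMD_SIZE);
 */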
3281 
3282 int
3283 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3284 			 struct lpfc_dmabuf *mp)
3285 {
3286 	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
3287 	   later */
3288 	spin_lock_irq(&phba->hbalock);
3289 	list_add_tail(&mp->list, &pring->postbufq);
3290 	pring->postbufq_cnt++;
3291 	spin_unlock_irq(&phba->hbalock);
3292 	return 0;
3293 }
3294 
3295 
3296 struct lpfc_dmabuf *
3297 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3298 			 dma_addr_t phys)
3299 {
3300 	struct lpfc_dmabuf *mp, *next_mp;
3301 	struct list_head *slp = &pring->postbufq;
3302 
3303 	/* Search postbufq, from the beginning, looking for a match on phys */
3304 	spin_lock_irq(&phba->hbalock);
3305 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
3306 		if (mp->phys == phys) {
3307 			list_del_init(&mp->list);
3308 			pring->postbufq_cnt--;
3309 			spin_unlock_irq(&phba->hbalock);
3310 			return mp;
3311 		}
3312 	}
3313 
3314 	spin_unlock_irq(&phba->hbalock);
3315 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
3316 			"0410 Cannot find virtual addr for mapped buf on "
3317 			"ring %d Data x%llx x%p x%p x%x\n",
3318 			pring->ringno, (unsigned long long)phys,
3319 			slp->next, slp->prev, pring->postbufq_cnt);
3320 	return NULL;
3321 }
3322 
3323 static void
3324 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3325 			struct lpfc_iocbq *rspiocb)
3326 {
3327 	IOCB_t *irsp = &rspiocb->iocb;
3328 	uint16_t abort_iotag, abort_context;
3329 	struct lpfc_iocbq *abort_iocb;
3330 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
3331 
3332 	abort_iocb = NULL;
3333 
3334 	if (irsp->ulpStatus) {
3335 		abort_context = cmdiocb->iocb.un.acxri.abortContextTag;
3336 		abort_iotag = cmdiocb->iocb.un.acxri.abortIoTag;
3337 
3338 		spin_lock_irq(&phba->hbalock);
3339 		if (abort_iotag != 0 && abort_iotag <= phba->sli.last_iotag)
3340 			abort_iocb = phba->sli.iocbq_lookup[abort_iotag];
3341 
3342 		lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
3343 				"0327 Cannot abort els iocb %p "
3344 				"with tag %x context %x, abort status %x, "
3345 				"abort code %x\n",
3346 				abort_iocb, abort_iotag, abort_context,
3347 				irsp->ulpStatus, irsp->un.ulpWord[4]);
3348 
3349 		/*
3350 		 * make sure we have the right iocbq before taking it
3351 		 * off the txcmplq and try to call completion routine.
3352 		 */
3353 		if (!abort_iocb ||
3354 		    abort_iocb->iocb.ulpContext != abort_context ||
3355 		    (abort_iocb->iocb_flag & LPFC_DRIVER_ABORTED) == 0)
3356 			spin_unlock_irq(&phba->hbalock);
3357 		else {
3358 			list_del_init(&abort_iocb->list);
3359 			pring->txcmplq_cnt--;
3360 			spin_unlock_irq(&phba->hbalock);
3361 
3362 			abort_iocb->iocb_flag &= ~LPFC_DRIVER_ABORTED;
3363 			abort_iocb->iocb.ulpStatus = IOSTAT_LOCAL_REJECT;
3364 			abort_iocb->iocb.un.ulpWord[4] = IOERR_SLI_ABORTED;
3365 			(abort_iocb->iocb_cmpl)(phba, abort_iocb, abort_iocb);
3366 		}
3367 	}
3368 
3369 	lpfc_sli_release_iocbq(phba, cmdiocb);
3370 	return;
3371 }
3372 
3373 static void
3374 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3375 		     struct lpfc_iocbq *rspiocb)
3376 {
3377 	IOCB_t *irsp = &rspiocb->iocb;
3378 
3379 	/* ELS cmd tag <ulpIoTag> completes */
3380 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
3381 			"0133 Ignoring ELS cmd tag x%x completion Data: "
3382 			"x%x x%x x%x\n",
3383 			irsp->ulpIoTag, irsp->ulpStatus,
3384 			irsp->un.ulpWord[4], irsp->ulpTimeout);
3385 	if (cmdiocb->iocb.ulpCommand == CMD_GEN_REQUEST64_CR)
3386 		lpfc_ct_free_iocb(phba, cmdiocb);
3387 	else
3388 		lpfc_els_free_iocb(phba, cmdiocb);
3389 	return;
3390 }
3391 
3392 int
3393 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3394 			   struct lpfc_iocbq *cmdiocb)
3395 {
3396 	struct lpfc_vport *vport = cmdiocb->vport;
3397 	struct lpfc_iocbq *abtsiocbp;
3398 	IOCB_t *icmd = NULL;
3399 	IOCB_t *iabt = NULL;
3400 	int retval = IOCB_ERROR;
3401 
3402 	/*
3403 	 * There are certain command types we don't want to abort.  And we
3404 	 * don't want to abort commands that are already in the process of
3405 	 * being aborted.
3406 	 */
3407 	icmd = &cmdiocb->iocb;
3408 	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
3409 	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
3410 	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
3411 		return 0;
3412 
3413 	/* If we're unloading, don't abort iocb on the ELS ring, but change the
3414 	 * callback so that nothing happens when it finishes.
3415 	 */
3416 	if ((vport->load_flag & FC_UNLOADING) &&
3417 	    (pring->ringno == LPFC_ELS_RING)) {
3418 		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
3419 			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
3420 		else
3421 			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
3422 		goto abort_iotag_exit;
3423 	}
3424 
3425 	/* issue ABTS for this IOCB based on iotag */
3426 	abtsiocbp = __lpfc_sli_get_iocbq(phba);
3427 	if (abtsiocbp == NULL)
3428 		return 0;
3429 
3430 	/* This signals the response to set the correct status
3431 	 * before calling the completion handler.
3432 	 */
3433 	cmdiocb->iocb_flag |= LPFC_DRIVER_ABORTED;
3434 
3435 	iabt = &abtsiocbp->iocb;
3436 	iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
3437 	iabt->un.acxri.abortContextTag = icmd->ulpContext;
3438 	iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
3439 	iabt->ulpLe = 1;
3440 	iabt->ulpClass = icmd->ulpClass;
3441 
3442 	if (phba->link_state >= LPFC_LINK_UP)
3443 		iabt->ulpCommand = CMD_ABORT_XRI_CN;
3444 	else
3445 		iabt->ulpCommand = CMD_CLOSE_XRI_CN;
3446 
3447 	abtsiocbp->iocb_cmpl = lpfc_sli_abort_els_cmpl;
3448 
3449 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
3450 			 "0339 Abort xri x%x, original iotag x%x, "
3451 			 "abort cmd iotag x%x\n",
3452 			 iabt->un.acxri.abortContextTag,
3453 			 iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
3454 	retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
3455 
3456 abort_iotag_exit:
3457 	/*
3458 	 * Caller to this routine should check for IOCB_ERROR
3459 	 * and handle it properly.  This routine no longer removes
3460 	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
3461 	 */
3462 	return retval;
3463 }
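
/* Illustrative caller pattern (a sketch): the routine uses the lockless
 * __lpfc_sli_get_iocbq()/__lpfc_sli_issue_iocb() helpers, so callers walk
 * the txcmplq with the hbalock held, as lpfc_sli_abort_iocb_ring() above
 * does:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
 *		lpfc_sli_issue_abort_iotag(phba, pring, iocb);
 *	spin_unlock_irq(&phba->hbalock);
 */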
3464 
3465 static int
3466 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
3467 			   uint16_t tgt_id, uint64_t lun_id,
3468 			   lpfc_ctx_cmd ctx_cmd)
3469 {
3470 	struct lpfc_scsi_buf *lpfc_cmd;
3471 	struct scsi_cmnd *cmnd;
3472 	int rc = 1;
3473 
3474 	if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
3475 		return rc;
3476 
3477 	if (iocbq->vport != vport)
3478 		return rc;
3479 
3480 	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
3481 	cmnd = lpfc_cmd->pCmd;
3482 
3483 	if (cmnd == NULL)
3484 		return rc;
3485 
3486 	switch (ctx_cmd) {
3487 	case LPFC_CTX_LUN:
3488 		if ((cmnd->device->id == tgt_id) &&
3489 		    (cmnd->device->lun == lun_id))
3490 			rc = 0;
3491 		break;
3492 	case LPFC_CTX_TGT:
3493 		if (cmnd->device->id == tgt_id)
3494 			rc = 0;
3495 		break;
3496 	case LPFC_CTX_HOST:
3497 		rc = 0;
3498 		break;
3499 	default:
3500 		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
3501 			__FUNCTION__, ctx_cmd);
3502 		break;
3503 	}
3504 
3505 	return rc;
3506 }
3507 
3508 int
3509 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
3510 		  lpfc_ctx_cmd ctx_cmd)
3511 {
3512 	struct lpfc_hba *phba = vport->phba;
3513 	struct lpfc_iocbq *iocbq;
3514 	int sum, i;
3515 
3516 	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
3517 		iocbq = phba->sli.iocbq_lookup[i];
3518 
3519 		if (lpfc_sli_validate_fcp_iocb (iocbq, vport, tgt_id, lun_id,
3520 						ctx_cmd) == 0)
3521 			sum++;
3522 	}
3523 
3524 	return sum;
3525 }
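
/* Illustrative usage (a sketch): counts like this let the SCSI error
 * handlers decide whether outstanding FCP commands remain before and
 * after a reset, e.g. everything still pending on one LUN:
 *
 *	cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 */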
3526 
3527 void
3528 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
3529 			struct lpfc_iocbq *rspiocb)
3530 {
3531 	lpfc_sli_release_iocbq(phba, cmdiocb);
3532 	return;
3533 }
3534 
3535 int
3536 lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
3537 		    uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd abort_cmd)
3538 {
3539 	struct lpfc_hba *phba = vport->phba;
3540 	struct lpfc_iocbq *iocbq;
3541 	struct lpfc_iocbq *abtsiocb;
3542 	IOCB_t *cmd = NULL;
3543 	int errcnt = 0, ret_val = 0;
3544 	int i;
3545 
3546 	for (i = 1; i <= phba->sli.last_iotag; i++) {
3547 		iocbq = phba->sli.iocbq_lookup[i];
3548 
3549 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
3550 					       abort_cmd) != 0)
3551 			continue;
3552 
3553 		/* issue ABTS for this IOCB based on iotag */
3554 		abtsiocb = lpfc_sli_get_iocbq(phba);
3555 		if (abtsiocb == NULL) {
3556 			errcnt++;
3557 			continue;
3558 		}
3559 
3560 		cmd = &iocbq->iocb;
3561 		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
3562 		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
3563 		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
3564 		abtsiocb->iocb.ulpLe = 1;
3565 		abtsiocb->iocb.ulpClass = cmd->ulpClass;
3566 		abtsiocb->vport = phba->pport;
3567 
3568 		if (lpfc_is_link_up(phba))
3569 			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
3570 		else
3571 			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
3572 
3573 		/* Setup callback routine and issue the command. */
3574 		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
3575 		ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
3576 		if (ret_val == IOCB_ERROR) {
3577 			lpfc_sli_release_iocbq(phba, abtsiocb);
3578 			errcnt++;
3579 			continue;
3580 		}
3581 	}
3582 
3583 	return errcnt;
3584 }
3585 
3586 static void
3587 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
3588 			struct lpfc_iocbq *cmdiocbq,
3589 			struct lpfc_iocbq *rspiocbq)
3590 {
3591 	wait_queue_head_t *pdone_q;
3592 	unsigned long iflags;
3593 
3594 	spin_lock_irqsave(&phba->hbalock, iflags);
3595 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
3596 	if (cmdiocbq->context2 && rspiocbq)
3597 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
3598 		       &rspiocbq->iocb, sizeof(IOCB_t));
3599 
3600 	pdone_q = cmdiocbq->context_un.wait_queue;
3601 	if (pdone_q)
3602 		wake_up(pdone_q);
3603 	spin_unlock_irqrestore(&phba->hbalock, iflags);
3604 	return;
3605 }
3606 
3607 /*
3608  * Issue the caller's iocb and wait for its completion, but no longer than the
3609  * caller's timeout.  Note that the LPFC_IO_WAKE bit in iocb_flag is
3610  * cleared before the lpfc_sli_issue_iocb call since the wake routine
3611  * sets it and by definition this is a wait function.
3612  */
3613 
3614 int
3615 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
3616 			 struct lpfc_sli_ring *pring,
3617 			 struct lpfc_iocbq *piocb,
3618 			 struct lpfc_iocbq *prspiocbq,
3619 			 uint32_t timeout)
3620 {
3621 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3622 	long timeleft, timeout_req = 0;
3623 	int retval = IOCB_SUCCESS;
3624 	uint32_t creg_val;
3625 
3626 	/*
3627 	 * If the caller has provided a response iocbq buffer, then context2
3628 	 * must be NULL or it's an error.
3629 	 */
3630 	if (prspiocbq) {
3631 		if (piocb->context2)
3632 			return IOCB_ERROR;
3633 		piocb->context2 = prspiocbq;
3634 	}
3635 
3636 	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3637 	piocb->context_un.wait_queue = &done_q;
3638 	piocb->iocb_flag &= ~LPFC_IO_WAKE;
3639 
3640 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3641 		creg_val = readl(phba->HCregaddr);
3642 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
3643 		writel(creg_val, phba->HCregaddr);
3644 		readl(phba->HCregaddr); /* flush */
3645 	}
3646 
3647 	retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3648 	if (retval == IOCB_SUCCESS) {
3649 		timeout_req = timeout * HZ;
3650 		timeleft = wait_event_timeout(done_q,
3651 				piocb->iocb_flag & LPFC_IO_WAKE,
3652 				timeout_req);
3653 
3654 		if (piocb->iocb_flag & LPFC_IO_WAKE) {
3655 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3656 					"0331 IOCB wake signaled\n");
3657 		} else if (timeleft == 0) {
3658 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3659 					"0338 IOCB wait timeout error - no "
3660 					"wake response Data x%x\n", timeout);
3661 			retval = IOCB_TIMEDOUT;
3662 		} else {
3663 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3664 					"0330 IOCB wake NOT set, "
3665 					"Data x%x x%lx\n",
3666 					timeout, timeleft);
3667 			retval = IOCB_TIMEDOUT;
3668 		}
3669 	} else {
3670 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3671 				"0332 IOCB wait issue failed, Data x%x\n",
3672 				retval);
3673 		retval = IOCB_ERROR;
3674 	}
3675 
3676 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3677 		creg_val = readl(phba->HCregaddr);
3678 		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3679 		writel(creg_val, phba->HCregaddr);
3680 		readl(phba->HCregaddr); /* flush */
3681 	}
3682 
3683 	if (prspiocbq)
3684 		piocb->context2 = NULL;
3685 
3686 	piocb->context_un.wait_queue = NULL;
3687 	piocb->iocb_cmpl = NULL;
3688 	return retval;
3689 }
3690 
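/*
 * lpfc_sli_issue_mbox_wait: issue a mailbox command and sleep until it
 * completes or the caller's timeout (in seconds) expires.  context1 is
 * borrowed to carry the wait queue to the wake routine, so the caller
 * must leave it NULL.
 *
 * A minimal usage sketch (hypothetical caller, error handling elided):
 *
 *	LPFC_MBOXQ_t *mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	int rc;
 *
 *	memset(mbox, 0, sizeof(*mbox));
 *	// ... build the command in mbox->mb ...
 *	rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(mbox, phba->mbox_mem_pool);
 *	// on MBX_TIMEOUT lpfc_sli_def_mbox_cmpl frees the mailbox later
 */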
3691 int
3692 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
3693 			 uint32_t timeout)
3694 {
3695 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3696 	int retval;
3697 	unsigned long flag;
3698 
3699 	/* The caller must leave context1 empty. */
3700 	if (pmboxq->context1)
3701 		return MBX_NOT_FINISHED;
3702 
3703 	/* setup wake call as mailbox completion callback */
3704 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3705 	/* setup context field to pass wait_queue pointer to wake function */
3706 	pmboxq->context1 = &done_q;
3707 
3708 	/* now issue the command */
3709 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3710 
3711 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
3712 		wait_event_interruptible_timeout(done_q,
3713 				pmboxq->mbox_flag & LPFC_MBX_WAKE,
3714 				timeout * HZ);
3715 
3716 		spin_lock_irqsave(&phba->hbalock, flag);
3717 		pmboxq->context1 = NULL;
3718 		/*
3719 		 * If LPFC_MBX_WAKE is set the mailbox completed; otherwise do
3720 		 * not free the resources here, the default handler will.
3721 		 */
3722 		if (pmboxq->mbox_flag & LPFC_MBX_WAKE)
3723 			retval = MBX_SUCCESS;
3724 		else {
3725 			retval = MBX_TIMEOUT;
3726 			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
3727 		}
3728 		spin_unlock_irqrestore(&phba->hbalock, flag);
3729 	}
3730 
3731 	return retval;
3732 }
3733 
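/*
 * lpfc_sli_flush_mbox_queue: poll (roughly once per millisecond) until
 * the active mailbox command finishes, servicing any mailbox attention
 * that arrives in the meantime.  Returns 0 once the mailbox is idle, or
 * 1 if LPFC_MBOX_TMO expires first.
 */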
3734 int
3735 lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3736 {
3737 	struct lpfc_vport *vport = phba->pport;
3738 	int i = 0;
3739 	uint32_t ha_copy;
3740 
3741 	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
3742 		if (i++ > LPFC_MBOX_TMO * 1000)
3743 			return 1;
3744 
3745 		/*
3746 		 * Call lpfc_sli_handle_mb_event only if a mailbox cmd
3747 		 * did finish. This way we won't get the misleading
3748 		 * "Stray Mailbox Interrupt" message.
3749 		 */
3750 		spin_lock_irq(&phba->hbalock);
3751 		ha_copy = phba->work_ha;
3752 		phba->work_ha &= ~HA_MBATT;
3753 		spin_unlock_irq(&phba->hbalock);
3754 
3755 		if (ha_copy & HA_MBATT)
3756 			if (lpfc_sli_handle_mb_event(phba) == 0)
3757 				i = 0;
3758 
3759 		msleep(1);
3760 	}
3761 
3762 	return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3763 }
3764 
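/*
 * lpfc_intr_handler: the driver's interrupt service routine.  Reads and
 * clears the Host Attention register, queues slow-path work (link, error
 * and mailbox attention, ELS ring events) to the worker thread, and
 * services FCP ring (and, with cfg_multi_ring_support, extra ring)
 * events directly in interrupt context.
 */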
3765 irqreturn_t
3766 lpfc_intr_handler(int irq, void *dev_id)
3767 {
3768 	struct lpfc_hba  *phba;
3769 	uint32_t ha_copy;
3770 	uint32_t work_ha_copy;
3771 	unsigned long status;
3772 	int i;
3773 	uint32_t control;
3774 
3775 	MAILBOX_t *mbox, *pmbox;
3776 	struct lpfc_vport *vport;
3777 	struct lpfc_nodelist *ndlp;
3778 	struct lpfc_dmabuf *mp;
3779 	LPFC_MBOXQ_t *pmb;
3780 	int rc;
3781 
3782 	/*
3783 	 * Get the driver's phba structure from the dev_id and
3784 	 * assume the HBA is not interrupting.
3785 	 */
3786 	phba = (struct lpfc_hba *) dev_id;
3787 
3788 	if (unlikely(!phba))
3789 		return IRQ_NONE;
3790 
3791 	/* If the pci channel is offline, ignore all the interrupts. */
3792 	if (unlikely(pci_channel_offline(phba->pcidev)))
3793 		return IRQ_NONE;
3794 
3795 	phba->sli.slistat.sli_intr++;
3796 
3797 	/*
3798 	 * Call the HBA to see if it is interrupting.  If not, don't claim
3799 	 * the interrupt.
3800 	 */
3801 
3802 	/* Ignore all interrupts during initialization. */
3803 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
3804 		return IRQ_NONE;
3805 
3806 	/*
3807 	 * Read host attention register to determine interrupt source
3808 	 * Clear Attention Sources, except Error Attention (to
3809 	 * preserve status) and Link Attention
3810 	 */
3811 	spin_lock(&phba->hbalock);
3812 	ha_copy = readl(phba->HAregaddr);
3813 	/* If somebody is waiting to handle an eratt don't process it
3814 	 * here.  The brdkill function will do this.
3815 	 */
3816 	if (phba->link_flag & LS_IGNORE_ERATT)
3817 		ha_copy &= ~HA_ERATT;
3818 	writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3819 	readl(phba->HAregaddr); /* flush */
3820 	spin_unlock(&phba->hbalock);
3821 
3822 	if (unlikely(!ha_copy))
3823 		return IRQ_NONE;
3824 
3825 	work_ha_copy = ha_copy & phba->work_ha_mask;
3826 
3827 	if (unlikely(work_ha_copy)) {
3828 		if (work_ha_copy & HA_LATT) {
3829 			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
3830 				/*
3831 				 * Turn off Link Attention interrupts
3832 				 * until CLEAR_LA done
3833 				 */
3834 				spin_lock(&phba->hbalock);
3835 				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3836 				control = readl(phba->HCregaddr);
3837 				control &= ~HC_LAINT_ENA;
3838 				writel(control, phba->HCregaddr);
3839 				readl(phba->HCregaddr); /* flush */
3840 				spin_unlock(&phba->hbalock);
3841 			} else
3842 				work_ha_copy &= ~HA_LATT;
3844 		}
3845 
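		/*
		 * Each ring owns a four-bit field in the HA register; shift
		 * by 4 * ring number and mask with HA_RXMASK to pick out the
		 * ELS (slow) ring's receive attention bits.
		 */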
3846 		if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
3847 			/*
3848 			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
3849 			 * the only slow ring.
3850 			 */
3851 			status = (work_ha_copy &
3852 				(HA_RXMASK  << (4*LPFC_ELS_RING)));
3853 			status >>= (4*LPFC_ELS_RING);
3854 			if (status & HA_RXMASK) {
3855 				spin_lock(&phba->hbalock);
3856 				control = readl(phba->HCregaddr);
3857 
3858 				lpfc_debugfs_slow_ring_trc(phba,
3859 				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
3860 				control, status,
3861 				(uint32_t)phba->sli.slistat.sli_intr);
3862 
3863 				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
3864 					lpfc_debugfs_slow_ring_trc(phba,
3865 						"ISR Disable ring:"
3866 						"pwork:x%x hawork:x%x wait:x%x",
3867 						phba->work_ha, work_ha_copy,
3868 						(uint32_t)((unsigned long)
3869 						phba->work_wait));
3870 
3871 					control &=
3872 					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
3873 					writel(control, phba->HCregaddr);
3874 					readl(phba->HCregaddr); /* flush */
3875 				} else {
3877 					lpfc_debugfs_slow_ring_trc(phba,
3878 						"ISR slow ring:   pwork:"
3879 						"x%x hawork:x%x wait:x%x",
3880 						phba->work_ha, work_ha_copy,
3881 						(uint32_t)((unsigned long)
3882 						phba->work_wait));
3883 				}
3884 				spin_unlock(&phba->hbalock);
3885 			}
3886 		}
3887 
3888 		if (work_ha_copy & HA_ERATT) {
3889 			phba->link_state = LPFC_HBA_ERROR;
3890 			/*
3891 			 * There was a link/board error.  Read the
3892 			 * status register to retrieve the error event
3893 			 * and process it.
3894 			 */
3895 			phba->sli.slistat.err_attn_event++;
3896 			/* Save status info */
3897 			phba->work_hs = readl(phba->HSregaddr);
3898 			phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
3899 			phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
3900 
3901 			/* Clear Chip error bit */
3902 			writel(HA_ERATT, phba->HAregaddr);
3903 			readl(phba->HAregaddr); /* flush */
3904 			phba->pport->stopped = 1;
3905 		}
3906 
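		/*
		 * Mailbox attention with a command still marked active:
		 * copy out the completed mailbox and queue it for its
		 * completion handler.
		 */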
3907 		if ((work_ha_copy & HA_MBATT) &&
3908 		    (phba->sli.mbox_active)) {
3909 			pmb = phba->sli.mbox_active;
3910 			pmbox = &pmb->mb;
3911 			mbox = &phba->slim2p->mbx;
3912 			vport = pmb->vport;
3913 
3914 			/* First check out the status word */
3915 			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
3916 			if (pmbox->mbxOwner != OWN_HOST) {
3917 				/*
3918 				 * Stray Mailbox Interrupt, mbxCommand <cmd>
3919 				 * mbxStatus <status>
3920 				 */
3921 				lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX |
3922 						LOG_SLI,
3923 						"(%d):0304 Stray Mailbox "
3924 						"Interrupt mbxCommand x%x "
3925 						"mbxStatus x%x\n",
3926 						(vport ? vport->vpi : 0),
3927 						pmbox->mbxCommand,
3928 						pmbox->mbxStatus);
3929 			}
3930 			phba->last_completion_time = jiffies;
3931 			del_timer_sync(&phba->sli.mbox_tmo);
3932 
3933 			phba->sli.mbox_active = NULL;
3934 			if (pmb->mbox_cmpl) {
3935 				lpfc_sli_pcimem_bcopy(mbox, pmbox,
3936 						      MAILBOX_CMD_SIZE);
3937 			}
3938 			if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
3939 				pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
3940 
3941 				lpfc_debugfs_disc_trc(vport,
3942 					LPFC_DISC_TRC_MBOX_VPORT,
3943 					"MBOX dflt rpi: status:x%x rpi:x%x",
3944 					(uint32_t)pmbox->mbxStatus,
3945 					pmbox->un.varWords[0], 0);
3946 
3947 				if (!pmbox->mbxStatus) {
3948 					mp = (struct lpfc_dmabuf *)
3949 						(pmb->context1);
3950 					ndlp = (struct lpfc_nodelist *)
3951 						pmb->context2;
3952 
3953 					/* Reg_LOGIN of dflt RPI was successful.
3954 					 * Now let's get rid of the RPI using
3955 					 * the same mbox buffer.
3956 					 */
3957 					lpfc_unreg_login(phba, vport->vpi,
3958 						pmbox->un.varWords[0], pmb);
3959 					pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
3960 					pmb->context1 = mp;
3961 					pmb->context2 = ndlp;
3962 					pmb->vport = vport;
3963 					spin_lock(&phba->hbalock);
3964 					phba->sli.sli_flag &=
3965 						~LPFC_SLI_MBOX_ACTIVE;
3966 					spin_unlock(&phba->hbalock);
3967 					goto send_current_mbox;
3968 				}
3969 			}
3970 			spin_lock(&phba->pport->work_port_lock);
3971 			phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
3972 			spin_unlock(&phba->pport->work_port_lock);
3973 			lpfc_mbox_cmpl_put(phba, pmb);
3974 		}
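		/*
		 * Mailbox attention with nothing active: start the next
		 * queued mailbox command, or re-enable IOCB processing on
		 * all rings if the queue is empty.
		 */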
3975 		if ((work_ha_copy & HA_MBATT) &&
3976 		    (phba->sli.mbox_active == NULL)) {
3977 send_next_mbox:
3978 			spin_lock(&phba->hbalock);
3979 			phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
3980 			pmb = lpfc_mbox_get(phba);
3981 			spin_unlock(&phba->hbalock);
3982 send_current_mbox:
3983 			/* Process next mailbox command if there is one */
3984 			if (pmb != NULL) {
3985 				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3986 				if (rc == MBX_NOT_FINISHED) {
3987 					pmb->mb.mbxStatus = MBX_NOT_FINISHED;
3988 					lpfc_mbox_cmpl_put(phba, pmb);
3989 					goto send_next_mbox;
3990 				}
3991 			} else {
3992 				/* Turn on IOCB processing */
3993 				for (i = 0; i < phba->sli.num_rings; i++)
3994 					lpfc_sli_turn_on_ring(phba, i);
3995 			}
3996 
3997 		}
3998 
3999 		spin_lock(&phba->hbalock);
4000 		phba->work_ha |= work_ha_copy;
4001 		if (phba->work_wait)
4002 			lpfc_worker_wake_up(phba);
4003 		spin_unlock(&phba->hbalock);
4004 	}
4005 
4006 	ha_copy &= ~(phba->work_ha_mask);
4007 
4008 	/*
4009 	 * Process all events on FCP ring.  Take the optimized path for
4010 	 * FCP IO.  Any other IO is slow path and is handled by
4011 	 * the worker thread.
4012 	 */
4013 	status = (ha_copy & (HA_RXMASK  << (4*LPFC_FCP_RING)));
4014 	status >>= (4*LPFC_FCP_RING);
4015 	if (status & HA_RXMASK)
4016 		lpfc_sli_handle_fast_ring_event(phba,
4017 						&phba->sli.ring[LPFC_FCP_RING],
4018 						status);
4019 
4020 	if (phba->cfg_multi_ring_support == 2) {
4021 		/*
4022 		 * Process all events on extra ring.  Take the optimized path
4023 		 * for extra ring IO.  Any other IO is slow path and is handled
4024 		 * by the worker thread.
4025 		 */
4026 		status = (ha_copy & (HA_RXMASK  << (4*LPFC_EXTRA_RING)));
4027 		status >>= (4*LPFC_EXTRA_RING);
4028 		if (status & HA_RXMASK) {
4029 			lpfc_sli_handle_fast_ring_event(phba,
4030 					&phba->sli.ring[LPFC_EXTRA_RING],
4031 					status);
4032 		}
4033 	}
4034 	return IRQ_HANDLED;
4035 
4036 } /* lpfc_intr_handler */
4037