xref: /linux/drivers/scsi/lpfc/lpfc_sli.c (revision 6e8331ac6973435b1e7604c30f2ad394035b46e1)
1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/stddef.h>
26 
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_transport_fc.h>
32 
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
37 #include "lpfc.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_compat.h"
41 
42 /*
43  * Define macro to log: Mailbox command x%x cannot issue Data
44  * This allows multiple uses of lpfc_msgBlk0311
45  * w/o perturbing log msg utility.
46  */
47 #define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
48 			lpfc_printf_log(phba, \
49 				KERN_INFO, \
50 				LOG_MBOX | LOG_SLI, \
51 				"%d:0311 Mailbox command x%x cannot issue " \
52 				"Data: x%x x%x x%x\n", \
53 				phba->brd_no, \
54 				mb->mbxCommand,		\
55 				phba->hba_state,	\
56 				psli->sli_flag,	\
57 				flag);
58 
59 
/* The four dispositions an IOCB completion can take.  Values are made
 * explicit because callers rely on LPFC_UNKNOWN_IOCB being zero. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB = 0,	/* command not recognized */
	LPFC_UNSOL_IOCB   = 1,	/* unsolicited receive */
	LPFC_SOL_IOCB     = 2,	/* solicited (driver-initiated) */
	LPFC_ABORT_IOCB   = 3	/* abort/close exchange */
} lpfc_iocb_type;
67 
68 struct lpfc_iocbq *
69 lpfc_sli_get_iocbq(struct lpfc_hba * phba)
70 {
71 	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
72 	struct lpfc_iocbq * iocbq = NULL;
73 
74 	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
75 	return iocbq;
76 }
77 
78 void
79 lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
80 {
81 	size_t start_clean = (size_t)(&((struct lpfc_iocbq *)NULL)->iocb);
82 
83 	/*
84 	 * Clean all volatile data fields, preserve iotag and node struct.
85 	 */
86 	memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
87 	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
88 }
89 
90 /*
91  * Translate the iocb command to an iocb command type used to decide the final
92  * disposition of each completed IOCB.
93  */
94 static lpfc_iocb_type
95 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
96 {
97 	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
98 
99 	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
100 		return 0;
101 
102 	switch (iocb_cmnd) {
103 	case CMD_XMIT_SEQUENCE_CR:
104 	case CMD_XMIT_SEQUENCE_CX:
105 	case CMD_XMIT_BCAST_CN:
106 	case CMD_XMIT_BCAST_CX:
107 	case CMD_ELS_REQUEST_CR:
108 	case CMD_ELS_REQUEST_CX:
109 	case CMD_CREATE_XRI_CR:
110 	case CMD_CREATE_XRI_CX:
111 	case CMD_GET_RPI_CN:
112 	case CMD_XMIT_ELS_RSP_CX:
113 	case CMD_GET_RPI_CR:
114 	case CMD_FCP_IWRITE_CR:
115 	case CMD_FCP_IWRITE_CX:
116 	case CMD_FCP_IREAD_CR:
117 	case CMD_FCP_IREAD_CX:
118 	case CMD_FCP_ICMND_CR:
119 	case CMD_FCP_ICMND_CX:
120 	case CMD_ADAPTER_MSG:
121 	case CMD_ADAPTER_DUMP:
122 	case CMD_XMIT_SEQUENCE64_CR:
123 	case CMD_XMIT_SEQUENCE64_CX:
124 	case CMD_XMIT_BCAST64_CN:
125 	case CMD_XMIT_BCAST64_CX:
126 	case CMD_ELS_REQUEST64_CR:
127 	case CMD_ELS_REQUEST64_CX:
128 	case CMD_FCP_IWRITE64_CR:
129 	case CMD_FCP_IWRITE64_CX:
130 	case CMD_FCP_IREAD64_CR:
131 	case CMD_FCP_IREAD64_CX:
132 	case CMD_FCP_ICMND64_CR:
133 	case CMD_FCP_ICMND64_CX:
134 	case CMD_GEN_REQUEST64_CR:
135 	case CMD_GEN_REQUEST64_CX:
136 	case CMD_XMIT_ELS_RSP64_CX:
137 		type = LPFC_SOL_IOCB;
138 		break;
139 	case CMD_ABORT_XRI_CN:
140 	case CMD_ABORT_XRI_CX:
141 	case CMD_CLOSE_XRI_CN:
142 	case CMD_CLOSE_XRI_CX:
143 	case CMD_XRI_ABORTED_CX:
144 	case CMD_ABORT_MXRI64_CN:
145 		type = LPFC_ABORT_IOCB;
146 		break;
147 	case CMD_RCV_SEQUENCE_CX:
148 	case CMD_RCV_ELS_REQ_CX:
149 	case CMD_RCV_SEQUENCE64_CX:
150 	case CMD_RCV_ELS_REQ64_CX:
151 		type = LPFC_UNSOL_IOCB;
152 		break;
153 	default:
154 		type = LPFC_UNKNOWN_IOCB;
155 		break;
156 	}
157 
158 	return type;
159 }
160 
161 static int
162 lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
163 {
164 	struct lpfc_sli *psli = &phba->sli;
165 	MAILBOX_t *pmbox = &pmb->mb;
166 	int i, rc;
167 
168 	for (i = 0; i < psli->num_rings; i++) {
169 		phba->hba_state = LPFC_INIT_MBX_CMDS;
170 		lpfc_config_ring(phba, i, pmb);
171 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
172 		if (rc != MBX_SUCCESS) {
173 			lpfc_printf_log(phba,
174 					KERN_ERR,
175 					LOG_INIT,
176 					"%d:0446 Adapter failed to init, "
177 					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
178 					"ring %d\n",
179 					phba->brd_no,
180 					pmbox->mbxCommand,
181 					pmbox->mbxStatus,
182 					i);
183 			phba->hba_state = LPFC_HBA_ERROR;
184 			return -ENXIO;
185 		}
186 	}
187 	return 0;
188 }
189 
190 static int
191 lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
192 			struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
193 {
194 	list_add_tail(&piocb->list, &pring->txcmplq);
195 	pring->txcmplq_cnt++;
196 	if (unlikely(pring->ringno == LPFC_ELS_RING))
197 		mod_timer(&phba->els_tmofunc,
198 					jiffies + HZ * (phba->fc_ratov << 1));
199 
200 	return (0);
201 }
202 
203 static struct lpfc_iocbq *
204 lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
205 {
206 	struct list_head *dlp;
207 	struct lpfc_iocbq *cmd_iocb;
208 
209 	dlp = &pring->txq;
210 	cmd_iocb = NULL;
211 	list_remove_head((&pring->txq), cmd_iocb,
212 			 struct lpfc_iocbq,
213 			 list);
214 	if (cmd_iocb) {
215 		/* If the first ptr is not equal to the list header,
216 		 * deque the IOCBQ_t and return it.
217 		 */
218 		pring->txq_cnt--;
219 	}
220 	return (cmd_iocb);
221 }
222 
/*
 * Return a pointer to the next free command IOCB slot on the ring, or
 * NULL if the ring is full (or the adapter reported a bogus get index,
 * which is treated as a fatal hardware error).
 */
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	/* SLI-2 port group in host memory; holds the adapter's command
	 * get index for this ring. */
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint32_t  max_cmd_idx = pring->numCiocb;
	IOCB_t *iocb = NULL;

	/* Advance next_cmdidx (with wrap) when it has not yet moved past
	 * the slot we are about to hand out. */
	if ((pring->next_cmdidx == pring->cmdidx) &&
	   (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	/* Ring looks full against our cached copy of the adapter's get
	 * index; refresh the cache from host memory before concluding. */
	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			/* Impossible get index from the port: fatal. */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0315 Ring %d issue: portCmdGet %d "
					"is bigger then cmd ring %d\n",
					phba->brd_no, pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->hba_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;
			if (phba->work_wait)
				wake_up(phba->work_wait);

			return NULL;
		}

		/* Still full after refreshing: no slot available. */
		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);

	return iocb;
}
266 
/*
 * Allocate the next free iotag for an iocbq and record the iocbq in the
 * sli iocbq_lookup table so completions can find it by tag.  If the
 * table is full it is grown by LPFC_IOCBQ_LOOKUP_INCREMENT entries;
 * the host lock is dropped around the (possibly sleeping) allocation
 * and the full-table check is redone after re-acquiring it.
 * Returns the new iotag, or 0 on failure.
 */
uint16_t
lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
{
	struct lpfc_iocbq ** new_arr;
	struct lpfc_iocbq ** old_arr;
	size_t new_len;
	struct lpfc_sli *psli = &phba->sli;
	uint16_t iotag;

	spin_lock_irq(phba->host->host_lock);
	iotag = psli->last_iotag;
	/* Fast path: a slot is free in the current lookup table. */
	if(++iotag < psli->iocbq_lookup_len) {
		psli->last_iotag = iotag;
		psli->iocbq_lookup[iotag] = iocbq;
		spin_unlock_irq(phba->host->host_lock);
		iocbq->iotag = iotag;
		return iotag;
	}
	/* Table full: grow it, as long as the new size still fits in the
	 * 16-bit iotag space. */
	else if (psli->iocbq_lookup_len < (0xffff
					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
		/* Drop the lock: GFP_KERNEL allocation may sleep. */
		spin_unlock_irq(phba->host->host_lock);
		new_arr = kmalloc(new_len * sizeof (struct lpfc_iocbq *),
				  GFP_KERNEL);
		if (new_arr) {
			memset((char *)new_arr, 0,
			       new_len * sizeof (struct lpfc_iocbq *));
			spin_lock_irq(phba->host->host_lock);
			old_arr = psli->iocbq_lookup;
			if (new_len <= psli->iocbq_lookup_len) {
				/* highly unprobable case: another thread
				 * grew the table while the lock was
				 * dropped; retry the fast path. */
				kfree(new_arr);
				iotag = psli->last_iotag;
				if(++iotag < psli->iocbq_lookup_len) {
					psli->last_iotag = iotag;
					psli->iocbq_lookup[iotag] = iocbq;
					spin_unlock_irq(phba->host->host_lock);
					iocbq->iotag = iotag;
					return iotag;
				}
				spin_unlock_irq(phba->host->host_lock);
				return 0;
			}
			/* Copy the live entries into the bigger table and
			 * install it; free the old table after unlocking. */
			if (psli->iocbq_lookup)
				memcpy(new_arr, old_arr,
				       ((psli->last_iotag  + 1) *
					sizeof (struct lpfc_iocbq *)));
			psli->iocbq_lookup = new_arr;
			psli->iocbq_lookup_len = new_len;
			psli->last_iotag = iotag;
			psli->iocbq_lookup[iotag] = iocbq;
			spin_unlock_irq(phba->host->host_lock);
			iocbq->iotag = iotag;
			kfree(old_arr);
			return iotag;
		}
	}

	lpfc_printf_log(phba, KERN_ERR,LOG_SLI,
			"%d:0318 Failed to allocate IOTAG.last IOTAG is %d\n",
			phba->brd_no, psli->last_iotag);

	return 0;
}
331 
/*
 * Copy a command iocb into the ring slot obtained from
 * lpfc_sli_next_iocb_slot() and advance the command put index in SLIM
 * so the HBA picks it up.
 */
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Set up an iotag
	 */
	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
	/* Make sure the iocb body is globally visible before the index
	 * update below tells the HBA it may consume it. */
	wmb();
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		lpfc_sli_release_iocbq(phba, nextiocb);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writel(pring->cmdidx, phba->MBslimaddr
	       + (SLIMOFF + (pring->ringno * 2)) * 4);
}
366 
/*
 * The command ring is full: flag it so the resume path knows a
 * call-back is wanted, and ask the HBA to interrupt us when a command
 * ring entry frees up.
 */
static void
lpfc_sli_update_full_ring(struct lpfc_hba * phba,
			  struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	/* Order the flag update before the chip attention write below. */
	wmb();

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
386 
/*
 * Ring the chip-attention doorbell for 'pring' so the HBA processes
 * the commands just placed on the ring.
 */
static void
lpfc_sli_update_ring(struct lpfc_hba * phba,
		     struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	wmb();
	writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */
}
400 
/*
 * Drain the ring's transmit queue into free command slots on the ring,
 * then either ring the doorbell (slots remained) or arm the
 * ring-available interrupt (ring filled up first).
 */
static void
lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    (phba->hba_state > LPFC_LINK_DOWN) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA) &&
	    !(pring->flag & LPFC_STOP_IOCB_MBX)) {

		/* Submit until we run out of either free slots or
		 * queued iocbs. */
		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		/* iocb != NULL means the loop stopped because the txq
		 * emptied; NULL means the ring itself filled up. */
		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
432 
/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
/*
 * Re-enable IOCB processing on a ring that was stopped for an
 * outstanding mailbox command, refresh the cached adapter get index,
 * and resume draining the ring's transmit queue.
 */
static void
lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno];

	/* If the ring is active, flag it */
	if (phba->sli.ring[ringno].cmdringaddr) {
		if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
			phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
			/*
			 * Force update of the local copy of cmdGetInx
			 */
			phba->sli.ring[ringno].local_getidx
				= le32_to_cpu(pgp->cmdGetInx);
			/* resume_iocb expects to run under the host lock */
			spin_lock_irq(phba->host->host_lock);
			lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
			spin_unlock_irq(phba->host->host_lock);
		}
	}
}
454 
455 static int
456 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
457 {
458 	uint8_t ret;
459 
460 	switch (mbxCommand) {
461 	case MBX_LOAD_SM:
462 	case MBX_READ_NV:
463 	case MBX_WRITE_NV:
464 	case MBX_RUN_BIU_DIAG:
465 	case MBX_INIT_LINK:
466 	case MBX_DOWN_LINK:
467 	case MBX_CONFIG_LINK:
468 	case MBX_CONFIG_RING:
469 	case MBX_RESET_RING:
470 	case MBX_READ_CONFIG:
471 	case MBX_READ_RCONFIG:
472 	case MBX_READ_SPARM:
473 	case MBX_READ_STATUS:
474 	case MBX_READ_RPI:
475 	case MBX_READ_XRI:
476 	case MBX_READ_REV:
477 	case MBX_READ_LNK_STAT:
478 	case MBX_REG_LOGIN:
479 	case MBX_UNREG_LOGIN:
480 	case MBX_READ_LA:
481 	case MBX_CLEAR_LA:
482 	case MBX_DUMP_MEMORY:
483 	case MBX_DUMP_CONTEXT:
484 	case MBX_RUN_DIAGS:
485 	case MBX_RESTART:
486 	case MBX_UPDATE_CFG:
487 	case MBX_DOWN_LOAD:
488 	case MBX_DEL_LD_ENTRY:
489 	case MBX_RUN_PROGRAM:
490 	case MBX_SET_MASK:
491 	case MBX_SET_SLIM:
492 	case MBX_UNREG_D_ID:
493 	case MBX_KILL_BOARD:
494 	case MBX_CONFIG_FARP:
495 	case MBX_BEACON:
496 	case MBX_LOAD_AREA:
497 	case MBX_RUN_BIU_DIAG64:
498 	case MBX_CONFIG_PORT:
499 	case MBX_READ_SPARM64:
500 	case MBX_READ_RPI64:
501 	case MBX_REG_LOGIN64:
502 	case MBX_READ_LA64:
503 	case MBX_FLASH_WR_ULA:
504 	case MBX_SET_DEBUG:
505 	case MBX_LOAD_EXP_ROM:
506 		ret = mbxCommand;
507 		break;
508 	default:
509 		ret = MBX_SHUTDOWN;
510 		break;
511 	}
512 	return (ret);
513 }
514 static void
515 lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
516 {
517 	wait_queue_head_t *pdone_q;
518 
519 	/*
520 	 * If pdone_q is empty, the driver thread gave up waiting and
521 	 * continued running.
522 	 */
523 	pdone_q = (wait_queue_head_t *) pmboxq->context1;
524 	if (pdone_q)
525 		wake_up_interruptible(pdone_q);
526 	return;
527 }
528 
529 void
530 lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
531 {
532 	struct lpfc_dmabuf *mp;
533 	mp = (struct lpfc_dmabuf *) (pmb->context1);
534 	if (mp) {
535 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
536 		kfree(mp);
537 	}
538 	mempool_free( pmb, phba->mbox_mem_pool);
539 	return;
540 }
541 
/*
 * Process a mailbox completion event: validate ownership of the active
 * mailbox, run its completion handler (with retry on NO_RESOURCES),
 * then issue any queued mailbox commands.  When the mailbox queue
 * drains, IOCB processing is re-enabled on all rings and buffers
 * parked on freebufList are released.
 * Returns 0 on normal completion, 1 on error.
 */
int
lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
{
	MAILBOX_t *mbox;
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_sli *psli;
	int i, rc;
	uint32_t process_next;

	psli = &phba->sli;
	/* We should only get here if we are in SLI2 mode */
	if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
		return (1);
	}

	phba->sli.slistat.mbox_event++;

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	if ((pmb = phba->sli.mbox_active)) {
		pmbox = &pmb->mb;
		mbox = &phba->slim2p->mbx;

		/* First check out the status word */
		lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));

		/* Sanity check to ensure the host owns the mailbox */
		if (pmbox->mbxOwner != OWN_HOST) {
			/* Lets try for a while */
			for (i = 0; i < 10240; i++) {
				/* First copy command data */
				lpfc_sli_pcimem_bcopy(mbox, pmbox,
							sizeof (uint32_t));
				if (pmbox->mbxOwner == OWN_HOST)
					goto mbout;
			}
			/* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
			   <status> */
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"%d:0304 Stray Mailbox Interrupt "
					"mbxCommand x%x mbxStatus x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus);

			/* NOTE(review): this SETS the MBOX_ACTIVE flag on
			 * the stray-interrupt path; verify that is the
			 * intent (it keeps further issues blocked). */
			spin_lock_irq(phba->host->host_lock);
			phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irq(phba->host->host_lock);
			return (1);
		}

	      mbout:
		/* The command completed; stop the mailbox timeout watch. */
		del_timer_sync(&phba->sli.mbox_tmo);
		phba->work_hba_events &= ~WORKER_MBOX_TMO;

		/*
		 * It is a fatal error if unknown mbox command completion.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {

			/* Unknow mailbox command compl */
			lpfc_printf_log(phba,
				KERN_ERR,
				LOG_MBOX | LOG_SLI,
				"%d:0323 Unknown Mailbox command %x Cmpl\n",
				phba->brd_no,
				pmbox->mbxCommand);
			phba->hba_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			return (0);
		}

		phba->sli.mbox_active = NULL;
		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba,
					KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"%d:0305 Mbox cmd cmpl error - "
					"RETRYing Data: x%x x%x x%x x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					phba->hba_state);
				/* Reset status/ownership and reissue the
				 * same command without running its
				 * completion handler. */
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				spin_lock_irq(phba->host->host_lock);
				phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irq(phba->host->host_lock);
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc == MBX_SUCCESS)
					return (0);
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_MBOX | LOG_SLI,
				"%d:0307 Mailbox cmd x%x Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				phba->brd_no,
				pmbox->mbxCommand,
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		/* Copy the full completed mailbox out of SLIM and run the
		 * registered completion handler, if any. */
		if (pmb->mbox_cmpl) {
			lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
			pmb->mbox_cmpl(phba,pmb);
		}
	}


	do {
		process_next = 0;	/* by default don't loop */
		spin_lock_irq(phba->host->host_lock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

		/* Process next mailbox command if there is one */
		if ((pmb = lpfc_mbox_get(phba))) {
			spin_unlock_irq(phba->host->host_lock);
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				/* Could not issue: fail the command through
				 * its own completion handler and keep
				 * draining the queue. */
				pmb->mb.mbxStatus = MBX_NOT_FINISHED;
				pmb->mbox_cmpl(phba,pmb);
				process_next = 1;
				continue;	/* loop back */
			}
		} else {
			spin_unlock_irq(phba->host->host_lock);
			/* Turn on IOCB processing */
			for (i = 0; i < phba->sli.num_rings; i++) {
				lpfc_sli_turn_on_ring(phba, i);
			}

			/* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
			while (!list_empty(&phba->freebufList)) {
				struct lpfc_dmabuf *mp;

				mp = NULL;
				list_remove_head((&phba->freebufList),
						 mp,
						 struct lpfc_dmabuf,
						 list);
				if (mp) {
					lpfc_mbuf_free(phba, mp->virt,
						       mp->phys);
					kfree(mp);
				}
			}
		}

	} while (process_next);

	return (0);
}
713 static int
714 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
715 			    struct lpfc_iocbq *saveq)
716 {
717 	IOCB_t           * irsp;
718 	WORD5            * w5p;
719 	uint32_t           Rctl, Type;
720 	uint32_t           match, i;
721 
722 	match = 0;
723 	irsp = &(saveq->iocb);
724 	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
725 	    || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
726 		Rctl = FC_ELS_REQ;
727 		Type = FC_ELS_DATA;
728 	} else {
729 		w5p =
730 		    (WORD5 *) & (saveq->iocb.un.
731 				 ulpWord[5]);
732 		Rctl = w5p->hcsw.Rctl;
733 		Type = w5p->hcsw.Type;
734 
735 		/* Firmware Workaround */
736 		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
737 			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
738 			Rctl = FC_ELS_REQ;
739 			Type = FC_ELS_DATA;
740 			w5p->hcsw.Rctl = Rctl;
741 			w5p->hcsw.Type = Type;
742 		}
743 	}
744 	/* unSolicited Responses */
745 	if (pring->prt[0].profile) {
746 		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
747 			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
748 									saveq);
749 		match = 1;
750 	} else {
751 		/* We must search, based on rctl / type
752 		   for the right routine */
753 		for (i = 0; i < pring->num_mask;
754 		     i++) {
755 			if ((pring->prt[i].rctl ==
756 			     Rctl)
757 			    && (pring->prt[i].
758 				type == Type)) {
759 				if (pring->prt[i].lpfc_sli_rcv_unsol_event)
760 					(pring->prt[i].lpfc_sli_rcv_unsol_event)
761 							(phba, pring, saveq);
762 				match = 1;
763 				break;
764 			}
765 		}
766 	}
767 	if (match == 0) {
768 		/* Unexpected Rctl / Type received */
769 		/* Ring <ringno> handler: unexpected
770 		   Rctl <Rctl> Type <Type> received */
771 		lpfc_printf_log(phba,
772 				KERN_WARNING,
773 				LOG_SLI,
774 				"%d:0313 Ring %d handler: unexpected Rctl x%x "
775 				"Type x%x received \n",
776 				phba->brd_no,
777 				pring->ringno,
778 				Rctl,
779 				Type);
780 	}
781 	return(1);
782 }
783 
/*
 * Find the originating command iocb for a response, using the iotag
 * carried in the response as an index into sli.iocbq_lookup.  On a
 * valid tag the command iocb is removed from the txcmplq and returned;
 * otherwise the bad tag is logged and NULL is returned.
 *
 * NOTE(review): a tag in range is assumed to have a populated lookup
 * slot; list_del on a stale/empty slot would be unsafe — confirm the
 * tag lifecycle guarantees this.
 */
static struct lpfc_iocbq *
lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
		      struct lpfc_sli_ring * pring,
		      struct lpfc_iocbq * prspiocb)
{
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	iotag = prspiocb->iocb.ulpIoTag;

	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
		cmd_iocb = phba->sli.iocbq_lookup[iotag];
		/* Take the command off the pending-completion queue. */
		list_del(&cmd_iocb->list);
		pring->txcmplq_cnt--;
		return cmd_iocb;
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"%d:0317 iotag x%x is out off "
			"range: max iotag x%x wd0 x%x\n",
			phba->brd_no, iotag,
			phba->sli.last_iotag,
			*(((uint32_t *) &prspiocb->iocb) + 7));
	return NULL;
}
809 
810 static int
811 lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
812 			  struct lpfc_iocbq *saveq)
813 {
814 	struct lpfc_iocbq * cmdiocbp;
815 	int rc = 1;
816 	unsigned long iflag;
817 
818 	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
819 	spin_lock_irqsave(phba->host->host_lock, iflag);
820 	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
821 	if (cmdiocbp) {
822 		if (cmdiocbp->iocb_cmpl) {
823 			/*
824 			 * Post all ELS completions to the worker thread.
825 			 * All other are passed to the completion callback.
826 			 */
827 			if (pring->ringno == LPFC_ELS_RING) {
828 				spin_unlock_irqrestore(phba->host->host_lock,
829 						       iflag);
830 				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
831 				spin_lock_irqsave(phba->host->host_lock, iflag);
832 			}
833 			else {
834 				spin_unlock_irqrestore(phba->host->host_lock,
835 						       iflag);
836 				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
837 				spin_lock_irqsave(phba->host->host_lock, iflag);
838 			}
839 		} else
840 			lpfc_sli_release_iocbq(phba, cmdiocbp);
841 	} else {
842 		/*
843 		 * Unknown initiating command based on the response iotag.
844 		 * This could be the case on the ELS ring because of
845 		 * lpfc_els_abort().
846 		 */
847 		if (pring->ringno != LPFC_ELS_RING) {
848 			/*
849 			 * Ring <ringno> handler: unexpected completion IoTag
850 			 * <IoTag>
851 			 */
852 			lpfc_printf_log(phba,
853 				KERN_WARNING,
854 				LOG_SLI,
855 				"%d:0322 Ring %d handler: unexpected "
856 				"completion IoTag x%x Data: x%x x%x x%x x%x\n",
857 				phba->brd_no,
858 				pring->ringno,
859 				saveq->iocb.ulpIoTag,
860 				saveq->iocb.ulpStatus,
861 				saveq->iocb.un.ulpWord[4],
862 				saveq->iocb.ulpCommand,
863 				saveq->iocb.ulpContext);
864 		}
865 	}
866 
867 	spin_unlock_irqrestore(phba->host->host_lock, iflag);
868 	return rc;
869 }
870 
871 static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
872 					struct lpfc_sli_ring * pring)
873 {
874 	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
875 	/*
876 	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
877 	 * rsp ring <portRspMax>
878 	 */
879 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
880 			"%d:0312 Ring %d handler: portRspPut %d "
881 			"is bigger then rsp ring %d\n",
882 			phba->brd_no, pring->ringno,
883 			le32_to_cpu(pgp->rspPutInx),
884 			pring->numRiocb);
885 
886 	phba->hba_state = LPFC_HBA_ERROR;
887 
888 	/*
889 	 * All error attention handlers are posted to
890 	 * worker thread
891 	 */
892 	phba->work_ha |= HA_ERATT;
893 	phba->work_hs = HS_FFER3;
894 	if (phba->work_wait)
895 		wake_up(phba->work_wait);
896 
897 	return;
898 }
899 
/*
 * Poll the FCP response ring (no interrupt): walk the entries between
 * our response index and the adapter's put index, run completion
 * handlers for solicited/abort iocbs, update the ring pointer in SLIM
 * after each entry, and finally service the ring-full / ring-available
 * chip attention conditions.
 *
 * NOTE(review): unlike lpfc_sli_handle_fast_ring_event below, this
 * runs without taking the host lock — confirm callers provide the
 * required serialization.
 */
void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
{
	struct lpfc_sli      * psli   = &phba->sli;
	struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	struct lpfc_pgp *pgp;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int type;
	uint32_t rsp_cmpl = 0;
	void __iomem *to_slim;
	uint32_t ha_copy;

	pring->stats.iocb_event++;

	/* The driver assumes SLI-2 mode */
	pgp =  &phba->slim2p->mbx.us.s2.port[pring->ringno];

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		return;
	}

	/* Make sure ring entries are read after the put index above. */
	rmb();
	while (pring->rspidx != portRspPut) {

		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		/* Copy the entry into a local iocb before using it. */
		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      sizeof (IOCB_t));
		irsp = &rspiocbq.iocb;
		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"%d:0326 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no, pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				printk(KERN_INFO "%s: IOCB cmd 0x%x processed."
				       " Skipping completion\n", __FUNCTION__,
				       irsp->ulpCommand);
				break;
			}

			/* Match the response to its command and run the
			 * command's completion handler, if it has one. */
			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
			}
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				/* Adapter text message: copy and log it. */
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"%d:0321 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						phba->brd_no, type,
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		to_slim = phba->MBslimaddr +
			(SLIMOFF + (pring->ringno * 2) + 1) * 4;
		writeb(pring->rspidx, to_slim);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	/* Service ring attention bits for the FCP ring. */
	ha_copy = readl(phba->HAregaddr);
	ha_copy >>= (LPFC_FCP_RING * 4);

	if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
		/* Port asked for a response-ring update: acknowledge it. */
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((ha_copy & HA_R0CE_RSP) &&
	    (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		/* Command ring slots freed up: resume queued commands and
		 * notify the registered cmd-available callback. */
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	return;
}
1046 
1047 /*
1048  * This routine presumes LPFC_FCP_RING handling and doesn't bother
1049  * to check it explicitly.
1050  */
/*
 * Drain completed response IOCBs from the fast-path (FCP) ring under the
 * host lock, dispatch each command's completion handler, publish the ring's
 * response get index to the port through SLIM, and acknowledge ring-full /
 * ring-empty conditions in the Chip Attention register.
 *
 * @phba:  HBA context
 * @pring: the response ring to service
 * @mask:  host attention bits for this ring (HA_R0RE_REQ / HA_R0CE_RSP)
 *
 * Always returns 1 (rc is never modified after initialization).
 */
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
				struct lpfc_sli_ring * pring, uint32_t mask)
{
 	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;
	void __iomem  *to_slim;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		lpfc_sli_rsp_pointers_error(phba, pring);
		spin_unlock_irqrestore(phba->host->host_lock, iflag);
		return 1;
	}

	/* Order the put-index read before reading the ring entries below. */
	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      sizeof (IOCB_t));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
				"%d:0326 Rsp Ring %d error: IOCB Data: "
				"x%x x%x x%x x%x x%x x%x x%x x%x\n",
				phba->brd_no, pring->ringno,
				irsp->un.ulpWord[0], irsp->un.ulpWord[1],
				irsp->un.ulpWord[2], irsp->un.ulpWord[3],
				irsp->un.ulpWord[4], irsp->un.ulpWord[5],
				*(((uint32_t *) irsp) + 6),
				*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
				       "Skipping completion\n", __FUNCTION__,
				       irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
							 &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
					/* Polling mode: the handler is
					 * invoked with the host lock held.
					 */
					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
							      &rspiocbq);
				} else {
					/* Release the host lock across the
					 * completion callback and reacquire
					 * it afterwards.
					 */
					spin_unlock_irqrestore(
						phba->host->host_lock, iflag);
					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
							      &rspiocbq);
					spin_lock_irqsave(phba->host->host_lock,
							  iflag);
				}
			}
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				/* Firmware placed an ASCII message inside the
				 * IOCB itself; log it verbatim.
				 */
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0321 Unknown IOCB command "
					"Data: x%x, x%x x%x x%x x%x\n",
					phba->brd_no, type, irsp->ulpCommand,
					irsp->ulpStatus, irsp->ulpIoTag,
					irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		to_slim = phba->MBslimaddr +
			(SLIMOFF + (pring->ringno * 2) + 1) * 4;
		writel(pring->rspidx, to_slim);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		/* Port asked for more response ring resources; ack it. */
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}
1203 
1204 
/*
 * Slow-path (e.g. ELS) response-ring handler.  Each response entry is
 * copied out of the ring into a driver iocbq and chained on
 * pring->iocb_continueq until an entry with ulpLe set marks the end of
 * the command; the assembled chain is then dispatched to the solicited,
 * unsolicited or abort handler according to IOCB type, and the chain is
 * freed back to the iocbq pool.
 *
 * @phba:  HBA context
 * @pring: the (slow) response ring to service
 * @mask:  host attention bits for this ring
 *
 * Returns the rc of the last dispatched handler, or 1 if none was
 * dispatched.
 */
int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
			   struct lpfc_sli_ring * pring, uint32_t mask)
{
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	struct lpfc_iocbq *next_iocb;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *saveq;
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	uint32_t status, free_saveq;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	unsigned long iflag;
	void __iomem  *to_slim;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger then
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_SLI,
				"%d:0312 Ring %d handler: portRspPut %d "
				"is bigger then rsp ring %d\n",
				phba->brd_no,
				pring->ringno, portRspPut, portRspMax);

		phba->hba_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(phba->host->host_lock, iflag);

		/* Escalate to the board error-attention handler. */
		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return 1;
	}

	/* Order the put-index read before reading the ring entries below. */
	rmb();
	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
		rspiocbp = lpfc_sli_get_iocbq(phba);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __FUNCTION__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
		irsp = &rspiocbp->iocb;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		/* Publish the new response get index to the port via SLIM. */
		to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
					      + 1) * 4;
		writel(pring->rspidx, to_slim);

		if (list_empty(&(pring->iocb_continueq))) {
			list_add(&rspiocbp->list, &(pring->iocb_continueq));
		} else {
			list_add_tail(&rspiocbp->list,
				      &(pring->iocb_continueq));
		}

		pring->iocb_continueq_cnt++;
		if (irsp->ulpLe) {
			/*
			 * By default, the driver expects to free all resources
			 * associated with this iocb completion.  (free_saveq
			 * is never cleared below, so as written the chain is
			 * always freed at the bottom of this block.)
			 */
			free_saveq = 1;
			saveq = list_get_first(&pring->iocb_continueq,
					       struct lpfc_iocbq, list);
			irsp = &(saveq->iocb);
			list_del_init(&pring->iocb_continueq);
			pring->iocb_continueq_cnt = 0;

			pring->stats.iocb_rsp++;

			if (irsp->ulpStatus) {
				/* Rsp ring <ringno> error: IOCB */
				lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"%d:0328 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no,
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
			}

			/*
			 * Fetch the IOCB command type and call the correct
			 * completion routine.  Solicited and Unsolicited
			 * IOCBs on the ELS ring get freed back to the
			 * lpfc_iocb_list by the discovery kernel thread.
			 */
			iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
			type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
			if (type == LPFC_SOL_IOCB) {
				/* Host lock is dropped across each handler. */
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_sol_iocb(phba, pring,
					saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else if (type == LPFC_UNSOL_IOCB) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_unsol_iocb(phba, pring,
					saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else if (type == LPFC_ABORT_IOCB) {
				if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
				    ((cmdiocbp =
				      lpfc_sli_iocbq_lookup(phba, pring,
							    saveq)))) {
					/* Call the specified completion
					   routine */
					if (cmdiocbp->iocb_cmpl) {
						spin_unlock_irqrestore(
						       phba->host->host_lock,
						       iflag);
						(cmdiocbp->iocb_cmpl) (phba,
							     cmdiocbp, saveq);
						spin_lock_irqsave(
							  phba->host->host_lock,
							  iflag);
					} else
						lpfc_sli_release_iocbq(phba,
								      cmdiocbp);
				}
			} else if (type == LPFC_UNKNOWN_IOCB) {
				if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
					/* Firmware ASCII message carried in
					 * the IOCB; log it verbatim.
					 */
					char adaptermsg[LPFC_MAX_ADPTMSG];

					memset(adaptermsg, 0,
					       LPFC_MAX_ADPTMSG);
					memcpy(&adaptermsg[0], (uint8_t *) irsp,
					       MAX_MSG_DATA);
					dev_warn(&((phba->pcidev)->dev),
						 "lpfc%d: %s",
						 phba->brd_no, adaptermsg);
				} else {
					/* Unknown IOCB command */
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"%d:0321 Unknown IOCB command "
						"Data: x%x x%x x%x x%x\n",
						phba->brd_no,
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
				}
			}

			if (free_saveq) {
				/* Release the continuation chain, then the
				 * head iocbq itself.
				 */
				if (!list_empty(&saveq->list)) {
					list_for_each_entry_safe(rspiocbp,
								 next_iocb,
								 &saveq->list,
								 list) {
						lpfc_sli_release_iocbq(phba,
								     rspiocbp);
					}
				}

				lpfc_sli_release_iocbq(phba, saveq);
			}
		}

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */

	if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);

	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}
1445 
/*
 * Flush every pending command on the given ring.  IOCBs on the transmit
 * queue (txq) and the outstanding queue (txcmplq) are completed back to
 * their owners with IOSTAT_LOCAL_REJECT / IOERR_SLI_ABORTED, or released
 * to the iocbq pool when they have no completion handler.
 *
 * Returns errcnt, which is initialized to 0 and never incremented, so
 * the return value is always 0 as written.
 */
int
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd = NULL, *cmd = NULL;
	int errcnt;

	errcnt = 0;

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		list_del_init(&iocb->list);
		if (iocb->iocb_cmpl) {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			/* Completion handlers run without the host lock; the
			 * iocb is passed as both command and response.
			 * NOTE(review): the lock is dropped mid-iteration,
			 * which relies on next_iocb staying valid - confirm.
			 */
			spin_unlock_irq(phba->host->host_lock);
			(iocb->iocb_cmpl) (phba, iocb, iocb);
			spin_lock_irq(phba->host->host_lock);
		} else
			lpfc_sli_release_iocbq(phba, iocb);
	}
	pring->txq_cnt = 0;
	INIT_LIST_HEAD(&(pring->txq));

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		cmd = &iocb->iocb;

		/*
		 * Immediate abort of IOCB: dequeue and call compl
		 */

		list_del_init(&iocb->list);
		pring->txcmplq_cnt--;

		if (iocb->iocb_cmpl) {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			spin_unlock_irq(phba->host->host_lock);
			(iocb->iocb_cmpl) (phba, iocb, iocb);
			spin_lock_irq(phba->host->host_lock);
		} else
			lpfc_sli_release_iocbq(phba, iocb);
	}

	INIT_LIST_HEAD(&pring->txcmplq);
	pring->txcmplq_cnt = 0;
	spin_unlock_irq(phba->host->host_lock);

	return errcnt;
}
1501 
1502 int
1503 lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1504 {
1505 	uint32_t status;
1506 	int i = 0;
1507 	int retval = 0;
1508 
1509 	/* Read the HBA Host Status Register */
1510 	status = readl(phba->HSregaddr);
1511 
1512 	/*
1513 	 * Check status register every 100ms for 5 retries, then every
1514 	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
1515 	 * every 2.5 sec for 4.
1516 	 * Break our of the loop if errors occurred during init.
1517 	 */
1518 	while (((status & mask) != mask) &&
1519 	       !(status & HS_FFERM) &&
1520 	       i++ < 20) {
1521 
1522 		if (i <= 5)
1523 			msleep(10);
1524 		else if (i <= 10)
1525 			msleep(500);
1526 		else
1527 			msleep(2500);
1528 
1529 		if (i == 15) {
1530 			phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
1531 			lpfc_sli_brdrestart(phba);
1532 		}
1533 		/* Read the HBA Host Status Register */
1534 		status = readl(phba->HSregaddr);
1535 	}
1536 
1537 	/* Check to see if any errors occurred during init */
1538 	if ((status & HS_FFERM) || (i >= 20)) {
1539 		phba->hba_state = LPFC_HBA_ERROR;
1540 		retval = 1;
1541 	}
1542 
1543 	return retval;
1544 }
1545 
1546 #define BARRIER_TEST_PATTERN (0xdeadbeef)
1547 
/*
 * Quiesce the HBA's DMA activity before a board reset on Helios/Thor
 * adapters whose PCI header type is 0x80: post a MBX_KILL_BOARD mailbox
 * word directly into SLIM with error attention disabled, then wait for
 * the chip to acknowledge (the test pattern written at resp_buf + 1 is
 * complemented) or to raise ERATT.  The saved HC register value is
 * restored on exit.
 */
void lpfc_reset_barrier(struct lpfc_hba * phba)
{
	uint32_t __iomem *resp_buf;
	uint32_t __iomem *mbox_buf;
	volatile uint32_t mbox;
	uint32_t hc_copy;
	int  i;
	uint8_t hdrtype;

	/* Only applies to header-type 0x80 Helios/Thor parts. */
	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
	if (hdrtype != 0x80 ||
	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
		return;

	/*
	 * Tell the other part of the chip to suspend temporarily all
	 * its DMA activity.
	 */
	resp_buf = phba->MBslimaddr;

	/* Disable the error attention */
	hc_copy = readl(phba->HCregaddr);
	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	if (readl(phba->HAregaddr) & HA_ERATT) {
		/* Clear Chip error bit */
		writel(HA_ERATT, phba->HAregaddr);
		phba->stopped = 1;
	}

	/* Build the KILL_BOARD mailbox word in a local uint32_t. */
	mbox = 0;
	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;

	/* The chip acknowledges by complementing this test pattern. */
	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
	mbox_buf = phba->MBslimaddr;
	writel(mbox, mbox_buf);

	/* Wait up to ~50ms for the barrier acknowledgement. */
	for (i = 0;
	     readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
		mdelay(1);

	if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
		if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
		    phba->stopped)
			goto restore_hc;
		else
			goto clear_errat;
	}

	/* Barrier seen: wait for the chip to hand the mailbox back. */
	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
	for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
		mdelay(1);

clear_errat:

	/* Note: i is not reset here, so when falling through from the
	 * loop above this wait shares the remaining 500-iteration budget.
	 */
	while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
		mdelay(1);

	if (readl(phba->HAregaddr) & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->stopped = 1;
	}

restore_hc:
	/* Restore the originally saved host-control register value. */
	writel(hc_copy, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
}
1618 
/*
 * Kill the board: disable error attention, issue a MBX_KILL_BOARD
 * mailbox command (which generates no completion), then poll the host
 * attention register for ERATT for up to ~3 seconds.  The HBA is left
 * in LPFC_HBA_ERROR state either way.
 *
 * Returns 0 if ERATT was observed (kill confirmed), 1 otherwise
 * (including allocation or mailbox-issue failure).
 */
int
lpfc_sli_brdkill(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	LPFC_MBOXQ_t *pmb;
	uint32_t status;
	uint32_t ha_copy;
	int retval;
	int i = 0;

	psli = &phba->sli;

	/* Kill HBA */
	lpfc_printf_log(phba,
		KERN_INFO,
		LOG_SLI,
		"%d:0329 Kill HBA Data: x%x x%x\n",
		phba->brd_no,
		phba->hba_state,
		psli->sli_flag);

	if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
						  GFP_KERNEL)) == 0)
		return 1;

	/* Disable the error attention */
	spin_lock_irq(phba->host->host_lock);
	status = readl(phba->HCregaddr);
	status &= ~HC_ERINT_ENA;
	writel(status, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */
	spin_unlock_irq(phba->host->host_lock);

	lpfc_kill_board(phba, pmb);
	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (retval != MBX_SUCCESS) {
		/* On MBX_BUSY the command was queued; its memory is freed
		 * by the deferred completion path, not here.
		 */
		if (retval != MBX_BUSY)
			mempool_free(pmb, phba->mbox_mem_pool);
		return 1;
	}

	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;

	mempool_free(pmb, phba->mbox_mem_pool);

	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
	 * attention every 100ms for 3 seconds. If we don't get ERATT after
	 * 3 seconds we still set HBA_ERROR state because the status of the
	 * board is now undefined.
	 */
	ha_copy = readl(phba->HAregaddr);

	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
		mdelay(100);
		ha_copy = readl(phba->HAregaddr);
	}

	/* Stop the mailbox timeout timer; the kill never completes. */
	del_timer_sync(&psli->mbox_tmo);
	if (ha_copy & HA_ERATT) {
		writel(HA_ERATT, phba->HAregaddr);
		phba->stopped = 1;
	}
	spin_lock_irq(phba->host->host_lock);
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	spin_unlock_irq(phba->host->host_lock);

	psli->mbox_active = NULL;
	lpfc_hba_down_post(phba);
	phba->hba_state = LPFC_HBA_ERROR;

	return (ha_copy & HA_ERATT ? 0 : 1);
}
1693 
1694 int
1695 lpfc_sli_brdreset(struct lpfc_hba * phba)
1696 {
1697 	struct lpfc_sli *psli;
1698 	struct lpfc_sli_ring *pring;
1699 	uint16_t cfg_value;
1700 	int i;
1701 
1702 	psli = &phba->sli;
1703 
1704 	/* Reset HBA */
1705 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1706 			"%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
1707 			phba->hba_state, psli->sli_flag);
1708 
1709 	/* perform board reset */
1710 	phba->fc_eventTag = 0;
1711 	phba->fc_myDID = 0;
1712 	phba->fc_prevDID = 0;
1713 
1714 	psli->sli_flag = 0;
1715 
1716 	/* Turn off parity checking and serr during the physical reset */
1717 	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
1718 	pci_write_config_word(phba->pcidev, PCI_COMMAND,
1719 			      (cfg_value &
1720 			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
1721 
1722 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1723 	/* Now toggle INITFF bit in the Host Control Register */
1724 	writel(HC_INITFF, phba->HCregaddr);
1725 	mdelay(1);
1726 	readl(phba->HCregaddr); /* flush */
1727 	writel(0, phba->HCregaddr);
1728 	readl(phba->HCregaddr); /* flush */
1729 
1730 	/* Restore PCI cmd register */
1731 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
1732 
1733 	/* Initialize relevant SLI info */
1734 	for (i = 0; i < psli->num_rings; i++) {
1735 		pring = &psli->ring[i];
1736 		pring->flag = 0;
1737 		pring->rspidx = 0;
1738 		pring->next_cmdidx  = 0;
1739 		pring->local_getidx = 0;
1740 		pring->cmdidx = 0;
1741 		pring->missbufcnt = 0;
1742 	}
1743 
1744 	phba->hba_state = LPFC_WARM_START;
1745 	return 0;
1746 }
1747 
/*
 * Restart the HBA: write a MBX_RESTART mailbox word directly into SLIM
 * (preceded by lpfc_reset_barrier()), reset the board with
 * lpfc_sli_brdreset(), and run lpfc_hba_down_post().  The second SLIM
 * write sets mailbox word 1, the "skip POST" flag: POST is skipped
 * (word1 = 1) only when hba_state is non-zero, i.e. after initial
 * bring-up has completed.  Always returns 0.
 *
 * NOTE(review): log message ID 0328 below is also used by the
 * slow-ring error message earlier in this file; message IDs are meant
 * to be unique - consider renumbering.
 */
int
lpfc_sli_brdrestart(struct lpfc_hba * phba)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	uint16_t skip_post;
	volatile uint32_t word0;
	void __iomem *to_slim;

	spin_lock_irq(phba->host->host_lock);

	psli = &phba->sli;

	/* Restart HBA */
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"%d:0328 Restart HBA Data: x%x x%x\n", phba->brd_no,
			phba->hba_state, psli->sli_flag);

	/* Build the RESTART mailbox command in word0 (viewed as MAILBOX_t). */
	word0 = 0;
	mb = (MAILBOX_t *) &word0;
	mb->mbxCommand = MBX_RESTART;
	mb->mbxHc = 1;

	lpfc_reset_barrier(phba);

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (phba->hba_state) {
		skip_post = 1;
		word0 = 1;	/* This is really setting up word1 */
	} else {
		skip_post = 0;
		word0 = 0;	/* This is really setting up word1 */
	}
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) mb, to_slim);
	readl(to_slim); /* flush */

	lpfc_sli_brdreset(phba);
	phba->stopped = 0;
	phba->hba_state = LPFC_INIT_START;

	spin_unlock_irq(phba->host->host_lock);

	/* Give the restart (and POST, when not skipped) time to run. */
	if (skip_post)
		mdelay(100);
	else
		mdelay(2000);

	lpfc_hba_down_post(phba);

	return 0;
}
1804 
/*
 * Poll the HBA Host Status register until the chip reports ready
 * (HS_FFRDY | HS_MBRDY), restarting the board once part-way through the
 * poll.  On success, clears all interrupt enable conditions and acks
 * the host attention register.
 *
 * Returns 0 on success, -ETIMEDOUT if the chip never became ready, or
 * -EIO on a fatal hardware error (HS_FFERM); hba_state is set to
 * LPFC_HBA_ERROR on any failure.
 */
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 10ms for 5 retries, then every 500ms for 5,
		 * then every 2.5 sec for 5, then reset board and every
		 * 2.5 sec for 4.
		 * NOTE(review): an earlier comment said 100ms for the first
		 * phase, but the code below sleeps 10ms - confirm intent.
		 */
		if (i++ >= 20) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0436 Adapter failed to init, "
					"timeout, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0437 Adapter failed to init, "
					"chipset, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 5) {
			msleep(10);
		} else if (i <= 10) {
			msleep(500);
		} else {
			msleep(2500);
		}

		if (i == 15) {
			/* Still not ready half-way through: restart, do POST */
			phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
			lpfc_sli_brdrestart(phba);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0438 Adapter failed to init, chipset, "
				"status reg x%x\n",
				phba->brd_no,
				status);
		phba->hba_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
1891 
/*
 * Bring the SLI layer up: restart the board (at most two attempts),
 * run the pre-CONFIG_PORT preparation, issue the CONFIG_PORT mailbox
 * command, map the rings and run post-configuration.
 *
 * Returns 0 on success; otherwise the failing step's status (negative
 * errno or mailbox return code), with hba_state set to LPFC_HBA_ERROR.
 */
int
lpfc_sli_hba_setup(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	/* Up to two reset-and-configure attempts. */
	while (resetcount < 2 && !done) {
		phba->hba_state = LPFC_STATE_UNKNOWN;
		lpfc_sli_brdrestart(phba);
		msleep(2500);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization.  A
		 * value of 0 means the call was successful.  Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->hba_state = 0;
			continue;
		} else if (rc) {
			break;
		}

		phba->hba_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc == MBX_SUCCESS)
			done = 1;
		else {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				phba->brd_no, pmb->mb.mbxCommand,
				pmb->mb.mbxStatus, 0);
			phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
		}
	}
	if (!done)
		goto lpfc_sli_hba_setup_error;

	rc = lpfc_sli_ring_map(phba, pmb);

	if (rc)
		goto lpfc_sli_hba_setup_error;

	phba->sli.sli_flag |= LPFC_PROCESS_LA;

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	/* Success: skip the error label; free pmb and return rc (0). */
	goto lpfc_sli_hba_setup_exit;
lpfc_sli_hba_setup_error:
	phba->hba_state = LPFC_HBA_ERROR;
lpfc_sli_hba_setup_exit:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
1962 
/*
 * Abort all mailbox processing: complete the currently active mailbox
 * command (if any) with MBX_NOT_FINISHED, then drain the pending
 * mailbox queue, completing each queued command the same way.
 *
 * NOTE(review): the active-command section below reads and clears
 * sli.mbox_active and sli_flag without holding host_lock; confirm all
 * callers provide the required exclusion.
 */
static void
lpfc_mbox_abort(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmbox;
	MAILBOX_t *mb;

	if (phba->sli.mbox_active) {
		/* Stop the timeout timer and cancel any pending worker
		 * timeout event for the active command.
		 */
		del_timer_sync(&phba->sli.mbox_tmo);
		phba->work_hba_events &= ~WORKER_MBOX_TMO;
		pmbox = phba->sli.mbox_active;
		mb = &pmbox->mb;
		phba->sli.mbox_active = NULL;
		if (pmbox->mbox_cmpl) {
			mb->mbxStatus = MBX_NOT_FINISHED;
			(pmbox->mbox_cmpl) (phba, pmbox);
		}
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}

	/* Abort all the non active mailbox commands. */
	spin_lock_irq(phba->host->host_lock);
	pmbox = lpfc_mbox_get(phba);
	while (pmbox) {
		mb = &pmbox->mb;
		if (pmbox->mbox_cmpl) {
			mb->mbxStatus = MBX_NOT_FINISHED;
			/* Completion handlers run without the host lock. */
			spin_unlock_irq(phba->host->host_lock);
			(pmbox->mbox_cmpl) (phba, pmbox);
			spin_lock_irq(phba->host->host_lock);
		}
		pmbox = lpfc_mbox_get(phba);
	}
	spin_unlock_irq(phba->host->host_lock);
	return;
}
1998 
1999 /*! lpfc_mbox_timeout
2000  *
2001  * \pre
2002  * \post
2003  * \param hba Pointer to per struct lpfc_hba structure
2004  * \param l1  Pointer to the driver's mailbox queue.
2005  * \return
2006  *   void
2007  *
2008  * \b Description:
2009  *
2010  * This routine handles mailbox timeout events at timer interrupt context.
2011  */
2012 void
2013 lpfc_mbox_timeout(unsigned long ptr)
2014 {
2015 	struct lpfc_hba *phba;
2016 	unsigned long iflag;
2017 
2018 	phba = (struct lpfc_hba *)ptr;
2019 	spin_lock_irqsave(phba->host->host_lock, iflag);
2020 	if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
2021 		phba->work_hba_events |= WORKER_MBOX_TMO;
2022 		if (phba->work_wait)
2023 			wake_up(phba->work_wait);
2024 	}
2025 	spin_unlock_irqrestore(phba->host->host_lock, iflag);
2026 }
2027 
2028 void
2029 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2030 {
2031 	LPFC_MBOXQ_t *pmbox;
2032 	MAILBOX_t *mb;
2033 
2034 	spin_lock_irq(phba->host->host_lock);
2035 	if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
2036 		spin_unlock_irq(phba->host->host_lock);
2037 		return;
2038 	}
2039 
2040 	phba->work_hba_events &= ~WORKER_MBOX_TMO;
2041 
2042 	pmbox = phba->sli.mbox_active;
2043 	mb = &pmbox->mb;
2044 
2045 	/* Mbox cmd <mbxCommand> timeout */
2046 	lpfc_printf_log(phba,
2047 		KERN_ERR,
2048 		LOG_MBOX | LOG_SLI,
2049 		"%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
2050 		phba->brd_no,
2051 		mb->mbxCommand,
2052 		phba->hba_state,
2053 		phba->sli.sli_flag,
2054 		phba->sli.mbox_active);
2055 
2056 	phba->sli.mbox_active = NULL;
2057 	if (pmbox->mbox_cmpl) {
2058 		mb->mbxStatus = MBX_NOT_FINISHED;
2059 		spin_unlock_irq(phba->host->host_lock);
2060 		(pmbox->mbox_cmpl) (phba, pmbox);
2061 		spin_lock_irq(phba->host->host_lock);
2062 	}
2063 	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2064 
2065 	spin_unlock_irq(phba->host->host_lock);
2066 	lpfc_mbox_abort(phba);
2067 	return;
2068 }
2069 
/*
 * Issue a mailbox command to the HBA.
 *
 * @phba:  driver HBA context
 * @pmbox: mailbox command to issue (pmbox->mb holds the MAILBOX_t image)
 * @flag:  MBX_POLL to busy-wait for completion, MBX_NOWAIT to return as soon
 *         as the command is handed to the adapter; may be OR'ed with
 *         MBX_STOP_IOCB to suspend iocb processing on active rings.
 *
 * Returns MBX_SUCCESS / the adapter's mbxStatus (polled path), MBX_BUSY when
 * the command was queued behind an already-active mailbox, or
 * MBX_NOT_FINISHED on failure.  Takes and releases host_lock internally;
 * the polled path temporarily drops the lock around mdelay().
 */
int
lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	uint32_t status, evtctr;
	uint32_t ha_copy;
	int i;
	unsigned long drvr_flag = 0;
	volatile uint32_t word0, ldata;
	void __iomem *to_slim;

	psli = &phba->sli;

	spin_lock_irqsave(phba->host->host_lock, drvr_flag);


	mb = &pmbox->mb;
	status = MBX_SUCCESS;

	/* A dead board can never accept a mailbox command. */
	if (phba->hba_state == LPFC_HBA_ERROR) {
		spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);

		/* Mbox command <mbxCommand> cannot issue */
		LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
		return (MBX_NOT_FINISHED);
	}

	/* A no-wait command needs the mailbox interrupt enabled to ever
	 * complete; KILL_BOARD is exempt from this check.
	 */
	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
	    !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
		spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
		LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
		return (MBX_NOT_FINISHED);
	}

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
			return (MBX_NOT_FINISHED);
		}

		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
			return (MBX_NOT_FINISHED);
		}

		/* Handle STOP IOCB processing flag. This is only meaningful
		 * if we are not polling for mbox completion.
		 */
		if (flag & MBX_STOP_IOCB) {
			flag &= ~MBX_STOP_IOCB;
			/* Now flag each ring */
			for (i = 0; i < psli->num_rings; i++) {
				/* If the ring is active, flag it */
				if (psli->ring[i].cmdringaddr) {
					psli->ring[i].flag |=
					    LPFC_STOP_IOCB_MBX;
				}
			}
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba,
			KERN_INFO,
			LOG_MBOX | LOG_SLI,
			"%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			mb->mbxCommand,
			phba->hba_state,
			psli->sli_flag,
			flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(phba->host->host_lock,
				       drvr_flag);

		return (MBX_BUSY);
	}

	/* Handle STOP IOCB processing flag. This is only meaningful
	 * if we are not polling for mbox completion.
	 */
	if (flag & MBX_STOP_IOCB) {
		flag &= ~MBX_STOP_IOCB;
		if (flag == MBX_NOWAIT) {
			/* Now flag each ring */
			for (i = 0; i < psli->num_rings; i++) {
				/* If the ring is active, flag it */
				if (psli->ring[i].cmdringaddr) {
					psli->ring[i].flag |=
					    LPFC_STOP_IOCB_MBX;
				}
			}
		}
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
		    (mb->mbxCommand != MBX_KILL_BOARD)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
			return (MBX_NOT_FINISHED);
		}
		/* timeout active mbox command */
		mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba,
		KERN_INFO,
		LOG_MBOX | LOG_SLI,
		"%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
		phba->brd_no,
		mb->mbxCommand,
		phba->hba_state,
		psli->sli_flag,
		flag);

	psli->slistat.mbox_cmd++;
	/* Snapshot the event counter so the polled path can detect a mailbox
	 * completion that was handled by the interrupt path meanwhile.
	 */
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mb->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
		/* First copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
	} else {
		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
					MAILBOX_CMD_SIZE);
		}

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
			    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set.  Writing the
		 * ownership word last hands the command to the adapter only
		 * after the rest of the mailbox is in place.
		 */
		ldata = *((volatile uint32_t *)mb);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI2_ACTIVE;
		}
	}

	wmb();
	/* interrupt board to doit right away */
	writel(CA_MBATT, phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	switch (flag) {
	case MBX_NOWAIT:
		/* Don't wait for it to finish, just return */
		psli->mbox_active = pmbox;
		break;

	case MBX_POLL:
		i = 0;
		psli->mbox_active = NULL;
		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
			/* First read mbox status word */
			word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			word0 = readl(phba->MBslimaddr);
		}

		/* Read the HBA Host Attention Register */
		ha_copy = readl(phba->HAregaddr);

		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
		       (!(ha_copy & HA_MBATT) &&
			(phba->hba_state > LPFC_WARM_START))) {
			/* Give up after 100 polls; the backoff below makes
			 * the total wait span several seconds.
			 */
			if (i++ >= 100) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(phba->host->host_lock,
						       drvr_flag);
				return (MBX_NOT_FINISHED);
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);

			/* Can be in interrupt context, do not sleep */
			/* (or might be called with interrupts disabled) */
			mdelay(i);

			spin_lock_irqsave(phba->host->host_lock, drvr_flag);

			if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
				/* First copy command data */
				word0 = *((volatile uint32_t *)
						&phba->slim2p->mbx);
				word0 = le32_to_cpu(word0);
				if (mb->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					volatile uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) & slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						/* CONFIG_PORT failed in the
						 * adapter; fall back off the
						 * SLI2 host mailbox.
						 */
						psli->sli_flag &=
						    ~LPFC_SLI2_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			ha_copy = readl(phba->HAregaddr);
		}

		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
					MAILBOX_CMD_SIZE);
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
							MAILBOX_CMD_SIZE);
			if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
				pmbox->context2) {
				lpfc_memcpy_from_slim((void *)pmbox->context2,
				      phba->MBslimaddr + DMP_RSP_OFFSET,
						      mb->un.varDmp.word_cnt);
			}
		}

		/* Acknowledge the mailbox attention condition. */
		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mb->mbxStatus;
	}

	spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
	return (status);
}
2349 
2350 static int
2351 lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2352 		    struct lpfc_iocbq * piocb)
2353 {
2354 	/* Insert the caller's iocb in the txq tail for later processing. */
2355 	list_add_tail(&piocb->list, &pring->txq);
2356 	pring->txq_cnt++;
2357 	return (0);
2358 }
2359 
2360 static struct lpfc_iocbq *
2361 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2362 		   struct lpfc_iocbq ** piocb)
2363 {
2364 	struct lpfc_iocbq * nextiocb;
2365 
2366 	nextiocb = lpfc_sli_ringtx_get(phba, pring);
2367 	if (!nextiocb) {
2368 		nextiocb = *piocb;
2369 		*piocb = NULL;
2370 	}
2371 
2372 	return nextiocb;
2373 }
2374 
/*
 * Issue an iocb on the given ring, or queue it for later submission.
 *
 * @phba:  driver HBA context
 * @pring: ring to submit on
 * @piocb: command iocb to issue
 * @flag:  SLI_IOCB_RET_IOCB to return IOCB_BUSY instead of queueing when
 *         the iocb cannot be submitted immediately.
 *
 * Returns IOCB_SUCCESS when the iocb was submitted or queued, IOCB_BUSY
 * when SLI_IOCB_RET_IOCB is set and submission is blocked, or IOCB_ERROR
 * for an invalid HBA state.  Caller is expected to hold host_lock
 * (this routine does no locking of its own).
 */
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of a
	 * outstanding mbox command.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
		goto iocb_busy;

	if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
		   !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
		goto iocb_busy;

	/* Drain any previously queued iocbs first, then the caller's, for
	 * as long as ring slots remain; piocb becomes NULL once consumed.
	 */
	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	/* Unless the caller wants the iocb back, park it on the txq. */
	if (!(flag & SLI_IOCB_RET_IOCB)) {
		lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
2450 
2451 static int
2452 lpfc_extra_ring_setup( struct lpfc_hba *phba)
2453 {
2454 	struct lpfc_sli *psli;
2455 	struct lpfc_sli_ring *pring;
2456 
2457 	psli = &phba->sli;
2458 
2459 	/* Adjust cmd/rsp ring iocb entries more evenly */
2460 	pring = &psli->ring[psli->fcp_ring];
2461 	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2462 	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2463 	pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2464 	pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2465 
2466 	pring = &psli->ring[1];
2467 	pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2468 	pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2469 	pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2470 	pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2471 
2472 	/* Setup default profile for this ring */
2473 	pring->iotag_max = 4096;
2474 	pring->num_mask = 1;
2475 	pring->prt[0].profile = 0;      /* Mask 0 */
2476 	pring->prt[0].rctl = FC_UNSOL_DATA;
2477 	pring->prt[0].type = 5;
2478 	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2479 	return 0;
2480 }
2481 
/*
 * One-time SLI configuration: set ring counts, per-ring iocb entry
 * budgets, iotag limits, and the ELS ring's unsolicited-event masks.
 *
 * Returns 0.  Warns (but does not fail) if the total cmd+rsp entry
 * count exceeds the SLI2 SLIM capacity.
 */
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocb = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_CONFIGURED_RINGS;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->ip_ring = LPFC_IP_RING;

	/* iotag lookup table is allocated lazily elsewhere. */
	psli->iocbq_lookup = NULL;
	psli->iocbq_lookup_len = 0;
	psli->last_iotag = 0;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			/* FCP gets the extra entries by default; they are
			 * re-donated in lpfc_extra_ring_setup() when
			 * multi-ring support is enabled.
			 */
			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->iotag_ctr = 0;
			pring->iotag_max =
			    (phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_IP_RING:	/* ring 1 - IP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			/* Four rctl/type masks route unsolicited frames to
			 * the ELS or CT handlers.
			 */
			pring->num_mask = 4;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_ELS_REQ;
			pring->prt[0].type = FC_ELS_DATA;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_ELS_RSP;
			pring->prt[1].type = FC_ELS_DATA;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
			    lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_UNSOL_CTL;
			/* NameServer */
			pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_SOL_CTL;
			/* NameServer */
			pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
			    lpfc_ct_unsol_event;
			break;
		}
		totiocb += (pring->numCiocb + pring->numRiocb);
	}
	if (totiocb > MAX_SLI2_IOCB) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0462 Too many cmd / rsp ring entries in "
				"SLI2 SLIM Data: x%x x%x\n",
				phba->brd_no, totiocb, MAX_SLI2_IOCB);
	}
	if (phba->cfg_multi_ring_support == 2)
		lpfc_extra_ring_setup(phba);

	return 0;
}
2570 
2571 int
2572 lpfc_sli_queue_setup(struct lpfc_hba * phba)
2573 {
2574 	struct lpfc_sli *psli;
2575 	struct lpfc_sli_ring *pring;
2576 	int i;
2577 
2578 	psli = &phba->sli;
2579 	spin_lock_irq(phba->host->host_lock);
2580 	INIT_LIST_HEAD(&psli->mboxq);
2581 	/* Initialize list headers for txq and txcmplq as double linked lists */
2582 	for (i = 0; i < psli->num_rings; i++) {
2583 		pring = &psli->ring[i];
2584 		pring->ringno = i;
2585 		pring->next_cmdidx  = 0;
2586 		pring->local_getidx = 0;
2587 		pring->cmdidx = 0;
2588 		INIT_LIST_HEAD(&pring->txq);
2589 		INIT_LIST_HEAD(&pring->txcmplq);
2590 		INIT_LIST_HEAD(&pring->iocb_continueq);
2591 		INIT_LIST_HEAD(&pring->postbufq);
2592 	}
2593 	spin_unlock_irq(phba->host->host_lock);
2594 	return (1);
2595 }
2596 
/*
 * Tear down SLI activity as the HBA goes down: fail everything still on
 * the per-ring txqs, then fail the active and any queued mailbox
 * commands with MBX_NOT_FINISHED.
 *
 * host_lock is dropped around each user completion callback and
 * reacquired afterwards.  Returns 1.
 */
int
lpfc_sli_hba_down(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd = NULL;
	int i;
	unsigned long flags = 0;

	psli = &phba->sli;
	lpfc_hba_down_prep(phba);

	spin_lock_irqsave(phba->host->host_lock, flags);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag |= LPFC_DEFERRED_RING_EVENT;

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		pring->txq_cnt = 0;

		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			list_del_init(&iocb->list);
			if (iocb->iocb_cmpl) {
				icmd = &iocb->iocb;
				icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
				icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
				/* Completion handlers run unlocked. */
				spin_unlock_irqrestore(phba->host->host_lock,
						       flags);
				(iocb->iocb_cmpl) (phba, iocb, iocb);
				spin_lock_irqsave(phba->host->host_lock, flags);
			} else
				lpfc_sli_release_iocbq(phba, iocb);
		}

		INIT_LIST_HEAD(&(pring->txq));

	}

	spin_unlock_irqrestore(phba->host->host_lock, flags);

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);
	spin_lock_irqsave(phba->host->host_lock, flags);
	phba->work_hba_events &= ~WORKER_MBOX_TMO;
	if (psli->mbox_active) {
		pmb = psli->mbox_active;
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl) {
			spin_unlock_irqrestore(phba->host->host_lock, flags);
			pmb->mbox_cmpl(phba,pmb);
			spin_lock_irqsave(phba->host->host_lock, flags);
		}
	}
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;

	/* Return any pending mbox cmds */
	while ((pmb = lpfc_mbox_get(phba)) != NULL) {
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl) {
			spin_unlock_irqrestore(phba->host->host_lock, flags);
			pmb->mbox_cmpl(phba,pmb);
			spin_lock_irqsave(phba->host->host_lock, flags);
		}
	}

	INIT_LIST_HEAD(&psli->mboxq);

	spin_unlock_irqrestore(phba->host->host_lock, flags);

	return 1;
}
2675 
2676 void
2677 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
2678 {
2679 	uint32_t *src = srcp;
2680 	uint32_t *dest = destp;
2681 	uint32_t ldata;
2682 	int i;
2683 
2684 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
2685 		ldata = *src;
2686 		ldata = le32_to_cpu(ldata);
2687 		*dest = ldata;
2688 		src++;
2689 		dest++;
2690 	}
2691 }
2692 
2693 int
2694 lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2695 			 struct lpfc_dmabuf * mp)
2696 {
2697 	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
2698 	   later */
2699 	list_add_tail(&mp->list, &pring->postbufq);
2700 
2701 	pring->postbufq_cnt++;
2702 	return 0;
2703 }
2704 
2705 
2706 struct lpfc_dmabuf *
2707 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2708 			 dma_addr_t phys)
2709 {
2710 	struct lpfc_dmabuf *mp, *next_mp;
2711 	struct list_head *slp = &pring->postbufq;
2712 
2713 	/* Search postbufq, from the begining, looking for a match on phys */
2714 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
2715 		if (mp->phys == phys) {
2716 			list_del_init(&mp->list);
2717 			pring->postbufq_cnt--;
2718 			return mp;
2719 		}
2720 	}
2721 
2722 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2723 			"%d:0410 Cannot find virtual addr for mapped buf on "
2724 			"ring %d Data x%llx x%p x%p x%x\n",
2725 			phba->brd_no, pring->ringno, (unsigned long long)phys,
2726 			slp->next, slp->prev, pring->postbufq_cnt);
2727 	return NULL;
2728 }
2729 
/*
 * Completion handler for the ABORT_MXRI64 issued by
 * lpfc_sli_issue_abort_iotag32() against an ELS_REQUEST64: now that the
 * abort has completed, free the buffers the aborted ELS command owned.
 */
static void
lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			   struct lpfc_iocbq * rspiocb)
{
	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
	/* Free the resources associated with the ELS_REQUEST64 IOCB the driver
	 * just aborted.
	 * In this case, context2  = cmd,  context2->next = rsp, context3 = bpl
	 */
	if (cmdiocb->context2) {
		buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;

		/* Free the response IOCB before completing the abort
		   command.  */
		buf_ptr = NULL;
		list_remove_head((&buf_ptr1->list), buf_ptr,
				 struct lpfc_dmabuf, list);
		if (buf_ptr) {
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
		/* Now free the command buffer itself. */
		lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
		kfree(buf_ptr1);
	}

	/* context3 holds the BPL, if one was attached. */
	if (cmdiocb->context3) {
		buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	lpfc_sli_release_iocbq(phba, cmdiocb);
	return;
}
2764 
/*
 * Build and issue an ABORT_MXRI64 (ABTS) for the given command iocb,
 * identified by its 32-bit iotag.  Only ELS_REQUEST64 commands are
 * supported; any other command type is rejected.
 *
 * Returns 1 if the abort was issued, 0 on any failure (no iocbq
 * available, unsupported command, or issue error).
 */
int
lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
			     struct lpfc_sli_ring * pring,
			     struct lpfc_iocbq * cmdiocb)
{
	struct lpfc_iocbq *abtsiocbp;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;

	/* issue ABTS for this IOCB based on iotag */
	abtsiocbp = lpfc_sli_get_iocbq(phba);
	if (abtsiocbp == NULL)
		return 0;

	iabt = &abtsiocbp->iocb;
	icmd = &cmdiocb->iocb;
	switch (icmd->ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		/* Even though we abort the ELS command, the firmware may access
		 * the BPL or other resources before it processes our
		 * ABORT_MXRI64. Thus we must delay reusing the cmdiocb
		 * resources till the actual abort request completes.
		 */
		abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
		/* Transfer buffer ownership to the abort iocb so the
		 * abort completion handler can free them.
		 */
		abtsiocbp->context2 = cmdiocb->context2;
		abtsiocbp->context3 = cmdiocb->context3;
		cmdiocb->context2 = NULL;
		cmdiocb->context3 = NULL;
		abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
		break;
	default:
		lpfc_sli_release_iocbq(phba, abtsiocbp);
		return 0;
	}

	iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;

	iabt->ulpLe = 1;
	iabt->ulpClass = CLASS3;
	iabt->ulpCommand = CMD_ABORT_MXRI64_CN;

	if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
		lpfc_sli_release_iocbq(phba, abtsiocbp);
		return 0;
	}

	return 1;
}
2814 
2815 static int
2816 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
2817 			   uint64_t lun_id, uint32_t ctx,
2818 			   lpfc_ctx_cmd ctx_cmd)
2819 {
2820 	struct lpfc_scsi_buf *lpfc_cmd;
2821 	struct scsi_cmnd *cmnd;
2822 	int rc = 1;
2823 
2824 	if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
2825 		return rc;
2826 
2827 	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
2828 	cmnd = lpfc_cmd->pCmd;
2829 
2830 	if (cmnd == NULL)
2831 		return rc;
2832 
2833 	switch (ctx_cmd) {
2834 	case LPFC_CTX_LUN:
2835 		if ((cmnd->device->id == tgt_id) &&
2836 		    (cmnd->device->lun == lun_id))
2837 			rc = 0;
2838 		break;
2839 	case LPFC_CTX_TGT:
2840 		if (cmnd->device->id == tgt_id)
2841 			rc = 0;
2842 		break;
2843 	case LPFC_CTX_CTX:
2844 		if (iocbq->iocb.ulpContext == ctx)
2845 			rc = 0;
2846 		break;
2847 	case LPFC_CTX_HOST:
2848 		rc = 0;
2849 		break;
2850 	default:
2851 		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
2852 			__FUNCTION__, ctx_cmd);
2853 		break;
2854 	}
2855 
2856 	return rc;
2857 }
2858 
2859 int
2860 lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2861 		uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
2862 {
2863 	struct lpfc_iocbq *iocbq;
2864 	int sum, i;
2865 
2866 	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
2867 		iocbq = phba->sli.iocbq_lookup[i];
2868 
2869 		if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
2870 						0, ctx_cmd) == 0)
2871 			sum++;
2872 	}
2873 
2874 	return sum;
2875 }
2876 
/*
 * Completion handler for the ABTS iocbs issued by lpfc_sli_abort_iocb():
 * simply return the abort iocb to the free pool under the host lock.
 */
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			   struct lpfc_iocbq * rspiocb)
{
	spin_lock_irq(phba->host->host_lock);
	lpfc_sli_release_iocbq(phba, cmdiocb);
	spin_unlock_irq(phba->host->host_lock);
	return;
}
2886 
/*
 * Issue an abort (ABTS or CLOSE, depending on link state) for every
 * outstanding FCP iocb that matches the given target/lun/context filter.
 *
 * Returns the number of iocbs for which an abort could NOT be issued
 * (0 means every matching iocb was aborted).
 */
int
lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
		    lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *abtsiocb;
	IOCB_t *cmd = NULL;
	int errcnt = 0, ret_val = 0;
	int i;

	/* Scan the iotag lookup table for matching FCP iocbs. */
	for (i = 1; i <= phba->sli.last_iotag; i++) {
		iocbq = phba->sli.iocbq_lookup[i];

		if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
						0, abort_cmd) != 0)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		abtsiocb = lpfc_sli_get_iocbq(phba);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		cmd = &iocbq->iocb;
		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;

		/* ABTS needs a live link; otherwise just close the exchange. */
		if (phba->hba_state >= LPFC_LINK_UP)
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			lpfc_sli_release_iocbq(phba, abtsiocb);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}
2936 
/*
 * Completion handler used by lpfc_sli_issue_iocb_wait(): copy the
 * response into the waiter's buffer (context2), set LPFC_IO_WAKE, and
 * wake the sleeping issuer.  The flag and copy are done under host_lock
 * so the waiter sees a consistent iocb when it wakes.
 */
static void
lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
			struct lpfc_iocbq *cmdiocbq,
			struct lpfc_iocbq *rspiocbq)
{
	wait_queue_head_t *pdone_q;
	unsigned long iflags;

	spin_lock_irqsave(phba->host->host_lock, iflags);
	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
	if (cmdiocbq->context2 && rspiocbq)
		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
		       &rspiocbq->iocb, sizeof(IOCB_t));

	/* Snapshot the wait queue pointer under the lock before waking. */
	pdone_q = cmdiocbq->context_un.wait_queue;
	spin_unlock_irqrestore(phba->host->host_lock, iflags);
	if (pdone_q)
		wake_up(pdone_q);
	return;
}
2957 
2958 /*
2959  * Issue the caller's iocb and wait for its completion, but no longer than the
2960  * caller's timeout.  Note that iocb_flags is cleared before the
2961  * lpfc_sli_issue_call since the wake routine sets a unique value and by
2962  * definition this is a wait function.
2963  */
2964 int
2965 lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
2966 			 struct lpfc_sli_ring * pring,
2967 			 struct lpfc_iocbq * piocb,
2968 			 struct lpfc_iocbq * prspiocbq,
2969 			 uint32_t timeout)
2970 {
2971 	DECLARE_WAIT_QUEUE_HEAD(done_q);
2972 	long timeleft, timeout_req = 0;
2973 	int retval = IOCB_SUCCESS;
2974 	uint32_t creg_val;
2975 
2976 	/*
2977 	 * If the caller has provided a response iocbq buffer, then context2
2978 	 * is NULL or its an error.
2979 	 */
2980 	if (prspiocbq) {
2981 		if (piocb->context2)
2982 			return IOCB_ERROR;
2983 		piocb->context2 = prspiocbq;
2984 	}
2985 
2986 	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
2987 	piocb->context_un.wait_queue = &done_q;
2988 	piocb->iocb_flag &= ~LPFC_IO_WAKE;
2989 
2990 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
2991 		creg_val = readl(phba->HCregaddr);
2992 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
2993 		writel(creg_val, phba->HCregaddr);
2994 		readl(phba->HCregaddr); /* flush */
2995 	}
2996 
2997 	retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
2998 	if (retval == IOCB_SUCCESS) {
2999 		timeout_req = timeout * HZ;
3000 		spin_unlock_irq(phba->host->host_lock);
3001 		timeleft = wait_event_timeout(done_q,
3002 				piocb->iocb_flag & LPFC_IO_WAKE,
3003 				timeout_req);
3004 		spin_lock_irq(phba->host->host_lock);
3005 
3006 		if (timeleft == 0) {
3007 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3008 					"%d:0329 IOCB wait timeout error - no "
3009 					"wake response Data x%x\n",
3010 					phba->brd_no, timeout);
3011 			retval = IOCB_TIMEDOUT;
3012 		} else if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
3013 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3014 					"%d:0330 IOCB wake NOT set, "
3015 					"Data x%x x%lx\n", phba->brd_no,
3016 					timeout, (timeleft / jiffies));
3017 			retval = IOCB_TIMEDOUT;
3018 		} else {
3019 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3020 					"%d:0331 IOCB wake signaled\n",
3021 					phba->brd_no);
3022 		}
3023 	} else {
3024 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3025 				"%d:0332 IOCB wait issue failed, Data x%x\n",
3026 				phba->brd_no, retval);
3027 		retval = IOCB_ERROR;
3028 	}
3029 
3030 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3031 		creg_val = readl(phba->HCregaddr);
3032 		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3033 		writel(creg_val, phba->HCregaddr);
3034 		readl(phba->HCregaddr); /* flush */
3035 	}
3036 
3037 	if (prspiocbq)
3038 		piocb->context2 = NULL;
3039 
3040 	piocb->context_un.wait_queue = NULL;
3041 	piocb->iocb_cmpl = NULL;
3042 	return retval;
3043 }
3044 
3045 int
3046 lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3047 			 uint32_t timeout)
3048 {
3049 	DECLARE_WAIT_QUEUE_HEAD(done_q);
3050 	DECLARE_WAITQUEUE(wq_entry, current);
3051 	uint32_t timeleft = 0;
3052 	int retval;
3053 
3054 	/* The caller must leave context1 empty. */
3055 	if (pmboxq->context1 != 0) {
3056 		return (MBX_NOT_FINISHED);
3057 	}
3058 
3059 	/* setup wake call as IOCB callback */
3060 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3061 	/* setup context field to pass wait_queue pointer to wake function  */
3062 	pmboxq->context1 = &done_q;
3063 
3064 	/* start to sleep before we wait, to avoid races */
3065 	set_current_state(TASK_INTERRUPTIBLE);
3066 	add_wait_queue(&done_q, &wq_entry);
3067 
3068 	/* now issue the command */
3069 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3070 
3071 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
3072 		timeleft = schedule_timeout(timeout * HZ);
3073 		pmboxq->context1 = NULL;
3074 		/* if schedule_timeout returns 0, we timed out and were not
3075 		   woken up */
3076 		if ((timeleft == 0) || signal_pending(current))
3077 			retval = MBX_TIMEOUT;
3078 		else
3079 			retval = MBX_SUCCESS;
3080 	}
3081 
3082 
3083 	set_current_state(TASK_RUNNING);
3084 	remove_wait_queue(&done_q, &wq_entry);
3085 	return retval;
3086 }
3087 
3088 int
3089 lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3090 {
3091 	int i = 0;
3092 
3093 	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {
3094 		if (i++ > LPFC_MBOX_TMO * 1000)
3095 			return 1;
3096 
3097 		if (lpfc_sli_handle_mb_event(phba) == 0)
3098 			i = 0;
3099 
3100 		msleep(1);
3101 	}
3102 
3103 	return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3104 }
3105 
/*
 * Interrupt service routine for an lpfc HBA.  Reads and acknowledges
 * the Host Attention (HA) register, defers slow-path events (link
 * attention, mailbox, error attention, slow rings) to the worker
 * thread via phba->work_ha, and handles FCP fast-ring completions
 * inline.
 *
 * Returns IRQ_HANDLED when this HBA raised the interrupt, IRQ_NONE
 * otherwise (shared-interrupt protocol).
 */
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;	/* snapshot of the HA register */
	uint32_t work_ha_copy;	/* HA bits deferred to the worker thread */
	unsigned long status;
	int i;
	uint32_t control;	/* shadow of the Host Control register */

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	phba->sli.slistat.sli_intr++;

	/*
	 * Call the HBA to see if it is interrupting.  If not, don't claim
	 * the interrupt
	 */

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
		return IRQ_NONE;

	/*
	 * Read host attention register to determine interrupt source
	 * Clear Attention Sources, except Error Attention (to
	 * preserve status) and Link Attention
	 */
	spin_lock(phba->host->host_lock);
	ha_copy = readl(phba->HAregaddr);
	writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(phba->host->host_lock);

	/* No attention bits set: the interrupt was not ours. */
	if (unlikely(!ha_copy))
		return IRQ_NONE;

	/* Bits in work_ha_mask are serviced by the worker thread. */
	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (unlikely(work_ha_copy)) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock(phba->host->host_lock);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				control = readl(phba->HCregaddr);
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock(phba->host->host_lock);
			}
			else
				/* LA processing disabled: drop the bit so
				 * the worker thread does not act on it. */
				work_ha_copy &= ~HA_LATT;
		}

		/* Any remaining bits besides ERATT/MBATT/LATT indicate
		 * slow-path ring attention events. */
		if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
			for (i = 0; i < phba->sli.num_rings; i++) {
				if (work_ha_copy & (HA_RXATT << (4*i))) {
					/*
					 * Turn off Slow Rings interrupts
					 */
					spin_lock(phba->host->host_lock);
					control = readl(phba->HCregaddr);
					control &= ~(HC_R0INT_ENA << i);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
					spin_unlock(phba->host->host_lock);
				}
			}
		}

		if (work_ha_copy & HA_ERATT) {
			phba->hba_state = LPFC_HBA_ERROR;
			/*
			 * There was a link/board error.  Read the
			 * status register to retrieve the error event
			 * and process it.
			 */
			phba->sli.slistat.err_attn_event++;
			/* Save status info */
			phba->work_hs = readl(phba->HSregaddr);
			/* NOTE(review): 0xa8/0xac look like fixed SLIM
			 * status-word offsets — confirm against the SLI
			 * hardware spec. */
			phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
			phba->work_status[1] = readl(phba->MBslimaddr + 0xac);

			/* Clear Chip error bit */
			writel(HA_ERATT, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */
			phba->stopped = 1;
		}

		/* Post the deferred events and kick the worker thread. */
		spin_lock(phba->host->host_lock);
		phba->work_ha |= work_ha_copy;
		if (phba->work_wait)
			wake_up(phba->work_wait);
		spin_unlock(phba->host->host_lock);
	}

	/* Everything left is fast-path work handled in this context. */
	ha_copy &= ~(phba->work_ha_mask);

	/*
	 * Process all events on FCP ring.  Take the optimized path for
	 * FCP IO.  Any other IO is slow path and is handled by
	 * the worker thread.
	 */
	status = (ha_copy & (HA_RXMASK  << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXATT)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);
	return IRQ_HANDLED;

} /* lpfc_intr_handler */
3229