1 /*******************************************************************
2  * This file is part of the Emulex Linux Device Driver for         *
3  * Fibre Channel Host Bus Adapters.                                *
4  * Copyright (C) 2004-2006 Emulex.  All rights reserved.           *
5  * EMULEX and SLI are trademarks of Emulex.                        *
6  * www.emulex.com                                                  *
7  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
8  *                                                                 *
9  * This program is free software; you can redistribute it and/or   *
10  * modify it under the terms of version 2 of the GNU General       *
11  * Public License as published by the Free Software Foundation.    *
12  * This program is distributed in the hope that it will be useful. *
13  * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
14  * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
15  * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
16  * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
17  * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
18  * more details, a copy of which can be found in the file COPYING  *
19  * included with this package.                                     *
20  *******************************************************************/
21 
22 #include <linux/blkdev.h>
23 #include <linux/pci.h>
24 #include <linux/interrupt.h>
25 #include <linux/delay.h>
26 
27 #include <scsi/scsi.h>
28 #include <scsi/scsi_cmnd.h>
29 #include <scsi/scsi_device.h>
30 #include <scsi/scsi_host.h>
31 #include <scsi/scsi_transport_fc.h>
32 
33 #include "lpfc_hw.h"
34 #include "lpfc_sli.h"
35 #include "lpfc_disc.h"
36 #include "lpfc_scsi.h"
37 #include "lpfc.h"
38 #include "lpfc_crtn.h"
39 #include "lpfc_logmsg.h"
40 #include "lpfc_compat.h"
41 
42 /*
43  * Define a macro to log: "Mailbox command x%x cannot issue Data".
44  * This allows multiple uses of lpfc_msgBlk0311 without perturbing
45  * the log message utility.
46  */
47 #define LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag) \
48 			lpfc_printf_log(phba, \
49 				KERN_INFO, \
50 				LOG_MBOX | LOG_SLI, \
51 				"%d:0311 Mailbox command x%x cannot issue " \
52 				"Data: x%x x%x x%x\n", \
53 				phba->brd_no, \
54 				mb->mbxCommand,		\
55 				phba->hba_state,	\
56 				psli->sli_flag,	\
57 				flag);
58 
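/*
 * Usage sketch for the macro above (the call site shown is
 * hypothetical): a submit path that cannot post a mailbox command
 * would log message 0311 as
 *
 *	LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag)
 *
 * Note that the macro body already ends in a semicolon.
 */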
59 
60 /* There are only four IOCB completion types. */
61 typedef enum _lpfc_iocb_type {
62 	LPFC_UNKNOWN_IOCB,
63 	LPFC_UNSOL_IOCB,
64 	LPFC_SOL_IOCB,
65 	LPFC_ABORT_IOCB
66 } lpfc_iocb_type;
67 
68 struct lpfc_iocbq *
69 lpfc_sli_get_iocbq(struct lpfc_hba * phba)
70 {
71 	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
72 	struct lpfc_iocbq * iocbq = NULL;
73 
74 	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
75 	return iocbq;
76 }
77 
78 void
79 lpfc_sli_release_iocbq(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
80 {
81 	size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
82 
83 	/*
84 	 * Clean all volatile data fields, preserve iotag and node struct.
85 	 */
86 	memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
87 	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
88 }
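
/*
 * lpfc_sli_get_iocbq() and lpfc_sli_release_iocbq() form an
 * allocate/release pair over the driver-private lpfc_iocb_list free
 * list.  A minimal sketch of the expected calling pattern follows;
 * my_cmpl and issue_failed are hypothetical stand-ins.
 */
#if 0	/* illustrative sketch only, not compiled */
	struct lpfc_iocbq *iocbq = lpfc_sli_get_iocbq(phba);

	if (!iocbq)
		return -ENOMEM;
	iocbq->iocb_cmpl = my_cmpl;	/* hypothetical completion handler */
	/* ... build and issue the IOCB ... */
	if (issue_failed)		/* hypothetical error path */
		lpfc_sli_release_iocbq(phba, iocbq);
#endif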
89 
90 /*
91  * Translate the iocb command to an iocb command type used to decide the final
92  * disposition of each completed IOCB.
93  */
94 static lpfc_iocb_type
95 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
96 {
97 	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
98 
99 	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
100 		return LPFC_UNKNOWN_IOCB;
101 
102 	switch (iocb_cmnd) {
103 	case CMD_XMIT_SEQUENCE_CR:
104 	case CMD_XMIT_SEQUENCE_CX:
105 	case CMD_XMIT_BCAST_CN:
106 	case CMD_XMIT_BCAST_CX:
107 	case CMD_ELS_REQUEST_CR:
108 	case CMD_ELS_REQUEST_CX:
109 	case CMD_CREATE_XRI_CR:
110 	case CMD_CREATE_XRI_CX:
111 	case CMD_GET_RPI_CN:
112 	case CMD_XMIT_ELS_RSP_CX:
113 	case CMD_GET_RPI_CR:
114 	case CMD_FCP_IWRITE_CR:
115 	case CMD_FCP_IWRITE_CX:
116 	case CMD_FCP_IREAD_CR:
117 	case CMD_FCP_IREAD_CX:
118 	case CMD_FCP_ICMND_CR:
119 	case CMD_FCP_ICMND_CX:
120 	case CMD_FCP_TSEND_CX:
121 	case CMD_FCP_TRSP_CX:
122 	case CMD_FCP_TRECEIVE_CX:
123 	case CMD_FCP_AUTO_TRSP_CX:
124 	case CMD_ADAPTER_MSG:
125 	case CMD_ADAPTER_DUMP:
126 	case CMD_XMIT_SEQUENCE64_CR:
127 	case CMD_XMIT_SEQUENCE64_CX:
128 	case CMD_XMIT_BCAST64_CN:
129 	case CMD_XMIT_BCAST64_CX:
130 	case CMD_ELS_REQUEST64_CR:
131 	case CMD_ELS_REQUEST64_CX:
132 	case CMD_FCP_IWRITE64_CR:
133 	case CMD_FCP_IWRITE64_CX:
134 	case CMD_FCP_IREAD64_CR:
135 	case CMD_FCP_IREAD64_CX:
136 	case CMD_FCP_ICMND64_CR:
137 	case CMD_FCP_ICMND64_CX:
138 	case CMD_FCP_TSEND64_CX:
139 	case CMD_FCP_TRSP64_CX:
140 	case CMD_FCP_TRECEIVE64_CX:
141 	case CMD_GEN_REQUEST64_CR:
142 	case CMD_GEN_REQUEST64_CX:
143 	case CMD_XMIT_ELS_RSP64_CX:
144 		type = LPFC_SOL_IOCB;
145 		break;
146 	case CMD_ABORT_XRI_CN:
147 	case CMD_ABORT_XRI_CX:
148 	case CMD_CLOSE_XRI_CN:
149 	case CMD_CLOSE_XRI_CX:
150 	case CMD_XRI_ABORTED_CX:
151 	case CMD_ABORT_MXRI64_CN:
152 		type = LPFC_ABORT_IOCB;
153 		break;
154 	case CMD_RCV_SEQUENCE_CX:
155 	case CMD_RCV_ELS_REQ_CX:
156 	case CMD_RCV_SEQUENCE64_CX:
157 	case CMD_RCV_ELS_REQ64_CX:
158 		type = LPFC_UNSOL_IOCB;
159 		break;
160 	default:
161 		type = LPFC_UNKNOWN_IOCB;
162 		break;
163 	}
164 
165 	return type;
166 }
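
/*
 * The type returned above drives the dispatch in the ring event
 * handlers below: LPFC_SOL_IOCB and LPFC_ABORT_IOCB responses are
 * matched back to their command IOCB via the iotag, LPFC_UNSOL_IOCB
 * entries are handed to the registered unsolicited-event routines,
 * and LPFC_UNKNOWN_IOCB is expected only for adapter messages.
 */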
167 
168 static int
169 lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
170 {
171 	struct lpfc_sli *psli = &phba->sli;
172 	MAILBOX_t *pmbox = &pmb->mb;
173 	int i, rc;
174 
175 	for (i = 0; i < psli->num_rings; i++) {
176 		phba->hba_state = LPFC_INIT_MBX_CMDS;
177 		lpfc_config_ring(phba, i, pmb);
178 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
179 		if (rc != MBX_SUCCESS) {
180 			lpfc_printf_log(phba,
181 					KERN_ERR,
182 					LOG_INIT,
183 					"%d:0446 Adapter failed to init, "
184 					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
185 					"ring %d\n",
186 					phba->brd_no,
187 					pmbox->mbxCommand,
188 					pmbox->mbxStatus,
189 					i);
190 			phba->hba_state = LPFC_HBA_ERROR;
191 			return -ENXIO;
192 		}
193 	}
194 	return 0;
195 }
196 
197 static int
198 lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
199 			struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
200 {
201 	list_add_tail(&piocb->list, &pring->txcmplq);
202 	pring->txcmplq_cnt++;
203 	if (unlikely(pring->ringno == LPFC_ELS_RING))
204 		mod_timer(&phba->els_tmofunc,
205 					jiffies + HZ * (phba->fc_ratov << 1));
206 
207 	return (0);
208 }
209 
210 static struct lpfc_iocbq *
211 lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
212 {
213 	struct list_head *dlp;
214 	struct lpfc_iocbq *cmd_iocb;
215 
216 	dlp = &pring->txq;
217 	cmd_iocb = NULL;
218 	list_remove_head(dlp, cmd_iocb, struct lpfc_iocbq, list);
221 	if (cmd_iocb) {
222 		/* An iocb was dequeued from the head of the txq;
223 		 * account for it before returning it.
224 		 */
225 		pring->txq_cnt--;
226 	}
227 	return (cmd_iocb);
228 }
229 
230 static IOCB_t *
231 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
232 {
233 	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
234 	uint32_t  max_cmd_idx = pring->numCiocb;
235 	IOCB_t *iocb = NULL;
236 
237 	if ((pring->next_cmdidx == pring->cmdidx) &&
238 	   (++pring->next_cmdidx >= max_cmd_idx))
239 		pring->next_cmdidx = 0;
240 
241 	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {
242 
243 		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
244 
245 		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
246 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
247 					"%d:0315 Ring %d issue: portCmdGet %d "
248 					"is bigger than cmd ring %d\n",
249 					phba->brd_no, pring->ringno,
250 					pring->local_getidx, max_cmd_idx);
251 
252 			phba->hba_state = LPFC_HBA_ERROR;
253 			/*
254 			 * All error attention handlers are posted to
255 			 * worker thread
256 			 */
257 			phba->work_ha |= HA_ERATT;
258 			phba->work_hs = HS_FFER3;
259 			if (phba->work_wait)
260 				wake_up(phba->work_wait);
261 
262 			return NULL;
263 		}
264 
265 		if (pring->local_getidx == pring->next_cmdidx)
266 			return NULL;
267 	}
268 
269 	iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);
270 
271 	return iocb;
272 }
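
/*
 * Worked example of the command ring arithmetic above, assuming a
 * ring with numCiocb == 4: if cmdidx == next_cmdidx == 3, the next
 * slot wraps to 0.  The slot is usable only if the port's consumer
 * index (pgp->cmdGetInx) has moved past it; while local_getidx ==
 * next_cmdidx the ring is full and NULL is returned so the caller
 * can flag the ring-full condition.
 */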
273 
274 uint16_t
275 lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_iocbq * iocbq)
276 {
277 	struct lpfc_iocbq ** new_arr;
278 	struct lpfc_iocbq ** old_arr;
279 	size_t new_len;
280 	struct lpfc_sli *psli = &phba->sli;
281 	uint16_t iotag;
282 
283 	spin_lock_irq(phba->host->host_lock);
284 	iotag = psli->last_iotag;
285 	if(++iotag < psli->iocbq_lookup_len) {
286 		psli->last_iotag = iotag;
287 		psli->iocbq_lookup[iotag] = iocbq;
288 		spin_unlock_irq(phba->host->host_lock);
289 		iocbq->iotag = iotag;
290 		return iotag;
291 	}
292 	else if (psli->iocbq_lookup_len < (0xffff
293 					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
294 		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
295 		spin_unlock_irq(phba->host->host_lock);
296 		new_arr = kzalloc(new_len * sizeof (struct lpfc_iocbq *),
297 				  GFP_KERNEL);
298 		if (new_arr) {
301 			spin_lock_irq(phba->host->host_lock);
302 			old_arr = psli->iocbq_lookup;
303 			if (new_len <= psli->iocbq_lookup_len) {
304 				/* highly improbable case */
305 				kfree(new_arr);
306 				iotag = psli->last_iotag;
307 				if(++iotag < psli->iocbq_lookup_len) {
308 					psli->last_iotag = iotag;
309 					psli->iocbq_lookup[iotag] = iocbq;
310 					spin_unlock_irq(phba->host->host_lock);
311 					iocbq->iotag = iotag;
312 					return iotag;
313 				}
314 				spin_unlock_irq(phba->host->host_lock);
315 				return 0;
316 			}
317 			if (psli->iocbq_lookup)
318 				memcpy(new_arr, old_arr,
319 				       ((psli->last_iotag + 1) *
320 					sizeof (struct lpfc_iocbq *)));
321 			psli->iocbq_lookup = new_arr;
322 			psli->iocbq_lookup_len = new_len;
323 			psli->last_iotag = iotag;
324 			psli->iocbq_lookup[iotag] = iocbq;
325 			spin_unlock_irq(phba->host->host_lock);
326 			iocbq->iotag = iotag;
327 			kfree(old_arr);
328 			return iotag;
329 		}
330 	} else
331 		spin_unlock_irq(phba->host->host_lock);
332 
333 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
334 			"%d:0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
335 			phba->brd_no, psli->last_iotag);
336 
337 	return 0;
338 }
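
/*
 * Allocation notes for lpfc_sli_next_iotag(): the iocbq_lookup
 * table grows in LPFC_IOCBQ_LOOKUP_INCREMENT steps.  The host lock
 * is dropped around the GFP_KERNEL allocation (it may sleep), which
 * is why iocbq_lookup_len is re-checked after the lock is retaken
 * before the new array is installed.  A return of 0 means no iotag
 * could be assigned.
 */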
339 
340 static void
341 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
342 		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
343 {
344 	/*
345 	 * Set up an iotag
346 	 */
347 	nextiocb->iocb.ulpIoTag = (nextiocb->iocb_cmpl) ? nextiocb->iotag : 0;
348 
349 	/*
350 	 * Issue iocb command to adapter
351 	 */
352 	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
353 	wmb();
354 	pring->stats.iocb_cmd++;
355 
356 	/*
357 	 * If there is no completion routine to call, we can release the
358 	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
359 	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
360 	 */
361 	if (nextiocb->iocb_cmpl)
362 		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
363 	else
364 		lpfc_sli_release_iocbq(phba, nextiocb);
365 
366 	/*
367 	 * Let the HBA know what IOCB slot will be the next one the
368 	 * driver will put a command into.
369 	 */
370 	pring->cmdidx = pring->next_cmdidx;
371 	writel(pring->cmdidx, phba->MBslimaddr
372 	       + (SLIMOFF + (pring->ringno * 2)) * 4);
373 }
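
/*
 * The writel() above is the command ring doorbell.  Each ring owns
 * a pair of 32-bit words in SLIM starting at SLIMOFF: the first is
 * the host command put index written here, the second the host
 * response get index (see the matching "+ 1" writes in the response
 * handlers below).
 */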
374 
375 static void
376 lpfc_sli_update_full_ring(struct lpfc_hba * phba,
377 			  struct lpfc_sli_ring *pring)
378 {
379 	int ringno = pring->ringno;
380 
381 	pring->flag |= LPFC_CALL_RING_AVAILABLE;
382 
383 	wmb();
384 
385 	/*
386 	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
387 	 * The HBA will tell us when an IOCB entry is available.
388 	 */
389 	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
390 	readl(phba->CAregaddr); /* flush */
391 
392 	pring->stats.iocb_cmd_full++;
393 }
394 
395 static void
396 lpfc_sli_update_ring(struct lpfc_hba * phba,
397 		     struct lpfc_sli_ring *pring)
398 {
399 	int ringno = pring->ringno;
400 
401 	/*
402 	 * Tell the HBA that there is work to do in this ring.
403 	 */
404 	wmb();
405 	writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
406 	readl(phba->CAregaddr); /* flush */
407 }
408 
409 static void
410 lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
411 {
412 	IOCB_t *iocb;
413 	struct lpfc_iocbq *nextiocb;
414 
415 	/*
416 	 * Check to see if:
417 	 *  (a) there is anything on the txq to send
418 	 *  (b) link is up
419 	 *  (c) link attention events can be processed (fcp ring only)
420 	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
421 	 */
422 	if (pring->txq_cnt &&
423 	    (phba->hba_state > LPFC_LINK_DOWN) &&
424 	    (pring->ringno != phba->sli.fcp_ring ||
425 	     phba->sli.sli_flag & LPFC_PROCESS_LA) &&
426 	    !(pring->flag & LPFC_STOP_IOCB_MBX)) {
427 
428 		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
429 		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
430 			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
431 
432 		if (iocb)
433 			lpfc_sli_update_ring(phba, pring);
434 		else
435 			lpfc_sli_update_full_ring(phba, pring);
436 	}
437 
438 	return;
439 }
440 
441 /* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
442 static void
443 lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
444 {
445 	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno];
446 
447 	/* If the ring is active, flag it */
448 	if (phba->sli.ring[ringno].cmdringaddr) {
449 		if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
450 			phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
451 			/*
452 			 * Force update of the local copy of cmdGetInx
453 			 */
454 			phba->sli.ring[ringno].local_getidx
455 				= le32_to_cpu(pgp->cmdGetInx);
456 			spin_lock_irq(phba->host->host_lock);
457 			lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
458 			spin_unlock_irq(phba->host->host_lock);
459 		}
460 	}
461 }
462 
463 static int
464 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
465 {
466 	uint8_t ret;
467 
468 	switch (mbxCommand) {
469 	case MBX_LOAD_SM:
470 	case MBX_READ_NV:
471 	case MBX_WRITE_NV:
472 	case MBX_RUN_BIU_DIAG:
473 	case MBX_INIT_LINK:
474 	case MBX_DOWN_LINK:
475 	case MBX_CONFIG_LINK:
476 	case MBX_CONFIG_RING:
477 	case MBX_RESET_RING:
478 	case MBX_READ_CONFIG:
479 	case MBX_READ_RCONFIG:
480 	case MBX_READ_SPARM:
481 	case MBX_READ_STATUS:
482 	case MBX_READ_RPI:
483 	case MBX_READ_XRI:
484 	case MBX_READ_REV:
485 	case MBX_READ_LNK_STAT:
486 	case MBX_REG_LOGIN:
487 	case MBX_UNREG_LOGIN:
488 	case MBX_READ_LA:
489 	case MBX_CLEAR_LA:
490 	case MBX_DUMP_MEMORY:
491 	case MBX_DUMP_CONTEXT:
492 	case MBX_RUN_DIAGS:
493 	case MBX_RESTART:
494 	case MBX_UPDATE_CFG:
495 	case MBX_DOWN_LOAD:
496 	case MBX_DEL_LD_ENTRY:
497 	case MBX_RUN_PROGRAM:
498 	case MBX_SET_MASK:
499 	case MBX_SET_SLIM:
500 	case MBX_UNREG_D_ID:
501 	case MBX_KILL_BOARD:
502 	case MBX_CONFIG_FARP:
503 	case MBX_BEACON:
504 	case MBX_LOAD_AREA:
505 	case MBX_RUN_BIU_DIAG64:
506 	case MBX_CONFIG_PORT:
507 	case MBX_READ_SPARM64:
508 	case MBX_READ_RPI64:
509 	case MBX_REG_LOGIN64:
510 	case MBX_READ_LA64:
511 	case MBX_FLASH_WR_ULA:
512 	case MBX_SET_DEBUG:
513 	case MBX_LOAD_EXP_ROM:
514 		ret = mbxCommand;
515 		break;
516 	default:
517 		ret = MBX_SHUTDOWN;
518 		break;
519 	}
520 	return (ret);
521 }
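
/*
 * lpfc_sli_chk_mbx_command() acts as a whitelist: any mailbox
 * command code not listed above maps to MBX_SHUTDOWN, which
 * lpfc_sli_handle_mb_event() below treats as a fatal error.
 */
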
522 static void
523 lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
524 {
525 	wait_queue_head_t *pdone_q;
526 
527 	/*
528 	 * If pdone_q is NULL, the waiting thread gave up and
529 	 * continued running.
530 	 */
531 	pdone_q = (wait_queue_head_t *) pmboxq->context1;
532 	if (pdone_q)
533 		wake_up_interruptible(pdone_q);
534 	return;
535 }
536 
537 void
538 lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
539 {
540 	struct lpfc_dmabuf *mp;
541 	mp = (struct lpfc_dmabuf *) (pmb->context1);
542 	if (mp) {
543 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
544 		kfree(mp);
545 	}
546 	mempool_free( pmb, phba->mbox_mem_pool);
547 	return;
548 }
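
/*
 * lpfc_sli_def_mbox_cmpl() is the catch-all mailbox completion: it
 * frees the DMA buffer hung off context1, if any, and returns the
 * mailbox to the mempool.  Callers needing no further processing
 * set it up as below (lpfc_sli_brdkill() later in this file does
 * exactly this):
 *
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 */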
549 
550 int
551 lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
552 {
553 	MAILBOX_t *mbox;
554 	MAILBOX_t *pmbox;
555 	LPFC_MBOXQ_t *pmb;
556 	struct lpfc_sli *psli;
557 	int i, rc;
558 	uint32_t process_next;
559 
560 	psli = &phba->sli;
561 	/* We should only get here if we are in SLI2 mode */
562 	if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
563 		return (1);
564 	}
565 
566 	phba->sli.slistat.mbox_event++;
567 
568 	/* Get a Mailbox buffer to setup mailbox commands for callback */
569 	if ((pmb = phba->sli.mbox_active)) {
570 		pmbox = &pmb->mb;
571 		mbox = &phba->slim2p->mbx;
572 
573 		/* First check out the status word */
574 		lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));
575 
576 		/* Sanity check to ensure the host owns the mailbox */
577 		if (pmbox->mbxOwner != OWN_HOST) {
578 			/* Let's try for a while */
579 			for (i = 0; i < 10240; i++) {
580 				/* First copy command data */
581 				lpfc_sli_pcimem_bcopy(mbox, pmbox,
582 							sizeof (uint32_t));
583 				if (pmbox->mbxOwner == OWN_HOST)
584 					goto mbout;
585 			}
586 			/* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
587 			   <status> */
588 			lpfc_printf_log(phba,
589 					KERN_WARNING,
590 					LOG_MBOX | LOG_SLI,
591 					"%d:0304 Stray Mailbox Interrupt "
592 					"mbxCommand x%x mbxStatus x%x\n",
593 					phba->brd_no,
594 					pmbox->mbxCommand,
595 					pmbox->mbxStatus);
596 
597 			spin_lock_irq(phba->host->host_lock);
598 			phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
599 			spin_unlock_irq(phba->host->host_lock);
600 			return (1);
601 		}
602 
603 mbout:
604 		del_timer_sync(&phba->sli.mbox_tmo);
605 		phba->work_hba_events &= ~WORKER_MBOX_TMO;
606 
607 		/*
608 		 * It is a fatal error if an unknown mbox command completes.
609 		 */
610 		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
611 		    MBX_SHUTDOWN) {
612 
613 			/* Unknown mailbox command completion */
614 			lpfc_printf_log(phba,
615 				KERN_ERR,
616 				LOG_MBOX | LOG_SLI,
617 				"%d:0323 Unknown Mailbox command %x Cmpl\n",
618 				phba->brd_no,
619 				pmbox->mbxCommand);
620 			phba->hba_state = LPFC_HBA_ERROR;
621 			phba->work_hs = HS_FFER3;
622 			lpfc_handle_eratt(phba);
623 			return (0);
624 		}
625 
626 		phba->sli.mbox_active = NULL;
627 		if (pmbox->mbxStatus) {
628 			phba->sli.slistat.mbox_stat_err++;
629 			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
630 				/* Mbox cmd cmpl error - RETRYing */
631 				lpfc_printf_log(phba,
632 					KERN_INFO,
633 					LOG_MBOX | LOG_SLI,
634 					"%d:0305 Mbox cmd cmpl error - "
635 					"RETRYing Data: x%x x%x x%x x%x\n",
636 					phba->brd_no,
637 					pmbox->mbxCommand,
638 					pmbox->mbxStatus,
639 					pmbox->un.varWords[0],
640 					phba->hba_state);
641 				pmbox->mbxStatus = 0;
642 				pmbox->mbxOwner = OWN_HOST;
643 				spin_lock_irq(phba->host->host_lock);
644 				phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
645 				spin_unlock_irq(phba->host->host_lock);
646 				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
647 				if (rc == MBX_SUCCESS)
648 					return (0);
649 			}
650 		}
651 
652 		/* Mailbox cmd <cmd> Cmpl <cmpl> */
653 		lpfc_printf_log(phba,
654 				KERN_INFO,
655 				LOG_MBOX | LOG_SLI,
656 				"%d:0307 Mailbox cmd x%x Cmpl x%p "
657 				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
658 				phba->brd_no,
659 				pmbox->mbxCommand,
660 				pmb->mbox_cmpl,
661 				*((uint32_t *) pmbox),
662 				pmbox->un.varWords[0],
663 				pmbox->un.varWords[1],
664 				pmbox->un.varWords[2],
665 				pmbox->un.varWords[3],
666 				pmbox->un.varWords[4],
667 				pmbox->un.varWords[5],
668 				pmbox->un.varWords[6],
669 				pmbox->un.varWords[7]);
670 
671 		if (pmb->mbox_cmpl) {
672 			lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
673 			pmb->mbox_cmpl(phba,pmb);
674 		}
675 	}
676 
677 
678 	do {
679 		process_next = 0;	/* by default don't loop */
680 		spin_lock_irq(phba->host->host_lock);
681 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
682 
683 		/* Process next mailbox command if there is one */
684 		if ((pmb = lpfc_mbox_get(phba))) {
685 			spin_unlock_irq(phba->host->host_lock);
686 			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
687 			if (rc == MBX_NOT_FINISHED) {
688 				pmb->mb.mbxStatus = MBX_NOT_FINISHED;
689 				pmb->mbox_cmpl(phba,pmb);
690 				process_next = 1;
691 				continue;	/* loop back */
692 			}
693 		} else {
694 			spin_unlock_irq(phba->host->host_lock);
695 			/* Turn on IOCB processing */
696 			for (i = 0; i < phba->sli.num_rings; i++) {
697 				lpfc_sli_turn_on_ring(phba, i);
698 			}
699 
700 			/* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
701 			while (!list_empty(&phba->freebufList)) {
702 				struct lpfc_dmabuf *mp;
703 
704 				mp = NULL;
705 				list_remove_head((&phba->freebufList),
706 						 mp,
707 						 struct lpfc_dmabuf,
708 						 list);
709 				if (mp) {
710 					lpfc_mbuf_free(phba, mp->virt,
711 						       mp->phys);
712 					kfree(mp);
713 				}
714 			}
715 		}
716 
717 	} while (process_next);
718 
719 	return (0);
720 }

721 static int
722 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
723 			    struct lpfc_iocbq *saveq)
724 {
725 	IOCB_t           * irsp;
726 	WORD5            * w5p;
727 	uint32_t           Rctl, Type;
728 	uint32_t           match, i;
729 
730 	match = 0;
731 	irsp = &(saveq->iocb);
732 	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
733 	    || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
734 		Rctl = FC_ELS_REQ;
735 		Type = FC_ELS_DATA;
736 	} else {
737 		w5p = (WORD5 *) &(saveq->iocb.un.ulpWord[5]);
740 		Rctl = w5p->hcsw.Rctl;
741 		Type = w5p->hcsw.Type;
742 
743 		/* Firmware Workaround */
744 		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
745 			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
746 			Rctl = FC_ELS_REQ;
747 			Type = FC_ELS_DATA;
748 			w5p->hcsw.Rctl = Rctl;
749 			w5p->hcsw.Type = Type;
750 		}
751 	}
752 	/* Unsolicited Responses */
753 	if (pring->prt[0].profile) {
754 		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
755 			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
756 									saveq);
757 		match = 1;
758 	} else {
759 		/* We must search, based on rctl / type
760 		   for the right routine */
761 		for (i = 0; i < pring->num_mask; i++) {
763 			if ((pring->prt[i].rctl == Rctl) &&
765 			    (pring->prt[i].type == Type)) {
767 				if (pring->prt[i].lpfc_sli_rcv_unsol_event)
768 					(pring->prt[i].lpfc_sli_rcv_unsol_event)
769 							(phba, pring, saveq);
770 				match = 1;
771 				break;
772 			}
773 		}
774 	}
775 	if (match == 0) {
776 		/* Unexpected Rctl / Type received */
777 		/* Ring <ringno> handler: unexpected
778 		   Rctl <Rctl> Type <Type> received */
779 		lpfc_printf_log(phba,
780 				KERN_WARNING,
781 				LOG_SLI,
782 				"%d:0313 Ring %d handler: unexpected Rctl x%x "
783 				"Type x%x received\n",
784 				phba->brd_no,
785 				pring->ringno,
786 				Rctl,
787 				Type);
788 	}
789 	return(1);
790 }
791 
792 static struct lpfc_iocbq *
793 lpfc_sli_iocbq_lookup(struct lpfc_hba * phba,
794 		      struct lpfc_sli_ring * pring,
795 		      struct lpfc_iocbq * prspiocb)
796 {
797 	struct lpfc_iocbq *cmd_iocb = NULL;
798 	uint16_t iotag;
799 
800 	iotag = prspiocb->iocb.ulpIoTag;
801 
802 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
803 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
804 		list_del(&cmd_iocb->list);
805 		pring->txcmplq_cnt--;
806 		return cmd_iocb;
807 	}
808 
809 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
810 			"%d:0317 iotag x%x is out of "
811 			"range: max iotag x%x wd0 x%x\n",
812 			phba->brd_no, iotag,
813 			phba->sli.last_iotag,
814 			*(((uint32_t *) &prspiocb->iocb) + 7));
815 	return NULL;
816 }
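
/*
 * lpfc_sli_iocbq_lookup() is the O(1) response-to-command match:
 * the ulpIoTag carried back in the response indexes directly into
 * sli.iocbq_lookup[], populated by lpfc_sli_next_iotag() at submit
 * time.  Callers must hold the host lock, since the command IOCB is
 * also unlinked from the txcmplq here.
 */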
817 
818 static int
819 lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
820 			  struct lpfc_iocbq *saveq)
821 {
822 	struct lpfc_iocbq * cmdiocbp;
823 	int rc = 1;
824 	unsigned long iflag;
825 
826 	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
827 	spin_lock_irqsave(phba->host->host_lock, iflag);
828 	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
829 	if (cmdiocbp) {
830 		if (cmdiocbp->iocb_cmpl) {
831 			/*
832 			 * ELS and all other ring completions are handled
833 			 * identically here: drop the host lock around the
834 			 * completion callback.
835 			 */
836 			spin_unlock_irqrestore(phba->host->host_lock,
837 					       iflag);
838 			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
839 			spin_lock_irqsave(phba->host->host_lock, iflag);
847 		} else
848 			lpfc_sli_release_iocbq(phba, cmdiocbp);
849 	} else {
850 		/*
851 		 * Unknown initiating command based on the response iotag.
852 		 * This could be the case on the ELS ring because of
853 		 * lpfc_els_abort().
854 		 */
855 		if (pring->ringno != LPFC_ELS_RING) {
856 			/*
857 			 * Ring <ringno> handler: unexpected completion IoTag
858 			 * <IoTag>
859 			 */
860 			lpfc_printf_log(phba,
861 				KERN_WARNING,
862 				LOG_SLI,
863 				"%d:0322 Ring %d handler: unexpected "
864 				"completion IoTag x%x Data: x%x x%x x%x x%x\n",
865 				phba->brd_no,
866 				pring->ringno,
867 				saveq->iocb.ulpIoTag,
868 				saveq->iocb.ulpStatus,
869 				saveq->iocb.un.ulpWord[4],
870 				saveq->iocb.ulpCommand,
871 				saveq->iocb.ulpContext);
872 		}
873 	}
874 
875 	spin_unlock_irqrestore(phba->host->host_lock, iflag);
876 	return rc;
877 }
878 
879 static void lpfc_sli_rsp_pointers_error(struct lpfc_hba * phba,
880 					struct lpfc_sli_ring * pring)
881 {
882 	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
883 	/*
884 	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
885 	 * rsp ring <portRspMax>
886 	 */
887 	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
888 			"%d:0312 Ring %d handler: portRspPut %d "
889 			"is bigger than rsp ring %d\n",
890 			phba->brd_no, pring->ringno,
891 			le32_to_cpu(pgp->rspPutInx),
892 			pring->numRiocb);
893 
894 	phba->hba_state = LPFC_HBA_ERROR;
895 
896 	/*
897 	 * All error attention handlers are posted to
898 	 * worker thread
899 	 */
900 	phba->work_ha |= HA_ERATT;
901 	phba->work_hs = HS_FFER3;
902 	if (phba->work_wait)
903 		wake_up(phba->work_wait);
904 
905 	return;
906 }
907 
908 void lpfc_sli_poll_fcp_ring(struct lpfc_hba * phba)
909 {
910 	struct lpfc_sli      * psli   = &phba->sli;
911 	struct lpfc_sli_ring * pring = &psli->ring[LPFC_FCP_RING];
912 	IOCB_t *irsp = NULL;
913 	IOCB_t *entry = NULL;
914 	struct lpfc_iocbq *cmdiocbq = NULL;
915 	struct lpfc_iocbq rspiocbq;
916 	struct lpfc_pgp *pgp;
917 	uint32_t status;
918 	uint32_t portRspPut, portRspMax;
919 	int type;
920 	uint32_t rsp_cmpl = 0;
921 	void __iomem *to_slim;
922 	uint32_t ha_copy;
923 
924 	pring->stats.iocb_event++;
925 
926 	/* The driver assumes SLI-2 mode */
927 	pgp =  &phba->slim2p->mbx.us.s2.port[pring->ringno];
928 
929 	/*
930 	 * The next available response entry should never exceed the maximum
931 	 * entries.  If it does, treat it as an adapter hardware error.
932 	 */
933 	portRspMax = pring->numRiocb;
934 	portRspPut = le32_to_cpu(pgp->rspPutInx);
935 	if (unlikely(portRspPut >= portRspMax)) {
936 		lpfc_sli_rsp_pointers_error(phba, pring);
937 		return;
938 	}
939 
940 	rmb();
941 	while (pring->rspidx != portRspPut) {
942 
943 		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
944 
945 		if (++pring->rspidx >= portRspMax)
946 			pring->rspidx = 0;
947 
948 		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
949 				      (uint32_t *) &rspiocbq.iocb,
950 				      sizeof (IOCB_t));
951 		irsp = &rspiocbq.iocb;
952 		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
953 		pring->stats.iocb_rsp++;
954 		rsp_cmpl++;
955 
956 		if (unlikely(irsp->ulpStatus)) {
957 			/* Rsp ring <ringno> error: IOCB */
958 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
959 					"%d:0326 Rsp Ring %d error: IOCB Data: "
960 					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
961 					phba->brd_no, pring->ringno,
962 					irsp->un.ulpWord[0],
963 					irsp->un.ulpWord[1],
964 					irsp->un.ulpWord[2],
965 					irsp->un.ulpWord[3],
966 					irsp->un.ulpWord[4],
967 					irsp->un.ulpWord[5],
968 					*(((uint32_t *) irsp) + 6),
969 					*(((uint32_t *) irsp) + 7));
970 		}
971 
972 		switch (type) {
973 		case LPFC_ABORT_IOCB:
974 		case LPFC_SOL_IOCB:
975 			/*
976 			 * Idle exchange closed via ABTS from port.  No iocb
977 			 * resources need to be recovered.
978 			 */
979 			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
980 				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
981 						"%d:0314 IOCB cmd 0x%x"
982 						" processed. Skipping"
983 						" completion\n", phba->brd_no,
984 						irsp->ulpCommand);
985 				break;
986 			}
987 
988 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
989 							 &rspiocbq);
990 			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
991 				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
992 						      &rspiocbq);
993 			}
994 			break;
995 		default:
996 			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
997 				char adaptermsg[LPFC_MAX_ADPTMSG];
998 				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
999 				memcpy(&adaptermsg[0], (uint8_t *) irsp,
1000 				       MAX_MSG_DATA);
1001 				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
1002 					 phba->brd_no, adaptermsg);
1003 			} else {
1004 				/* Unknown IOCB command */
1005 				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1006 						"%d:0321 Unknown IOCB command "
1007 						"Data: x%x, x%x x%x x%x x%x\n",
1008 						phba->brd_no, type,
1009 						irsp->ulpCommand,
1010 						irsp->ulpStatus,
1011 						irsp->ulpIoTag,
1012 						irsp->ulpContext);
1013 			}
1014 			break;
1015 		}
1016 
1017 		/*
1018 		 * The response IOCB has been processed.  Update the ring
1019 		 * pointer in SLIM.  If the port response put pointer has not
1020 		 * been updated, sync the pgp->rspPutInx and fetch the new port
1021 		 * response put pointer.
1022 		 */
1023 		to_slim = phba->MBslimaddr +
1024 			(SLIMOFF + (pring->ringno * 2) + 1) * 4;
1025 		writel(pring->rspidx, to_slim);
1026 
1027 		if (pring->rspidx == portRspPut)
1028 			portRspPut = le32_to_cpu(pgp->rspPutInx);
1029 	}
1030 
1031 	ha_copy = readl(phba->HAregaddr);
1032 	ha_copy >>= (LPFC_FCP_RING * 4);
1033 
1034 	if ((rsp_cmpl > 0) && (ha_copy & HA_R0RE_REQ)) {
1035 		pring->stats.iocb_rsp_full++;
1036 		status = ((CA_R0ATT | CA_R0RE_RSP) << (LPFC_FCP_RING * 4));
1037 		writel(status, phba->CAregaddr);
1038 		readl(phba->CAregaddr);
1039 	}
1040 	if ((ha_copy & HA_R0CE_RSP) &&
1041 	    (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1042 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1043 		pring->stats.iocb_cmd_empty++;
1044 
1045 		/* Force update of the local copy of cmdGetInx */
1046 		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1047 		lpfc_sli_resume_iocb(phba, pring);
1048 
1049 		if ((pring->lpfc_sli_cmd_available))
1050 			(pring->lpfc_sli_cmd_available) (phba, pring);
1051 
1052 	}
1053 
1054 	return;
1055 }
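
/*
 * lpfc_sli_poll_fcp_ring() above is the polled twin of
 * lpfc_sli_handle_fast_ring_event() below: with the driver
 * configured for ENABLE_FCP_RING_POLLING it reaps FCP ring
 * completions directly, without waiting for a host-attention
 * interrupt and without taking the host lock around the iocb_cmpl
 * callbacks.
 */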
1056 
1057 /*
1058  * This routine presumes LPFC_FCP_RING handling and doesn't bother
1059  * to check it explicitly.
1060  */
1061 static int
1062 lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
1063 				struct lpfc_sli_ring * pring, uint32_t mask)
1064 {
1065 	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
1066 	IOCB_t *irsp = NULL;
1067 	IOCB_t *entry = NULL;
1068 	struct lpfc_iocbq *cmdiocbq = NULL;
1069 	struct lpfc_iocbq rspiocbq;
1070 	uint32_t status;
1071 	uint32_t portRspPut, portRspMax;
1072 	int rc = 1;
1073 	lpfc_iocb_type type;
1074 	unsigned long iflag;
1075 	uint32_t rsp_cmpl = 0;
1076 	void __iomem  *to_slim;
1077 
1078 	spin_lock_irqsave(phba->host->host_lock, iflag);
1079 	pring->stats.iocb_event++;
1080 
1081 	/*
1082 	 * The next available response entry should never exceed the maximum
1083 	 * entries.  If it does, treat it as an adapter hardware error.
1084 	 */
1085 	portRspMax = pring->numRiocb;
1086 	portRspPut = le32_to_cpu(pgp->rspPutInx);
1087 	if (unlikely(portRspPut >= portRspMax)) {
1088 		lpfc_sli_rsp_pointers_error(phba, pring);
1089 		spin_unlock_irqrestore(phba->host->host_lock, iflag);
1090 		return 1;
1091 	}
1092 
1093 	rmb();
1094 	while (pring->rspidx != portRspPut) {
1095 		/*
1096 		 * Fetch an entry off the ring and copy it into a local data
1097 		 * structure.  The copy involves a byte-swap since the
1098 		 * network byte order and pci byte orders are different.
1099 		 */
1100 		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
1101 
1102 		if (++pring->rspidx >= portRspMax)
1103 			pring->rspidx = 0;
1104 
1105 		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
1106 				      (uint32_t *) &rspiocbq.iocb,
1107 				      sizeof (IOCB_t));
1108 		INIT_LIST_HEAD(&(rspiocbq.list));
1109 		irsp = &rspiocbq.iocb;
1110 
1111 		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
1112 		pring->stats.iocb_rsp++;
1113 		rsp_cmpl++;
1114 
1115 		if (unlikely(irsp->ulpStatus)) {
1116 			/* Rsp ring <ringno> error: IOCB */
1117 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
1118 				"%d:0336 Rsp Ring %d error: IOCB Data: "
1119 				"x%x x%x x%x x%x x%x x%x x%x x%x\n",
1120 				phba->brd_no, pring->ringno,
1121 				irsp->un.ulpWord[0], irsp->un.ulpWord[1],
1122 				irsp->un.ulpWord[2], irsp->un.ulpWord[3],
1123 				irsp->un.ulpWord[4], irsp->un.ulpWord[5],
1124 				*(((uint32_t *) irsp) + 6),
1125 				*(((uint32_t *) irsp) + 7));
1126 		}
1127 
1128 		switch (type) {
1129 		case LPFC_ABORT_IOCB:
1130 		case LPFC_SOL_IOCB:
1131 			/*
1132 			 * Idle exchange closed via ABTS from port.  No iocb
1133 			 * resources need to be recovered.
1134 			 */
1135 			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
1136 				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1137 						"%d:0333 IOCB cmd 0x%x"
1138 						" processed. Skipping"
1139 						" completion\n", phba->brd_no,
1140 						irsp->ulpCommand);
1141 				break;
1142 			}
1143 
1144 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
1145 							 &rspiocbq);
1146 			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
1147 				if (phba->cfg_poll & ENABLE_FCP_RING_POLLING) {
1148 					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1149 							      &rspiocbq);
1150 				} else {
1151 					spin_unlock_irqrestore(
1152 						phba->host->host_lock, iflag);
1153 					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
1154 							      &rspiocbq);
1155 					spin_lock_irqsave(phba->host->host_lock,
1156 							  iflag);
1157 				}
1158 			}
1159 			break;
1160 		case LPFC_UNSOL_IOCB:
1161 			spin_unlock_irqrestore(phba->host->host_lock, iflag);
1162 			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
1163 			spin_lock_irqsave(phba->host->host_lock, iflag);
1164 			break;
1165 		default:
1166 			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1167 				char adaptermsg[LPFC_MAX_ADPTMSG];
1168 				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
1169 				memcpy(&adaptermsg[0], (uint8_t *) irsp,
1170 				       MAX_MSG_DATA);
1171 				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
1172 					 phba->brd_no, adaptermsg);
1173 			} else {
1174 				/* Unknown IOCB command */
1175 				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
1176 					"%d:0334 Unknown IOCB command "
1177 					"Data: x%x, x%x x%x x%x x%x\n",
1178 					phba->brd_no, type, irsp->ulpCommand,
1179 					irsp->ulpStatus, irsp->ulpIoTag,
1180 					irsp->ulpContext);
1181 			}
1182 			break;
1183 		}
1184 
1185 		/*
1186 		 * The response IOCB has been processed.  Update the ring
1187 		 * pointer in SLIM.  If the port response put pointer has not
1188 		 * been updated, sync the pgp->rspPutInx and fetch the new port
1189 		 * response put pointer.
1190 		 */
1191 		to_slim = phba->MBslimaddr +
1192 			(SLIMOFF + (pring->ringno * 2) + 1) * 4;
1193 		writel(pring->rspidx, to_slim);
1194 
1195 		if (pring->rspidx == portRspPut)
1196 			portRspPut = le32_to_cpu(pgp->rspPutInx);
1197 	}
1198 
1199 	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
1200 		pring->stats.iocb_rsp_full++;
1201 		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1202 		writel(status, phba->CAregaddr);
1203 		readl(phba->CAregaddr);
1204 	}
1205 	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1206 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1207 		pring->stats.iocb_cmd_empty++;
1208 
1209 		/* Force update of the local copy of cmdGetInx */
1210 		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1211 		lpfc_sli_resume_iocb(phba, pring);
1212 
1213 		if ((pring->lpfc_sli_cmd_available))
1214 			(pring->lpfc_sli_cmd_available) (phba, pring);
1215 
1216 	}
1217 
1218 	spin_unlock_irqrestore(phba->host->host_lock, iflag);
1219 	return rc;
1220 }
1221 
1222 
1223 int
1224 lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
1225 			   struct lpfc_sli_ring * pring, uint32_t mask)
1226 {
1227 	IOCB_t *entry;
1228 	IOCB_t *irsp = NULL;
1229 	struct lpfc_iocbq *rspiocbp = NULL;
1230 	struct lpfc_iocbq *next_iocb;
1231 	struct lpfc_iocbq *cmdiocbp;
1232 	struct lpfc_iocbq *saveq;
1233 	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
1234 	uint8_t iocb_cmd_type;
1235 	lpfc_iocb_type type;
1236 	uint32_t status, free_saveq;
1237 	uint32_t portRspPut, portRspMax;
1238 	int rc = 1;
1239 	unsigned long iflag;
1240 	void __iomem  *to_slim;
1241 
1242 	spin_lock_irqsave(phba->host->host_lock, iflag);
1243 	pring->stats.iocb_event++;
1244 
1245 	/*
1246 	 * The next available response entry should never exceed the maximum
1247 	 * entries.  If it does, treat it as an adapter hardware error.
1248 	 */
1249 	portRspMax = pring->numRiocb;
1250 	portRspPut = le32_to_cpu(pgp->rspPutInx);
1251 	if (portRspPut >= portRspMax) {
1252 		/*
1253 		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
1254 		 * rsp ring <portRspMax>
1255 		 */
1256 		lpfc_printf_log(phba,
1257 				KERN_ERR,
1258 				LOG_SLI,
1259 				"%d:0303 Ring %d handler: portRspPut %d "
1260 				"is bigger than rsp ring %d\n",
1261 				phba->brd_no,
1262 				pring->ringno, portRspPut, portRspMax);
1263 
1264 		phba->hba_state = LPFC_HBA_ERROR;
1265 		spin_unlock_irqrestore(phba->host->host_lock, iflag);
1266 
1267 		phba->work_hs = HS_FFER3;
1268 		lpfc_handle_eratt(phba);
1269 
1270 		return 1;
1271 	}
1272 
1273 	rmb();
1274 	while (pring->rspidx != portRspPut) {
1275 		/*
1276 		 * Build a completion list and call the appropriate handler.
1277 		 * The process is to get the next available response iocb, get
1278 		 * a free iocb from the list, copy the response data into the
1279 		 * free iocb, insert to the continuation list, and update the
1280 		 * next response index to slim.  This process makes response
1281 		 * iocb's in the ring available to DMA as fast as possible but
1282 		 * pays a penalty for a copy operation.  Since the iocb is
1283 		 * only 32 bytes, this penalty is considered small relative to
1284 		 * the PCI reads for register values and a slim write.  When
1285 		 * the ulpLe field is set, the entire Command has been
1286 		 * received.
1287 		 */
1288 		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
1289 		rspiocbp = lpfc_sli_get_iocbq(phba);
1290 		if (rspiocbp == NULL) {
1291 			printk(KERN_ERR "%s: out of buffers! Failing "
1292 			       "completion.\n", __func__);
1293 			break;
1294 		}
1295 
1296 		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
1297 		irsp = &rspiocbp->iocb;
1298 
1299 		if (++pring->rspidx >= portRspMax)
1300 			pring->rspidx = 0;
1301 
1302 		to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
1303 					      + 1) * 4;
1304 		writel(pring->rspidx, to_slim);
1305 
1306 		list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
1312 
1313 		pring->iocb_continueq_cnt++;
1314 		if (irsp->ulpLe) {
1315 			/*
1316 			 * By default, the driver expects to free all resources
1317 			 * associated with this iocb completion.
1318 			 */
1319 			free_saveq = 1;
1320 			saveq = list_get_first(&pring->iocb_continueq,
1321 					       struct lpfc_iocbq, list);
1322 			irsp = &(saveq->iocb);
1323 			list_del_init(&pring->iocb_continueq);
1324 			pring->iocb_continueq_cnt = 0;
1325 
1326 			pring->stats.iocb_rsp++;
1327 
1328 			if (irsp->ulpStatus) {
1329 				/* Rsp ring <ringno> error: IOCB */
1330 				lpfc_printf_log(phba,
1331 					KERN_WARNING,
1332 					LOG_SLI,
1333 					"%d:0328 Rsp Ring %d error: IOCB Data: "
1334 					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
1335 					phba->brd_no,
1336 					pring->ringno,
1337 					irsp->un.ulpWord[0],
1338 					irsp->un.ulpWord[1],
1339 					irsp->un.ulpWord[2],
1340 					irsp->un.ulpWord[3],
1341 					irsp->un.ulpWord[4],
1342 					irsp->un.ulpWord[5],
1343 					*(((uint32_t *) irsp) + 6),
1344 					*(((uint32_t *) irsp) + 7));
1345 			}
1346 
1347 			/*
1348 			 * Fetch the IOCB command type and call the correct
1349 			 * completion routine.  Solicited and Unsolicited
1350 			 * IOCBs on the ELS ring get freed back to the
1351 			 * lpfc_iocb_list by the discovery kernel thread.
1352 			 */
1353 			iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
1354 			type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
1355 			if (type == LPFC_SOL_IOCB) {
1356 				spin_unlock_irqrestore(phba->host->host_lock,
1357 						       iflag);
1358 				rc = lpfc_sli_process_sol_iocb(phba, pring,
1359 					saveq);
1360 				spin_lock_irqsave(phba->host->host_lock, iflag);
1361 			} else if (type == LPFC_UNSOL_IOCB) {
1362 				spin_unlock_irqrestore(phba->host->host_lock,
1363 						       iflag);
1364 				rc = lpfc_sli_process_unsol_iocb(phba, pring,
1365 					saveq);
1366 				spin_lock_irqsave(phba->host->host_lock, iflag);
1367 			} else if (type == LPFC_ABORT_IOCB) {
1368 				if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
1369 				    ((cmdiocbp =
1370 				      lpfc_sli_iocbq_lookup(phba, pring,
1371 							    saveq)))) {
1372 					/* Call the specified completion
1373 					   routine */
1374 					if (cmdiocbp->iocb_cmpl) {
1375 						spin_unlock_irqrestore(
1376 						       phba->host->host_lock,
1377 						       iflag);
1378 						(cmdiocbp->iocb_cmpl) (phba,
1379 							     cmdiocbp, saveq);
1380 						spin_lock_irqsave(
1381 							  phba->host->host_lock,
1382 							  iflag);
1383 					} else
1384 						lpfc_sli_release_iocbq(phba,
1385 								      cmdiocbp);
1386 				}
1387 			} else if (type == LPFC_UNKNOWN_IOCB) {
1388 				if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
1389 
1390 					char adaptermsg[LPFC_MAX_ADPTMSG];
1391 
1392 					memset(adaptermsg, 0,
1393 					       LPFC_MAX_ADPTMSG);
1394 					memcpy(&adaptermsg[0], (uint8_t *) irsp,
1395 					       MAX_MSG_DATA);
1396 					dev_warn(&((phba->pcidev)->dev),
1397 						 "lpfc%d: %s",
1398 						 phba->brd_no, adaptermsg);
1399 				} else {
1400 					/* Unknown IOCB command */
1401 					lpfc_printf_log(phba,
1402 						KERN_ERR,
1403 						LOG_SLI,
1404 						"%d:0335 Unknown IOCB command "
1405 						"Data: x%x x%x x%x x%x\n",
1406 						phba->brd_no,
1407 						irsp->ulpCommand,
1408 						irsp->ulpStatus,
1409 						irsp->ulpIoTag,
1410 						irsp->ulpContext);
1411 				}
1412 			}
1413 
1414 			if (free_saveq) {
1415 				if (!list_empty(&saveq->list)) {
1416 					list_for_each_entry_safe(rspiocbp,
1417 								 next_iocb,
1418 								 &saveq->list,
1419 								 list) {
1420 						list_del(&rspiocbp->list);
1421 						lpfc_sli_release_iocbq(phba,
1422 								     rspiocbp);
1423 					}
1424 				}
1425 				lpfc_sli_release_iocbq(phba, saveq);
1426 			}
1427 		}
1428 
1429 		/*
1430 		 * If the port response put pointer has not been updated, sync
1431 		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
1432 		 * response put pointer.
1433 		 */
1434 		if (pring->rspidx == portRspPut) {
1435 			portRspPut = le32_to_cpu(pgp->rspPutInx);
1436 		}
1437 	} /* while (pring->rspidx != portRspPut) */
1438 
1439 	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
1440 		/* At least one response entry has been freed */
1441 		pring->stats.iocb_rsp_full++;
1442 		/* SET RxRE_RSP in Chip Att register */
1443 		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
1444 		writel(status, phba->CAregaddr);
1445 		readl(phba->CAregaddr); /* flush */
1446 	}
1447 	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
1448 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
1449 		pring->stats.iocb_cmd_empty++;
1450 
1451 		/* Force update of the local copy of cmdGetInx */
1452 		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
1453 		lpfc_sli_resume_iocb(phba, pring);
1454 
1455 		if ((pring->lpfc_sli_cmd_available))
1456 			(pring->lpfc_sli_cmd_available) (phba, pring);
1457 
1458 	}
1459 
1460 	spin_unlock_irqrestore(phba->host->host_lock, iflag);
1461 	return rc;
1462 }
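
/*
 * Summary of the continuation-list scheme described above: response
 * entries are copied off the ring into iocbq's chained on
 * pring->iocb_continueq until one arrives with ulpLe set, marking
 * the end of the command.  The first entry (saveq) then carries the
 * whole chain into the sol/unsol/abort handlers, and free_saveq
 * decides whether the chain is returned to lpfc_iocb_list here.
 */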
1463 
1464 int
1465 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1466 {
1467 	struct lpfc_iocbq *iocb, *next_iocb;
1468 	IOCB_t *icmd = NULL, *cmd = NULL;
1469 	int errcnt;
1470 
1471 	errcnt = 0;
1472 
1473 	/* Error everything on the txq and txcmplq.
1474 	 * First do the txq.
1475 	 */
1476 	spin_lock_irq(phba->host->host_lock);
1477 	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
1478 		list_del_init(&iocb->list);
1479 		if (iocb->iocb_cmpl) {
1480 			icmd = &iocb->iocb;
1481 			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1482 			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1483 			spin_unlock_irq(phba->host->host_lock);
1484 			(iocb->iocb_cmpl) (phba, iocb, iocb);
1485 			spin_lock_irq(phba->host->host_lock);
1486 		} else
1487 			lpfc_sli_release_iocbq(phba, iocb);
1488 	}
1489 	pring->txq_cnt = 0;
1490 	INIT_LIST_HEAD(&(pring->txq));
1491 
1492 	/* Next issue ABTS for everything on the txcmplq */
1493 	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
1494 		cmd = &iocb->iocb;
1495 
1496 		/*
1497 		 * Immediate abort of IOCB: dequeue and call compl
1498 		 */
1499 
1500 		list_del_init(&iocb->list);
1501 		pring->txcmplq_cnt--;
1502 
1503 		if (iocb->iocb_cmpl) {
1504 			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
1505 			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
1506 			spin_unlock_irq(phba->host->host_lock);
1507 			(iocb->iocb_cmpl) (phba, iocb, iocb);
1508 			spin_lock_irq(phba->host->host_lock);
1509 		} else
1510 			lpfc_sli_release_iocbq(phba, iocb);
1511 	}
1512 
1513 	INIT_LIST_HEAD(&pring->txcmplq);
1514 	pring->txcmplq_cnt = 0;
1515 	spin_unlock_irq(phba->host->host_lock);
1516 
1517 	return errcnt;
1518 }
1519 
1520 int
1521 lpfc_sli_brdready(struct lpfc_hba * phba, uint32_t mask)
1522 {
1523 	uint32_t status;
1524 	int i = 0;
1525 	int retval = 0;
1526 
1527 	/* Read the HBA Host Status Register */
1528 	status = readl(phba->HSregaddr);
1529 
1530 	/*
1531 	 * Check the status register every 10ms for 5 retries, then every
1532 	 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
1533 	 * check every 2.5 sec for 4 more.
1534 	 * Break out of the loop if errors occurred during init.
1535 	 */
1536 	while (((status & mask) != mask) &&
1537 	       !(status & HS_FFERM) &&
1538 	       i++ < 20) {
1539 
1540 		if (i <= 5)
1541 			msleep(10);
1542 		else if (i <= 10)
1543 			msleep(500);
1544 		else
1545 			msleep(2500);
1546 
1547 		if (i == 15) {
1548 			phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
1549 			lpfc_sli_brdrestart(phba);
1550 		}
1551 		/* Read the HBA Host Status Register */
1552 		status = readl(phba->HSregaddr);
1553 	}
1554 
1555 	/* Check to see if any errors occurred during init */
1556 	if ((status & HS_FFERM) || (i >= 20)) {
1557 		phba->hba_state = LPFC_HBA_ERROR;
1558 		retval = 1;
1559 	}
1560 
1561 	return retval;
1562 }
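
/*
 * Example timeline for lpfc_sli_brdready(): up to 20 polls of the
 * host status register -- 5 at 10ms, 5 at 500ms, then 2.5 sec
 * intervals, with a board restart attempted at the 15th poll.
 * Worst case that is roughly 5*10ms + 5*500ms + 10*2.5s, a little
 * over 27 seconds, before LPFC_HBA_ERROR is set.
 */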
1563 
1564 #define BARRIER_TEST_PATTERN (0xdeadbeef)
1565 
1566 void lpfc_reset_barrier(struct lpfc_hba * phba)
1567 {
1568 	uint32_t __iomem *resp_buf;
1569 	uint32_t __iomem *mbox_buf;
1570 	volatile uint32_t mbox;
1571 	uint32_t hc_copy;
1572 	int  i;
1573 	uint8_t hdrtype;
1574 
1575 	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
1576 	if (hdrtype != 0x80 ||
1577 	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
1578 	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
1579 		return;
1580 
1581 	/*
1582 	 * Tell the other part of the chip to suspend temporarily all
1583 	 * its DMA activity.
1584 	 */
1585 	resp_buf = phba->MBslimaddr;
1586 
1587 	/* Disable the error attention */
1588 	hc_copy = readl(phba->HCregaddr);
1589 	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
1590 	readl(phba->HCregaddr); /* flush */
1591 
1592 	if (readl(phba->HAregaddr) & HA_ERATT) {
1593 		/* Clear Chip error bit */
1594 		writel(HA_ERATT, phba->HAregaddr);
1595 		phba->stopped = 1;
1596 	}
1597 
1598 	mbox = 0;
1599 	((MAILBOX_t *)&mbox)->mbxCommand = MBX_KILL_BOARD;
1600 	((MAILBOX_t *)&mbox)->mbxOwner = OWN_CHIP;
1601 
1602 	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
1603 	mbox_buf = phba->MBslimaddr;
1604 	writel(mbox, mbox_buf);
1605 
1606 	for (i = 0;
1607 	     readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN) && i < 50; i++)
1608 		mdelay(1);
1609 
1610 	if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
1611 		if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
1612 		    phba->stopped)
1613 			goto restore_hc;
1614 		else
1615 			goto clear_errat;
1616 	}
1617 
1618 	((MAILBOX_t *)&mbox)->mbxOwner = OWN_HOST;
1619 	for (i = 0; readl(resp_buf) != mbox &&  i < 500; i++)
1620 		mdelay(1);
1621 
1622 clear_errat:
1623 
1624 	while (!(readl(phba->HAregaddr) & HA_ERATT) && ++i < 500)
1625 		mdelay(1);
1626 
1627 	if (readl(phba->HAregaddr) & HA_ERATT) {
1628 		writel(HA_ERATT, phba->HAregaddr);
1629 		phba->stopped = 1;
1630 	}
1631 
1632 restore_hc:
1633 	writel(hc_copy, phba->HCregaddr);
1634 	readl(phba->HCregaddr); /* flush */
1635 }
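
/*
 * The barrier protocol above, in brief: with error attention
 * masked, a chip-owned MBX_KILL_BOARD mailbox word is written to
 * SLIM and BARRIER_TEST_PATTERN is parked in the word after it.
 * The port acknowledges by writing back the one's complement of
 * the pattern, at which point its DMA is quiesced and the caller
 * (lpfc_sli_brdrestart) can safely issue MBX_RESTART.
 */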
1636 
1637 int
1638 lpfc_sli_brdkill(struct lpfc_hba * phba)
1639 {
1640 	struct lpfc_sli *psli;
1641 	LPFC_MBOXQ_t *pmb;
1642 	uint32_t status;
1643 	uint32_t ha_copy;
1644 	int retval;
1645 	int i = 0;
1646 
1647 	psli = &phba->sli;
1648 
1649 	/* Kill HBA */
1650 	lpfc_printf_log(phba,
1651 		KERN_INFO,
1652 		LOG_SLI,
1653 		"%d:0329 Kill HBA Data: x%x x%x\n",
1654 		phba->brd_no,
1655 		phba->hba_state,
1656 		psli->sli_flag);
1657 
1658 	if ((pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool,
1659 						  GFP_KERNEL)) == 0)
1660 		return 1;
1661 
1662 	/* Disable the error attention */
1663 	spin_lock_irq(phba->host->host_lock);
1664 	status = readl(phba->HCregaddr);
1665 	status &= ~HC_ERINT_ENA;
1666 	writel(status, phba->HCregaddr);
1667 	readl(phba->HCregaddr); /* flush */
1668 	spin_unlock_irq(phba->host->host_lock);
1669 
1670 	lpfc_kill_board(phba, pmb);
1671 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
1672 	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
1673 
1674 	if (retval != MBX_SUCCESS) {
1675 		if (retval != MBX_BUSY)
1676 			mempool_free(pmb, phba->mbox_mem_pool);
1677 		return 1;
1678 	}
1679 
1680 	psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
1681 
1682 	mempool_free(pmb, phba->mbox_mem_pool);
1683 
1684 	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
1685 	 * attention every 100ms for 3 seconds. If we don't get ERATT after
1686 	 * 3 seconds we still set HBA_ERROR state because the status of the
1687 	 * board is now undefined.
1688 	 */
1689 	ha_copy = readl(phba->HAregaddr);
1690 
1691 	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
1692 		mdelay(100);
1693 		ha_copy = readl(phba->HAregaddr);
1694 	}
1695 
1696 	del_timer_sync(&psli->mbox_tmo);
1697 	if (ha_copy & HA_ERATT) {
1698 		writel(HA_ERATT, phba->HAregaddr);
1699 		phba->stopped = 1;
1700 	}
1701 	spin_lock_irq(phba->host->host_lock);
1702 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1703 	spin_unlock_irq(phba->host->host_lock);
1704 
1705 	psli->mbox_active = NULL;
1706 	lpfc_hba_down_post(phba);
1707 	phba->hba_state = LPFC_HBA_ERROR;
1708 
1709 	return (ha_copy & HA_ERATT ? 0 : 1);
1710 }
1711 
1712 int
1713 lpfc_sli_brdreset(struct lpfc_hba * phba)
1714 {
1715 	struct lpfc_sli *psli;
1716 	struct lpfc_sli_ring *pring;
1717 	uint16_t cfg_value;
1718 	int i;
1719 
1720 	psli = &phba->sli;
1721 
1722 	/* Reset HBA */
1723 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1724 			"%d:0325 Reset HBA Data: x%x x%x\n", phba->brd_no,
1725 			phba->hba_state, psli->sli_flag);
1726 
1727 	/* perform board reset */
1728 	phba->fc_eventTag = 0;
1729 	phba->fc_myDID = 0;
1730 	phba->fc_prevDID = 0;
1731 
1732 	/* Turn off parity checking and serr during the physical reset */
1733 	pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
1734 	pci_write_config_word(phba->pcidev, PCI_COMMAND,
1735 			      (cfg_value &
1736 			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
1737 
1738 	psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
1739 	/* Now toggle INITFF bit in the Host Control Register */
1740 	writel(HC_INITFF, phba->HCregaddr);
1741 	mdelay(1);
1742 	readl(phba->HCregaddr); /* flush */
1743 	writel(0, phba->HCregaddr);
1744 	readl(phba->HCregaddr); /* flush */
1745 
1746 	/* Restore PCI cmd register */
1747 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
1748 
1749 	/* Initialize relevant SLI info */
1750 	for (i = 0; i < psli->num_rings; i++) {
1751 		pring = &psli->ring[i];
1752 		pring->flag = 0;
1753 		pring->rspidx = 0;
1754 		pring->next_cmdidx  = 0;
1755 		pring->local_getidx = 0;
1756 		pring->cmdidx = 0;
1757 		pring->missbufcnt = 0;
1758 	}
1759 
1760 	phba->hba_state = LPFC_WARM_START;
1761 	return 0;
1762 }
1763 
1764 int
1765 lpfc_sli_brdrestart(struct lpfc_hba * phba)
1766 {
1767 	MAILBOX_t *mb;
1768 	struct lpfc_sli *psli;
1769 	uint16_t skip_post;
1770 	volatile uint32_t word0;
1771 	void __iomem *to_slim;
1772 
1773 	spin_lock_irq(phba->host->host_lock);
1774 
1775 	psli = &phba->sli;
1776 
1777 	/* Restart HBA */
1778 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
1779 			"%d:0337 Restart HBA Data: x%x x%x\n", phba->brd_no,
1780 			phba->hba_state, psli->sli_flag);
1781 
1782 	word0 = 0;
1783 	mb = (MAILBOX_t *) &word0;
1784 	mb->mbxCommand = MBX_RESTART;
1785 	mb->mbxHc = 1;
1786 
1787 	lpfc_reset_barrier(phba);
1788 
1789 	to_slim = phba->MBslimaddr;
1790 	writel(*(uint32_t *) mb, to_slim);
1791 	readl(to_slim); /* flush */
1792 
1793 	/* Only skip post after fc_ffinit is completed */
1794 	if (phba->hba_state) {
1795 		skip_post = 1;
1796 		word0 = 1;	/* This is really setting up word1 */
1797 	} else {
1798 		skip_post = 0;
1799 		word0 = 0;	/* This is really setting up word1 */
1800 	}
1801 	to_slim = phba->MBslimaddr + sizeof (uint32_t);
1802 	writel(*(uint32_t *) mb, to_slim);
1803 	readl(to_slim); /* flush */
1804 
1805 	lpfc_sli_brdreset(phba);
1806 	phba->stopped = 0;
1807 	phba->hba_state = LPFC_INIT_START;
1808 
1809 	spin_unlock_irq(phba->host->host_lock);
1810 
1811 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
1812 	psli->stats_start = get_seconds();
1813 
1814 	if (skip_post)
1815 		mdelay(100);
1816 	else
1817 		mdelay(2000);
1818 
1819 	lpfc_hba_down_post(phba);
1820 
1821 	return 0;
1822 }
1823 
1824 static int
1825 lpfc_sli_chipset_init(struct lpfc_hba *phba)
1826 {
1827 	uint32_t status, i = 0;
1828 
1829 	/* Read the HBA Host Status Register */
1830 	status = readl(phba->HSregaddr);
1831 
1832 	/* Check status register to see what current state is */
1833 	i = 0;
1834 	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
1835 
1836 		/* Check every 10ms for 5 retries, then every 500ms for 5, then
1837 		 * every 2.5 sec for 5, then reset the board and check every
1838 		 * 2.5 sec for 4 more.
1839 		 */
1840 		if (i++ >= 20) {
1841 			/* Adapter failed to init, timeout, status reg
1842 			   <status> */
1843 			lpfc_printf_log(phba,
1844 					KERN_ERR,
1845 					LOG_INIT,
1846 					"%d:0436 Adapter failed to init, "
1847 					"timeout, status reg x%x\n",
1848 					phba->brd_no,
1849 					status);
1850 			phba->hba_state = LPFC_HBA_ERROR;
1851 			return -ETIMEDOUT;
1852 		}
1853 
1854 		/* Check to see if any errors occurred during init */
1855 		if (status & HS_FFERM) {
1856 			/* ERROR: During chipset initialization */
1857 			/* Adapter failed to init, chipset, status reg
1858 			   <status> */
1859 			lpfc_printf_log(phba,
1860 					KERN_ERR,
1861 					LOG_INIT,
1862 					"%d:0437 Adapter failed to init, "
1863 					"chipset, status reg x%x\n",
1864 					phba->brd_no,
1865 					status);
1866 			phba->hba_state = LPFC_HBA_ERROR;
1867 			return -EIO;
1868 		}
1869 
1870 		if (i <= 5) {
1871 			msleep(10);
1872 		} else if (i <= 10) {
1873 			msleep(500);
1874 		} else {
1875 			msleep(2500);
1876 		}
1877 
1878 		if (i == 15) {
1879 			phba->hba_state = LPFC_STATE_UNKNOWN; /* Do post */
1880 			lpfc_sli_brdrestart(phba);
1881 		}
1882 		/* Read the HBA Host Status Register */
1883 		status = readl(phba->HSregaddr);
1884 	}
1885 
1886 	/* Check to see if any errors occurred during init */
1887 	if (status & HS_FFERM) {
1888 		/* ERROR: During chipset initialization */
1889 		/* Adapter failed to init, chipset, status reg <status> */
1890 		lpfc_printf_log(phba,
1891 				KERN_ERR,
1892 				LOG_INIT,
1893 				"%d:0438 Adapter failed to init, chipset, "
1894 				"status reg x%x\n",
1895 				phba->brd_no,
1896 				status);
1897 		phba->hba_state = LPFC_HBA_ERROR;
1898 		return -EIO;
1899 	}
1900 
1901 	/* Clear all interrupt enable conditions */
1902 	writel(0, phba->HCregaddr);
1903 	readl(phba->HCregaddr); /* flush */
1904 
1905 	/* setup host attn register */
1906 	writel(0xffffffff, phba->HAregaddr);
1907 	readl(phba->HAregaddr); /* flush */
1908 	return 0;
1909 }
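
/*
 * Worst case, the polling loop above sleeps 5 x 10ms + 5 x 500ms +
 * 10 x 2.5s (roughly 27.5 seconds of sleeping, plus the delays inside
 * lpfc_sli_brdrestart() at try 15) before giving up with -ETIMEDOUT on
 * the 21st check.
 */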
1910 
1911 int
1912 lpfc_sli_hba_setup(struct lpfc_hba * phba)
1913 {
1914 	LPFC_MBOXQ_t *pmb;
1915 	uint32_t resetcount = 0, rc = 0, done = 0;
1916 
1917 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1918 	if (!pmb) {
1919 		phba->hba_state = LPFC_HBA_ERROR;
1920 		return -ENOMEM;
1921 	}
1922 
1923 	while (resetcount < 2 && !done) {
1924 		spin_lock_irq(phba->host->host_lock);
1925 		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
1926 		spin_unlock_irq(phba->host->host_lock);
1927 		phba->hba_state = LPFC_STATE_UNKNOWN;
1928 		lpfc_sli_brdrestart(phba);
1929 		msleep(2500);
1930 		rc = lpfc_sli_chipset_init(phba);
1931 		if (rc)
1932 			break;
1933 
1934 		spin_lock_irq(phba->host->host_lock);
1935 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
1936 		spin_unlock_irq(phba->host->host_lock);
1937 		resetcount++;
1938 
1939 		/* Call pre CONFIG_PORT mailbox command initialization.  A
1940 		 * return of 0 means the call was successful.  Any nonzero
1941 		 * return is a failure, but if ERESTART is returned, the
1942 		 * driver may reset the HBA and try again.
1943 		 */
1944 		rc = lpfc_config_port_prep(phba);
1945 		if (rc == -ERESTART) {
1946 			phba->hba_state = 0;
1947 			continue;
1948 		} else if (rc) {
1949 			break;
1950 		}
1951 
1952 		phba->hba_state = LPFC_INIT_MBX_CMDS;
1953 		lpfc_config_port(phba, pmb);
1954 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1955 		if (rc == MBX_SUCCESS)
1956 			done = 1;
1957 		else {
1958 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
1959 				"%d:0442 Adapter failed to init, mbxCmd x%x "
1960 				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
1961 				phba->brd_no, pmb->mb.mbxCommand,
1962 				pmb->mb.mbxStatus, 0);
1963 			phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
1964 		}
1965 	}
1966 	if (!done)
1967 		goto lpfc_sli_hba_setup_error;
1968 
1969 	rc = lpfc_sli_ring_map(phba, pmb);
1970 
1971 	if (rc)
1972 		goto lpfc_sli_hba_setup_error;
1973 
1974 	phba->sli.sli_flag |= LPFC_PROCESS_LA;
1975 
1976 	rc = lpfc_config_port_post(phba);
1977 	if (rc)
1978 		goto lpfc_sli_hba_setup_error;
1979 
1980 	goto lpfc_sli_hba_setup_exit;
1981 lpfc_sli_hba_setup_error:
1982 	phba->hba_state = LPFC_HBA_ERROR;
1983 lpfc_sli_hba_setup_exit:
1984 	mempool_free(pmb, phba->mbox_mem_pool);
1985 	return rc;
1986 }
1987 
1988 static void
1989 lpfc_mbox_abort(struct lpfc_hba * phba)
1990 {
1991 	LPFC_MBOXQ_t *pmbox;
1992 	MAILBOX_t *mb;
1993 
1994 	if (phba->sli.mbox_active) {
1995 		del_timer_sync(&phba->sli.mbox_tmo);
1996 		phba->work_hba_events &= ~WORKER_MBOX_TMO;
1997 		pmbox = phba->sli.mbox_active;
1998 		mb = &pmbox->mb;
1999 		phba->sli.mbox_active = NULL;
2000 		if (pmbox->mbox_cmpl) {
2001 			mb->mbxStatus = MBX_NOT_FINISHED;
2002 			(pmbox->mbox_cmpl) (phba, pmbox);
2003 		}
2004 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2005 	}
2006 
2007 	/* Abort all the non active mailbox commands. */
2008 	spin_lock_irq(phba->host->host_lock);
2009 	pmbox = lpfc_mbox_get(phba);
2010 	while (pmbox) {
2011 		mb = &pmbox->mb;
2012 		if (pmbox->mbox_cmpl) {
2013 			mb->mbxStatus = MBX_NOT_FINISHED;
2014 			spin_unlock_irq(phba->host->host_lock);
2015 			(pmbox->mbox_cmpl) (phba, pmbox);
2016 			spin_lock_irq(phba->host->host_lock);
2017 		}
2018 		pmbox = lpfc_mbox_get(phba);
2019 	}
2020 	spin_unlock_irq(phba->host->host_lock);
2021 	return;
2022 }
2023 
2024 /*! lpfc_mbox_timeout
2025  *
2026  * \pre
2027  * \post
2028  * \param ptr Unsigned long cast of the pointer to the driver's
2029  *            struct lpfc_hba, as passed by the timer code.
2030  * \return
2031  *   void
2032  *
2033  * \b Description:
2034  *
2035  * This routine handles mailbox timeout events at timer interrupt context.
2036  */
2037 void
2038 lpfc_mbox_timeout(unsigned long ptr)
2039 {
2040 	struct lpfc_hba *phba;
2041 	unsigned long iflag;
2042 
2043 	phba = (struct lpfc_hba *)ptr;
2044 	spin_lock_irqsave(phba->host->host_lock, iflag);
2045 	if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
2046 		phba->work_hba_events |= WORKER_MBOX_TMO;
2047 		if (phba->work_wait)
2048 			wake_up(phba->work_wait);
2049 	}
2050 	spin_unlock_irqrestore(phba->host->host_lock, iflag);
2051 }
2052 
2053 void
2054 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
2055 {
2056 	LPFC_MBOXQ_t *pmbox;
2057 	MAILBOX_t *mb;
2058 
2059 	spin_lock_irq(phba->host->host_lock);
2060 	if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
2061 		spin_unlock_irq(phba->host->host_lock);
2062 		return;
2063 	}
2064 
2065 	phba->work_hba_events &= ~WORKER_MBOX_TMO;
2066 
2067 	pmbox = phba->sli.mbox_active;
2068 	mb = &pmbox->mb;
2069 
2070 	/* Mbox cmd <mbxCommand> timeout */
2071 	lpfc_printf_log(phba,
2072 		KERN_ERR,
2073 		LOG_MBOX | LOG_SLI,
2074 		"%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
2075 		phba->brd_no,
2076 		mb->mbxCommand,
2077 		phba->hba_state,
2078 		phba->sli.sli_flag,
2079 		phba->sli.mbox_active);
2080 
2081 	phba->sli.mbox_active = NULL;
2082 	if (pmbox->mbox_cmpl) {
2083 		mb->mbxStatus = MBX_NOT_FINISHED;
2084 		spin_unlock_irq(phba->host->host_lock);
2085 		(pmbox->mbox_cmpl) (phba, pmbox);
2086 		spin_lock_irq(phba->host->host_lock);
2087 	}
2088 	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2089 
2090 	spin_unlock_irq(phba->host->host_lock);
2091 	lpfc_mbox_abort(phba);
2092 	return;
2093 }
2094 
2095 int
2096 lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
2097 {
2098 	MAILBOX_t *mb;
2099 	struct lpfc_sli *psli;
2100 	uint32_t status, evtctr;
2101 	uint32_t ha_copy;
2102 	int i;
2103 	unsigned long drvr_flag = 0;
2104 	volatile uint32_t word0, ldata;
2105 	void __iomem *to_slim;
2106 
2107 	/* If the PCI channel is in offline state, do not post mbox. */
2108 	if (unlikely(pci_channel_offline(phba->pcidev)))
2109 		return MBX_NOT_FINISHED;
2110 
2111 	psli = &phba->sli;
2112 
2113 	spin_lock_irqsave(phba->host->host_lock, drvr_flag);
2114 
2116 	mb = &pmbox->mb;
2117 	status = MBX_SUCCESS;
2118 
2119 	if (phba->hba_state == LPFC_HBA_ERROR) {
2120 		spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
2121 
2122 		/* Mbox command <mbxCommand> cannot issue */
2123 		LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
2124 		return (MBX_NOT_FINISHED);
2125 	}
2126 
2127 	if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
2128 	    !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
2129 		spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
2130 		LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
2131 		return (MBX_NOT_FINISHED);
2132 	}
2133 
2134 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
2135 		/* Polling for an mbox command when another one is already active
2136 		 * is not allowed in SLI. Also, the driver must have established
2137 		 * SLI2 mode to queue and process multiple mbox commands.
2138 		 */
2139 
2140 		if (flag & MBX_POLL) {
2141 			spin_unlock_irqrestore(phba->host->host_lock,
2142 					       drvr_flag);
2143 
2144 			/* Mbox command <mbxCommand> cannot issue */
2145 			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
2146 			return (MBX_NOT_FINISHED);
2147 		}
2148 
2149 		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
2150 			spin_unlock_irqrestore(phba->host->host_lock,
2151 					       drvr_flag);
2152 			/* Mbox command <mbxCommand> cannot issue */
2153 			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
2154 			return (MBX_NOT_FINISHED);
2155 		}
2156 
2157 		/* Handle STOP IOCB processing flag. This is only meaningful
2158 		 * if we are not polling for mbox completion.
2159 		 */
2160 		if (flag & MBX_STOP_IOCB) {
2161 			flag &= ~MBX_STOP_IOCB;
2162 			/* Now flag each ring */
2163 			for (i = 0; i < psli->num_rings; i++) {
2164 				/* If the ring is active, flag it */
2165 				if (psli->ring[i].cmdringaddr) {
2166 					psli->ring[i].flag |=
2167 					    LPFC_STOP_IOCB_MBX;
2168 				}
2169 			}
2170 		}
2171 
2172 		/* Another mailbox command is still being processed, queue this
2173 		 * command to be processed later.
2174 		 */
2175 		lpfc_mbox_put(phba, pmbox);
2176 
2177 		/* Mbox cmd issue - BUSY */
2178 		lpfc_printf_log(phba,
2179 			KERN_INFO,
2180 			LOG_MBOX | LOG_SLI,
2181 			"%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
2182 			phba->brd_no,
2183 			mb->mbxCommand,
2184 			phba->hba_state,
2185 			psli->sli_flag,
2186 			flag);
2187 
2188 		psli->slistat.mbox_busy++;
2189 		spin_unlock_irqrestore(phba->host->host_lock,
2190 				       drvr_flag);
2191 
2192 		return (MBX_BUSY);
2193 	}
2194 
2195 	/* Handle STOP IOCB processing flag. This is only meaningful
2196 	 * if we are not polling for mbox completion.
2197 	 */
2198 	if (flag & MBX_STOP_IOCB) {
2199 		flag &= ~MBX_STOP_IOCB;
2200 		if (flag == MBX_NOWAIT) {
2201 			/* Now flag each ring */
2202 			for (i = 0; i < psli->num_rings; i++) {
2203 				/* If the ring is active, flag it */
2204 				if (psli->ring[i].cmdringaddr) {
2205 					psli->ring[i].flag |=
2206 					    LPFC_STOP_IOCB_MBX;
2207 				}
2208 			}
2209 		}
2210 	}
2211 
2212 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
2213 
2214 	/* If we are not polling, we MUST be in SLI2 mode */
2215 	if (flag != MBX_POLL) {
2216 		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
2217 		    (mb->mbxCommand != MBX_KILL_BOARD)) {
2218 			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2219 			spin_unlock_irqrestore(phba->host->host_lock,
2220 					       drvr_flag);
2221 			/* Mbox command <mbxCommand> cannot issue */
2222 			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
2223 			return (MBX_NOT_FINISHED);
2224 		}
2225 		/* timeout active mbox command */
2226 		mod_timer(&psli->mbox_tmo, (jiffies +
2227 			       (HZ * lpfc_mbox_tmo_val(phba, mb->mbxCommand))));
2228 	}
2229 
2230 	/* Mailbox cmd <cmd> issue */
2231 	lpfc_printf_log(phba,
2232 		KERN_INFO,
2233 		LOG_MBOX | LOG_SLI,
2234 		"%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
2235 		phba->brd_no,
2236 		mb->mbxCommand,
2237 		phba->hba_state,
2238 		psli->sli_flag,
2239 		flag);
2240 
2241 	psli->slistat.mbox_cmd++;
2242 	evtctr = psli->slistat.mbox_event;
2243 
2244 	/* next set own bit for the adapter and copy over command word */
2245 	mb->mbxOwner = OWN_CHIP;
2246 
2247 	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2248 		/* First copy command data to host SLIM area */
2249 		lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
2250 	} else {
2251 		if (mb->mbxCommand == MBX_CONFIG_PORT) {
2252 			/* copy command data into host mbox for cmpl */
2253 			lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
2254 					MAILBOX_CMD_SIZE);
2255 		}
2256 
2257 		/* First copy mbox command data to HBA SLIM, skip past first
2258 		   word */
2259 		to_slim = phba->MBslimaddr + sizeof (uint32_t);
2260 		lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
2261 			    MAILBOX_CMD_SIZE - sizeof (uint32_t));
2262 
2263 		/* Next copy over first word, with mbxOwner set */
2264 		ldata = *((volatile uint32_t *)mb);
2265 		to_slim = phba->MBslimaddr;
2266 		writel(ldata, to_slim);
2267 		readl(to_slim); /* flush */
2268 
2269 		if (mb->mbxCommand == MBX_CONFIG_PORT) {
2270 			/* switch over to host mailbox */
2271 			psli->sli_flag |= LPFC_SLI2_ACTIVE;
2272 		}
2273 	}
2274 
2275 	wmb();
2276 	/* Interrupt the board to process the command right away */
2277 	writel(CA_MBATT, phba->CAregaddr);
2278 	readl(phba->CAregaddr); /* flush */
2279 
2280 	switch (flag) {
2281 	case MBX_NOWAIT:
2282 		/* Don't wait for it to finish, just return */
2283 		psli->mbox_active = pmbox;
2284 		break;
2285 
2286 	case MBX_POLL:
2287 		psli->mbox_active = NULL;
2288 		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2289 			/* First read mbox status word */
2290 			word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
2291 			word0 = le32_to_cpu(word0);
2292 		} else {
2293 			/* First read mbox status word */
2294 			word0 = readl(phba->MBslimaddr);
2295 		}
2296 
2297 		/* Read the HBA Host Attention Register */
2298 		ha_copy = readl(phba->HAregaddr);
2299 
2300 		i = lpfc_mbox_tmo_val(phba, mb->mbxCommand);
2301 		i *= 1000; /* Convert to ms */
2302 
2303 		/* Wait for command to complete */
2304 		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
2305 		       (!(ha_copy & HA_MBATT) &&
2306 			(phba->hba_state > LPFC_WARM_START))) {
2307 			if (i-- <= 0) {
2308 				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2309 				spin_unlock_irqrestore(phba->host->host_lock,
2310 						       drvr_flag);
2311 				return (MBX_NOT_FINISHED);
2312 			}
2313 
2314 			/* Check if we took an mbox interrupt while we were
2315 			   polling */
2316 			if (((word0 & OWN_CHIP) != OWN_CHIP)
2317 			    && (evtctr != psli->slistat.mbox_event))
2318 				break;
2319 
2320 			spin_unlock_irqrestore(phba->host->host_lock,
2321 					       drvr_flag);
2322 
2323 			/* Can be in interrupt context, do not sleep */
2324 			/* (or might be called with interrupts disabled) */
2325 			mdelay(1);
2326 
2327 			spin_lock_irqsave(phba->host->host_lock, drvr_flag);
2328 
2329 			if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2330 				/* First copy command data */
2331 				word0 = *((volatile uint32_t *)
2332 						&phba->slim2p->mbx);
2333 				word0 = le32_to_cpu(word0);
2334 				if (mb->mbxCommand == MBX_CONFIG_PORT) {
2335 					MAILBOX_t *slimmb;
2336 					volatile uint32_t slimword0;
2337 					/* Check real SLIM for any errors */
2338 					slimword0 = readl(phba->MBslimaddr);
2339 					slimmb = (MAILBOX_t *) &slimword0;
2340 					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
2341 					    && slimmb->mbxStatus) {
2342 						psli->sli_flag &=
2343 						    ~LPFC_SLI2_ACTIVE;
2344 						word0 = slimword0;
2345 					}
2346 				}
2347 			} else {
2348 				/* First copy command data */
2349 				word0 = readl(phba->MBslimaddr);
2350 			}
2351 			/* Read the HBA Host Attention Register */
2352 			ha_copy = readl(phba->HAregaddr);
2353 		}
2354 
2355 		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
2356 			/* copy results back to user */
2357 			lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
2358 					MAILBOX_CMD_SIZE);
2359 		} else {
2360 			/* First copy command data */
2361 			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
2362 							MAILBOX_CMD_SIZE);
2363 			if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
2364 				pmbox->context2) {
2365 				lpfc_memcpy_from_slim((void *)pmbox->context2,
2366 				      phba->MBslimaddr + DMP_RSP_OFFSET,
2367 						      mb->un.varDmp.word_cnt);
2368 			}
2369 		}
2370 
2371 		writel(HA_MBATT, phba->HAregaddr);
2372 		readl(phba->HAregaddr); /* flush */
2373 
2374 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2375 		status = mb->mbxStatus;
2376 	}
2377 
2378 	spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
2379 	return (status);
2380 }
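
/*
 * A minimal polled-mode usage sketch (error handling elided); this
 * mirrors the CONFIG_PORT flow in lpfc_sli_hba_setup() above:
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_config_port(phba, pmb);
 *	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS)
 *		...inspect pmb->mb.mbxStatus...
 *	mempool_free(pmb, phba->mbox_mem_pool);
 *
 * MBX_NOWAIT callers set pmb->mbox_cmpl instead and must handle an
 * MBX_BUSY return, which means the command was queued behind the
 * currently active mailbox.
 */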
2381 
2382 static int
2383 lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2384 		    struct lpfc_iocbq * piocb)
2385 {
2386 	/* Insert the caller's iocb in the txq tail for later processing. */
2387 	list_add_tail(&piocb->list, &pring->txq);
2388 	pring->txq_cnt++;
2389 	return (0);
2390 }
2391 
2392 static struct lpfc_iocbq *
2393 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2394 		   struct lpfc_iocbq ** piocb)
2395 {
2396 	struct lpfc_iocbq * nextiocb;
2397 
2398 	nextiocb = lpfc_sli_ringtx_get(phba, pring);
2399 	if (!nextiocb) {
2400 		nextiocb = *piocb;
2401 		*piocb = NULL;
2402 	}
2403 
2404 	return nextiocb;
2405 }
2406 
2407 int
2408 lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2409 		    struct lpfc_iocbq *piocb, uint32_t flag)
2410 {
2411 	struct lpfc_iocbq *nextiocb;
2412 	IOCB_t *iocb;
2413 
2414 	/* If the PCI channel is in offline state, do not post iocbs. */
2415 	if (unlikely(pci_channel_offline(phba->pcidev)))
2416 		return IOCB_ERROR;
2417 
2418 	/*
2419 	 * We should never get an IOCB if we are in a < LINK_DOWN state
2420 	 */
2421 	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
2422 		return IOCB_ERROR;
2423 
2424 	/*
2425 	 * Check to see if we are blocking IOCB processing because of an
2426 	 * outstanding mbox command.
2427 	 */
2428 	if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
2429 		goto iocb_busy;
2430 
2431 	if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
2432 		/*
2433 		 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
2434 		 * can be issued if the link is not up.
2435 		 */
2436 		switch (piocb->iocb.ulpCommand) {
2437 		case CMD_QUE_RING_BUF_CN:
2438 		case CMD_QUE_RING_BUF64_CN:
2439 			/*
2440 			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
2441 			 * completion, iocb_cmpl MUST be 0.
2442 			 */
2443 			if (piocb->iocb_cmpl)
2444 				piocb->iocb_cmpl = NULL;
2445 			/*FALLTHROUGH*/
2446 		case CMD_CREATE_XRI_CR:
2447 			break;
2448 		default:
2449 			goto iocb_busy;
2450 		}
2451 
2452 	/*
2453 	 * For FCP commands, we must be in a state where we can process link
2454 	 * attention events.
2455 	 */
2456 	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
2457 		   !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
2458 		goto iocb_busy;
2459 
2460 	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2461 	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
2462 		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2463 
2464 	if (iocb)
2465 		lpfc_sli_update_ring(phba, pring);
2466 	else
2467 		lpfc_sli_update_full_ring(phba, pring);
2468 
2469 	if (!piocb)
2470 		return IOCB_SUCCESS;
2471 
2472 	goto out_busy;
2473 
2474  iocb_busy:
2475 	pring->stats.iocb_cmd_delay++;
2476 
2477  out_busy:
2478 
2479 	if (!(flag & SLI_IOCB_RET_IOCB)) {
2480 		lpfc_sli_ringtx_put(phba, pring, piocb);
2481 		return IOCB_SUCCESS;
2482 	}
2483 
2484 	return IOCB_BUSY;
2485 }
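
/*
 * Usage sketch: callers take the iocbq from lpfc_sli_get_iocbq(), fill
 * in the IOCB and (for traffic expecting a response) iocb_cmpl, then:
 *
 *	if (lpfc_sli_issue_iocb(phba, pring, piocb, 0) == IOCB_ERROR)
 *		lpfc_sli_release_iocbq(phba, piocb);
 *
 * With flag 0 a busy ring queues the request on txq and IOCB_SUCCESS
 * is returned; passing SLI_IOCB_RET_IOCB returns IOCB_BUSY and leaves
 * the iocbq with the caller instead.
 */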
2486 
2487 static int
2488 lpfc_extra_ring_setup( struct lpfc_hba *phba)
2489 {
2490 	struct lpfc_sli *psli;
2491 	struct lpfc_sli_ring *pring;
2492 
2493 	psli = &phba->sli;
2494 
2495 	/* Adjust cmd/rsp ring iocb entries more evenly */
2496 
2497 	/* Take some away from the FCP ring */
2498 	pring = &psli->ring[psli->fcp_ring];
2499 	pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2500 	pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2501 	pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2502 	pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2503 
2504 	/* and give them to the extra ring */
2505 	pring = &psli->ring[psli->extra_ring];
2506 
2507 	pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2508 	pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2509 	pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2510 	pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2511 
2512 	/* Setup default profile for this ring */
2513 	pring->iotag_max = 4096;
2514 	pring->num_mask = 1;
2515 	pring->prt[0].profile = 0;      /* Mask 0 */
2516 	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
2517 	pring->prt[0].type = phba->cfg_multi_ring_type;
2518 	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
2519 	return 0;
2520 }
2521 
2522 int
2523 lpfc_sli_setup(struct lpfc_hba *phba)
2524 {
2525 	int i, totiocb = 0;
2526 	struct lpfc_sli *psli = &phba->sli;
2527 	struct lpfc_sli_ring *pring;
2528 
2529 	psli->num_rings = MAX_CONFIGURED_RINGS;
2530 	psli->sli_flag = 0;
2531 	psli->fcp_ring = LPFC_FCP_RING;
2532 	psli->next_ring = LPFC_FCP_NEXT_RING;
2533 	psli->extra_ring = LPFC_EXTRA_RING;
2534 
2535 	psli->iocbq_lookup = NULL;
2536 	psli->iocbq_lookup_len = 0;
2537 	psli->last_iotag = 0;
2538 
2539 	for (i = 0; i < psli->num_rings; i++) {
2540 		pring = &psli->ring[i];
2541 		switch (i) {
2542 		case LPFC_FCP_RING:	/* ring 0 - FCP */
2543 			/* numCiocb and numRiocb are used in config_port */
2544 			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
2545 			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
2546 			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
2547 			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
2548 			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
2549 			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
2550 			pring->iotag_ctr = 0;
2551 			pring->iotag_max =
2552 			    (phba->cfg_hba_queue_depth * 2);
2553 			pring->fast_iotag = pring->iotag_max;
2554 			pring->num_mask = 0;
2555 			break;
2556 		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
2557 			/* numCiocb and numRiocb are used in config_port */
2558 			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
2559 			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
2560 			pring->num_mask = 0;
2561 			break;
2562 		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
2563 			/* numCiocb and numRiocb are used in config_port */
2564 			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
2565 			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
2566 			pring->fast_iotag = 0;
2567 			pring->iotag_ctr = 0;
2568 			pring->iotag_max = 4096;
2569 			pring->num_mask = 4;
2570 			pring->prt[0].profile = 0;	/* Mask 0 */
2571 			pring->prt[0].rctl = FC_ELS_REQ;
2572 			pring->prt[0].type = FC_ELS_DATA;
2573 			pring->prt[0].lpfc_sli_rcv_unsol_event =
2574 			    lpfc_els_unsol_event;
2575 			pring->prt[1].profile = 0;	/* Mask 1 */
2576 			pring->prt[1].rctl = FC_ELS_RSP;
2577 			pring->prt[1].type = FC_ELS_DATA;
2578 			pring->prt[1].lpfc_sli_rcv_unsol_event =
2579 			    lpfc_els_unsol_event;
2580 			pring->prt[2].profile = 0;	/* Mask 2 */
2581 			/* NameServer Inquiry */
2582 			pring->prt[2].rctl = FC_UNSOL_CTL;
2583 			/* NameServer */
2584 			pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
2585 			pring->prt[2].lpfc_sli_rcv_unsol_event =
2586 			    lpfc_ct_unsol_event;
2587 			pring->prt[3].profile = 0;	/* Mask 3 */
2588 			/* NameServer response */
2589 			pring->prt[3].rctl = FC_SOL_CTL;
2590 			/* NameServer */
2591 			pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
2592 			pring->prt[3].lpfc_sli_rcv_unsol_event =
2593 			    lpfc_ct_unsol_event;
2594 			break;
2595 		}
2596 		totiocb += (pring->numCiocb + pring->numRiocb);
2597 	}
2598 	if (totiocb > MAX_SLI2_IOCB) {
2599 		/* Too many cmd / rsp ring entries in SLI2 SLIM */
2600 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2601 				"%d:0462 Too many cmd / rsp ring entries in "
2602 				"SLI2 SLIM Data: x%x x%x\n",
2603 				phba->brd_no, totiocb, MAX_SLI2_IOCB);
2604 	}
2605 	if (phba->cfg_multi_ring_support == 2)
2606 		lpfc_extra_ring_setup(phba);
2607 
2608 	return 0;
2609 }
2610 
2611 int
2612 lpfc_sli_queue_setup(struct lpfc_hba * phba)
2613 {
2614 	struct lpfc_sli *psli;
2615 	struct lpfc_sli_ring *pring;
2616 	int i;
2617 
2618 	psli = &phba->sli;
2619 	spin_lock_irq(phba->host->host_lock);
2620 	INIT_LIST_HEAD(&psli->mboxq);
2621 	/* Initialize list headers for txq and txcmplq as doubly linked lists */
2622 	for (i = 0; i < psli->num_rings; i++) {
2623 		pring = &psli->ring[i];
2624 		pring->ringno = i;
2625 		pring->next_cmdidx  = 0;
2626 		pring->local_getidx = 0;
2627 		pring->cmdidx = 0;
2628 		INIT_LIST_HEAD(&pring->txq);
2629 		INIT_LIST_HEAD(&pring->txcmplq);
2630 		INIT_LIST_HEAD(&pring->iocb_continueq);
2631 		INIT_LIST_HEAD(&pring->postbufq);
2632 	}
2633 	spin_unlock_irq(phba->host->host_lock);
2634 	return (1);
2635 }
2636 
2637 int
2638 lpfc_sli_hba_down(struct lpfc_hba * phba)
2639 {
2640 	struct lpfc_sli *psli;
2641 	struct lpfc_sli_ring *pring;
2642 	LPFC_MBOXQ_t *pmb;
2643 	struct lpfc_iocbq *iocb, *next_iocb;
2644 	IOCB_t *icmd = NULL;
2645 	int i;
2646 	unsigned long flags = 0;
2647 
2648 	psli = &phba->sli;
2649 	lpfc_hba_down_prep(phba);
2650 
2651 	spin_lock_irqsave(phba->host->host_lock, flags);
2652 
2653 	for (i = 0; i < psli->num_rings; i++) {
2654 		pring = &psli->ring[i];
2655 		pring->flag |= LPFC_DEFERRED_RING_EVENT;
2656 
2657 		/*
2658 		 * Error everything on the txq since these iocbs have not been
2659 		 * given to the FW yet.
2660 		 */
2661 		pring->txq_cnt = 0;
2662 
2663 		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
2664 			list_del_init(&iocb->list);
2665 			if (iocb->iocb_cmpl) {
2666 				icmd = &iocb->iocb;
2667 				icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
2668 				icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
2669 				spin_unlock_irqrestore(phba->host->host_lock,
2670 						       flags);
2671 				(iocb->iocb_cmpl) (phba, iocb, iocb);
2672 				spin_lock_irqsave(phba->host->host_lock, flags);
2673 			} else
2674 				lpfc_sli_release_iocbq(phba, iocb);
2675 		}
2676 
2677 		INIT_LIST_HEAD(&(pring->txq));
2678 
2679 	}
2680 
2681 	spin_unlock_irqrestore(phba->host->host_lock, flags);
2682 
2683 	/* Return any active mbox cmds */
2684 	del_timer_sync(&psli->mbox_tmo);
2685 	spin_lock_irqsave(phba->host->host_lock, flags);
2686 	phba->work_hba_events &= ~WORKER_MBOX_TMO;
2687 	if (psli->mbox_active) {
2688 		pmb = psli->mbox_active;
2689 		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2690 		if (pmb->mbox_cmpl) {
2691 			spin_unlock_irqrestore(phba->host->host_lock, flags);
2692 			pmb->mbox_cmpl(phba,pmb);
2693 			spin_lock_irqsave(phba->host->host_lock, flags);
2694 		}
2695 	}
2696 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
2697 	psli->mbox_active = NULL;
2698 
2699 	/* Return any pending mbox cmds */
2700 	while ((pmb = lpfc_mbox_get(phba)) != NULL) {
2701 		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
2702 		if (pmb->mbox_cmpl) {
2703 			spin_unlock_irqrestore(phba->host->host_lock, flags);
2704 			pmb->mbox_cmpl(phba,pmb);
2705 			spin_lock_irqsave(phba->host->host_lock, flags);
2706 		}
2707 	}
2708 
2709 	INIT_LIST_HEAD(&psli->mboxq);
2710 
2711 	spin_unlock_irqrestore(phba->host->host_lock, flags);
2712 
2713 	return 1;
2714 }
2715 
2716 void
2717 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
2718 {
2719 	uint32_t *src = srcp;
2720 	uint32_t *dest = destp;
2721 	uint32_t ldata;
2722 	int i;
2723 
2724 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
2725 		ldata = *src;
2726 		ldata = le32_to_cpu(ldata);
2727 		*dest = ldata;
2728 		src++;
2729 		dest++;
2730 	}
2731 }
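
/*
 * Note: the copy above runs one 32-bit word at a time through
 * le32_to_cpu(), so big-endian hosts byte-swap each word while
 * little-endian hosts copy straight through.  cnt is in bytes and is
 * effectively rounded up to a whole word: a cnt that is not a multiple
 * of sizeof(uint32_t) copies past the requested length.
 */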
2732 
2733 int
2734 lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
2735 			 struct lpfc_dmabuf * mp)
2736 {
2737 	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
2738 	   later */
2739 	list_add_tail(&mp->list, &pring->postbufq);
2740 
2741 	pring->postbufq_cnt++;
2742 	return 0;
2743 }
2744 
2746 struct lpfc_dmabuf *
2747 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2748 			 dma_addr_t phys)
2749 {
2750 	struct lpfc_dmabuf *mp, *next_mp;
2751 	struct list_head *slp = &pring->postbufq;
2752 
2753 	/* Search postbufq, from the beginning, looking for a match on phys */
2754 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
2755 		if (mp->phys == phys) {
2756 			list_del_init(&mp->list);
2757 			pring->postbufq_cnt--;
2758 			return mp;
2759 		}
2760 	}
2761 
2762 	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
2763 			"%d:0410 Cannot find virtual addr for mapped buf on "
2764 			"ring %d Data x%llx x%p x%p x%x\n",
2765 			phba->brd_no, pring->ringno, (unsigned long long)phys,
2766 			slp->next, slp->prev, pring->postbufq_cnt);
2767 	return NULL;
2768 }
2769 
2770 static void
2771 lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2772 			   struct lpfc_iocbq * rspiocb)
2773 {
2774 	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
2775 	/* Free the resources associated with the ELS_REQUEST64 IOCB the driver
2776 	 * just aborted.
2777 	 * In this case, context2 = cmd, context2->next = rsp, context3 = bpl.
2778 	 */
2779 	if (cmdiocb->context2) {
2780 		buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;
2781 
2782 		/* Free the response IOCB before completing the abort
2783 		   command.  */
2784 		buf_ptr = NULL;
2785 		list_remove_head((&buf_ptr1->list), buf_ptr,
2786 				 struct lpfc_dmabuf, list);
2787 		if (buf_ptr) {
2788 			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2789 			kfree(buf_ptr);
2790 		}
2791 		lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
2792 		kfree(buf_ptr1);
2793 	}
2794 
2795 	if (cmdiocb->context3) {
2796 		buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
2797 		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
2798 		kfree(buf_ptr);
2799 	}
2800 
2801 	lpfc_sli_release_iocbq(phba, cmdiocb);
2802 	return;
2803 }
2804 
2805 int
2806 lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
2807 			     struct lpfc_sli_ring * pring,
2808 			     struct lpfc_iocbq * cmdiocb)
2809 {
2810 	struct lpfc_iocbq *abtsiocbp;
2811 	IOCB_t *icmd = NULL;
2812 	IOCB_t *iabt = NULL;
2813 
2814 	/* issue ABTS for this IOCB based on iotag */
2815 	abtsiocbp = lpfc_sli_get_iocbq(phba);
2816 	if (abtsiocbp == NULL)
2817 		return 0;
2818 
2819 	iabt = &abtsiocbp->iocb;
2820 	icmd = &cmdiocb->iocb;
2821 	switch (icmd->ulpCommand) {
2822 	case CMD_ELS_REQUEST64_CR:
2823 		/* Even though we abort the ELS command, the firmware may access
2824 		 * the BPL or other resources before it processes our
2825 		 * ABORT_MXRI64. Thus we must delay reusing the cmdiocb
2826 		 * resources till the actual abort request completes.
2827 		 */
2828 		abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
2829 		abtsiocbp->context2 = cmdiocb->context2;
2830 		abtsiocbp->context3 = cmdiocb->context3;
2831 		cmdiocb->context2 = NULL;
2832 		cmdiocb->context3 = NULL;
2833 		abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
2834 		break;
2835 	default:
2836 		lpfc_sli_release_iocbq(phba, abtsiocbp);
2837 		return 0;
2838 	}
2839 
2840 	iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
2841 	iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;
2842 
2843 	iabt->ulpLe = 1;
2844 	iabt->ulpClass = CLASS3;
2845 	iabt->ulpCommand = CMD_ABORT_MXRI64_CN;
2846 
2847 	if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
2848 		lpfc_sli_release_iocbq(phba, abtsiocbp);
2849 		return 0;
2850 	}
2851 
2852 	return 1;
2853 }
2854 
2855 static int
2856 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, uint16_t tgt_id,
2857 			   uint64_t lun_id, uint32_t ctx,
2858 			   lpfc_ctx_cmd ctx_cmd)
2859 {
2860 	struct lpfc_scsi_buf *lpfc_cmd;
2861 	struct scsi_cmnd *cmnd;
2862 	int rc = 1;
2863 
2864 	if (!(iocbq->iocb_flag & LPFC_IO_FCP))
2865 		return rc;
2866 
2867 	lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
2868 	cmnd = lpfc_cmd->pCmd;
2869 
2870 	if (cmnd == NULL)
2871 		return rc;
2872 
2873 	switch (ctx_cmd) {
2874 	case LPFC_CTX_LUN:
2875 		if ((cmnd->device->id == tgt_id) &&
2876 		    (cmnd->device->lun == lun_id))
2877 			rc = 0;
2878 		break;
2879 	case LPFC_CTX_TGT:
2880 		if (cmnd->device->id == tgt_id)
2881 			rc = 0;
2882 		break;
2883 	case LPFC_CTX_CTX:
2884 		if (iocbq->iocb.ulpContext == ctx)
2885 			rc = 0;
2886 		break;
2887 	case LPFC_CTX_HOST:
2888 		rc = 0;
2889 		break;
2890 	default:
2891 		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
2892 			__FUNCTION__, ctx_cmd);
2893 		break;
2894 	}
2895 
2896 	return rc;
2897 }
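
/*
 * Match scope per ctx_cmd (summary of the switch above):
 *
 *	LPFC_CTX_LUN  - SCSI target id and LUN must both match
 *	LPFC_CTX_TGT  - SCSI target id must match
 *	LPFC_CTX_CTX  - the iocb ulpContext must equal ctx
 *	LPFC_CTX_HOST - every outstanding FCP iocb matches
 *
 * The routine returns 0 on a match and 1 otherwise, so callers treat
 * nonzero as "skip this iocb".
 */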
2898 
2899 int
2900 lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2901 		uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
2902 {
2903 	struct lpfc_iocbq *iocbq;
2904 	int sum, i;
2905 
2906 	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
2907 		iocbq = phba->sli.iocbq_lookup[i];
2908 
2909 		if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
2910 						0, ctx_cmd) == 0)
2911 			sum++;
2912 	}
2913 
2914 	return sum;
2915 }
2916 
2917 void
2918 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
2919 			   struct lpfc_iocbq * rspiocb)
2920 {
2921 	spin_lock_irq(phba->host->host_lock);
2922 	lpfc_sli_release_iocbq(phba, cmdiocb);
2923 	spin_unlock_irq(phba->host->host_lock);
2924 	return;
2925 }
2926 
2927 int
2928 lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2929 		    uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
2930 		    lpfc_ctx_cmd abort_cmd)
2931 {
2932 	struct lpfc_iocbq *iocbq;
2933 	struct lpfc_iocbq *abtsiocb;
2934 	IOCB_t *cmd = NULL;
2935 	int errcnt = 0, ret_val = 0;
2936 	int i;
2937 
2938 	for (i = 1; i <= phba->sli.last_iotag; i++) {
2939 		iocbq = phba->sli.iocbq_lookup[i];
2940 
2941 		if (lpfc_sli_validate_fcp_iocb (iocbq, tgt_id, lun_id,
2942 						0, abort_cmd) != 0)
2943 			continue;
2944 
2945 		/* issue ABTS for this IOCB based on iotag */
2946 		abtsiocb = lpfc_sli_get_iocbq(phba);
2947 		if (abtsiocb == NULL) {
2948 			errcnt++;
2949 			continue;
2950 		}
2951 
2952 		cmd = &iocbq->iocb;
2953 		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
2954 		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
2955 		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
2956 		abtsiocb->iocb.ulpLe = 1;
2957 		abtsiocb->iocb.ulpClass = cmd->ulpClass;
2958 
2959 		if (phba->hba_state >= LPFC_LINK_UP)
2960 			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
2961 		else
2962 			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
2963 
2964 		/* Setup callback routine and issue the command. */
2965 		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
2966 		ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
2967 		if (ret_val == IOCB_ERROR) {
2968 			lpfc_sli_release_iocbq(phba, abtsiocb);
2969 			errcnt++;
2970 			continue;
2971 		}
2972 	}
2973 
2974 	return errcnt;
2975 }
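
/*
 * Sketch of typical error-handler usage (tgt_id/lun_id come from the
 * failing scsi_cmnd in the caller):
 *
 *	errcnt = lpfc_sli_abort_iocb(phba,
 *			&phba->sli.ring[phba->sli.fcp_ring],
 *			tgt_id, lun_id, 0, LPFC_CTX_LUN);
 *
 * followed by polling lpfc_sli_sum_iocb() with the same scope until
 * the outstanding count drains to zero.
 */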
2976 
2977 static void
2978 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
2979 			struct lpfc_iocbq *cmdiocbq,
2980 			struct lpfc_iocbq *rspiocbq)
2981 {
2982 	wait_queue_head_t *pdone_q;
2983 	unsigned long iflags;
2984 
2985 	spin_lock_irqsave(phba->host->host_lock, iflags);
2986 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
2987 	if (cmdiocbq->context2 && rspiocbq)
2988 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
2989 		       &rspiocbq->iocb, sizeof(IOCB_t));
2990 
2991 	pdone_q = cmdiocbq->context_un.wait_queue;
2992 	spin_unlock_irqrestore(phba->host->host_lock, iflags);
2993 	if (pdone_q)
2994 		wake_up(pdone_q);
2995 	return;
2996 }
2997 
2998 /*
2999  * Issue the caller's iocb and wait for its completion, but no longer than the
3000  * caller's timeout.  Note that LPFC_IO_WAKE is cleared from iocb_flag
3001  * before the lpfc_sli_issue_iocb call since the wake routine sets that
3002  * bit and by definition this is a wait function.
3003  */
3004 int
3005 lpfc_sli_issue_iocb_wait(struct lpfc_hba * phba,
3006 			 struct lpfc_sli_ring * pring,
3007 			 struct lpfc_iocbq * piocb,
3008 			 struct lpfc_iocbq * prspiocbq,
3009 			 uint32_t timeout)
3010 {
3011 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3012 	long timeleft, timeout_req = 0;
3013 	int retval = IOCB_SUCCESS;
3014 	uint32_t creg_val;
3015 
3016 	/*
3017 	 * If the caller has provided a response iocbq buffer, context2 must
3018 	 * be NULL; anything else is an error.
3019 	 */
3020 	if (prspiocbq) {
3021 		if (piocb->context2)
3022 			return IOCB_ERROR;
3023 		piocb->context2 = prspiocbq;
3024 	}
3025 
3026 	piocb->iocb_cmpl = lpfc_sli_wake_iocb_wait;
3027 	piocb->context_un.wait_queue = &done_q;
3028 	piocb->iocb_flag &= ~LPFC_IO_WAKE;
3029 
3030 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3031 		creg_val = readl(phba->HCregaddr);
3032 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
3033 		writel(creg_val, phba->HCregaddr);
3034 		readl(phba->HCregaddr); /* flush */
3035 	}
3036 
3037 	retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
3038 	if (retval == IOCB_SUCCESS) {
3039 		timeout_req = timeout * HZ;
3040 		spin_unlock_irq(phba->host->host_lock);
3041 		timeleft = wait_event_timeout(done_q,
3042 				piocb->iocb_flag & LPFC_IO_WAKE,
3043 				timeout_req);
3044 		spin_lock_irq(phba->host->host_lock);
3045 
3046 		if (timeleft == 0) {
3047 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3048 					"%d:0338 IOCB wait timeout error - no "
3049 					"wake response Data x%x\n",
3050 					phba->brd_no, timeout);
3051 			retval = IOCB_TIMEDOUT;
3052 		} else if (!(piocb->iocb_flag & LPFC_IO_WAKE)) {
3053 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
3054 					"%d:0330 IOCB wake NOT set, "
3055 					"Data x%x x%lx\n", phba->brd_no,
3056 					timeout, timeleft);
3057 			retval = IOCB_TIMEDOUT;
3058 		} else {
3059 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3060 					"%d:0331 IOCB wake signaled\n",
3061 					phba->brd_no);
3062 		}
3063 	} else {
3064 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3065 				"%d:0332 IOCB wait issue failed, Data x%x\n",
3066 				phba->brd_no, retval);
3067 		retval = IOCB_ERROR;
3068 	}
3069 
3070 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
3071 		creg_val = readl(phba->HCregaddr);
3072 		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
3073 		writel(creg_val, phba->HCregaddr);
3074 		readl(phba->HCregaddr); /* flush */
3075 	}
3076 
3077 	if (prspiocbq)
3078 		piocb->context2 = NULL;
3079 
3080 	piocb->context_un.wait_queue = NULL;
3081 	piocb->iocb_cmpl = NULL;
3082 	return retval;
3083 }
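
/*
 * Callers enter this routine holding host_lock (it is dropped around
 * the sleep) and pass timeout in seconds.  On IOCB_TIMEDOUT the
 * command may still complete later, so the iocbq cannot simply be
 * freed; on success with prspiocbq supplied, the response IOCB words
 * have been copied into prspiocbq->iocb by the wake routine.
 */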
3084 
3085 int
3086 lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
3087 			 uint32_t timeout)
3088 {
3089 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
3090 	DECLARE_WAITQUEUE(wq_entry, current);
3091 	uint32_t timeleft = 0;
3092 	int retval;
3093 
3094 	/* The caller must leave context1 empty. */
3095 	if (pmboxq->context1 != 0) {
3096 		return (MBX_NOT_FINISHED);
3097 	}
3098 
3099 	/* set up the wake routine as the mailbox completion callback */
3100 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
3101 	/* setup context field to pass wait_queue pointer to wake function  */
3102 	pmboxq->context1 = &done_q;
3103 
3104 	/* go onto the wait queue before issuing, to avoid missing the wakeup */
3105 	set_current_state(TASK_INTERRUPTIBLE);
3106 	add_wait_queue(&done_q, &wq_entry);
3107 
3108 	/* now issue the command */
3109 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3110 
3111 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
3112 		timeleft = schedule_timeout(timeout * HZ);
3113 		pmboxq->context1 = NULL;
3114 		/* if schedule_timeout returns 0, we timed out and were not
3115 		   woken up */
3116 		if ((timeleft == 0) || signal_pending(current))
3117 			retval = MBX_TIMEOUT;
3118 		else
3119 			retval = MBX_SUCCESS;
3120 	}
3121 
3123 	set_current_state(TASK_RUNNING);
3124 	remove_wait_queue(&done_q, &wq_entry);
3125 	return retval;
3126 }
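
/*
 * Sleeping-wait usage sketch (process context only; timeout in
 * seconds):
 *
 *	pmboxq->context1 = NULL;	(required, see the check above)
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, timeout);
 *	if (rc == MBX_TIMEOUT)
 *		...the completion may still fire; do not free pmboxq here...
 *
 * context1 is cleared after the wait so a late completion does not
 * reference the on-stack wait queue.
 */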
3127 
3128 int
3129 lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
3130 {
3131 	int i = 0;
3132 
3133 	while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !phba->stopped) {
3134 		if (i++ > LPFC_MBOX_TMO * 1000)
3135 			return 1;
3136 
3137 		if (lpfc_sli_handle_mb_event(phba) == 0)
3138 			i = 0;
3139 
3140 		msleep(1);
3141 	}
3142 
3143 	return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
3144 }
3145 
3146 irqreturn_t
3147 lpfc_intr_handler(int irq, void *dev_id)
3148 {
3149 	struct lpfc_hba *phba;
3150 	uint32_t ha_copy;
3151 	uint32_t work_ha_copy;
3152 	unsigned long status;
3153 	int i;
3154 	uint32_t control;
3155 
3156 	/*
3157 	 * Get the driver's phba structure from the dev_id and
3158 	 * assume the HBA is not interrupting.
3159 	 */
3160 	phba = (struct lpfc_hba *) dev_id;
3161 
3162 	if (unlikely(!phba))
3163 		return IRQ_NONE;
3164 
3165 	/* If the pci channel is offline, ignore all the interrupts. */
3166 	if (unlikely(pci_channel_offline(phba->pcidev)))
3167 		return IRQ_NONE;
3168 
3169 	phba->sli.slistat.sli_intr++;
3170 
3171 	/*
3172 	 * Check whether the HBA is actually interrupting.  If not, don't
3173 	 * claim the interrupt.
3174 	 */
3175 
3176 	/* Ignore all interrupts during initialization. */
3177 	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
3178 		return IRQ_NONE;
3179 
3180 	/*
3181 	 * Read the host attention register to determine the interrupt
3182 	 * source.  Clear attention sources, except Error Attention (to
3183 	 * preserve status) and Link Attention.
3184 	 */
3185 	spin_lock(phba->host->host_lock);
3186 	ha_copy = readl(phba->HAregaddr);
3187 	writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
3188 	readl(phba->HAregaddr); /* flush */
3189 	spin_unlock(phba->host->host_lock);
3190 
3191 	if (unlikely(!ha_copy))
3192 		return IRQ_NONE;
3193 
3194 	work_ha_copy = ha_copy & phba->work_ha_mask;
3195 
3196 	if (unlikely(work_ha_copy)) {
3197 		if (work_ha_copy & HA_LATT) {
3198 			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
3199 				/*
3200 				 * Turn off Link Attention interrupts
3201 				 * until CLEAR_LA done
3202 				 */
3203 				spin_lock(phba->host->host_lock);
3204 				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
3205 				control = readl(phba->HCregaddr);
3206 				control &= ~HC_LAINT_ENA;
3207 				writel(control, phba->HCregaddr);
3208 				readl(phba->HCregaddr); /* flush */
3209 				spin_unlock(phba->host->host_lock);
3210 			}
3211 			else
3212 				work_ha_copy &= ~HA_LATT;
3213 		}
3214 
3215 		if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
3216 			for (i = 0; i < phba->sli.num_rings; i++) {
3217 				if (work_ha_copy & (HA_RXATT << (4*i))) {
3218 					/*
3219 					 * Turn off Slow Rings interrupts
3220 					 */
3221 					spin_lock(phba->host->host_lock);
3222 					control = readl(phba->HCregaddr);
3223 					control &= ~(HC_R0INT_ENA << i);
3224 					writel(control, phba->HCregaddr);
3225 					readl(phba->HCregaddr); /* flush */
3226 					spin_unlock(phba->host->host_lock);
3227 				}
3228 			}
3229 		}
3230 
3231 		if (work_ha_copy & HA_ERATT) {
3232 			phba->hba_state = LPFC_HBA_ERROR;
3233 			/*
3234 			 * There was a link/board error.  Read the
3235 			 * status register to retrieve the error event
3236 			 * and process it.
3237 			 */
3238 			phba->sli.slistat.err_attn_event++;
3239 			/* Save status info */
3240 			phba->work_hs = readl(phba->HSregaddr);
3241 			phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
3242 			phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
3243 
3244 			/* Clear Chip error bit */
3245 			writel(HA_ERATT, phba->HAregaddr);
3246 			readl(phba->HAregaddr); /* flush */
3247 			phba->stopped = 1;
3248 		}
3249 
3250 		spin_lock(phba->host->host_lock);
3251 		phba->work_ha |= work_ha_copy;
3252 		if (phba->work_wait)
3253 			wake_up(phba->work_wait);
3254 		spin_unlock(phba->host->host_lock);
3255 	}
3256 
3257 	ha_copy &= ~(phba->work_ha_mask);
3258 
3259 	/*
3260 	 * Process all events on FCP ring.  Take the optimized path for
3261 	 * FCP IO.  Any other IO is slow path and is handled by
3262 	 * the worker thread.
3263 	 */
3264 	status = (ha_copy & (HA_RXMASK  << (4*LPFC_FCP_RING)));
3265 	status >>= (4*LPFC_FCP_RING);
3266 	if (status & HA_RXATT)
3267 		lpfc_sli_handle_fast_ring_event(phba,
3268 						&phba->sli.ring[LPFC_FCP_RING],
3269 						status);
3270 
3271 	if (phba->cfg_multi_ring_support == 2) {
3272 		/*
3273 		 * Process all events on extra ring.  Take the optimized path
3274 		 * for extra ring IO.  Any other IO is slow path and is handled
3275 		 * by the worker thread.
3276 		 */
3277 		status = (ha_copy & (HA_RXMASK  << (4*LPFC_EXTRA_RING)));
3278 		status >>= (4*LPFC_EXTRA_RING);
3279 		if (status & HA_RXATT) {
3280 			lpfc_sli_handle_fast_ring_event(phba,
3281 					&phba->sli.ring[LPFC_EXTRA_RING],
3282 					status);
3283 		}
3284 	}
3285 	return IRQ_HANDLED;
3286 
3287 } /* lpfc_intr_handler */
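
/*
 * This handler is registered in the probe path (see lpfc_init.c),
 * e.g. request_irq(phba->pcidev->irq, lpfc_intr_handler, IRQF_SHARED,
 * LPFC_DRIVER_NAME, phba), which is why dev_id casts straight back to
 * the phba above.
 */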
3288