/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
				  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				   struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				       int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe,
				     enum lpfc_poll_mode poll_mode);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
				 struct lpfc_iocbq *pwqeq,
				 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iread.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 - pbde is variable */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_iwrite.wqe_com, 0);

	/* Word 12 - is zero */

	/* Word 13, 14, 15 - PBDE is variable */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
	bf_set(wqe_pbde, &wqe->fcp_icmd.wqe_com, 0);

	/* Word 12, 13, 14, 15 - is zero */
}

#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes required to be copied.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

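/*
 * Usage sketch (illustrative only, not driver code): copying a WQE image
 * into queue memory. The byte count must stay a multiple of
 * sizeof(uint64_t); the wqe_img and q variables here are hypothetical.
 *
 *	union lpfc_wqe128 wqe_img;
 *	void *qe = lpfc_sli4_qe(q, q->host_index);
 *
 *	lpfc_sli4_pcimem_bcopy(&wqe_img, qe, sizeof(union lpfc_wqe128));
 */
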
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful, -ENOMEM if @q is invalid, -EBUSY if no entries are available
 * on @q, and -EINVAL if the doorbell format is not recognized.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;

	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
					q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
					q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			    q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			    q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

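/*
 * Usage sketch (illustrative only): posting a prepared 128-byte WQE while
 * holding the hbalock, as this routine requires. The wq, wqe and iflags
 * locals here are hypothetical.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_wq_put(wq, &wqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc == -EBUSY)
 *		...queue full, hold the request on the txq and retry later...
 */
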
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful; it returns -ENOMEM if @q is invalid or no entries are
 * available on @q.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

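/*
 * Usage sketch (illustrative only): posting a mailbox entry under the
 * hbalock. The mq and mqe variables are hypothetical and setup of the
 * mailbox command itself is abbreviated.
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	rc = lpfc_sli4_mq_put(mq, mqe);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc)
 *		...MQ full or invalid, fail the mailbox command...
 */
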
/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q and
 * return the EQE. If no valid EQEs are in the Queue (no more work to do),
 * or the Queue is full of EQEs that have been processed but not popped
 * back to the HBA, then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts on
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		     uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

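/*
 * Usage sketch (illustrative only): a polling pass typically releases
 * consumed EQEs without re-arming, then arms once polling is finished,
 * mirroring how lpfc_sli4_process_eq() below drives this doorbell.
 *
 *	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, LPFC_QUEUE_NOARM);
 *	...
 *	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
 */
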
/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
			    LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     u8 rearm, enum lpfc_poll_mode poll_mode)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}

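/*
 * Usage sketch (illustrative only): how an interrupt or polling context
 * might drive lpfc_sli4_process_eq(). The fpeq variable and the poll mode
 * value chosen here are hypothetical.
 *
 *	consumed = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
 *					LPFC_QUEUE_WORK);
 *	if (!consumed)
 *		...no EQEs were pending, or another context owned the EQ...
 */
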
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q and
 * return the CQE. If no valid CQEs are in the Queue (no more work to do),
 * or the Queue is full of CQEs that have been processed but not popped
 * back to the HBA, then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_eq_get() of speculative
	 * instructions allowing action on content before the valid bit was
	 * checked, add the barrier here as well. The CQE is a multi-word
	 * structure (vs the single-word EQE), so stale copies are a real
	 * possibility without it.
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		     uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			 uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/*
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 *
 * This routine will copy the contents of @hrqe and @drqe to the next
 * available entries on the header queue @hq and data queue @dq. This
 * function will then ring the Receive Queue Doorbell to signal the HBA
 * to start processing the Receive Queue Entries. This function returns
 * the index that the rqe was copied to if successful, -ENOMEM if either
 * queue is invalid, -EINVAL if the queues are mismatched or the doorbell
 * format is not recognized, and -EBUSY if no entries are available.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}

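/*
 * Usage sketch (illustrative only): posting a header/data RQE pair under
 * the hbalock. The hrq/drq queues and the DMA buffers shown here are
 * hypothetical; putPaddrLow/putPaddrHigh split a dma_addr_t.
 *
 *	struct lpfc_rqe hrqe, drqe;
 *
 *	hrqe.address_lo = putPaddrLow(hbuf->dbuf.phys);
 *	hrqe.address_hi = putPaddrHigh(hbuf->dbuf.phys);
 *	drqe.address_lo = putPaddrLow(dbuf->dbuf.phys);
 *	drqe.address_hi = putPaddrHigh(dbuf->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *	if (rc < 0)
 *		...no RQ slot available, return the buffers to the pool...
 */
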
/*
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Lookup did to verify if did is still active on this vport */
	if (rrq->vport)
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function checks whether stop_time (ratov from setting rrq active)
 * has been reached for each RRQ on the active list. If it has and the
 * send_rrq flag is set, it calls lpfc_send_rrq. If the send_rrq flag is
 * not set, it just calls the routine to clear the rrq and free the rrq
 * resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!test_bit(FC_UNLOADING, &phba->pport->load_flag)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 *
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport != vport)
			continue;

		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
			list_move(&rrq->list, &rrq_list);
	}
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
			uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 if the rrq was activated for this xri
 *         < 0 on no memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
		clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
		goto outnl;
	}

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies +
				msecs_to_jiffies(1000 * (phba->fc_ratov + 1));
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;

	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
	set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
	if (empty)
		lpfc_worker_wake_up(phba);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
outnl:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

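/*
 * Usage sketch (illustrative only): after an exchange is aborted, mark its
 * XRI as having an outstanding RRQ so the sglq allocator skips it until
 * the RRQ completes. The lxri and rxid values here are hypothetical.
 *
 *	if (lpfc_set_rrq_active(phba, ndlp, lxri, rxid, 1))
 *		...bitmap not updated, the XRI may be reused immediately...
 */
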
/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage.  This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty then it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;
	u8 cmnd;

	cmnd = get_job_cmnd(phba, piocbq);

	if (piocbq->cmd_flag & LPFC_IO_FCP) {
		lpfc_cmd = piocbq->io_buf;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
			!(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->ndlp;
	} else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
		if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->ndlp;
	} else {
		ndlp = piocbq->ndlp;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
		    ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
						struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else
				continue;
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the list is not
 * empty then it returns a pointer to the newly allocated sglq object;
 * otherwise it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

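/*
 * Usage sketch (illustrative only): the lock-free allocator above pairs
 * with lpfc_sli_release_iocbq() below; the command-building step is elided.
 *
 *	piocb = lpfc_sli_get_iocbq(phba);
 *	if (!piocb)
 *		return -ENOMEM;
 *	...build and issue the command, or on failure...
 *	lpfc_sli_release_iocbq(phba, piocb);
 */
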
1355 /**
1356  * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
1357  * @phba: Pointer to HBA context object.
1358  * @iocbq: Pointer to driver iocb object.
1359  *
1360  * This function is called to release the driver iocb object
1361  * to the iocb pool. The iotag in the iocb object
1362  * does not change for each use of the iocb object. This function
1363  * clears all other fields of the iocb object when it is freed.
1364  * The sqlq structure that holds the xritag and phys and virtual
1365  * mappings for the scatter gather list is retrieved from the
1366  * active array of sglq. The get of the sglq pointer also clears
1367  * the entry in the array. If the status of the IO indiactes that
1368  * this IO was aborted then the sglq entry it put on the
1369  * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
1370  * IO has good status or fails for any other reason then the sglq
1371  * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
1372  *  asserted held in the code path calling this routine.
1373  **/
1374 static void
1375 __lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1376 {
1377 	struct lpfc_sglq *sglq;
1378 	unsigned long iflag = 0;
1379 	struct lpfc_sli_ring *pring;
1380 
1381 	if (iocbq->sli4_xritag == NO_XRI)
1382 		sglq = NULL;
1383 	else
1384 		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);
1385 
1386 
1387 	if (sglq)  {
1388 		if (iocbq->cmd_flag & LPFC_IO_NVMET) {
1389 			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1390 					  iflag);
1391 			sglq->state = SGL_FREED;
1392 			sglq->ndlp = NULL;
1393 			list_add_tail(&sglq->list,
1394 				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
1395 			spin_unlock_irqrestore(
1396 				&phba->sli4_hba.sgl_list_lock, iflag);
1397 			goto out;
1398 		}
1399 
1400 		if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
1401 		    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
1402 		    sglq->state != SGL_XRI_ABORTED) {
1403 			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1404 					  iflag);
1405 
1406 			/* Check if we can get a reference on ndlp */
1407 			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
1408 				sglq->ndlp = NULL;
1409 
1410 			list_add(&sglq->list,
1411 				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
1412 			spin_unlock_irqrestore(
1413 				&phba->sli4_hba.sgl_list_lock, iflag);
1414 		} else {
1415 			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
1416 					  iflag);
1417 			sglq->state = SGL_FREED;
1418 			sglq->ndlp = NULL;
1419 			list_add_tail(&sglq->list,
1420 				      &phba->sli4_hba.lpfc_els_sgl_list);
1421 			spin_unlock_irqrestore(
1422 				&phba->sli4_hba.sgl_list_lock, iflag);
1423 			pring = lpfc_phba_elsring(phba);
1424 			/* Check if TXQ queue needs to be serviced */
1425 			if (pring && (!list_empty(&pring->txq)))
1426 				lpfc_worker_wake_up(phba);
1427 		}
1428 	}
1429 
1430 out:
1431 	/*
1432 	 * Clean all volatile data fields, preserve iotag and node struct.
1433 	 */
1434 	memset_startat(iocbq, 0, wqe);
1435 	iocbq->sli4_lxritag = NO_XRI;
1436 	iocbq->sli4_xritag = NO_XRI;
1437 	iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
1438 			      LPFC_IO_NVME_LS);
1439 	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1440 }
1441 
1442 
1443 /**
1444  * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
1445  * @phba: Pointer to HBA context object.
1446  * @iocbq: Pointer to driver iocb object.
1447  *
1448  * This function is called to release the driver iocb object to the
1449  * iocb pool. The iotag in the iocb object does not change for each
1450  * use of the iocb object. This function clears all other fields of
1451  * the iocb object when it is freed. The hbalock is asserted held in
1452  * the code path calling this routine.
1453  **/
1454 static void
1455 __lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1456 {
1457 
1458 	/*
1459 	 * Clean all volatile data fields, preserve iotag and node struct.
1460 	 */
1461 	memset_startat(iocbq, 0, iocb);
1462 	iocbq->sli4_xritag = NO_XRI;
1463 	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
1464 }
1465 
1466 /**
1467  * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
1468  * @phba: Pointer to HBA context object.
1469  * @iocbq: Pointer to driver iocb object.
1470  *
1471  * This function is called with hbalock held to release driver
1472  * iocb object to the iocb pool. The iotag in the iocb object
1473  * does not change for each use of the iocb object. This function
1474  * clears all other fields of the iocb object when it is freed.
1475  **/
1476 static void
1477 __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1478 {
1479 	lockdep_assert_held(&phba->hbalock);
1480 
1481 	phba->__lpfc_sli_release_iocbq(phba, iocbq);
1482 	phba->iocb_cnt--;
1483 }
1484 
1485 /**
1486  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
1487  * @phba: Pointer to HBA context object.
1488  * @iocbq: Pointer to driver iocb object.
1489  *
1490  * This function is called with no lock held to release the iocb to
1491  * iocb pool.
1492  **/
1493 void
1494 lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
1495 {
1496 	unsigned long iflags;
1497 
1498 	/*
1499 	 * Clean all volatile data fields, preserve iotag and node struct.
1500 	 */
1501 	spin_lock_irqsave(&phba->hbalock, iflags);
1502 	__lpfc_sli_release_iocbq(phba, iocbq);
1503 	spin_unlock_irqrestore(&phba->hbalock, iflags);
1504 }
1505 
1506 /**
1507  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1508  * @phba: Pointer to HBA context object.
1509  * @iocblist: List of IOCBs.
1510  * @ulpstatus: ULP status in IOCB command field.
1511  * @ulpWord4: ULP word-4 in IOCB command field.
1512  *
1513  * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1514  * on the list by invoking the complete callback function associated with the
1515  * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1516  * fields.
1517  **/
1518 void
1519 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1520 		      uint32_t ulpstatus, uint32_t ulpWord4)
1521 {
1522 	struct lpfc_iocbq *piocb;
1523 
1524 	while (!list_empty(iocblist)) {
1525 		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1526 		if (piocb->cmd_cmpl) {
1527 			if (piocb->cmd_flag & LPFC_IO_NVME) {
1528 				lpfc_nvme_cancel_iocb(phba, piocb,
1529 						      ulpstatus, ulpWord4);
1530 			} else {
1531 				if (phba->sli_rev == LPFC_SLI_REV4) {
1532 					bf_set(lpfc_wcqe_c_status,
1533 					       &piocb->wcqe_cmpl, ulpstatus);
1534 					piocb->wcqe_cmpl.parameter = ulpWord4;
1535 				} else {
1536 					piocb->iocb.ulpStatus = ulpstatus;
1537 					piocb->iocb.un.ulpWord[4] = ulpWord4;
1538 				}
1539 				(piocb->cmd_cmpl) (phba, piocb, piocb);
1540 			}
1541 		} else {
1542 			lpfc_sli_release_iocbq(phba, piocb);
1543 		}
1544 	}
1545 	return;
1546 }
1547 
1548 /**
1549  * lpfc_sli_iocb_cmd_type - Get the iocb type
1550  * @iocb_cmnd: iocb command code.
1551  *
1552  * This function is called by ring event handler function to get the iocb type.
1553  * This function translates the iocb command to an iocb command type used to
1554  * decide the final disposition of each completed IOCB.
1555  * The function returns
1556  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1557  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1558  * LPFC_ABORT_IOCB   if it is an abort iocb
1559  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1560  *
1561  * The caller is not required to hold any lock.
1562  **/
1563 static lpfc_iocb_type
1564 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1565 {
1566 	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1567 
1568 	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1569 		return 0;
1570 
1571 	switch (iocb_cmnd) {
1572 	case CMD_XMIT_SEQUENCE_CR:
1573 	case CMD_XMIT_SEQUENCE_CX:
1574 	case CMD_XMIT_BCAST_CN:
1575 	case CMD_XMIT_BCAST_CX:
1576 	case CMD_ELS_REQUEST_CR:
1577 	case CMD_ELS_REQUEST_CX:
1578 	case CMD_CREATE_XRI_CR:
1579 	case CMD_CREATE_XRI_CX:
1580 	case CMD_GET_RPI_CN:
1581 	case CMD_XMIT_ELS_RSP_CX:
1582 	case CMD_GET_RPI_CR:
1583 	case CMD_FCP_IWRITE_CR:
1584 	case CMD_FCP_IWRITE_CX:
1585 	case CMD_FCP_IREAD_CR:
1586 	case CMD_FCP_IREAD_CX:
1587 	case CMD_FCP_ICMND_CR:
1588 	case CMD_FCP_ICMND_CX:
1589 	case CMD_FCP_TSEND_CX:
1590 	case CMD_FCP_TRSP_CX:
1591 	case CMD_FCP_TRECEIVE_CX:
1592 	case CMD_FCP_AUTO_TRSP_CX:
1593 	case CMD_ADAPTER_MSG:
1594 	case CMD_ADAPTER_DUMP:
1595 	case CMD_XMIT_SEQUENCE64_CR:
1596 	case CMD_XMIT_SEQUENCE64_CX:
1597 	case CMD_XMIT_BCAST64_CN:
1598 	case CMD_XMIT_BCAST64_CX:
1599 	case CMD_ELS_REQUEST64_CR:
1600 	case CMD_ELS_REQUEST64_CX:
1601 	case CMD_FCP_IWRITE64_CR:
1602 	case CMD_FCP_IWRITE64_CX:
1603 	case CMD_FCP_IREAD64_CR:
1604 	case CMD_FCP_IREAD64_CX:
1605 	case CMD_FCP_ICMND64_CR:
1606 	case CMD_FCP_ICMND64_CX:
1607 	case CMD_FCP_TSEND64_CX:
1608 	case CMD_FCP_TRSP64_CX:
1609 	case CMD_FCP_TRECEIVE64_CX:
1610 	case CMD_GEN_REQUEST64_CR:
1611 	case CMD_GEN_REQUEST64_CX:
1612 	case CMD_XMIT_ELS_RSP64_CX:
1613 	case DSSCMD_IWRITE64_CR:
1614 	case DSSCMD_IWRITE64_CX:
1615 	case DSSCMD_IREAD64_CR:
1616 	case DSSCMD_IREAD64_CX:
1617 	case CMD_SEND_FRAME:
1618 		type = LPFC_SOL_IOCB;
1619 		break;
1620 	case CMD_ABORT_XRI_CN:
1621 	case CMD_ABORT_XRI_CX:
1622 	case CMD_CLOSE_XRI_CN:
1623 	case CMD_CLOSE_XRI_CX:
1624 	case CMD_XRI_ABORTED_CX:
1625 	case CMD_ABORT_MXRI64_CN:
1626 	case CMD_XMIT_BLS_RSP64_CX:
1627 		type = LPFC_ABORT_IOCB;
1628 		break;
1629 	case CMD_RCV_SEQUENCE_CX:
1630 	case CMD_RCV_ELS_REQ_CX:
1631 	case CMD_RCV_SEQUENCE64_CX:
1632 	case CMD_RCV_ELS_REQ64_CX:
1633 	case CMD_ASYNC_STATUS:
1634 	case CMD_IOCB_RCV_SEQ64_CX:
1635 	case CMD_IOCB_RCV_ELS64_CX:
1636 	case CMD_IOCB_RCV_CONT64_CX:
1637 	case CMD_IOCB_RET_XRI64_CX:
1638 		type = LPFC_UNSOL_IOCB;
1639 		break;
1640 	case CMD_IOCB_XMIT_MSEQ64_CR:
1641 	case CMD_IOCB_XMIT_MSEQ64_CX:
1642 	case CMD_IOCB_RCV_SEQ_LIST64_CX:
1643 	case CMD_IOCB_RCV_ELS_LIST64_CX:
1644 	case CMD_IOCB_CLOSE_EXTENDED_CN:
1645 	case CMD_IOCB_ABORT_EXTENDED_CN:
1646 	case CMD_IOCB_RET_HBQE64_CN:
1647 	case CMD_IOCB_FCP_IBIDIR64_CR:
1648 	case CMD_IOCB_FCP_IBIDIR64_CX:
1649 	case CMD_IOCB_FCP_ITASKMGT64_CX:
1650 	case CMD_IOCB_LOGENTRY_CN:
1651 	case CMD_IOCB_LOGENTRY_ASYNC_CN:
1652 		pr_err("%s - Unhandled SLI-3 Command x%x\n",
1653 		       __func__, iocb_cmnd);
1654 		type = LPFC_UNKNOWN_IOCB;
1655 		break;
1656 	default:
1657 		type = LPFC_UNKNOWN_IOCB;
1658 		break;
1659 	}
1660 
1661 	return type;
1662 }
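/*
 * Illustrative sketch (not from the driver): a ring event handler would
 * typically translate the completed command with the routine above and then
 * dispatch on the returned type. The surrounding handler and the "saveq"
 * variable here are hypothetical.
 */
#if 0
	lpfc_iocb_type type;

	type = lpfc_sli_iocb_cmd_type(saveq->iocb.ulpCommand);
	switch (type) {
	case LPFC_SOL_IOCB:	/* match against the txcmplq and complete */
		break;
	case LPFC_UNSOL_IOCB:	/* hand the sequence to the upper layer */
		break;
	case LPFC_ABORT_IOCB:	/* clean up the aborted exchange */
		break;
	case LPFC_UNKNOWN_IOCB:	/* log and drop */
		break;
	}
#endif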
1663 
1664 /**
1665  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1666  * @phba: Pointer to HBA context object.
1667  *
1668  * This function is called from SLI initialization code
1669  * to configure every ring of the HBA's SLI interface. The
1670  * caller is not required to hold any lock. This function issues
1671  * a config_ring mailbox command for each ring.
1672  * This function returns zero if successful, else it returns a negative
1673  * error code.
1674  **/
1675 static int
1676 lpfc_sli_ring_map(struct lpfc_hba *phba)
1677 {
1678 	struct lpfc_sli *psli = &phba->sli;
1679 	LPFC_MBOXQ_t *pmb;
1680 	MAILBOX_t *pmbox;
1681 	int i, rc, ret = 0;
1682 
1683 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1684 	if (!pmb)
1685 		return -ENOMEM;
1686 	pmbox = &pmb->u.mb;
1687 	phba->link_state = LPFC_INIT_MBX_CMDS;
1688 	for (i = 0; i < psli->num_rings; i++) {
1689 		lpfc_config_ring(phba, i, pmb);
1690 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1691 		if (rc != MBX_SUCCESS) {
1692 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1693 					"0446 Adapter failed to init (%d), "
1694 					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
1695 					"ring %d\n",
1696 					rc, pmbox->mbxCommand,
1697 					pmbox->mbxStatus, i);
1698 			phba->link_state = LPFC_HBA_ERROR;
1699 			ret = -ENXIO;
1700 			break;
1701 		}
1702 	}
1703 	mempool_free(pmb, phba->mbox_mem_pool);
1704 	return ret;
1705 }
1706 
1707 /**
1708  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1709  * @phba: Pointer to HBA context object.
1710  * @pring: Pointer to driver SLI ring object.
1711  * @piocb: Pointer to the driver iocb object.
1712  *
1713  * The driver calls this function with the hbalock held for SLI3 ports or
1714  * the ring lock held for SLI4 ports. The function adds the
1715  * new iocb to the txcmplq of the given ring. This function always returns
1716  * 0. If it is called for the ELS ring, it checks that there is a vport
1717  * associated with the ELS command and, unless the command is an abort,
1718  * restarts the vport's els_tmofunc timer.
1719  **/
1720 static int
1721 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1722 			struct lpfc_iocbq *piocb)
1723 {
1724 	u32 ulp_command = 0;
1725 
1726 	BUG_ON(!piocb);
1727 	ulp_command = get_job_cmnd(phba, piocb);
1728 
1729 	list_add_tail(&piocb->list, &pring->txcmplq);
1730 	piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
1731 	pring->txcmplq_cnt++;
1732 	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1733 	   (ulp_command != CMD_ABORT_XRI_WQE) &&
1734 	   (ulp_command != CMD_ABORT_XRI_CN) &&
1735 	   (ulp_command != CMD_CLOSE_XRI_CN)) {
1736 		BUG_ON(!piocb->vport);
1737 		if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
1738 			mod_timer(&piocb->vport->els_tmofunc,
1739 				  jiffies +
1740 				  msecs_to_jiffies(1000 * (phba->fc_ratov << 1)));
1741 	}
1742 
1743 	return 0;
1744 }
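/*
 * Worked example (fc_ratov value hypothetical): with an fc_ratov of 10
 * seconds, the ELS timer above is armed for twice RATOV:
 *
 *	1000 * (10 << 1) = 20000 ms
 *
 * so an ELS command that never completes is noticed by els_tmofunc roughly
 * 20 seconds after it was queued to the txcmplq.
 */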
1745 
1746 /**
1747  * lpfc_sli_ringtx_get - Get first element of the txq
1748  * @phba: Pointer to HBA context object.
1749  * @pring: Pointer to driver SLI ring object.
1750  *
1751  * This function is called with hbalock held to get next
1752  * iocb in txq of the given ring. If there is any iocb in
1753  * the txq, the function returns first iocb in the list after
1754  * removing the iocb from the list, else it returns NULL.
1755  **/
1756 struct lpfc_iocbq *
1757 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1758 {
1759 	struct lpfc_iocbq *cmd_iocb;
1760 
1761 	lockdep_assert_held(&phba->hbalock);
1762 
1763 	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1764 	return cmd_iocb;
1765 }
1766 
1767 /**
1768  * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
1769  * @phba: Pointer to HBA context object.
1770  * @cmdiocb: Pointer to driver command iocb object.
1771  * @rspiocb: Pointer to driver response iocb object.
1772  *
1773  * This routine will inform the driver of any BW adjustments we need
1774  * to make. These changes will be picked up during the next CMF
1775  * timer interrupt. In addition, any BW changes will be logged
1776  * with LOG_CGN_MGMT.
1777  **/
1778 static void
1779 lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1780 		   struct lpfc_iocbq *rspiocb)
1781 {
1782 	union lpfc_wqe128 *wqe;
1783 	uint32_t status, info;
1784 	struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
1785 	uint64_t bw, bwdif, slop;
1786 	uint64_t pcent, bwpcent;
1787 	int asig, afpin, sigcnt, fpincnt;
1788 	int wsigmax, wfpinmax, cg, tdp;
1789 	char *s;
1790 
1791 	/* First check for error */
1792 	status = bf_get(lpfc_wcqe_c_status, wcqe);
1793 	if (status) {
1794 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1795 				"6211 CMF_SYNC_WQE Error "
1796 				"req_tag x%x status x%x hwstatus x%x "
1797 				"tdatap x%x parm x%x\n",
1798 				bf_get(lpfc_wcqe_c_request_tag, wcqe),
1799 				bf_get(lpfc_wcqe_c_status, wcqe),
1800 				bf_get(lpfc_wcqe_c_hw_status, wcqe),
1801 				wcqe->total_data_placed,
1802 				wcqe->parameter);
1803 		goto out;
1804 	}
1805 
1806 	/* Gather congestion information on a successful cmpl */
1807 	info = wcqe->parameter;
1808 	phba->cmf_active_info = info;
1809 
1810 	/* See if firmware info count is valid or has changed */
1811 	if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
1812 		info = 0;
1813 	else
1814 		phba->cmf_info_per_interval = info;
1815 
1816 	tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
1817 	cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
1818 
1819 	/* Get BW requirement from firmware */
1820 	bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
1821 	if (!bw) {
1822 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1823 				"6212 CMF_SYNC_WQE x%x: NULL bw\n",
1824 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
1825 		goto out;
1826 	}
1827 
1828 	/* Gather information needed for logging if a BW change is required */
1829 	wqe = &cmdiocb->wqe;
1830 	asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
1831 	afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
1832 	fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
1833 	sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
1834 	if (phba->cmf_max_bytes_per_interval != bw ||
1835 	    (asig || afpin || sigcnt || fpincnt)) {
1836 		/* Are we increasing or decreasing BW */
1837 		if (phba->cmf_max_bytes_per_interval < bw) {
1838 			bwdif = bw - phba->cmf_max_bytes_per_interval;
1839 			s = "Increase";
1840 		} else {
1841 			bwdif = phba->cmf_max_bytes_per_interval - bw;
1842 			s = "Decrease";
1843 		}
1844 
1845 		/* What is the change percentage */
1846 		slop = div_u64(phba->cmf_link_byte_count, 200); /* for rounding */
1847 		pcent = div64_u64(bwdif * 100 + slop,
1848 				  phba->cmf_link_byte_count);
1849 		bwpcent = div64_u64(bw * 100 + slop,
1850 				    phba->cmf_link_byte_count);
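		/*
		 * Worked example (numbers hypothetical): with
		 * cmf_link_byte_count = 1,000,000,000 the slop is 5,000,000,
		 * so a bwdif of 25,000,000 bytes yields
		 * (25,000,000 * 100 + 5,000,000) / 1,000,000,000 = 2
		 * after the integer division, i.e. pcent is 2 percent.
		 */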
1851 		/* Because of a byte-count adjustment for the shorter timer in
1852 		 * lpfc_cmf_timer(), cmf_link_byte_count can be smaller than
1853 		 * expected and may make BW appear to be above 100%.
1854 		 */
1855 		if (bwpcent > 100)
1856 			bwpcent = 100;
1857 
1858 		if (phba->cmf_max_bytes_per_interval < bw &&
1859 		    bwpcent > 95)
1860 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1861 					"6208 Congestion bandwidth "
1862 					"limits removed\n");
1863 		else if ((phba->cmf_max_bytes_per_interval > bw) &&
1864 			 ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95))
1865 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1866 					"6209 Congestion bandwidth "
1867 					"limits in effect\n");
1868 
1869 		if (asig) {
1870 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1871 					"6237 BW Threshold %lld%% (%lld): "
1872 					"%lld%% %s: Signal Alarm: cg:%d "
1873 					"Info:%u\n",
1874 					bwpcent, bw, pcent, s, cg,
1875 					phba->cmf_active_info);
1876 		} else if (afpin) {
1877 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1878 					"6238 BW Threshold %lld%% (%lld): "
1879 					"%lld%% %s: FPIN Alarm: cg:%d "
1880 					"Info:%u\n",
1881 					bwpcent, bw, pcent, s, cg,
1882 					phba->cmf_active_info);
1883 		} else if (sigcnt) {
1884 			wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
1885 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1886 					"6239 BW Threshold %lld%% (%lld): "
1887 					"%lld%% %s: Signal Warning: "
1888 					"Cnt %d Max %d: cg:%d Info:%u\n",
1889 					bwpcent, bw, pcent, s, sigcnt,
1890 					wsigmax, cg, phba->cmf_active_info);
1891 		} else if (fpincnt) {
1892 			wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
1893 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1894 					"6240 BW Threshold %lld%% (%lld): "
1895 					"%lld%% %s: FPIN Warning: "
1896 					"Cnt %d Max %d: cg:%d Info:%u\n",
1897 					bwpcent, bw, pcent, s, fpincnt,
1898 					wfpinmax, cg, phba->cmf_active_info);
1899 		} else {
1900 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1901 					"6241 BW Threshold %lld%% (%lld): "
1902 					"CMF %lld%% %s: cg:%d Info:%u\n",
1903 					bwpcent, bw, pcent, s, cg,
1904 					phba->cmf_active_info);
1905 		}
1906 	} else if (info) {
1907 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1908 				"6246 Info Threshold %u\n", info);
1909 	}
1910 
1911 	/* Save BW change to be picked up during next timer interrupt */
1912 	phba->cmf_last_sync_bw = bw;
1913 out:
1914 	lpfc_sli_release_iocbq(phba, cmdiocb);
1915 }
1916 
1917 /**
1918  * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
1919  * @phba: Pointer to HBA context object.
1920  * @ms:   ms to set in WQE interval, 0 means use init op
1921  * @total: Total rcv bytes for this interval
1922  *
1923  * This routine is called every CMF timer interrupt. Its purpose is
1924  * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
1925  * that may indicate we have congestion (FPINs or Signals). Upon
1926  * completion, the firmware will indicate any BW restrictions the
1927  * driver may need to take.
1928  **/
1929 int
1930 lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
1931 {
1932 	union lpfc_wqe128 *wqe;
1933 	struct lpfc_iocbq *sync_buf;
1934 	unsigned long iflags;
1935 	u32 ret_val;
1936 	u32 atot, wtot, max;
1937 	u8 warn_sync_period = 0;
1938 
1939 	/* First address any alarm / warning activity */
1940 	atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
1941 	wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
1942 
1943 	/* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
1944 	if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
1945 	    phba->link_state == LPFC_LINK_DOWN)
1946 		return 0;
1947 
1948 	spin_lock_irqsave(&phba->hbalock, iflags);
1949 	sync_buf = __lpfc_sli_get_iocbq(phba);
1950 	if (!sync_buf) {
1951 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
1952 				"6244 No available WQEs for CMF_SYNC_WQE\n");
1953 		ret_val = ENOMEM;
1954 		goto out_unlock;
1955 	}
1956 
1957 	wqe = &sync_buf->wqe;
1958 
1959 	/* WQEs are reused.  Clear stale data and set key fields to zero */
1960 	memset(wqe, 0, sizeof(*wqe));
1961 
1962 	/* If this is the very first CMF_SYNC_WQE, issue an init operation */
1963 	if (!ms) {
1964 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1965 				"6441 CMF Init %d - CMF_SYNC_WQE\n",
1966 				phba->fc_eventTag);
1967 		bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
1968 		bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
1969 		goto initpath;
1970 	}
1971 
1972 	bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
1973 	bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
1974 
1975 	/* Check for alarms / warnings */
1976 	if (atot) {
1977 		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1978 			/* We hit a Signal alarm condition */
1979 			bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
1980 		} else {
1981 			/* We hit a FPIN alarm condition */
1982 			bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
1983 		}
1984 	} else if (wtot) {
1985 		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
1986 		    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1987 			/* We hit a Signal warning condition */
1988 			max = LPFC_SEC_TO_MSEC / lpfc_fabric_cgn_frequency *
1989 				lpfc_acqe_cgn_frequency;
1990 			bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
1991 			bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
1992 			warn_sync_period = lpfc_acqe_cgn_frequency;
1993 		} else {
1994 			/* We hit a FPIN warning condition */
1995 			bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
1996 			bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
1997 			if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
1998 				warn_sync_period =
1999 				LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
2000 		}
2001 	}
2002 
2003 	/* Update total read blocks during previous timer interval */
2004 	wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
2005 
2006 initpath:
2007 	bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
2008 	wqe->cmf_sync.event_tag = phba->fc_eventTag;
2009 	bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
2010 
2011 	/* Setup reqtag to match the wqe completion. */
2012 	bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
2013 
2014 	bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
2015 	bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);
2016 
2017 	bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
2018 	bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
2019 	bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
2020 
2021 	sync_buf->vport = phba->pport;
2022 	sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
2023 	sync_buf->cmd_dmabuf = NULL;
2024 	sync_buf->rsp_dmabuf = NULL;
2025 	sync_buf->bpl_dmabuf = NULL;
2026 	sync_buf->sli4_xritag = NO_XRI;
2027 
2028 	sync_buf->cmd_flag |= LPFC_IO_CMF;
2029 	ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
2030 	if (ret_val) {
2031 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
2032 				"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
2033 				ret_val);
2034 		__lpfc_sli_release_iocbq(phba, sync_buf);
2035 	}
2036 out_unlock:
2037 	spin_unlock_irqrestore(&phba->hbalock, iflags);
2038 	return ret_val;
2039 }
2040 
2041 /**
2042  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
2043  * @phba: Pointer to HBA context object.
2044  * @pring: Pointer to driver SLI ring object.
2045  *
2046  * This function is called with the hbalock held and the caller must post the
2047  * iocb without releasing the lock. If the caller releases the lock, the
2048  * iocb slot returned by the function is not guaranteed to be available.
2049  * The function returns a pointer to the next available iocb slot if there
2050  * is an available slot in the ring, else it returns NULL.
2051  * If the port's command get index is out of range, the function will post
2052  * an error attention event to the worker thread to take the HBA to the
2053  * offline state.
2054  **/
2055 static IOCB_t *
2056 lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2057 {
2058 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2059 	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
2060 
2061 	lockdep_assert_held(&phba->hbalock);
2062 
2063 	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
2064 	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
2065 		pring->sli.sli3.next_cmdidx = 0;
2066 
2067 	if (unlikely(pring->sli.sli3.local_getidx ==
2068 		pring->sli.sli3.next_cmdidx)) {
2069 
2070 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
2071 
2072 		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
2073 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2074 					"0315 Ring %d issue: portCmdGet %d "
2075 					"is bigger than cmd ring %d\n",
2076 					pring->ringno,
2077 					pring->sli.sli3.local_getidx,
2078 					max_cmd_idx);
2079 
2080 			phba->link_state = LPFC_HBA_ERROR;
2081 			/*
2082 			 * All error attention handlers are posted to
2083 			 * worker thread
2084 			 */
2085 			phba->work_ha |= HA_ERATT;
2086 			phba->work_hs = HS_FFER3;
2087 
2088 			lpfc_worker_wake_up(phba);
2089 
2090 			return NULL;
2091 		}
2092 
2093 		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
2094 			return NULL;
2095 	}
2096 
2097 	return lpfc_cmd_iocb(phba, pring);
2098 }
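/*
 * Illustrative sketch (standalone, not driver code): the command ring is a
 * circular array, so the producer index advances modulo the ring size and a
 * slot can only be handed out while the next put index does not collide
 * with the consumer's get index. All names here are hypothetical.
 */
static inline bool example_ring_slot_free(unsigned int next_putidx,
					  unsigned int getidx,
					  unsigned int entries)
{
	/* Wrap the producer index at the end of the ring */
	if (next_putidx >= entries)
		next_putidx = 0;

	/* Full when the put index would catch up with the get index */
	return next_putidx != getidx;
}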
2099 
2100 /**
2101  * lpfc_sli_next_iotag - Get an iotag for the iocb
2102  * @phba: Pointer to HBA context object.
2103  * @iocbq: Pointer to driver iocb object.
2104  *
2105  * This function gets an iotag for the iocb. If there is no unused iotag and
2106  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iocbq_lookup
2107  * array and assigns a new iotag.
2108  * The function returns the allocated iotag if successful, else returns zero.
2109  * Zero is not a valid iotag.
2110  * The caller is not required to hold any lock.
2111  **/
2112 uint16_t
2113 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
2114 {
2115 	struct lpfc_iocbq **new_arr;
2116 	struct lpfc_iocbq **old_arr;
2117 	size_t new_len;
2118 	struct lpfc_sli *psli = &phba->sli;
2119 	uint16_t iotag;
2120 
2121 	spin_lock_irq(&phba->hbalock);
2122 	iotag = psli->last_iotag;
2123 	if (++iotag < psli->iocbq_lookup_len) {
2124 		psli->last_iotag = iotag;
2125 		psli->iocbq_lookup[iotag] = iocbq;
2126 		spin_unlock_irq(&phba->hbalock);
2127 		iocbq->iotag = iotag;
2128 		return iotag;
2129 	} else if (psli->iocbq_lookup_len < (0xffff
2130 					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
2131 		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2132 		spin_unlock_irq(&phba->hbalock);
2133 		new_arr = kcalloc(new_len, sizeof(struct lpfc_iocbq *),
2134 				  GFP_KERNEL);
2135 		if (new_arr) {
2136 			spin_lock_irq(&phba->hbalock);
2137 			old_arr = psli->iocbq_lookup;
2138 			if (new_len <= psli->iocbq_lookup_len) {
2139 				/* highly improbable case */
2140 				kfree(new_arr);
2141 				iotag = psli->last_iotag;
2142 				if (++iotag < psli->iocbq_lookup_len) {
2143 					psli->last_iotag = iotag;
2144 					psli->iocbq_lookup[iotag] = iocbq;
2145 					spin_unlock_irq(&phba->hbalock);
2146 					iocbq->iotag = iotag;
2147 					return iotag;
2148 				}
2149 				spin_unlock_irq(&phba->hbalock);
2150 				return 0;
2151 			}
2152 			if (psli->iocbq_lookup)
2153 				memcpy(new_arr, old_arr,
2154 				       ((psli->last_iotag  + 1) *
2155 					sizeof (struct lpfc_iocbq *)));
2156 			psli->iocbq_lookup = new_arr;
2157 			psli->iocbq_lookup_len = new_len;
2158 			psli->last_iotag = iotag;
2159 			psli->iocbq_lookup[iotag] = iocbq;
2160 			spin_unlock_irq(&phba->hbalock);
2161 			iocbq->iotag = iotag;
2162 			kfree(old_arr);
2163 			return iotag;
2164 		}
2165 	} else
2166 		spin_unlock_irq(&phba->hbalock);
2167 
2168 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2169 			"0318 Failed to allocate IOTAG. Last IOTAG is %d\n",
2170 			psli->last_iotag);
2171 
2172 	return 0;
2173 }
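/*
 * Illustrative sketch (standalone, not driver code): the routine above uses
 * the classic "allocate without the lock, then re-validate under the lock"
 * pattern, since GFP_KERNEL may sleep and another thread may have grown the
 * table in the meantime. The struct and names below are hypothetical.
 */
struct example_tbl {
	spinlock_t lock;
	void **slots;
	size_t len;
};

static int example_tbl_grow(struct example_tbl *t, size_t new_len)
{
	void **new_arr, **old_arr;

	/* GFP_KERNEL may sleep, so allocate without holding the lock */
	new_arr = kcalloc(new_len, sizeof(*new_arr), GFP_KERNEL);
	if (!new_arr)
		return -ENOMEM;

	spin_lock_irq(&t->lock);
	if (new_len <= t->len) {
		/* Lost the race: someone else already grew the table */
		spin_unlock_irq(&t->lock);
		kfree(new_arr);
		return 0;
	}
	memcpy(new_arr, t->slots, t->len * sizeof(*new_arr));
	old_arr = t->slots;
	t->slots = new_arr;
	t->len = new_len;
	spin_unlock_irq(&t->lock);

	/* Free the superseded array outside the lock */
	kfree(old_arr);
	return 0;
}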
2174 
2175 /**
2176  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
2177  * @phba: Pointer to HBA context object.
2178  * @pring: Pointer to driver SLI ring object.
2179  * @iocb: Pointer to iocb slot in the ring.
2180  * @nextiocb: Pointer to driver iocb object which needs to be
2181  *            posted to firmware.
2182  *
2183  * This function is called to post a new iocb to the firmware. This
2184  * function copies the new iocb to the ring iocb slot and updates the
2185  * ring pointers. It adds the new iocb to the txcmplq if there is
2186  * a completion callback for this iocb; else the function will free the
2187  * iocb object.  The hbalock is asserted held in the code path calling
2188  * this routine.
2189  **/
2190 static void
2191 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2192 		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
2193 {
2194 	/*
2195 	 * Set up an iotag
2196 	 */
2197 	nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
2198 
2199 
2200 	if (pring->ringno == LPFC_ELS_RING) {
2201 		lpfc_debugfs_slow_ring_trc(phba,
2202 			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
2203 			*(((uint32_t *) &nextiocb->iocb) + 4),
2204 			*(((uint32_t *) &nextiocb->iocb) + 6),
2205 			*(((uint32_t *) &nextiocb->iocb) + 7));
2206 	}
2207 
2208 	/*
2209 	 * Issue iocb command to adapter
2210 	 */
2211 	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
2212 	wmb();
2213 	pring->stats.iocb_cmd++;
2214 
2215 	/*
2216 	 * If there is no completion routine to call, we can release the
2217 	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
2218 	 * that have no rsp ring completion, cmd_cmpl MUST be NULL.
2219 	 */
2220 	if (nextiocb->cmd_cmpl)
2221 		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
2222 	else
2223 		__lpfc_sli_release_iocbq(phba, nextiocb);
2224 
2225 	/*
2226 	 * Let the HBA know what IOCB slot will be the next one the
2227 	 * driver will put a command into.
2228 	 */
2229 	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
2230 	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
2231 }
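/*
 * Illustrative sketch (not driver code): the submit path above follows the
 * standard publish/barrier/doorbell shape. All names below are hypothetical.
 */
#if 0
	memcpy(ring_slot, cmd, cmd_size);	/* 1. publish the command data */
	wmb();					/* 2. order data vs. put index */
	writel(next_idx, doorbell_reg);		/* 3. tell the HBA             */
	readl(doorbell_reg);			/* 4. optionally flush the posted
						 *    write, as the full-ring
						 *    update below does        */
#endif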
2232 
2233 /**
2234  * lpfc_sli_update_full_ring - Update the chip attention register
2235  * @phba: Pointer to HBA context object.
2236  * @pring: Pointer to driver SLI ring object.
2237  *
2238  * The caller is not required to hold any lock for calling this function.
2239  * This function updates the chip attention bits for the ring to inform the
2240  * firmware that there is pending work to be done for this ring and to request
2241  * an interrupt when space becomes available in the ring. This function is
2242  * called when the driver is unable to post more iocbs to the ring due
2243  * to unavailability of space in the ring.
2244  **/
2245 static void
2246 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2247 {
2248 	int ringno = pring->ringno;
2249 
2250 	pring->flag |= LPFC_CALL_RING_AVAILABLE;
2251 
2252 	wmb();
2253 
2254 	/*
2255 	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
2256 	 * The HBA will tell us when an IOCB entry is available.
2257 	 */
2258 	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
2259 	readl(phba->CAregaddr); /* flush */
2260 
2261 	pring->stats.iocb_cmd_full++;
2262 }
2263 
2264 /**
2265  * lpfc_sli_update_ring - Update chip attention register
2266  * @phba: Pointer to HBA context object.
2267  * @pring: Pointer to driver SLI ring object.
2268  *
2269  * This function updates the chip attention register bit for the
2270  * given ring to inform HBA that there is more work to be done
2271  * in this ring. The caller is not required to hold any lock.
2272  **/
2273 static void
2274 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2275 {
2276 	int ringno = pring->ringno;
2277 
2278 	/*
2279 	 * Tell the HBA that there is work to do in this ring.
2280 	 */
2281 	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2282 		wmb();
2283 		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2284 		readl(phba->CAregaddr); /* flush */
2285 	}
2286 }
2287 
2288 /**
2289  * lpfc_sli_resume_iocb - Process iocbs in the txq
2290  * @phba: Pointer to HBA context object.
2291  * @pring: Pointer to driver SLI ring object.
2292  *
2293  * This function is called with hbalock held to post pending iocbs
2294  * in the txq to the firmware. This function is called when driver
2295  * detects space available in the ring.
2296  **/
2297 static void
2298 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2299 {
2300 	IOCB_t *iocb;
2301 	struct lpfc_iocbq *nextiocb;
2302 
2303 	lockdep_assert_held(&phba->hbalock);
2304 
2305 	/*
2306 	 * Check to see if:
2307 	 *  (a) there is anything on the txq to send
2308 	 *  (b) link is up
2309 	 *  (c) link attention events can be processed (fcp ring only)
2310 	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
2311 	 */
2312 
2313 	if (lpfc_is_link_up(phba) &&
2314 	    (!list_empty(&pring->txq)) &&
2315 	    (pring->ringno != LPFC_FCP_RING ||
2316 	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2317 
2318 		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2319 		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2320 			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2321 
2322 		if (iocb)
2323 			lpfc_sli_update_ring(phba, pring);
2324 		else
2325 			lpfc_sli_update_full_ring(phba, pring);
2326 	}
2327 
2328 	return;
2329 }
2330 
2331 /**
2332  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2333  * @phba: Pointer to HBA context object.
2334  * @hbqno: HBQ number.
2335  *
2336  * This function is called with hbalock held to get the next
2337  * available slot for the given HBQ. If there is free slot
2338  * available for the HBQ it will return pointer to the next available
2339  * HBQ entry else it will return NULL.
2340  **/
2341 static struct lpfc_hbq_entry *
2342 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2343 {
2344 	struct hbq_s *hbqp = &phba->hbqs[hbqno];
2345 
2346 	lockdep_assert_held(&phba->hbalock);
2347 
2348 	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2349 	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2350 		hbqp->next_hbqPutIdx = 0;
2351 
2352 	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2353 		uint32_t raw_index = phba->hbq_get[hbqno];
2354 		uint32_t getidx = le32_to_cpu(raw_index);
2355 
2356 		hbqp->local_hbqGetIdx = getidx;
2357 
2358 		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2359 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2360 					"1802 HBQ %d: local_hbqGetIdx "
2361 					"%u is >= hbqp->entry_count %u\n",
2362 					hbqno, hbqp->local_hbqGetIdx,
2363 					hbqp->entry_count);
2364 
2365 			phba->link_state = LPFC_HBA_ERROR;
2366 			return NULL;
2367 		}
2368 
2369 		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2370 			return NULL;
2371 	}
2372 
2373 	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2374 			hbqp->hbqPutIdx;
2375 }
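/*
 * Worked example (numbers hypothetical): with entry_count = 256,
 * hbqPutIdx = 255 and next_hbqPutIdx = 255, the advance above wraps
 * next_hbqPutIdx to 0; if local_hbqGetIdx is also 0 (even after re-reading
 * the port's get index), the HBQ is full and NULL is returned rather than
 * overwriting an unconsumed entry.
 */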
2376 
2377 /**
2378  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2379  * @phba: Pointer to HBA context object.
2380  *
2381  * This function is called with no lock held to free all the
2382  * hbq buffers while uninitializing the SLI interface. It also
2383  * frees the HBQ buffers returned by the firmware but not yet
2384  * processed by the upper layers.
2385  **/
2386 void
2387 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2388 {
2389 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2390 	struct hbq_dmabuf *hbq_buf;
2391 	unsigned long flags;
2392 	int i, hbq_count;
2393 
2394 	hbq_count = lpfc_sli_hbq_count();
2395 	/* Return all memory used by all HBQs */
2396 	spin_lock_irqsave(&phba->hbalock, flags);
2397 	for (i = 0; i < hbq_count; ++i) {
2398 		list_for_each_entry_safe(dmabuf, next_dmabuf,
2399 				&phba->hbqs[i].hbq_buffer_list, list) {
2400 			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2401 			list_del(&hbq_buf->dbuf.list);
2402 			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2403 		}
2404 		phba->hbqs[i].buffer_count = 0;
2405 	}
2406 
2407 	/* Mark the HBQs not in use */
2408 	phba->hbq_in_use = 0;
2409 	spin_unlock_irqrestore(&phba->hbalock, flags);
2410 }
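/*
 * Illustrative note: list_for_each_entry_safe() is required above because
 * each iteration deletes and frees the current entry; the _safe variant
 * caches the next pointer before the node can disappear. Minimal shape
 * (names hypothetical):
 */
#if 0
	list_for_each_entry_safe(pos, next, head, list) {
		list_del(&pos->list);
		free_fn(pos);
	}
#endif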
2411 
2412 /**
2413  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2414  * @phba: Pointer to HBA context object.
2415  * @hbqno: HBQ number.
2416  * @hbq_buf: Pointer to HBQ buffer.
2417  *
2418  * This function is called with the hbalock held to post a
2419  * hbq buffer to the firmware. If the function finds an empty
2420  * slot in the HBQ, it will post the buffer. The function returns
2421  * zero if it successfully posts the buffer, else it returns an
2422  * error code.
2423  **/
2424 static int
2425 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2426 			 struct hbq_dmabuf *hbq_buf)
2427 {
2428 	lockdep_assert_held(&phba->hbalock);
2429 	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2430 }
2431 
2432 /**
2433  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2434  * @phba: Pointer to HBA context object.
2435  * @hbqno: HBQ number.
2436  * @hbq_buf: Pointer to HBQ buffer.
2437  *
2438  * This function is called with the hbalock held to post a hbq buffer to the
2439  * firmware. If the function finds an empty slot in the HBQ, it will post the
2440  * buffer and place it on the hbq_buffer_list. The function will return zero if
2441  * it successfully posts the buffer, else it will return an error.
2442  **/
2443 static int
2444 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2445 			    struct hbq_dmabuf *hbq_buf)
2446 {
2447 	struct lpfc_hbq_entry *hbqe;
2448 	dma_addr_t physaddr = hbq_buf->dbuf.phys;
2449 
2450 	lockdep_assert_held(&phba->hbalock);
2451 	/* Get next HBQ entry slot to use */
2452 	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2453 	if (hbqe) {
2454 		struct hbq_s *hbqp = &phba->hbqs[hbqno];
2455 
2456 		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2457 		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2458 		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2459 		hbqe->bde.tus.f.bdeFlags = 0;
2460 		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2461 		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2462 				/* Sync SLIM */
2463 		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2464 		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2465 				/* flush */
2466 		readl(phba->hbq_put + hbqno);
2467 		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2468 		return 0;
2469 	}
2470 	return -ENOMEM;
2471 }
2472 
2473 /**
2474  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2475  * @phba: Pointer to HBA context object.
2476  * @hbqno: HBQ number.
2477  * @hbq_buf: Pointer to HBQ buffer.
2478  *
2479  * This function is called with the hbalock held to post an RQE to the SLI4
2480  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2481  * the hbq_buffer_list and return zero, otherwise it will return an error.
2482  **/
2483 static int
2484 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2485 			    struct hbq_dmabuf *hbq_buf)
2486 {
2487 	int rc;
2488 	struct lpfc_rqe hrqe;
2489 	struct lpfc_rqe drqe;
2490 	struct lpfc_queue *hrq;
2491 	struct lpfc_queue *drq;
2492 
2493 	if (hbqno != LPFC_ELS_HBQ)
2494 		return 1;
2495 	hrq = phba->sli4_hba.hdr_rq;
2496 	drq = phba->sli4_hba.dat_rq;
2497 
2498 	lockdep_assert_held(&phba->hbalock);
2499 	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2500 	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2501 	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2502 	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2503 	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2504 	if (rc < 0)
2505 		return rc;
2506 	hbq_buf->tag = (rc | (hbqno << 16));
2507 	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2508 	return 0;
2509 }
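/*
 * Illustrative sketch (standalone): the buffer tag set above packs the HBQ
 * number into the upper 16 bits and the RQE index into the lower 16 bits;
 * lpfc_sli_hbqbuf_find() later recovers the HBQ number with a 16-bit shift.
 * The helper names are hypothetical.
 */
static inline u32 example_hbq_tag_pack(u32 hbqno, u32 rqe_index)
{
	return rqe_index | (hbqno << 16);
}

static inline u32 example_hbq_tag_to_hbqno(u32 tag)
{
	return tag >> 16;
}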
2510 
2511 /* HBQ for ELS and CT traffic. */
2512 static struct lpfc_hbq_init lpfc_els_hbq = {
2513 	.rn = 1,
2514 	.entry_count = 256,
2515 	.mask_count = 0,
2516 	.profile = 0,
2517 	.ring_mask = (1 << LPFC_ELS_RING),
2518 	.buffer_count = 0,
2519 	.init_count = 40,
2520 	.add_count = 40,
2521 };
2522 
2523 /* Array of HBQs */
2524 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2525 	&lpfc_els_hbq,
2526 };
2527 
2528 /**
2529  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2530  * @phba: Pointer to HBA context object.
2531  * @hbqno: HBQ number.
2532  * @count: Number of HBQ buffers to be posted.
2533  *
2534  * This function is called with no lock held to post more hbq buffers to the
2535  * given HBQ. The function returns the number of HBQ buffers successfully
2536  * posted.
2537  **/
2538 static int
2539 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2540 {
2541 	uint32_t i, posted = 0;
2542 	unsigned long flags;
2543 	struct hbq_dmabuf *hbq_buffer;
2544 	LIST_HEAD(hbq_buf_list);
2545 	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2546 		return 0;
2547 
2548 	if ((phba->hbqs[hbqno].buffer_count + count) >
2549 	    lpfc_hbq_defs[hbqno]->entry_count)
2550 		count = lpfc_hbq_defs[hbqno]->entry_count -
2551 					phba->hbqs[hbqno].buffer_count;
2552 	if (!count)
2553 		return 0;
2554 	/* Allocate HBQ entries */
2555 	for (i = 0; i < count; i++) {
2556 		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2557 		if (!hbq_buffer)
2558 			break;
2559 		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2560 	}
2561 	/* Check whether HBQ is still in use */
2562 	spin_lock_irqsave(&phba->hbalock, flags);
2563 	if (!phba->hbq_in_use)
2564 		goto err;
2565 	while (!list_empty(&hbq_buf_list)) {
2566 		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2567 				 dbuf.list);
2568 		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2569 				      (hbqno << 16));
2570 		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2571 			phba->hbqs[hbqno].buffer_count++;
2572 			posted++;
2573 		} else
2574 			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2575 	}
2576 	spin_unlock_irqrestore(&phba->hbalock, flags);
2577 	return posted;
2578 err:
2579 	spin_unlock_irqrestore(&phba->hbalock, flags);
2580 	while (!list_empty(&hbq_buf_list)) {
2581 		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2582 				 dbuf.list);
2583 		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2584 	}
2585 	return 0;
2586 }
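/*
 * Worked example (buffer_count value hypothetical): with entry_count = 256
 * for the ELS HBQ, a current buffer_count of 230 and a request to post 40
 * more buffers, the clamp above trims count to 256 - 230 = 26 so the HBQ is
 * never populated beyond its entry count.
 */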
2587 
2588 /**
2589  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2590  * @phba: Pointer to HBA context object.
2591  * @qno: HBQ number.
2592  *
2593  * This function posts more buffers to the HBQ. This function
2594  * is called with no lock held. The function returns the number of HBQ entries
2595  * successfully posted.
2596  **/
2597 int
2598 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2599 {
2600 	if (phba->sli_rev == LPFC_SLI_REV4)
2601 		return 0;
2602 	else
2603 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2604 					 lpfc_hbq_defs[qno]->add_count);
2605 }
2606 
2607 /**
2608  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2609  * @phba: Pointer to HBA context object.
2610  * @qno:  HBQ queue number.
2611  *
2612  * This function is called from SLI initialization code path with
2613  * no lock held to post initial HBQ buffers to firmware. The
2614  * function returns the number of HBQ entries successfully posted.
2615  **/
2616 static int
2617 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2618 {
2619 	if (phba->sli_rev == LPFC_SLI_REV4)
2620 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2621 					lpfc_hbq_defs[qno]->entry_count);
2622 	else
2623 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2624 					 lpfc_hbq_defs[qno]->init_count);
2625 }
2626 
2627 /**
2628  * lpfc_sli_hbqbuf_get - Remove the first hbq buffer off of an hbq list
2629  * @rb_list: Pointer to the hbq buffer list.
2630  * This function removes the first hbq buffer on an hbq list and returns a
2631  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2632  **/
2633 static struct hbq_dmabuf *
2634 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2635 {
2636 	struct lpfc_dmabuf *d_buf;
2637 
2638 	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2639 	if (!d_buf)
2640 		return NULL;
2641 	return container_of(d_buf, struct hbq_dmabuf, dbuf);
2642 }
2643 
2644 /**
2645  * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2646  * @phba: Pointer to HBA context object.
2647  * @hrq: Pointer to the header receive queue.
2648  *
2649  * This function removes the first RQ buffer on an RQ buffer list and returns a
2650  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2651  **/
2652 static struct rqb_dmabuf *
2653 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2654 {
2655 	struct lpfc_dmabuf *h_buf;
2656 	struct lpfc_rqb *rqbp;
2657 
2658 	rqbp = hrq->rqbp;
2659 	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2660 			 struct lpfc_dmabuf, list);
2661 	if (!h_buf)
2662 		return NULL;
2663 	rqbp->buffer_count--;
2664 	return container_of(h_buf, struct rqb_dmabuf, hbuf);
2665 }
2666 
2667 /**
2668  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2669  * @phba: Pointer to HBA context object.
2670  * @tag: Tag of the hbq buffer.
2671  *
2672  * This function searches for the hbq buffer associated with the given tag in
2673  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2674  * otherwise it returns NULL.
2675  **/
2676 static struct hbq_dmabuf *
2677 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2678 {
2679 	struct lpfc_dmabuf *d_buf;
2680 	struct hbq_dmabuf *hbq_buf;
2681 	uint32_t hbqno;
2682 
2683 	hbqno = tag >> 16;
2684 	if (hbqno >= LPFC_MAX_HBQS)
2685 		return NULL;
2686 
2687 	spin_lock_irq(&phba->hbalock);
2688 	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2689 		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2690 		if (hbq_buf->tag == tag) {
2691 			spin_unlock_irq(&phba->hbalock);
2692 			return hbq_buf;
2693 		}
2694 	}
2695 	spin_unlock_irq(&phba->hbalock);
2696 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2697 			"1803 Bad hbq tag. Data: x%x x%x\n",
2698 			tag, phba->hbqs[tag >> 16].buffer_count);
2699 	return NULL;
2700 }
2701 
2702 /**
2703  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2704  * @phba: Pointer to HBA context object.
2705  * @hbq_buffer: Pointer to HBQ buffer.
2706  *
2707  * This function is called with the hbalock held. It gives back
2708  * the hbq buffer to the firmware. If the HBQ does not have space to
2709  * post the buffer, it will free the buffer.
2710  **/
2711 void
2712 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2713 {
2714 	uint32_t hbqno;
2715 
2716 	if (hbq_buffer) {
2717 		hbqno = hbq_buffer->tag >> 16;
2718 		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2719 			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2720 	}
2721 }
2722 
2723 /**
2724  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2725  * @mbxCommand: mailbox command code.
2726  *
2727  * This function is called by the mailbox event handler function to verify
2728  * that the completed mailbox command is a legitimate mailbox command. If the
2729  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2730  * and the mailbox event handler will take the HBA offline.
2731  **/
2732 static int
2733 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2734 {
2735 	uint8_t ret;
2736 
2737 	switch (mbxCommand) {
2738 	case MBX_LOAD_SM:
2739 	case MBX_READ_NV:
2740 	case MBX_WRITE_NV:
2741 	case MBX_WRITE_VPARMS:
2742 	case MBX_RUN_BIU_DIAG:
2743 	case MBX_INIT_LINK:
2744 	case MBX_DOWN_LINK:
2745 	case MBX_CONFIG_LINK:
2746 	case MBX_CONFIG_RING:
2747 	case MBX_RESET_RING:
2748 	case MBX_READ_CONFIG:
2749 	case MBX_READ_RCONFIG:
2750 	case MBX_READ_SPARM:
2751 	case MBX_READ_STATUS:
2752 	case MBX_READ_RPI:
2753 	case MBX_READ_XRI:
2754 	case MBX_READ_REV:
2755 	case MBX_READ_LNK_STAT:
2756 	case MBX_REG_LOGIN:
2757 	case MBX_UNREG_LOGIN:
2758 	case MBX_CLEAR_LA:
2759 	case MBX_DUMP_MEMORY:
2760 	case MBX_DUMP_CONTEXT:
2761 	case MBX_RUN_DIAGS:
2762 	case MBX_RESTART:
2763 	case MBX_UPDATE_CFG:
2764 	case MBX_DOWN_LOAD:
2765 	case MBX_DEL_LD_ENTRY:
2766 	case MBX_RUN_PROGRAM:
2767 	case MBX_SET_MASK:
2768 	case MBX_SET_VARIABLE:
2769 	case MBX_UNREG_D_ID:
2770 	case MBX_KILL_BOARD:
2771 	case MBX_CONFIG_FARP:
2772 	case MBX_BEACON:
2773 	case MBX_LOAD_AREA:
2774 	case MBX_RUN_BIU_DIAG64:
2775 	case MBX_CONFIG_PORT:
2776 	case MBX_READ_SPARM64:
2777 	case MBX_READ_RPI64:
2778 	case MBX_REG_LOGIN64:
2779 	case MBX_READ_TOPOLOGY:
2780 	case MBX_WRITE_WWN:
2781 	case MBX_SET_DEBUG:
2782 	case MBX_LOAD_EXP_ROM:
2783 	case MBX_ASYNCEVT_ENABLE:
2784 	case MBX_REG_VPI:
2785 	case MBX_UNREG_VPI:
2786 	case MBX_HEARTBEAT:
2787 	case MBX_PORT_CAPABILITIES:
2788 	case MBX_PORT_IOV_CONTROL:
2789 	case MBX_SLI4_CONFIG:
2790 	case MBX_SLI4_REQ_FTRS:
2791 	case MBX_REG_FCFI:
2792 	case MBX_UNREG_FCFI:
2793 	case MBX_REG_VFI:
2794 	case MBX_UNREG_VFI:
2795 	case MBX_INIT_VPI:
2796 	case MBX_INIT_VFI:
2797 	case MBX_RESUME_RPI:
2798 	case MBX_READ_EVENT_LOG_STATUS:
2799 	case MBX_READ_EVENT_LOG:
2800 	case MBX_SECURITY_MGMT:
2801 	case MBX_AUTH_PORT:
2802 	case MBX_ACCESS_VDATA:
2803 		ret = mbxCommand;
2804 		break;
2805 	default:
2806 		ret = MBX_SHUTDOWN;
2807 		break;
2808 	}
2809 	return ret;
2810 }
2811 
2812 /**
2813  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2814  * @phba: Pointer to HBA context object.
2815  * @pmboxq: Pointer to mailbox command.
2816  *
2817  * This is completion handler function for mailbox commands issued from
2818  * lpfc_sli_issue_mbox_wait function. This function is called by the
2819  * mailbox event handler function with no lock held. This function
2820  * will wake up thread waiting on the wait queue pointed by context1
2821  * of the mailbox.
2822  **/
2823 void
2824 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2825 {
2826 	unsigned long drvr_flag;
2827 	struct completion *pmbox_done;
2828 
2829 	/*
2830 	 * If pmbox_done is empty, the driver thread gave up waiting and
2831 	 * continued running.
2832 	 */
2833 	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2834 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
2835 	pmbox_done = pmboxq->ctx_u.mbox_wait;
2836 	if (pmbox_done)
2837 		complete(pmbox_done);
2838 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2839 	return;
2840 }
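/*
 * Illustrative sketch (not driver code): the handler above is the signaling
 * half of a struct completion handshake; the waiting half used by a
 * synchronous mailbox issuer has roughly this shape. The issue routine name
 * and the timeout are hypothetical.
 */
#if 0
	DECLARE_COMPLETION_ONSTACK(mbox_done);

	pmboxq->ctx_u.mbox_wait = &mbox_done;
	example_issue_mbox_nowait(phba, pmboxq);
	if (!wait_for_completion_timeout(&mbox_done,
					 msecs_to_jiffies(timeout_ms))) {
		/* Timed out: clear the pointer under the lock so a late
		 * completion sees NULL and skips the complete() call.
		 */
		spin_lock_irq(&phba->hbalock);
		pmboxq->ctx_u.mbox_wait = NULL;
		spin_unlock_irq(&phba->hbalock);
	}
#endif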
2841 
2842 static void
2843 __lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2844 {
2845 	unsigned long iflags;
2846 
2847 	if (ndlp->nlp_flag & NLP_RELEASE_RPI) {
2848 		lpfc_sli4_free_rpi(vport->phba, ndlp->nlp_rpi);
2849 		spin_lock_irqsave(&ndlp->lock, iflags);
2850 		ndlp->nlp_flag &= ~NLP_RELEASE_RPI;
2851 		ndlp->nlp_rpi = LPFC_RPI_ALLOC_ERROR;
2852 		spin_unlock_irqrestore(&ndlp->lock, iflags);
2853 	}
2854 	ndlp->nlp_flag &= ~NLP_UNREG_INP;
2855 }
2856 
2857 void
2858 lpfc_sli_rpi_release(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
2859 {
2860 	__lpfc_sli_rpi_release(vport, ndlp);
2861 }
2862 
2863 /**
2864  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2865  * @phba: Pointer to HBA context object.
2866  * @pmb: Pointer to mailbox object.
2867  *
2868  * This function is the default mailbox completion handler. It
2869  * frees the memory resources associated with the completed mailbox
2870  * command. If the completed command is a REG_LOGIN mailbox command,
2871  * this function will issue a UREG_LOGIN to re-claim the RPI.
2872  **/
2873 void
2874 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2875 {
2876 	struct lpfc_vport  *vport = pmb->vport;
2877 	struct lpfc_dmabuf *mp;
2878 	struct lpfc_nodelist *ndlp;
2879 	struct Scsi_Host *shost;
2880 	uint16_t rpi, vpi;
2881 	int rc;
2882 
2883 	/*
2884 	 * If a REG_LOGIN succeeded after the node was destroyed or the node
2885 	 * is in re-discovery, the driver needs to clean up the RPI.
2886 	 */
2887 	if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
2888 	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2889 	    !pmb->u.mb.mbxStatus) {
2890 		mp = pmb->ctx_buf;
2891 		if (mp) {
2892 			pmb->ctx_buf = NULL;
2893 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
2894 			kfree(mp);
2895 		}
2896 		rpi = pmb->u.mb.un.varWords[0];
2897 		vpi = pmb->u.mb.un.varRegLogin.vpi;
2898 		if (phba->sli_rev == LPFC_SLI_REV4)
2899 			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2900 		lpfc_unreg_login(phba, vpi, rpi, pmb);
2901 		pmb->vport = vport;
2902 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2903 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2904 		if (rc != MBX_NOT_FINISHED)
2905 			return;
2906 	}
2907 
2908 	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2909 		!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
2910 		!pmb->u.mb.mbxStatus) {
2911 		shost = lpfc_shost_from_vport(vport);
2912 		spin_lock_irq(shost->host_lock);
2913 		vport->vpi_state |= LPFC_VPI_REGISTERED;
2914 		spin_unlock_irq(shost->host_lock);
2915 		clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
2916 	}
2917 
2918 	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2919 		ndlp = pmb->ctx_ndlp;
2920 		lpfc_nlp_put(ndlp);
2921 	}
2922 
2923 	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2924 		ndlp = pmb->ctx_ndlp;
2925 
2926 		/* Check to see if there are any deferred events to process */
2927 		if (ndlp) {
2928 			lpfc_printf_vlog(
2929 				vport,
2930 				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2931 				"1438 UNREG cmpl deferred mbox x%x "
2932 				"on NPort x%x Data: x%x x%x x%px x%lx x%x\n",
2933 				ndlp->nlp_rpi, ndlp->nlp_DID,
2934 				ndlp->nlp_flag, ndlp->nlp_defer_did,
2935 				ndlp, vport->load_flag, kref_read(&ndlp->kref));
2936 
2937 			if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
2938 			    (ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING)) {
2939 				ndlp->nlp_flag &= ~NLP_UNREG_INP;
2940 				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2941 				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2942 			} else {
2943 				__lpfc_sli_rpi_release(vport, ndlp);
2944 			}
2945 
2946 			/* The unreg_login mailbox is complete and had a
2947 			 * reference that has to be released.  The PLOGI
2948 			 * got its own ref.
2949 			 */
2950 			lpfc_nlp_put(ndlp);
2951 			pmb->ctx_ndlp = NULL;
2952 		}
2953 	}
2954 
2955 	/* This nlp_put pairs with lpfc_sli4_resume_rpi */
2956 	if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2957 		ndlp = pmb->ctx_ndlp;
2958 		lpfc_nlp_put(ndlp);
2959 	}
2960 
2961 	/* Check security permission status on INIT_LINK mailbox command */
2962 	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2963 	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2964 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2965 				"2860 SLI authentication is required "
2966 				"for INIT_LINK but has not been done yet\n");
2967 
2968 	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2969 		lpfc_sli4_mbox_cmd_free(phba, pmb);
2970 	else
2971 		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2972 }
2973 /**
2974  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2975  * @phba: Pointer to HBA context object.
2976  * @pmb: Pointer to mailbox object.
2977  *
2978  * This function is the unreg rpi mailbox completion handler. It
2979  * frees the memory resources associated with the completed mailbox
2980  * command. An additional reference is put on the ndlp to prevent
2981  * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2982  * the unreg mailbox command completes, this routine puts the
2983  * reference back.
2984  *
2985  **/
2986 void
2987 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2988 {
2989 	struct lpfc_vport  *vport = pmb->vport;
2990 	struct lpfc_nodelist *ndlp;
2991 
2992 	ndlp = pmb->ctx_ndlp;
2993 	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2994 		if (phba->sli_rev == LPFC_SLI_REV4 &&
2995 		    (bf_get(lpfc_sli_intf_if_type,
2996 		     &phba->sli4_hba.sli_intf) >=
2997 		     LPFC_SLI_INTF_IF_TYPE_2)) {
2998 			if (ndlp) {
2999 				lpfc_printf_vlog(
3000 					 vport, KERN_INFO,
3001 					 LOG_MBOX | LOG_SLI | LOG_NODE,
3002 					 "0010 UNREG_LOGIN vpi:x%x "
3003 					 "rpi:%x DID:%x defer x%x flg x%x "
3004 					 "x%px\n",
3005 					 vport->vpi, ndlp->nlp_rpi,
3006 					 ndlp->nlp_DID, ndlp->nlp_defer_did,
3007 					 ndlp->nlp_flag,
3008 					 ndlp);
3009 				ndlp->nlp_flag &= ~NLP_LOGO_ACC;
3010 
3011 				/* Check to see if there are any deferred
3012 				 * events to process
3013 				 */
3014 				if ((ndlp->nlp_flag & NLP_UNREG_INP) &&
3015 				    (ndlp->nlp_defer_did !=
3016 				    NLP_EVT_NOTHING_PENDING)) {
3017 					lpfc_printf_vlog(
3018 						vport, KERN_INFO,
3019 						LOG_MBOX | LOG_SLI | LOG_NODE,
3020 						"4111 UNREG cmpl deferred "
3021 						"clr x%x on "
3022 						"NPort x%x Data: x%x x%px\n",
3023 						ndlp->nlp_rpi, ndlp->nlp_DID,
3024 						ndlp->nlp_defer_did, ndlp);
3025 					ndlp->nlp_flag &= ~NLP_UNREG_INP;
3026 					ndlp->nlp_defer_did =
3027 						NLP_EVT_NOTHING_PENDING;
3028 					lpfc_issue_els_plogi(
3029 						vport, ndlp->nlp_DID, 0);
3030 				} else {
3031 					__lpfc_sli_rpi_release(vport, ndlp);
3032 				}
3033 				lpfc_nlp_put(ndlp);
3034 			}
3035 		}
3036 	}
3037 
3038 	mempool_free(pmb, phba->mbox_mem_pool);
3039 }
3040 
3041 /**
3042  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3043  * @phba: Pointer to HBA context object.
3044  *
3045  * This function is called with no lock held. This function processes all
3046  * the completed mailbox commands and gives them to the upper layers. The
3047  * interrupt service routine processes the mailbox completion interrupt,
3048  * adds the completed mailbox commands to the mboxq_cmpl queue and signals
3049  * the worker thread. The worker thread calls lpfc_sli_handle_mb_event,
3050  * which returns the completed mailbox commands in the mboxq_cmpl queue to
3051  * the upper layers. This function hands the mailbox commands back by
3052  * calling the completion handler function of each mailbox.
3053  **/
3054 int
3055 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3056 {
3057 	MAILBOX_t *pmbox;
3058 	LPFC_MBOXQ_t *pmb;
3059 	int rc;
3060 	LIST_HEAD(cmplq);
3061 
3062 	phba->sli.slistat.mbox_event++;
3063 
3064 	/* Get all completed mailbox buffers into the cmplq */
3065 	spin_lock_irq(&phba->hbalock);
3066 	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3067 	spin_unlock_irq(&phba->hbalock);
3068 
3069 	/* Get a Mailbox buffer to setup mailbox commands for callback */
3070 	do {
3071 		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3072 		if (pmb == NULL)
3073 			break;
3074 
3075 		pmbox = &pmb->u.mb;
3076 
3077 		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3078 			if (pmb->vport) {
3079 				lpfc_debugfs_disc_trc(pmb->vport,
3080 					LPFC_DISC_TRC_MBOX_VPORT,
3081 					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3082 					(uint32_t)pmbox->mbxCommand,
3083 					pmbox->un.varWords[0],
3084 					pmbox->un.varWords[1]);
3085 			} else {
3087 				lpfc_debugfs_disc_trc(phba->pport,
3088 					LPFC_DISC_TRC_MBOX,
3089 					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
3090 					(uint32_t)pmbox->mbxCommand,
3091 					pmbox->un.varWords[0],
3092 					pmbox->un.varWords[1]);
3093 			}
3094 		}
3095 
3096 		/*
3097 		 * It is a fatal error if an unknown mailbox command completes.
3098 		 */
3099 		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3100 		    MBX_SHUTDOWN) {
3101 			/* Unknown mailbox command compl */
3102 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3103 					"(%d):0323 Unknown Mailbox command "
3104 					"x%x (x%x/x%x) Cmpl\n",
3105 					pmb->vport ? pmb->vport->vpi :
3106 					LPFC_VPORT_UNKNOWN,
3107 					pmbox->mbxCommand,
3108 					lpfc_sli_config_mbox_subsys_get(phba,
3109 									pmb),
3110 					lpfc_sli_config_mbox_opcode_get(phba,
3111 									pmb));
3112 			phba->link_state = LPFC_HBA_ERROR;
3113 			phba->work_hs = HS_FFER3;
3114 			lpfc_handle_eratt(phba);
3115 			continue;
3116 		}
3117 
3118 		if (pmbox->mbxStatus) {
3119 			phba->sli.slistat.mbox_stat_err++;
3120 			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3121 				/* Mbox cmd cmpl error - RETRYing */
3122 				lpfc_printf_log(phba, KERN_INFO,
3123 					LOG_MBOX | LOG_SLI,
3124 					"(%d):0305 Mbox cmd cmpl "
3125 					"error - RETRYing Data: x%x "
3126 					"(x%x/x%x) x%x x%x x%x\n",
3127 					pmb->vport ? pmb->vport->vpi :
3128 					LPFC_VPORT_UNKNOWN,
3129 					pmbox->mbxCommand,
3130 					lpfc_sli_config_mbox_subsys_get(phba,
3131 									pmb),
3132 					lpfc_sli_config_mbox_opcode_get(phba,
3133 									pmb),
3134 					pmbox->mbxStatus,
3135 					pmbox->un.varWords[0],
3136 					pmb->vport ? pmb->vport->port_state :
3137 					LPFC_VPORT_UNKNOWN);
3138 				pmbox->mbxStatus = 0;
3139 				pmbox->mbxOwner = OWN_HOST;
3140 				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3141 				if (rc != MBX_NOT_FINISHED)
3142 					continue;
3143 			}
3144 		}
3145 
3146 		/* Mailbox cmd <cmd> Cmpl <cmpl> */
3147 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3148 				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3149 				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3150 				"x%x x%x x%x\n",
3151 				pmb->vport ? pmb->vport->vpi : 0,
3152 				pmbox->mbxCommand,
3153 				lpfc_sli_config_mbox_subsys_get(phba, pmb),
3154 				lpfc_sli_config_mbox_opcode_get(phba, pmb),
3155 				pmb->mbox_cmpl,
3156 				*((uint32_t *) pmbox),
3157 				pmbox->un.varWords[0],
3158 				pmbox->un.varWords[1],
3159 				pmbox->un.varWords[2],
3160 				pmbox->un.varWords[3],
3161 				pmbox->un.varWords[4],
3162 				pmbox->un.varWords[5],
3163 				pmbox->un.varWords[6],
3164 				pmbox->un.varWords[7],
3165 				pmbox->un.varWords[8],
3166 				pmbox->un.varWords[9],
3167 				pmbox->un.varWords[10]);
3168 
3169 		if (pmb->mbox_cmpl)
3170 			pmb->mbox_cmpl(phba, pmb);
3171 	} while (1);
3172 	return 0;
3173 }
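/*
 * Illustrative note: list_splice_init() in the routine above moves the whole
 * shared completion queue onto a private list in one O(1) operation under
 * the lock, so the potentially slow per-mailbox completion handlers then run
 * without holding the lock. Minimal shape (names hypothetical):
 */
#if 0
	spin_lock_irq(&lock);
	list_splice_init(&shared_q, &local_q);	/* shared_q is left empty */
	spin_unlock_irq(&lock);
	/* ... process local_q without the lock ... */
#endif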
3174 
3175 /**
3176  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3177  * @phba: Pointer to HBA context object.
3178  * @pring: Pointer to driver SLI ring object.
3179  * @tag: buffer tag.
3180  *
3181  * This function is called with no lock held. When the QUE_BUFTAG_BIT
3182  * is set in the tag, the buffer was posted for a particular exchange and
3183  * the function will return the buffer without replacing it.
3184  * If the buffer is for unsolicited ELS or CT traffic, this function
3185  * returns the buffer and also posts another buffer to the firmware.
3186  **/
3187 static struct lpfc_dmabuf *
3188 lpfc_sli_get_buff(struct lpfc_hba *phba,
3189 		  struct lpfc_sli_ring *pring,
3190 		  uint32_t tag)
3191 {
3192 	struct hbq_dmabuf *hbq_entry;
3193 
3194 	if (tag & QUE_BUFTAG_BIT)
3195 		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3196 	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3197 	if (!hbq_entry)
3198 		return NULL;
3199 	return &hbq_entry->dbuf;
3200 }
3201 
3202 /**
3203  * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3204  *                              containing a NVME LS request.
3205  * @phba: pointer to lpfc hba data structure.
3206  * @piocb: pointer to the iocbq struct representing the sequence starting
3207  *        frame.
3208  *
3209  * This routine initially validates the NVME LS, validates there is a login
3210  * with the port that sent the LS, and then calls the appropriate nvme host
3211  * or target LS request handler.
3212  **/
3213 static void
3214 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3215 {
3216 	struct lpfc_nodelist *ndlp;
3217 	struct lpfc_dmabuf *d_buf;
3218 	struct hbq_dmabuf *nvmebuf;
3219 	struct fc_frame_header *fc_hdr;
3220 	struct lpfc_async_xchg_ctx *axchg = NULL;
3221 	char *failwhy = NULL;
3222 	uint32_t oxid, sid, did, fctl, size;
3223 	int ret = 1;
3224 
3225 	d_buf = piocb->cmd_dmabuf;
3226 
3227 	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3228 	fc_hdr = nvmebuf->hbuf.virt;
3229 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3230 	sid = sli4_sid_from_fc_hdr(fc_hdr);
3231 	did = sli4_did_from_fc_hdr(fc_hdr);
3232 	fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3233 		fc_hdr->fh_f_ctl[1] << 8 |
3234 		fc_hdr->fh_f_ctl[2]);
3235 	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3236 
3237 	lpfc_nvmeio_data(phba, "NVME LS    RCV: xri x%x sz %d from %06x\n",
3238 			 oxid, size, sid);
3239 
3240 	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
3241 		failwhy = "Driver Unloading";
3242 	} else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3243 		failwhy = "NVME FC4 Disabled";
3244 	} else if (!phba->nvmet_support && !phba->pport->localport) {
3245 		failwhy = "No Localport";
3246 	} else if (phba->nvmet_support && !phba->targetport) {
3247 		failwhy = "No Targetport";
3248 	} else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3249 		failwhy = "Bad NVME LS R_CTL";
3250 	} else if (unlikely((fctl & 0x00FF0000) !=
3251 			(FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3252 		failwhy = "Bad NVME LS F_CTL";
3253 	} else {
3254 		axchg = kzalloc(sizeof(*axchg), GFP_ATOMIC);
3255 		if (!axchg)
3256 			failwhy = "No CTX memory";
3257 	}
3258 
3259 	if (unlikely(failwhy)) {
3260 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3261 				"6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3262 				sid, oxid, failwhy);
3263 		goto out_fail;
3264 	}
3265 
3266 	/* validate the source of the LS is logged in */
3267 	ndlp = lpfc_findnode_did(phba->pport, sid);
3268 	if (!ndlp ||
3269 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3270 	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3271 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3272 				"6216 NVME Unsol rcv: No ndlp: "
3273 				"NPort_ID x%x oxid x%x\n",
3274 				sid, oxid);
3275 		goto out_fail;
3276 	}
3277 
3278 	axchg->phba = phba;
3279 	axchg->ndlp = ndlp;
3280 	axchg->size = size;
3281 	axchg->oxid = oxid;
3282 	axchg->sid = sid;
3283 	axchg->wqeq = NULL;
3284 	axchg->state = LPFC_NVME_STE_LS_RCV;
3285 	axchg->entry_cnt = 1;
3286 	axchg->rqb_buffer = (void *)nvmebuf;
3287 	axchg->hdwq = &phba->sli4_hba.hdwq[0];
3288 	axchg->payload = nvmebuf->dbuf.virt;
3289 	INIT_LIST_HEAD(&axchg->list);
3290 
3291 	if (phba->nvmet_support) {
3292 		ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3293 		spin_lock_irq(&ndlp->lock);
3294 		if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3295 			ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3296 			spin_unlock_irq(&ndlp->lock);
3297 
3298 			/* This reference is a single occurrence to hold the
3299 			 * node valid until the nvmet transport calls
3300 			 * host_release.
3301 			 */
3302 			if (!lpfc_nlp_get(ndlp))
3303 				goto out_fail;
3304 
3305 			lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3306 					"6206 NVMET unsol ls_req ndlp x%px "
3307 					"DID x%x xflags x%x refcnt %d\n",
3308 					ndlp, ndlp->nlp_DID,
3309 					ndlp->fc4_xpt_flags,
3310 					kref_read(&ndlp->kref));
3311 		} else {
3312 			spin_unlock_irq(&ndlp->lock);
3313 		}
3314 	} else {
3315 		ret = lpfc_nvme_handle_lsreq(phba, axchg);
3316 	}
3317 
3318 	/* if zero, LS was successfully handled. If non-zero, LS not handled */
3319 	if (!ret)
3320 		return;
3321 
3322 out_fail:
3323 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3324 			"6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3325 			"NVMe%s handler failed %d\n",
3326 			did, sid, oxid,
3327 			(phba->nvmet_support) ? "T" : "I", ret);
3328 
3329 	/* recycle receive buffer */
3330 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3331 
3332 	/* If start of new exchange, abort it */
3333 	if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3334 		ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3335 
3336 	if (ret)
3337 		kfree(axchg);
3338 }
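
/*
 * Illustrative sketch, not part of the driver build: how the 24-bit
 * F_CTL field is reassembled from the three header bytes and screened,
 * mirroring the validation in lpfc_nvme_unsol_ls_handler() above.
 * "hdr" is a hypothetical pointer to a received FC frame header.
 */
#if 0
	u32 fctl = hdr->fh_f_ctl[0] << 16 |
		   hdr->fh_f_ctl[1] << 8 |
		   hdr->fh_f_ctl[2];

	/* an NVME LS must arrive as one complete sequence */
	if ((fctl & 0x00FF0000) !=
	    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))
		goto drop;		/* reject the LS */
#endif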
3339 
3340 /**
3341  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3342  * @phba: Pointer to HBA context object.
3343  * @pring: Pointer to driver SLI ring object.
3344  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3345  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3346  * @fch_type: the type for the first frame of the sequence.
3347  *
3348  * This function is called with no lock held. This function uses the r_ctl and
3349  * type of the received sequence to find the correct callback function to call
3350  * to process the sequence.
3351  **/
3352 static int
3353 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3354 			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3355 			 uint32_t fch_type)
3356 {
3357 	int i;
3358 
3359 	switch (fch_type) {
3360 	case FC_TYPE_NVME:
3361 		lpfc_nvme_unsol_ls_handler(phba, saveq);
3362 		return 1;
3363 	default:
3364 		break;
3365 	}
3366 
3367 	/* Unsolicited responses */
3368 	if (pring->prt[0].profile) {
3369 		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3370 			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3371 									saveq);
3372 		return 1;
3373 	}
3374 	/* We must search, based on rctl/type,
3375 	 * for the right routine */
3376 	for (i = 0; i < pring->num_mask; i++) {
3377 		if ((pring->prt[i].rctl == fch_r_ctl) &&
3378 		    (pring->prt[i].type == fch_type)) {
3379 			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3380 				(pring->prt[i].lpfc_sli_rcv_unsol_event)
3381 						(phba, pring, saveq);
3382 			return 1;
3383 		}
3384 	}
3385 	return 0;
3386 }
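
/*
 * Illustrative sketch, not part of the driver build: the dispatch above
 * is driven by a per-ring mask table.  Ring setup code would populate it
 * roughly as below; the exact handler binding shown is an assumption.
 */
#if 0
	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
	pring->prt[0].type = FC_TYPE_ELS;
	pring->prt[0].lpfc_sli_rcv_unsol_event = lpfc_els_unsol_event;
	pring->num_mask = 1;
#endif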
3387 
3388 static void
3389 lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
3390 			struct lpfc_iocbq *saveq)
3391 {
3392 	IOCB_t *irsp;
3393 	union lpfc_wqe128 *wqe;
3394 	u16 i = 0;
3395 
3396 	irsp = &saveq->iocb;
3397 	wqe = &saveq->wqe;
3398 
3399 	/* Fill wcqe with the IOCB status fields */
3400 	bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
3401 	saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
3402 	saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
3403 	saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
3404 
3405 	/* Source ID */
3406 	bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
3407 
3408 	/* rx-id of the response frame */
3409 	bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
3410 
3411 	/* ox-id of the frame */
3412 	bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
3413 	       irsp->unsli3.rcvsli3.ox_id);
3414 
3415 	/* DID */
3416 	bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
3417 	       irsp->un.rcvels.remoteID);
3418 
3419 	/* unsol data len */
3420 	for (i = 0; i < irsp->ulpBdeCount; i++) {
3421 		struct lpfc_hbq_entry *hbqe = NULL;
3422 
3423 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3424 			if (i == 0) {
3425 				hbqe = (struct lpfc_hbq_entry *)
3426 					&irsp->un.ulpWord[0];
3427 				saveq->wqe.gen_req.bde.tus.f.bdeSize =
3428 					hbqe->bde.tus.f.bdeSize;
3429 			} else if (i == 1) {
3430 				hbqe = (struct lpfc_hbq_entry *)
3431 					&irsp->unsli3.sli3Words[4];
3432 				saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
3433 			}
3434 		}
3435 	}
3436 }
3437 
3438 /**
3439  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3440  * @phba: Pointer to HBA context object.
3441  * @pring: Pointer to driver SLI ring object.
3442  * @saveq: Pointer to the unsolicited iocb.
3443  *
3444  * This function is called with no lock held by the ring event handler
3445  * when there is an unsolicited iocb posted to the response ring by the
3446  * firmware. This function gets the buffer associated with the iocbs
3447  * and calls the event handler for the ring. This function handles both
3448  * qring buffers and hbq buffers.
3449  * When the function returns 1, the caller can free the iocb object;
3450  * otherwise, upper layer functions will free the iocb objects.
3451  **/
3452 static int
3453 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3454 			    struct lpfc_iocbq *saveq)
3455 {
3456 	IOCB_t *irsp;
3457 	WORD5 *w5p;
3458 	dma_addr_t paddr;
3459 	uint32_t Rctl, Type;
3460 	struct lpfc_iocbq *iocbq;
3461 	struct lpfc_dmabuf *dmzbuf;
3462 
3463 	irsp = &saveq->iocb;
3464 	saveq->vport = phba->pport;
3465 
3466 	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3467 		if (pring->lpfc_sli_rcv_async_status)
3468 			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3469 		else
3470 			lpfc_printf_log(phba,
3471 					KERN_WARNING,
3472 					LOG_SLI,
3473 					"0316 Ring %d handler: unexpected "
3474 					"ASYNC_STATUS iocb received evt_code "
3475 					"0x%x\n",
3476 					pring->ringno,
3477 					irsp->un.asyncstat.evt_code);
3478 		return 1;
3479 	}
3480 
3481 	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3482 	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3483 		if (irsp->ulpBdeCount > 0) {
3484 			dmzbuf = lpfc_sli_get_buff(phba, pring,
3485 						   irsp->un.ulpWord[3]);
3486 			lpfc_in_buf_free(phba, dmzbuf);
3487 		}
3488 
3489 		if (irsp->ulpBdeCount > 1) {
3490 			dmzbuf = lpfc_sli_get_buff(phba, pring,
3491 						   irsp->unsli3.sli3Words[3]);
3492 			lpfc_in_buf_free(phba, dmzbuf);
3493 		}
3494 
3495 		if (irsp->ulpBdeCount > 2) {
3496 			dmzbuf = lpfc_sli_get_buff(phba, pring,
3497 						   irsp->unsli3.sli3Words[7]);
3498 			lpfc_in_buf_free(phba, dmzbuf);
3499 		}
3500 
3501 		return 1;
3502 	}
3503 
3504 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3505 		if (irsp->ulpBdeCount != 0) {
3506 			saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
3507 						irsp->un.ulpWord[3]);
3508 			if (!saveq->cmd_dmabuf)
3509 				lpfc_printf_log(phba,
3510 					KERN_ERR,
3511 					LOG_SLI,
3512 					"0341 Ring %d Cannot find buffer for "
3513 					"an unsolicited iocb. tag 0x%x\n",
3514 					pring->ringno,
3515 					irsp->un.ulpWord[3]);
3516 		}
3517 		if (irsp->ulpBdeCount == 2) {
3518 			saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
3519 						irsp->unsli3.sli3Words[7]);
3520 			if (!saveq->bpl_dmabuf)
3521 				lpfc_printf_log(phba,
3522 					KERN_ERR,
3523 					LOG_SLI,
3524 					"0342 Ring %d Cannot find buffer for an"
3525 					" unsolicited iocb. tag 0x%x\n",
3526 					pring->ringno,
3527 					irsp->unsli3.sli3Words[7]);
3528 		}
3529 		list_for_each_entry(iocbq, &saveq->list, list) {
3530 			irsp = &iocbq->iocb;
3531 			if (irsp->ulpBdeCount != 0) {
3532 				iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
3533 							pring,
3534 							irsp->un.ulpWord[3]);
3535 				if (!iocbq->cmd_dmabuf)
3536 					lpfc_printf_log(phba,
3537 						KERN_ERR,
3538 						LOG_SLI,
3539 						"0343 Ring %d Cannot find "
3540 						"buffer for an unsolicited iocb"
3541 						". tag 0x%x\n", pring->ringno,
3542 						irsp->un.ulpWord[3]);
3543 			}
3544 			if (irsp->ulpBdeCount == 2) {
3545 				iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
3546 						pring,
3547 						irsp->unsli3.sli3Words[7]);
3548 				if (!iocbq->bpl_dmabuf)
3549 					lpfc_printf_log(phba,
3550 						KERN_ERR,
3551 						LOG_SLI,
3552 						"0344 Ring %d Cannot find "
3553 						"buffer for an unsolicited "
3554 						"iocb. tag 0x%x\n",
3555 						pring->ringno,
3556 						irsp->unsli3.sli3Words[7]);
3557 			}
3558 		}
3559 	} else {
3560 		paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3561 				 irsp->un.cont64[0].addrLow);
3562 		saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
3563 							     paddr);
3564 		if (irsp->ulpBdeCount == 2) {
3565 			paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3566 					 irsp->un.cont64[1].addrLow);
3567 			saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
3568 								   pring,
3569 								   paddr);
3570 		}
3571 	}
3572 
3573 	if (irsp->ulpBdeCount != 0 &&
3574 	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3575 	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3576 		int found = 0;
3577 
3578 		/* search continue save q for same XRI */
3579 		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3580 			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3581 				saveq->iocb.unsli3.rcvsli3.ox_id) {
3582 				list_add_tail(&saveq->list, &iocbq->list);
3583 				found = 1;
3584 				break;
3585 			}
3586 		}
3587 		if (!found)
3588 			list_add_tail(&saveq->clist,
3589 				      &pring->iocb_continue_saveq);
3590 
3591 		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3592 			list_del_init(&iocbq->clist);
3593 			saveq = iocbq;
3594 			irsp = &saveq->iocb;
3595 		} else {
3596 			return 0;
3597 		}
3598 	}
3599 	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3600 	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3601 	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3602 		Rctl = FC_RCTL_ELS_REQ;
3603 		Type = FC_TYPE_ELS;
3604 	} else {
3605 		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3606 		Rctl = w5p->hcsw.Rctl;
3607 		Type = w5p->hcsw.Type;
3608 
3609 		/* Firmware Workaround */
3610 		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3611 			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3612 			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3613 			Rctl = FC_RCTL_ELS_REQ;
3614 			Type = FC_TYPE_ELS;
3615 			w5p->hcsw.Rctl = Rctl;
3616 			w5p->hcsw.Type = Type;
3617 		}
3618 	}
3619 
3620 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3621 	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3622 	    irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3623 		if (irsp->unsli3.rcvsli3.vpi == 0xffff)
3624 			saveq->vport = phba->pport;
3625 		else
3626 			saveq->vport = lpfc_find_vport_by_vpid(phba,
3627 					       irsp->unsli3.rcvsli3.vpi);
3628 	}
3629 
3630 	/* Prepare WQE with Unsol frame */
3631 	lpfc_sli_prep_unsol_wqe(phba, saveq);
3632 
3633 	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3634 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3635 				"0313 Ring %d handler: unexpected Rctl x%x "
3636 				"Type x%x received\n",
3637 				pring->ringno, Rctl, Type);
3638 
3639 	return 1;
3640 }
3641 
3642 /**
3643  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3644  * @phba: Pointer to HBA context object.
3645  * @pring: Pointer to driver SLI ring object.
3646  * @prspiocb: Pointer to response iocb object.
3647  *
3648  * This function looks up the iocb_lookup table to get the command iocb
3649  * corresponding to the given response iocb using the iotag of the
3650  * response iocb. The driver calls this function with the hbalock held
3651  * for SLI3 ports or the ring lock held for SLI4 ports.
3652  * This function returns the command iocb object if it finds the command
3653  * iocb else returns NULL.
3654  **/
3655 static struct lpfc_iocbq *
3656 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3657 		      struct lpfc_sli_ring *pring,
3658 		      struct lpfc_iocbq *prspiocb)
3659 {
3660 	struct lpfc_iocbq *cmd_iocb = NULL;
3661 	u16 iotag;
3662 
3663 	if (phba->sli_rev == LPFC_SLI_REV4)
3664 		iotag = get_wqe_reqtag(prspiocb);
3665 	else
3666 		iotag = prspiocb->iocb.ulpIoTag;
3667 
3668 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3669 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
3670 		if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3671 			/* remove from txcmpl queue list */
3672 			list_del_init(&cmd_iocb->list);
3673 			cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3674 			pring->txcmplq_cnt--;
3675 			return cmd_iocb;
3676 		}
3677 	}
3678 
3679 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3680 			"0317 iotag x%x is out of "
3681 			"range: max iotag x%x\n",
3682 			iotag, phba->sli.last_iotag);
3683 	return NULL;
3684 }
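
/*
 * Illustrative sketch, not part of the driver build: the iotag
 * completion model behind the lookup above.  At submit time the driver
 * stamps the command with a small integer iotag and records it in a
 * flat array, so a response resolves back to its command in O(1).
 * The names below are hypothetical.
 */
#if 0
	/* submit path */
	cmdiocb->iotag = next_iotag++;
	iocbq_lookup[cmdiocb->iotag] = cmdiocb;

	/* completion path, as in lpfc_sli_iocbq_lookup() */
	cmdiocb = iocbq_lookup[rsp_iotag];
#endif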
3685 
3686 /**
3687  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3688  * @phba: Pointer to HBA context object.
3689  * @pring: Pointer to driver SLI ring object.
3690  * @iotag: IOCB tag.
3691  *
3692  * This function looks up the iocb_lookup table to get the command iocb
3693  * corresponding to the given iotag. The driver calls this function with
3694  * the ring lock held because this function is an SLI4 port only helper.
3695  * This function returns the command iocb object if it finds the command
3696  * iocb else returns NULL.
3697  **/
3698 static struct lpfc_iocbq *
3699 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3700 			     struct lpfc_sli_ring *pring, uint16_t iotag)
3701 {
3702 	struct lpfc_iocbq *cmd_iocb = NULL;
3703 
3704 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3705 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
3706 		if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3707 			/* remove from txcmpl queue list */
3708 			list_del_init(&cmd_iocb->list);
3709 			cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3710 			pring->txcmplq_cnt--;
3711 			return cmd_iocb;
3712 		}
3713 	}
3714 
3715 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3716 			"0372 iotag x%x lookup error: max iotag (x%x) "
3717 			"cmd_flag x%x\n",
3718 			iotag, phba->sli.last_iotag,
3719 			cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3720 	return NULL;
3721 }
3722 
3723 /**
3724  * lpfc_sli_process_sol_iocb - process solicited iocb completion
3725  * @phba: Pointer to HBA context object.
3726  * @pring: Pointer to driver SLI ring object.
3727  * @saveq: Pointer to the response iocb to be processed.
3728  *
3729  * This function is called by the ring event handler for non-fcp
3730  * rings when there is a new response iocb in the response ring.
3731  * The caller is not required to hold any locks. This function
3732  * gets the command iocb associated with the response iocb and
3733  * calls the completion handler for the command iocb. If there
3734  * is no completion handler, the function will free the resources
3735  * associated with command iocb. If the response iocb is for
3736  * an already aborted command iocb, the status of the completion
3737  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3738  * This function always returns 1.
3739  **/
3740 static int
3741 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3742 			  struct lpfc_iocbq *saveq)
3743 {
3744 	struct lpfc_iocbq *cmdiocbp;
3745 	unsigned long iflag;
3746 	u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3747 
3748 	if (phba->sli_rev == LPFC_SLI_REV4)
3749 		spin_lock_irqsave(&pring->ring_lock, iflag);
3750 	else
3751 		spin_lock_irqsave(&phba->hbalock, iflag);
3752 	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3753 	if (phba->sli_rev == LPFC_SLI_REV4)
3754 		spin_unlock_irqrestore(&pring->ring_lock, iflag);
3755 	else
3756 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3757 
3758 	ulp_command = get_job_cmnd(phba, saveq);
3759 	ulp_status = get_job_ulpstatus(phba, saveq);
3760 	ulp_word4 = get_job_word4(phba, saveq);
3761 	ulp_context = get_job_ulpcontext(phba, saveq);
3762 	if (phba->sli_rev == LPFC_SLI_REV4)
3763 		iotag = get_wqe_reqtag(saveq);
3764 	else
3765 		iotag = saveq->iocb.ulpIoTag;
3766 
3767 	if (cmdiocbp) {
3768 		ulp_command = get_job_cmnd(phba, cmdiocbp);
3769 		if (cmdiocbp->cmd_cmpl) {
3770 			/*
3771 			 * If an ELS command failed send an event to mgmt
3772 			 * application.
3773 			 */
3774 			if (ulp_status &&
3775 			     (pring->ringno == LPFC_ELS_RING) &&
3776 			     (ulp_command == CMD_ELS_REQUEST64_CR))
3777 				lpfc_send_els_failure_event(phba,
3778 					cmdiocbp, saveq);
3779 
3780 			/*
3781 			 * Post all ELS completions to the worker thread.
3782 			 * All other are passed to the completion callback.
3783 			 */
3784 			if (pring->ringno == LPFC_ELS_RING) {
3785 				if ((phba->sli_rev < LPFC_SLI_REV4) &&
3786 				    (cmdiocbp->cmd_flag &
3787 							LPFC_DRIVER_ABORTED)) {
3788 					spin_lock_irqsave(&phba->hbalock,
3789 							  iflag);
3790 					cmdiocbp->cmd_flag &=
3791 						~LPFC_DRIVER_ABORTED;
3792 					spin_unlock_irqrestore(&phba->hbalock,
3793 							       iflag);
3794 					saveq->iocb.ulpStatus =
3795 						IOSTAT_LOCAL_REJECT;
3796 					saveq->iocb.un.ulpWord[4] =
3797 						IOERR_SLI_ABORTED;
3798 
3799 					/* Firmware could still be in the middle
3800 					 * of DMAing the payload, so don't free
3801 					 * the data buffer until after a heartbeat.
3802 					 */
3803 					spin_lock_irqsave(&phba->hbalock,
3804 							  iflag);
3805 					saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3806 					spin_unlock_irqrestore(&phba->hbalock,
3807 							       iflag);
3808 				}
3809 				if (phba->sli_rev == LPFC_SLI_REV4) {
3810 					if (saveq->cmd_flag &
3811 					    LPFC_EXCHANGE_BUSY) {
3812 						/* Set cmdiocb flag for the
3813 						 * exchange busy so sgl (xri)
3814 						 * will not be released until
3815 						 * the abort xri is received
3816 						 * from hba.
3817 						 */
3818 						spin_lock_irqsave(
3819 							&phba->hbalock, iflag);
3820 						cmdiocbp->cmd_flag |=
3821 							LPFC_EXCHANGE_BUSY;
3822 						spin_unlock_irqrestore(
3823 							&phba->hbalock, iflag);
3824 					}
3825 					if (cmdiocbp->cmd_flag &
3826 					    LPFC_DRIVER_ABORTED) {
3827 						/*
3828 						 * Clear LPFC_DRIVER_ABORTED
3829 						 * bit in case it was driver
3830 						 * initiated abort.
3831 						 */
3832 						spin_lock_irqsave(
3833 							&phba->hbalock, iflag);
3834 						cmdiocbp->cmd_flag &=
3835 							~LPFC_DRIVER_ABORTED;
3836 						spin_unlock_irqrestore(
3837 							&phba->hbalock, iflag);
3838 						set_job_ulpstatus(cmdiocbp,
3839 								  IOSTAT_LOCAL_REJECT);
3840 						set_job_ulpword4(cmdiocbp,
3841 								 IOERR_ABORT_REQUESTED);
3842 						/*
3843 						 * For SLI4, irspiocb contains
3844 						 * NO_XRI in sli_xritag, it
3845 						 * shall not affect releasing
3846 						 * sgl (xri) process.
3847 						 */
3848 						set_job_ulpstatus(saveq,
3849 								  IOSTAT_LOCAL_REJECT);
3850 						set_job_ulpword4(saveq,
3851 								 IOERR_SLI_ABORTED);
3852 						spin_lock_irqsave(
3853 							&phba->hbalock, iflag);
3854 						saveq->cmd_flag |=
3855 							LPFC_DELAY_MEM_FREE;
3856 						spin_unlock_irqrestore(
3857 							&phba->hbalock, iflag);
3858 					}
3859 				}
3860 			}
3861 			cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3862 		} else
3863 			lpfc_sli_release_iocbq(phba, cmdiocbp);
3864 	} else {
3865 		/*
3866 		 * Unknown initiating command based on the response iotag.
3867 		 * This could be the case on the ELS ring because of
3868 		 * lpfc_els_abort().
3869 		 */
3870 		if (pring->ringno != LPFC_ELS_RING) {
3871 			/*
3872 			 * Ring <ringno> handler: unexpected completion IoTag
3873 			 * <IoTag>
3874 			 */
3875 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3876 					 "0322 Ring %d handler: "
3877 					 "unexpected completion IoTag x%x "
3878 					 "Data: x%x x%x x%x x%x\n",
3879 					 pring->ringno, iotag, ulp_status,
3880 					 ulp_word4, ulp_command, ulp_context);
3881 		}
3882 	}
3883 
3884 	return 1;
3885 }
3886 
3887 /**
3888  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3889  * @phba: Pointer to HBA context object.
3890  * @pring: Pointer to driver SLI ring object.
3891  *
3892  * This function is called from the iocb ring event handlers when the
3893  * put pointer is ahead of the get pointer for a ring. This function signals
3894  * an error attention condition to the worker thread and the worker
3895  * thread will transition the HBA to offline state.
3896  **/
3897 static void
3898 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3899 {
3900 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3901 	/*
3902 	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3903 	 * rsp ring <portRspMax>
3904 	 */
3905 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3906 			"0312 Ring %d handler: portRspPut %d "
3907 			"is bigger than rsp ring %d\n",
3908 			pring->ringno, le32_to_cpu(pgp->rspPutInx),
3909 			pring->sli.sli3.numRiocb);
3910 
3911 	phba->link_state = LPFC_HBA_ERROR;
3912 
3913 	/*
3914 	 * All error attention handlers are posted to
3915 	 * worker thread
3916 	 */
3917 	phba->work_ha |= HA_ERATT;
3918 	phba->work_hs = HS_FFER3;
3919 
3920 	lpfc_worker_wake_up(phba);
3921 
3922 	return;
3923 }
3924 
3925 /**
3926  * lpfc_poll_eratt - Error attention polling timer timeout handler
3927  * @t: Context to fetch pointer to address of HBA context object from.
3928  *
3929  * This function is invoked by the Error Attention polling timer when the
3930  * timer times out. It will check the SLI Error Attention register for
3931  * possible attention events. If so, it will post an Error Attention event
3932  * and wake up the worker thread to process it. Otherwise, it will set up the
3933  * Error Attention polling timer for the next poll.
3934  **/
3935 void lpfc_poll_eratt(struct timer_list *t)
3936 {
3937 	struct lpfc_hba *phba;
3938 	uint32_t eratt = 0;
3939 	uint64_t sli_intr, cnt;
3940 
3941 	phba = from_timer(phba, t, eratt_poll);
3942 	if (!test_bit(HBA_SETUP, &phba->hba_flag))
3943 		return;
3944 
3945 	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
3946 		return;
3947 
3948 	/* Here we will also keep track of interrupts per sec of the hba */
3949 	sli_intr = phba->sli.slistat.sli_intr;
3950 
3951 	if (phba->sli.slistat.sli_prev_intr > sli_intr)
3952 		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3953 			sli_intr);
3954 	else
3955 		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3956 
3957 	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
3958 	do_div(cnt, phba->eratt_poll_interval);
3959 	phba->sli.slistat.sli_ips = cnt;
3960 
3961 	phba->sli.slistat.sli_prev_intr = sli_intr;
3962 
3963 	/* Check chip HA register for error event */
3964 	eratt = lpfc_sli_check_eratt(phba);
3965 
3966 	if (eratt)
3967 		/* Tell the worker thread there is work to do */
3968 		lpfc_worker_wake_up(phba);
3969 	else
3970 		/* Restart the timer for next eratt poll */
3971 		mod_timer(&phba->eratt_poll,
3972 			  jiffies +
3973 			  msecs_to_jiffies(1000 * phba->eratt_poll_interval));
3974 	return;
3975 }
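
/*
 * Illustrative sketch, not part of the driver build: the wrap-aware
 * rate computation used above.  The delta of the free-running u64
 * interrupt counter is taken first, then divided by the poll interval
 * with do_div(), since 32-bit x86 has no native 64-bit division.
 */
#if 0
	u64 prev = phba->sli.slistat.sli_prev_intr;
	u64 now = phba->sli.slistat.sli_intr;
	u64 cnt;

	cnt = (prev > now) ? ((u64)(-1) - prev) + now : now - prev;
	do_div(cnt, phba->eratt_poll_interval);	/* interrupts per second */
#endif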
3976 
3977 
3978 /**
3979  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3980  * @phba: Pointer to HBA context object.
3981  * @pring: Pointer to driver SLI ring object.
3982  * @mask: Host attention register mask for this ring.
3983  *
3984  * This function is called from the interrupt context when there is a ring
3985  * event for the fcp ring. The caller does not hold any lock.
3986  * The function processes each response iocb in the response ring until it
3987  * finds an iocb with the LE bit set, chaining all the iocbs up to that one.
3988  * The function will call the completion handler of the command iocb
3989  * if the response iocb indicates a completion for a command iocb or it is
3990  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3991  * function if this is an unsolicited iocb.
3992  * This routine presumes LPFC_FCP_RING handling and doesn't bother
3993  * to check it explicitly.
3994  */
3995 int
3996 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3997 				struct lpfc_sli_ring *pring, uint32_t mask)
3998 {
3999 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
4000 	IOCB_t *irsp = NULL;
4001 	IOCB_t *entry = NULL;
4002 	struct lpfc_iocbq *cmdiocbq = NULL;
4003 	struct lpfc_iocbq rspiocbq;
4004 	uint32_t status;
4005 	uint32_t portRspPut, portRspMax;
4006 	int rc = 1;
4007 	lpfc_iocb_type type;
4008 	unsigned long iflag;
4009 	uint32_t rsp_cmpl = 0;
4010 
4011 	spin_lock_irqsave(&phba->hbalock, iflag);
4012 	pring->stats.iocb_event++;
4013 
4014 	/*
4015 	 * The next available response entry should never exceed the maximum
4016 	 * entries.  If it does, treat it as an adapter hardware error.
4017 	 */
4018 	portRspMax = pring->sli.sli3.numRiocb;
4019 	portRspPut = le32_to_cpu(pgp->rspPutInx);
4020 	if (unlikely(portRspPut >= portRspMax)) {
4021 		lpfc_sli_rsp_pointers_error(phba, pring);
4022 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4023 		return 1;
4024 	}
4025 	if (phba->fcp_ring_in_use) {
4026 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4027 		return 1;
4028 	} else
4029 		phba->fcp_ring_in_use = 1;
4030 
4031 	rmb();
4032 	while (pring->sli.sli3.rspidx != portRspPut) {
4033 		/*
4034 		 * Fetch an entry off the ring and copy it into a local data
4035 		 * structure.  The copy involves a byte-swap since the
4036 		 * network and PCI byte orders are different.
4037 		 */
4038 		entry = lpfc_resp_iocb(phba, pring);
4039 		phba->last_completion_time = jiffies;
4040 
4041 		if (++pring->sli.sli3.rspidx >= portRspMax)
4042 			pring->sli.sli3.rspidx = 0;
4043 
4044 		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
4045 				      (uint32_t *) &rspiocbq.iocb,
4046 				      phba->iocb_rsp_size);
4047 		INIT_LIST_HEAD(&(rspiocbq.list));
4048 		irsp = &rspiocbq.iocb;
4049 
4050 		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
4051 		pring->stats.iocb_rsp++;
4052 		rsp_cmpl++;
4053 
4054 		if (unlikely(irsp->ulpStatus)) {
4055 			/*
4056 			 * If resource errors reported from HBA, reduce
4057 			 * queuedepths of the SCSI device.
4058 			 */
4059 			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4060 			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4061 			     IOERR_NO_RESOURCES)) {
4062 				spin_unlock_irqrestore(&phba->hbalock, iflag);
4063 				phba->lpfc_rampdown_queue_depth(phba);
4064 				spin_lock_irqsave(&phba->hbalock, iflag);
4065 			}
4066 
4067 			/* Rsp ring <ringno> error: IOCB */
4068 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4069 					"0336 Rsp Ring %d error: IOCB Data: "
4070 					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
4071 					pring->ringno,
4072 					irsp->un.ulpWord[0],
4073 					irsp->un.ulpWord[1],
4074 					irsp->un.ulpWord[2],
4075 					irsp->un.ulpWord[3],
4076 					irsp->un.ulpWord[4],
4077 					irsp->un.ulpWord[5],
4078 					*(uint32_t *)&irsp->un1,
4079 					*((uint32_t *)&irsp->un1 + 1));
4080 		}
4081 
4082 		switch (type) {
4083 		case LPFC_ABORT_IOCB:
4084 		case LPFC_SOL_IOCB:
4085 			/*
4086 			 * Idle exchange closed via ABTS from port.  No iocb
4087 			 * resources need to be recovered.
4088 			 */
4089 			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
4090 				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4091 						"0333 IOCB cmd 0x%x"
4092 						" processed. Skipping"
4093 						" completion\n",
4094 						irsp->ulpCommand);
4095 				break;
4096 			}
4097 
4098 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
4099 							 &rspiocbq);
4100 			if (unlikely(!cmdiocbq))
4101 				break;
4102 			if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
4103 				cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
4104 			if (cmdiocbq->cmd_cmpl) {
4105 				spin_unlock_irqrestore(&phba->hbalock, iflag);
4106 				cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
4107 				spin_lock_irqsave(&phba->hbalock, iflag);
4108 			}
4109 			break;
4110 		case LPFC_UNSOL_IOCB:
4111 			spin_unlock_irqrestore(&phba->hbalock, iflag);
4112 			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4113 			spin_lock_irqsave(&phba->hbalock, iflag);
4114 			break;
4115 		default:
4116 			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4117 				char adaptermsg[LPFC_MAX_ADPTMSG];
4118 				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4119 				memcpy(&adaptermsg[0], (uint8_t *) irsp,
4120 				       MAX_MSG_DATA);
4121 				dev_warn(&((phba->pcidev)->dev),
4122 					 "lpfc%d: %s\n",
4123 					 phba->brd_no, adaptermsg);
4124 			} else {
4125 				/* Unknown IOCB command */
4126 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4127 						"0334 Unknown IOCB command "
4128 						"Data: x%x, x%x x%x x%x x%x\n",
4129 						type, irsp->ulpCommand,
4130 						irsp->ulpStatus,
4131 						irsp->ulpIoTag,
4132 						irsp->ulpContext);
4133 			}
4134 			break;
4135 		}
4136 
4137 		/*
4138 		 * The response IOCB has been processed.  Update the ring
4139 		 * pointer in SLIM.  If the port response put pointer has not
4140 		 * been updated, sync the pgp->rspPutInx and fetch the new port
4141 		 * response put pointer.
4142 		 */
4143 		writel(pring->sli.sli3.rspidx,
4144 			&phba->host_gp[pring->ringno].rspGetInx);
4145 
4146 		if (pring->sli.sli3.rspidx == portRspPut)
4147 			portRspPut = le32_to_cpu(pgp->rspPutInx);
4148 	}
4149 
4150 	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4151 		pring->stats.iocb_rsp_full++;
4152 		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4153 		writel(status, phba->CAregaddr);
4154 		readl(phba->CAregaddr);
4155 	}
4156 	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4157 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4158 		pring->stats.iocb_cmd_empty++;
4159 
4160 		/* Force update of the local copy of cmdGetInx */
4161 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4162 		lpfc_sli_resume_iocb(phba, pring);
4163 
4164 		if ((pring->lpfc_sli_cmd_available))
4165 			(pring->lpfc_sli_cmd_available) (phba, pring);
4166 
4167 	}
4168 
4169 	phba->fcp_ring_in_use = 0;
4170 	spin_unlock_irqrestore(&phba->hbalock, iflag);
4171 	return rc;
4172 }
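
/*
 * Illustrative sketch, not part of the driver build: the
 * producer/consumer protocol of the SLI3 response ring drained above.
 * The port advances a put index in host memory; the host consumes
 * entries with a wrapping get index and echoes it back through SLIM so
 * the port can reuse the slots.  "ring", "ring_size" and "consume" are
 * hypothetical.
 */
#if 0
	while (get != le32_to_cpu(pgp->rspPutInx)) {
		consume(&ring[get]);
		if (++get >= ring_size)
			get = 0;			/* wrap around */
		writel(get, &host_gp->rspGetInx);	/* ack to the port */
	}
#endif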
4173 
4174 /**
4175  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4176  * @phba: Pointer to HBA context object.
4177  * @pring: Pointer to driver SLI ring object.
4178  * @rspiocbp: Pointer to driver response IOCB object.
4179  *
4180  * This function is called from the worker thread when there is a slow-path
4181  * response IOCB to process. This function chains all the response iocbs until
4182  * seeing the iocb with the LE bit set. The function will call
4183  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4184  * completion of a command iocb. The function will call the
4185  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4186  * The function frees the resources or calls the completion handler if this
4187  * iocb is an abort completion. The function returns NULL when the response
4188  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4189  * this function shall chain the iocb on to the iocb_continueq and return the
4190  * response iocb passed in.
4191  **/
4192 static struct lpfc_iocbq *
4193 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4194 			struct lpfc_iocbq *rspiocbp)
4195 {
4196 	struct lpfc_iocbq *saveq;
4197 	struct lpfc_iocbq *cmdiocb;
4198 	struct lpfc_iocbq *next_iocb;
4199 	IOCB_t *irsp;
4200 	uint32_t free_saveq;
4201 	u8 cmd_type;
4202 	lpfc_iocb_type type;
4203 	unsigned long iflag;
4204 	u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4205 	u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4206 	u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4207 	int rc;
4208 
4209 	spin_lock_irqsave(&phba->hbalock, iflag);
4210 	/* First add the response iocb to the continueq list */
4211 	list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4212 	pring->iocb_continueq_cnt++;
4213 
4214 	/*
4215 	 * By default, the driver expects to free all resources
4216 	 * associated with this iocb completion.
4217 	 */
4218 	free_saveq = 1;
4219 	saveq = list_get_first(&pring->iocb_continueq,
4220 			       struct lpfc_iocbq, list);
4221 	list_del_init(&pring->iocb_continueq);
4222 	pring->iocb_continueq_cnt = 0;
4223 
4224 	pring->stats.iocb_rsp++;
4225 
4226 	/*
4227 	 * If resource errors reported from HBA, reduce
4228 	 * queuedepths of the SCSI device.
4229 	 */
4230 	if (ulp_status == IOSTAT_LOCAL_REJECT &&
4231 	    ((ulp_word4 & IOERR_PARAM_MASK) ==
4232 	     IOERR_NO_RESOURCES)) {
4233 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4234 		phba->lpfc_rampdown_queue_depth(phba);
4235 		spin_lock_irqsave(&phba->hbalock, iflag);
4236 	}
4237 
4238 	if (ulp_status) {
4239 		/* Rsp ring <ringno> error: IOCB */
4240 		if (phba->sli_rev < LPFC_SLI_REV4) {
4241 			irsp = &rspiocbp->iocb;
4242 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4243 					"0328 Rsp Ring %d error: ulp_status x%x "
4244 					"IOCB Data: "
4245 					"x%08x x%08x x%08x x%08x "
4246 					"x%08x x%08x x%08x x%08x "
4247 					"x%08x x%08x x%08x x%08x "
4248 					"x%08x x%08x x%08x x%08x\n",
4249 					pring->ringno, ulp_status,
4250 					get_job_ulpword(rspiocbp, 0),
4251 					get_job_ulpword(rspiocbp, 1),
4252 					get_job_ulpword(rspiocbp, 2),
4253 					get_job_ulpword(rspiocbp, 3),
4254 					get_job_ulpword(rspiocbp, 4),
4255 					get_job_ulpword(rspiocbp, 5),
4256 					*(((uint32_t *)irsp) + 6),
4257 					*(((uint32_t *)irsp) + 7),
4258 					*(((uint32_t *)irsp) + 8),
4259 					*(((uint32_t *)irsp) + 9),
4260 					*(((uint32_t *)irsp) + 10),
4261 					*(((uint32_t *)irsp) + 11),
4262 					*(((uint32_t *)irsp) + 12),
4263 					*(((uint32_t *)irsp) + 13),
4264 					*(((uint32_t *)irsp) + 14),
4265 					*(((uint32_t *)irsp) + 15));
4266 		} else {
4267 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4268 					"0321 Rsp Ring %d error: "
4269 					"IOCB Data: "
4270 					"x%x x%x x%x x%x\n",
4271 					pring->ringno,
4272 					rspiocbp->wcqe_cmpl.word0,
4273 					rspiocbp->wcqe_cmpl.total_data_placed,
4274 					rspiocbp->wcqe_cmpl.parameter,
4275 					rspiocbp->wcqe_cmpl.word3);
4276 		}
4277 	}
4278 
4279 
4280 	/*
4281 	 * Fetch the iocb command type and call the correct completion
4282 	 * routine. Solicited and Unsolicited IOCBs on the ELS ring
4283 	 * get freed back to the lpfc_iocb_list by the discovery
4284 	 * kernel thread.
4285 	 */
4286 	cmd_type = ulp_command & CMD_IOCB_MASK;
4287 	type = lpfc_sli_iocb_cmd_type(cmd_type);
4288 	switch (type) {
4289 	case LPFC_SOL_IOCB:
4290 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4291 		rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4292 		spin_lock_irqsave(&phba->hbalock, iflag);
4293 		break;
4294 	case LPFC_UNSOL_IOCB:
4295 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4296 		rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4297 		spin_lock_irqsave(&phba->hbalock, iflag);
4298 		if (!rc)
4299 			free_saveq = 0;
4300 		break;
4301 	case LPFC_ABORT_IOCB:
4302 		cmdiocb = NULL;
4303 		if (ulp_command != CMD_XRI_ABORTED_CX)
4304 			cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4305 							saveq);
4306 		if (cmdiocb) {
4307 			/* Call the specified completion routine */
4308 			if (cmdiocb->cmd_cmpl) {
4309 				spin_unlock_irqrestore(&phba->hbalock, iflag);
4310 				cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4311 				spin_lock_irqsave(&phba->hbalock, iflag);
4312 			} else {
4313 				__lpfc_sli_release_iocbq(phba, cmdiocb);
4314 			}
4315 		}
4316 		break;
4317 	case LPFC_UNKNOWN_IOCB:
4318 		if (ulp_command == CMD_ADAPTER_MSG) {
4319 			char adaptermsg[LPFC_MAX_ADPTMSG];
4320 
4321 			memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4322 			memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4323 			       MAX_MSG_DATA);
4324 			dev_warn(&((phba->pcidev)->dev),
4325 				 "lpfc%d: %s\n",
4326 				 phba->brd_no, adaptermsg);
4327 		} else {
4328 			/* Unknown command */
4329 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4330 					"0335 Unknown IOCB "
4331 					"command Data: x%x "
4332 					"x%x x%x x%x\n",
4333 					ulp_command,
4334 					ulp_status,
4335 					get_wqe_reqtag(rspiocbp),
4336 					get_job_ulpcontext(phba, rspiocbp));
4337 		}
4338 		break;
4339 	}
4340 
4341 	if (free_saveq) {
4342 		list_for_each_entry_safe(rspiocbp, next_iocb,
4343 					 &saveq->list, list) {
4344 			list_del_init(&rspiocbp->list);
4345 			__lpfc_sli_release_iocbq(phba, rspiocbp);
4346 		}
4347 		__lpfc_sli_release_iocbq(phba, saveq);
4348 	}
4349 	rspiocbp = NULL;
4350 	spin_unlock_irqrestore(&phba->hbalock, iflag);
4351 	return rspiocbp;
4352 }
4353 
4354 /**
4355  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4356  * @phba: Pointer to HBA context object.
4357  * @pring: Pointer to driver SLI ring object.
4358  * @mask: Host attention register mask for this ring.
4359  *
4360  * This routine wraps the actual slow_ring event process routine, looked up
4361  * through the API jump table function pointer in the lpfc_hba struct.
4362  **/
4363 void
4364 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4365 				struct lpfc_sli_ring *pring, uint32_t mask)
4366 {
4367 	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4368 }
4369 
4370 /**
4371  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4372  * @phba: Pointer to HBA context object.
4373  * @pring: Pointer to driver SLI ring object.
4374  * @mask: Host attention register mask for this ring.
4375  *
4376  * This function is called from the worker thread when there is a ring event
4377  * for non-fcp rings. The caller does not hold any lock. The function will
4378  * remove each response iocb in the response ring and calls the handle
4379  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4380  **/
4381 static void
4382 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4383 				   struct lpfc_sli_ring *pring, uint32_t mask)
4384 {
4385 	struct lpfc_pgp *pgp;
4386 	IOCB_t *entry;
4387 	IOCB_t *irsp = NULL;
4388 	struct lpfc_iocbq *rspiocbp = NULL;
4389 	uint32_t portRspPut, portRspMax;
4390 	unsigned long iflag;
4391 	uint32_t status;
4392 
4393 	pgp = &phba->port_gp[pring->ringno];
4394 	spin_lock_irqsave(&phba->hbalock, iflag);
4395 	pring->stats.iocb_event++;
4396 
4397 	/*
4398 	 * The next available response entry should never exceed the maximum
4399 	 * entries.  If it does, treat it as an adapter hardware error.
4400 	 */
4401 	portRspMax = pring->sli.sli3.numRiocb;
4402 	portRspPut = le32_to_cpu(pgp->rspPutInx);
4403 	if (portRspPut >= portRspMax) {
4404 		/*
4405 		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4406 		 * rsp ring <portRspMax>
4407 		 */
4408 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4409 				"0303 Ring %d handler: portRspPut %d "
4410 				"is bigger than rsp ring %d\n",
4411 				pring->ringno, portRspPut, portRspMax);
4412 
4413 		phba->link_state = LPFC_HBA_ERROR;
4414 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4415 
4416 		phba->work_hs = HS_FFER3;
4417 		lpfc_handle_eratt(phba);
4418 
4419 		return;
4420 	}
4421 
4422 	rmb();
4423 	while (pring->sli.sli3.rspidx != portRspPut) {
4424 		/*
4425 		 * Build a completion list and call the appropriate handler.
4426 		 * The process is to get the next available response iocb, get
4427 		 * a free iocb from the list, copy the response data into the
4428 		 * free iocb, insert to the continuation list, and update the
4429 		 * next response index to slim.  This process makes response
4430 		 * iocb's in the ring available to DMA as fast as possible but
4431 		 * pays a penalty for a copy operation.  Since the iocb is
4432 		 * only 32 bytes, this penalty is considered small relative to
4433 		 * the PCI reads for register values and a slim write.  When
4434 		 * the ulpLe field is set, the entire Command has been
4435 		 * received.
4436 		 */
4437 		entry = lpfc_resp_iocb(phba, pring);
4438 
4439 		phba->last_completion_time = jiffies;
4440 		rspiocbp = __lpfc_sli_get_iocbq(phba);
4441 		if (rspiocbp == NULL) {
4442 			printk(KERN_ERR "%s: out of buffers! Failing "
4443 			       "completion.\n", __func__);
4444 			break;
4445 		}
4446 
4447 		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4448 				      phba->iocb_rsp_size);
4449 		irsp = &rspiocbp->iocb;
4450 
4451 		if (++pring->sli.sli3.rspidx >= portRspMax)
4452 			pring->sli.sli3.rspidx = 0;
4453 
4454 		if (pring->ringno == LPFC_ELS_RING) {
4455 			lpfc_debugfs_slow_ring_trc(phba,
4456 			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
4457 				*(((uint32_t *) irsp) + 4),
4458 				*(((uint32_t *) irsp) + 6),
4459 				*(((uint32_t *) irsp) + 7));
4460 		}
4461 
4462 		writel(pring->sli.sli3.rspidx,
4463 			&phba->host_gp[pring->ringno].rspGetInx);
4464 
4465 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4466 		/* Handle the response IOCB */
4467 		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4468 		spin_lock_irqsave(&phba->hbalock, iflag);
4469 
4470 		/*
4471 		 * If the port response put pointer has not been updated, sync
4472 		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4473 		 * response put pointer.
4474 		 */
4475 		if (pring->sli.sli3.rspidx == portRspPut) {
4476 			portRspPut = le32_to_cpu(pgp->rspPutInx);
4477 		}
4478 	} /* while (pring->sli.sli3.rspidx != portRspPut) */
4479 
4480 	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4481 		/* At least one response entry has been freed */
4482 		pring->stats.iocb_rsp_full++;
4483 		/* SET RxRE_RSP in Chip Att register */
4484 		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4485 		writel(status, phba->CAregaddr);
4486 		readl(phba->CAregaddr); /* flush */
4487 	}
4488 	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4489 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4490 		pring->stats.iocb_cmd_empty++;
4491 
4492 		/* Force update of the local copy of cmdGetInx */
4493 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4494 		lpfc_sli_resume_iocb(phba, pring);
4495 
4496 		if ((pring->lpfc_sli_cmd_available))
4497 			(pring->lpfc_sli_cmd_available) (phba, pring);
4498 
4499 	}
4500 
4501 	spin_unlock_irqrestore(&phba->hbalock, iflag);
4502 	return;
4503 }
4504 
4505 /**
4506  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4507  * @phba: Pointer to HBA context object.
4508  * @pring: Pointer to driver SLI ring object.
4509  * @mask: Host attention register mask for this ring.
4510  *
4511  * This function is called from the worker thread when there is a pending
4512  * ELS response iocb on the driver internal slow-path response iocb worker
4513  * queue. The caller does not hold any lock. The function will remove each
4514  * response iocb from the response worker queue and calls the handle
4515  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4516  **/
4517 static void
4518 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4519 				   struct lpfc_sli_ring *pring, uint32_t mask)
4520 {
4521 	struct lpfc_iocbq *irspiocbq;
4522 	struct hbq_dmabuf *dmabuf;
4523 	struct lpfc_cq_event *cq_event;
4524 	unsigned long iflag;
4525 	int count = 0;
4526 
4527 	clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
4528 	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4529 		/* Get the response iocb from the head of work queue */
4530 		spin_lock_irqsave(&phba->hbalock, iflag);
4531 		list_remove_head(&phba->sli4_hba.sp_queue_event,
4532 				 cq_event, struct lpfc_cq_event, list);
4533 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4534 
4535 		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4536 		case CQE_CODE_COMPL_WQE:
4537 			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4538 						 cq_event);
4539 			/* Translate ELS WCQE to response IOCBQ */
4540 			irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4541 								      irspiocbq);
4542 			if (irspiocbq)
4543 				lpfc_sli_sp_handle_rspiocb(phba, pring,
4544 							   irspiocbq);
4545 			count++;
4546 			break;
4547 		case CQE_CODE_RECEIVE:
4548 		case CQE_CODE_RECEIVE_V1:
4549 			dmabuf = container_of(cq_event, struct hbq_dmabuf,
4550 					      cq_event);
4551 			lpfc_sli4_handle_received_buffer(phba, dmabuf);
4552 			count++;
4553 			break;
4554 		default:
4555 			break;
4556 		}
4557 
4558 		/* Limit the number of events to 64 to avoid soft lockups */
4559 		if (count == 64)
4560 			break;
4561 	}
4562 }
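
/*
 * Illustrative sketch, not part of the driver build: the bounded
 * draining pattern used above.  Capping the events handled in one
 * invocation keeps a busy queue from monopolizing the worker thread and
 * tripping the soft-lockup detector; leftovers are handled on the next
 * wakeup.  The names below are hypothetical.
 */
#if 0
	int budget = 64;

	while (!list_empty(&queue) && budget--) {
		item = pop_head(&queue);
		handle(item);
	}
#endif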
4563 
4564 /**
4565  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4566  * @phba: Pointer to HBA context object.
4567  * @pring: Pointer to driver SLI ring object.
4568  *
4569  * This function aborts all iocbs in the given ring and frees all the iocb
4570  * objects in txq. This function issues an abort iocb for all the iocb commands
4571  * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
4572  * the return of this function. The caller is not required to hold any locks.
4573  **/
4574 void
4575 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4576 {
4577 	LIST_HEAD(tx_completions);
4578 	LIST_HEAD(txcmplq_completions);
4579 	struct lpfc_iocbq *iocb, *next_iocb;
4580 	int offline;
4581 
4582 	if (pring->ringno == LPFC_ELS_RING) {
4583 		lpfc_fabric_abort_hba(phba);
4584 	}
4585 	offline = pci_channel_offline(phba->pcidev);
4586 
4587 	/* Error everything on txq and txcmplq
4588 	 * First do the txq.
4589 	 */
4590 	if (phba->sli_rev >= LPFC_SLI_REV4) {
4591 		spin_lock_irq(&pring->ring_lock);
4592 		list_splice_init(&pring->txq, &tx_completions);
4593 		pring->txq_cnt = 0;
4594 
4595 		if (offline) {
4596 			list_splice_init(&pring->txcmplq,
4597 					 &txcmplq_completions);
4598 		} else {
4599 			/* Next issue ABTS for everything on the txcmplq */
4600 			list_for_each_entry_safe(iocb, next_iocb,
4601 						 &pring->txcmplq, list)
4602 				lpfc_sli_issue_abort_iotag(phba, pring,
4603 							   iocb, NULL);
4604 		}
4605 		spin_unlock_irq(&pring->ring_lock);
4606 	} else {
4607 		spin_lock_irq(&phba->hbalock);
4608 		list_splice_init(&pring->txq, &tx_completions);
4609 		pring->txq_cnt = 0;
4610 
4611 		if (offline) {
4612 			list_splice_init(&pring->txcmplq, &txcmplq_completions);
4613 		} else {
4614 			/* Next issue ABTS for everything on the txcmplq */
4615 			list_for_each_entry_safe(iocb, next_iocb,
4616 						 &pring->txcmplq, list)
4617 				lpfc_sli_issue_abort_iotag(phba, pring,
4618 							   iocb, NULL);
4619 		}
4620 		spin_unlock_irq(&phba->hbalock);
4621 	}
4622 
4623 	if (offline) {
4624 		/* Cancel all the IOCBs from the completions list */
4625 		lpfc_sli_cancel_iocbs(phba, &txcmplq_completions,
4626 				      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
4627 	} else {
4628 		/* Make sure HBA is alive */
4629 		lpfc_issue_hb_tmo(phba);
4630 	}
4631 	/* Cancel all the IOCBs from the completions list */
4632 	lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
4633 			      IOERR_SLI_ABORTED);
4634 }
4635 
4636 /**
4637  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4638  * @phba: Pointer to HBA context object.
4639  *
4640  * This function aborts all iocbs in FCP rings and frees all the iocb
4641  * objects in txq. This function issues an abort iocb for all the iocb commands
4642  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4643  * the return of this function. The caller is not required to hold any locks.
4644  **/
4645 void
4646 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4647 {
4648 	struct lpfc_sli *psli = &phba->sli;
4649 	struct lpfc_sli_ring  *pring;
4650 	uint32_t i;
4651 
4652 	/* Look on all the FCP Rings for the iotag */
4653 	if (phba->sli_rev >= LPFC_SLI_REV4) {
4654 		for (i = 0; i < phba->cfg_hdw_queue; i++) {
4655 			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4656 			lpfc_sli_abort_iocb_ring(phba, pring);
4657 		}
4658 	} else {
4659 		pring = &psli->sli3_ring[LPFC_FCP_RING];
4660 		lpfc_sli_abort_iocb_ring(phba, pring);
4661 	}
4662 }
4663 
4664 /**
4665  * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4666  * @phba: Pointer to HBA context object.
4667  *
4668  * This function flushes all iocbs in the IO ring and frees all the iocb
4669  * objects in txq and txcmplq. This function will not issue abort iocbs
4670  * for all the iocb commands in txcmplq, they will just be returned with
4671  * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4672  * slot has been permanently disabled.
4673  **/
4674 void
4675 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4676 {
4677 	LIST_HEAD(txq);
4678 	LIST_HEAD(txcmplq);
4679 	struct lpfc_sli *psli = &phba->sli;
4680 	struct lpfc_sli_ring  *pring;
4681 	uint32_t i;
4682 	struct lpfc_iocbq *piocb, *next_iocb;
4683 
4684 	/* Indicate the I/O queues are flushed */
4685 	set_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
4686 
4687 	/* Look on all the FCP Rings for the iotag */
4688 	if (phba->sli_rev >= LPFC_SLI_REV4) {
4689 		for (i = 0; i < phba->cfg_hdw_queue; i++) {
4690 			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4691 
4692 			spin_lock_irq(&pring->ring_lock);
4693 			/* Retrieve everything on txq */
4694 			list_splice_init(&pring->txq, &txq);
4695 			list_for_each_entry_safe(piocb, next_iocb,
4696 						 &pring->txcmplq, list)
4697 				piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4698 			/* Retrieve everything on the txcmplq */
4699 			list_splice_init(&pring->txcmplq, &txcmplq);
4700 			pring->txq_cnt = 0;
4701 			pring->txcmplq_cnt = 0;
4702 			spin_unlock_irq(&pring->ring_lock);
4703 
4704 			/* Flush the txq */
4705 			lpfc_sli_cancel_iocbs(phba, &txq,
4706 					      IOSTAT_LOCAL_REJECT,
4707 					      IOERR_SLI_DOWN);
4708 			/* Flush the txcmplq */
4709 			lpfc_sli_cancel_iocbs(phba, &txcmplq,
4710 					      IOSTAT_LOCAL_REJECT,
4711 					      IOERR_SLI_DOWN);
4712 			if (unlikely(pci_channel_offline(phba->pcidev)))
4713 				lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4714 		}
4715 	} else {
4716 		pring = &psli->sli3_ring[LPFC_FCP_RING];
4717 
4718 		spin_lock_irq(&phba->hbalock);
4719 		/* Retrieve everything on txq */
4720 		list_splice_init(&pring->txq, &txq);
4721 		list_for_each_entry_safe(piocb, next_iocb,
4722 					 &pring->txcmplq, list)
4723 			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4724 		/* Retrieve everything on the txcmplq */
4725 		list_splice_init(&pring->txcmplq, &txcmplq);
4726 		pring->txq_cnt = 0;
4727 		pring->txcmplq_cnt = 0;
4728 		spin_unlock_irq(&phba->hbalock);
4729 
4730 		/* Flush the txq */
4731 		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4732 				      IOERR_SLI_DOWN);
4733 		/* Flush the txcmplq */
4734 		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4735 				      IOERR_SLI_DOWN);
4736 	}
4737 }
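
/*
 * Illustrative sketch, not part of the driver build: the
 * splice-then-cancel pattern used by the flush and abort routines
 * above.  Entries are moved to a private list while the lock is held,
 * then completed with an error status after it is dropped, so the
 * completion callbacks never run under the ring lock.
 */
#if 0
	LIST_HEAD(work);

	spin_lock_irq(&pring->ring_lock);
	list_splice_init(&pring->txq, &work);
	spin_unlock_irq(&pring->ring_lock);

	lpfc_sli_cancel_iocbs(phba, &work, IOSTAT_LOCAL_REJECT,
			      IOERR_SLI_DOWN);
#endif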
4738 
4739 /**
4740  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4741  * @phba: Pointer to HBA context object.
4742  * @mask: Bit mask to be checked.
4743  *
4744  * This function reads the host status register and compares it
4745  * with the provided bit mask to check if the HBA completed
4746  * the restart. This function will wait in a loop for the
4747  * HBA to complete the restart. If the HBA does not restart within
4748  * 15 iterations, the function will reset the HBA again. The
4749  * function returns 1 when the HBA fails to restart; otherwise it
4750  * returns zero.
4751  **/
4752 static int
4753 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4754 {
4755 	uint32_t status;
4756 	int i = 0;
4757 	int retval = 0;
4758 
4759 	/* Read the HBA Host Status Register */
4760 	if (lpfc_readl(phba->HSregaddr, &status))
4761 		return 1;
4762 
4763 	set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
4764 
4765 	/*
4766 	 * Check status register every 10ms for 5 retries, then every
4767 	 * 500ms for 5, then every 2.5 sec for 5, then reset board and
4768 	 * check every 2.5 sec for 4 more.
4769 	 * Break out of the loop if errors occurred during init.
4770 	 */
4771 	while (((status & mask) != mask) &&
4772 	       !(status & HS_FFERM) &&
4773 	       i++ < 20) {
4774 
4775 		if (i <= 5)
4776 			msleep(10);
4777 		else if (i <= 10)
4778 			msleep(500);
4779 		else
4780 			msleep(2500);
4781 
4782 		if (i == 15) {
4783 				/* Do post */
4784 			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4785 			lpfc_sli_brdrestart(phba);
4786 		}
4787 		/* Read the HBA Host Status Register */
4788 		if (lpfc_readl(phba->HSregaddr, &status)) {
4789 			retval = 1;
4790 			break;
4791 		}
4792 	}
4793 
4794 	/* Check to see if any errors occurred during init */
4795 	if ((status & HS_FFERM) || (i >= 20)) {
4796 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4797 				"2751 Adapter failed to restart, "
4798 				"status reg x%x, FW Data: A8 x%x AC x%x\n",
4799 				status,
4800 				readl(phba->MBslimaddr + 0xa8),
4801 				readl(phba->MBslimaddr + 0xac));
4802 		phba->link_state = LPFC_HBA_ERROR;
4803 		retval = 1;
4804 	}
4805 
4806 	return retval;
4807 }
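
/*
 * Illustrative sketch, not part of the driver build: the escalating
 * poll performed above, with short sleeps first, longer ones later, and
 * one mid-course board restart before the loop gives up.  "ready()" is
 * hypothetical.
 */
#if 0
	for (i = 1; i <= 20 && !ready(); i++) {
		msleep(i <= 5 ? 10 : i <= 10 ? 500 : 2500);
		if (i == 15)
			lpfc_sli_brdrestart(phba);	/* one retry reset */
	}
#endif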
4808 
4809 /**
4810  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4811  * @phba: Pointer to HBA context object.
4812  * @mask: Bit mask to be checked.
4813  *
4814  * This function checks the host status register to check if the HBA is
4815  * ready. This function will wait in a loop for the HBA to be ready.
4816  * If the HBA is not ready, the function will reset the HBA PCI
4817  * function again. The function returns 1 when the HBA fails to be ready;
4818  * otherwise it returns zero.
4819  **/
4820 static int
4821 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4822 {
4823 	uint32_t status;
4824 	int retval = 0;
4825 
4826 	/* Read the HBA Host Status Register */
4827 	status = lpfc_sli4_post_status_check(phba);
4828 
4829 	if (status) {
4830 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4831 		lpfc_sli_brdrestart(phba);
4832 		status = lpfc_sli4_post_status_check(phba);
4833 	}
4834 
4835 	/* Check to see if any errors occurred during init */
4836 	if (status) {
4837 		phba->link_state = LPFC_HBA_ERROR;
4838 		retval = 1;
4839 	} else
4840 		phba->sli4_hba.intr_enable = 0;
4841 
4842 	clear_bit(HBA_SETUP, &phba->hba_flag);
4843 	return retval;
4844 }
4845 
4846 /**
4847  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4848  * @phba: Pointer to HBA context object.
4849  * @mask: Bit mask to be checked.
4850  *
4851  * This routine invokes the actual SLI3 or SLI4 hba readiness check
4852  * routine via the API jump table function pointer in the lpfc_hba struct.
4853  **/
4854 int
4855 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4856 {
4857 	return phba->lpfc_sli_brdready(phba, mask);
4858 }
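
/*
 * Illustrative sketch, not part of the driver: lpfc_sli_brdready() only
 * works once the jump-table entry has been wired up during adapter
 * initialization.  The helper below is hypothetical (the driver performs
 * the real assignment elsewhere during initialization, keyed off the PCI
 * device group) but shows the shape of that wiring.
 */
static inline void lpfc_example_wire_brdready(struct lpfc_hba *phba,
					      uint8_t dev_grp)
{
	/* Select the SLI-3 or SLI-4 readiness routine once, at init time */
	if (dev_grp == LPFC_PCI_DEV_LP)
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
	else
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
}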
4859 
4860 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4861 
4862 /**
4863  * lpfc_reset_barrier - Make HBA ready for HBA reset
4864  * @phba: Pointer to HBA context object.
4865  *
4866  * This function is called before resetting an HBA. This function is called
4867  * with hbalock held and requests HBA to quiesce DMAs before a reset.
4868  **/
4869 void lpfc_reset_barrier(struct lpfc_hba *phba)
4870 {
4871 	uint32_t __iomem *resp_buf;
4872 	uint32_t __iomem *mbox_buf;
4873 	volatile struct MAILBOX_word0 mbox;
4874 	uint32_t hc_copy, ha_copy, resp_data;
4875 	int  i;
4876 	uint8_t hdrtype;
4877 
4878 	lockdep_assert_held(&phba->hbalock);
4879 
4880 	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4881 	if (hdrtype != PCI_HEADER_TYPE_MFD ||
4882 	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4883 	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4884 		return;
4885 
4886 	/*
4887 	 * Tell the other part of the chip to suspend temporarily all
4888 	 * its DMA activity.
4889 	 */
4890 	resp_buf = phba->MBslimaddr;
4891 
4892 	/* Disable the error attention */
4893 	if (lpfc_readl(phba->HCregaddr, &hc_copy))
4894 		return;
4895 	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4896 	readl(phba->HCregaddr); /* flush */
4897 	phba->link_flag |= LS_IGNORE_ERATT;
4898 
4899 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
4900 		return;
4901 	if (ha_copy & HA_ERATT) {
4902 		/* Clear Chip error bit */
4903 		writel(HA_ERATT, phba->HAregaddr);
4904 		phba->pport->stopped = 1;
4905 	}
4906 
4907 	mbox.word0 = 0;
4908 	mbox.mbxCommand = MBX_KILL_BOARD;
4909 	mbox.mbxOwner = OWN_CHIP;
4910 
4911 	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4912 	mbox_buf = phba->MBslimaddr;
4913 	writel(mbox.word0, mbox_buf);
4914 
4915 	for (i = 0; i < 50; i++) {
4916 		if (lpfc_readl((resp_buf + 1), &resp_data))
4917 			return;
4918 		if (resp_data != ~(BARRIER_TEST_PATTERN))
4919 			mdelay(1);
4920 		else
4921 			break;
4922 	}
4923 	resp_data = 0;
4924 	if (lpfc_readl((resp_buf + 1), &resp_data))
4925 		return;
4926 	if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4927 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4928 		    phba->pport->stopped)
4929 			goto restore_hc;
4930 		else
4931 			goto clear_errat;
4932 	}
4933 
4934 	mbox.mbxOwner = OWN_HOST;
4935 	resp_data = 0;
4936 	for (i = 0; i < 500; i++) {
4937 		if (lpfc_readl(resp_buf, &resp_data))
4938 			return;
4939 		if (resp_data != mbox.word0)
4940 			mdelay(1);
4941 		else
4942 			break;
4943 	}
4944 
4945 clear_errat:
4946 
4947 	while (++i < 500) {
4948 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
4949 			return;
4950 		if (!(ha_copy & HA_ERATT))
4951 			mdelay(1);
4952 		else
4953 			break;
4954 	}
4955 
4956 	if (readl(phba->HAregaddr) & HA_ERATT) {
4957 		writel(HA_ERATT, phba->HAregaddr);
4958 		phba->pport->stopped = 1;
4959 	}
4960 
4961 restore_hc:
4962 	phba->link_flag &= ~LS_IGNORE_ERATT;
4963 	writel(hc_copy, phba->HCregaddr);
4964 	readl(phba->HCregaddr); /* flush */
4965 }
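
/*
 * Illustrative sketch, not part of the driver: lpfc_reset_barrier() above
 * repeats one bounded-poll idiom several times.  The hypothetical helper
 * below expresses that idiom: spin for at most @tries milliseconds until
 * the register value compares (un)equal to @val, returning 0 on success,
 * -EIO on a failed read, or -ETIMEDOUT when the budget is exhausted.
 */
static inline int lpfc_example_poll_reg(uint32_t __iomem *reg, uint32_t val,
					bool want_equal, int tries)
{
	uint32_t data;
	int i;

	for (i = 0; i < tries; i++) {
		if (lpfc_readl(reg, &data))
			return -EIO;
		if ((data == val) == want_equal)
			return 0;
		mdelay(1);	/* busy-wait, exactly as the barrier code does */
	}
	return -ETIMEDOUT;
}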
4966 
4967 /**
4968  * lpfc_sli_brdkill - Issue a kill_board mailbox command
4969  * @phba: Pointer to HBA context object.
4970  *
4971  * This function issues a kill_board mailbox command and waits for
4972  * the error attention interrupt. This function is called for stopping
4973  * the firmware processing. The caller is not required to hold any
4974  * locks. This function calls lpfc_hba_down_post function to free
4975  * any pending commands after the kill. The function returns 1 when it
4976  * fails to kill the board, otherwise it returns 0.
4977  **/
4978 int
4979 lpfc_sli_brdkill(struct lpfc_hba *phba)
4980 {
4981 	struct lpfc_sli *psli;
4982 	LPFC_MBOXQ_t *pmb;
4983 	uint32_t status;
4984 	uint32_t ha_copy;
4985 	int retval;
4986 	int i = 0;
4987 
4988 	psli = &phba->sli;
4989 
4990 	/* Kill HBA */
4991 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4992 			"0329 Kill HBA Data: x%x x%x\n",
4993 			phba->pport->port_state, psli->sli_flag);
4994 
4995 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4996 	if (!pmb)
4997 		return 1;
4998 
4999 	/* Disable the error attention */
5000 	spin_lock_irq(&phba->hbalock);
5001 	if (lpfc_readl(phba->HCregaddr, &status)) {
5002 		spin_unlock_irq(&phba->hbalock);
5003 		mempool_free(pmb, phba->mbox_mem_pool);
5004 		return 1;
5005 	}
5006 	status &= ~HC_ERINT_ENA;
5007 	writel(status, phba->HCregaddr);
5008 	readl(phba->HCregaddr); /* flush */
5009 	phba->link_flag |= LS_IGNORE_ERATT;
5010 	spin_unlock_irq(&phba->hbalock);
5011 
5012 	lpfc_kill_board(phba, pmb);
5013 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
5014 	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
5015 
5016 	if (retval != MBX_SUCCESS) {
5017 		if (retval != MBX_BUSY)
5018 			mempool_free(pmb, phba->mbox_mem_pool);
5019 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5020 				"2752 KILL_BOARD command failed retval %d\n",
5021 				retval);
5022 		spin_lock_irq(&phba->hbalock);
5023 		phba->link_flag &= ~LS_IGNORE_ERATT;
5024 		spin_unlock_irq(&phba->hbalock);
5025 		return 1;
5026 	}
5027 
5028 	spin_lock_irq(&phba->hbalock);
5029 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
5030 	spin_unlock_irq(&phba->hbalock);
5031 
5032 	mempool_free(pmb, phba->mbox_mem_pool);
5033 
5034 	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
5035 	 * attention every 100ms for 3 seconds. If we don't get ERATT after
5036 	 * 3 seconds we still set HBA_ERROR state because the status of the
5037 	 * board is now undefined.
5038 	 */
5039 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
5040 		return 1;
5041 	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
5042 		mdelay(100);
5043 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
5044 			return 1;
5045 	}
5046 
5047 	del_timer_sync(&psli->mbox_tmo);
5048 	if (ha_copy & HA_ERATT) {
5049 		writel(HA_ERATT, phba->HAregaddr);
5050 		phba->pport->stopped = 1;
5051 	}
5052 	spin_lock_irq(&phba->hbalock);
5053 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5054 	psli->mbox_active = NULL;
5055 	phba->link_flag &= ~LS_IGNORE_ERATT;
5056 	spin_unlock_irq(&phba->hbalock);
5057 
5058 	lpfc_hba_down_post(phba);
5059 	phba->link_state = LPFC_HBA_ERROR;
5060 
5061 	return ha_copy & HA_ERATT ? 0 : 1;
5062 }
5063 
5064 /**
5065  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
5066  * @phba: Pointer to HBA context object.
5067  *
5068  * This function resets the HBA by writing HC_INITFF to the control
5069  * register. After the HBA resets, this function resets all the iocb ring
5070  * indices. This function disables PCI layer parity checking during
5071  * the reset.
5072  * This function returns 0 on success and -EIO if the PCI read fails.
5073  * The caller is not required to hold any locks.
5074  **/
5075 int
5076 lpfc_sli_brdreset(struct lpfc_hba *phba)
5077 {
5078 	struct lpfc_sli *psli;
5079 	struct lpfc_sli_ring *pring;
5080 	uint16_t cfg_value;
5081 	int i;
5082 
5083 	psli = &phba->sli;
5084 
5085 	/* Reset HBA */
5086 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5087 			"0325 Reset HBA Data: x%x x%x\n",
5088 			(phba->pport) ? phba->pport->port_state : 0,
5089 			psli->sli_flag);
5090 
5091 	/* perform board reset */
5092 	phba->fc_eventTag = 0;
5093 	phba->link_events = 0;
5094 	set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
5095 	if (phba->pport) {
5096 		phba->pport->fc_myDID = 0;
5097 		phba->pport->fc_prevDID = 0;
5098 	}
5099 
5100 	/* Turn off parity checking and serr during the physical reset */
5101 	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5102 		return -EIO;
5103 
5104 	pci_write_config_word(phba->pcidev, PCI_COMMAND,
5105 			      (cfg_value &
5106 			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5107 
5108 	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5109 
5110 	/* Now toggle INITFF bit in the Host Control Register */
5111 	writel(HC_INITFF, phba->HCregaddr);
5112 	mdelay(1);
5113 	readl(phba->HCregaddr); /* flush */
5114 	writel(0, phba->HCregaddr);
5115 	readl(phba->HCregaddr); /* flush */
5116 
5117 	/* Restore PCI cmd register */
5118 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5119 
5120 	/* Initialize relevant SLI info */
5121 	for (i = 0; i < psli->num_rings; i++) {
5122 		pring = &psli->sli3_ring[i];
5123 		pring->flag = 0;
5124 		pring->sli.sli3.rspidx = 0;
5125 		pring->sli.sli3.next_cmdidx  = 0;
5126 		pring->sli.sli3.local_getidx = 0;
5127 		pring->sli.sli3.cmdidx = 0;
5128 		pring->missbufcnt = 0;
5129 	}
5130 
5131 	phba->link_state = LPFC_WARM_START;
5132 	return 0;
5133 }
5134 
5135 /**
5136  * lpfc_sli4_brdreset - Reset a sli-4 HBA
5137  * @phba: Pointer to HBA context object.
5138  *
5139  * This function resets a SLI4 HBA. It disables PCI layer parity
5140  * checking while it resets the device. The caller is not required to hold
5141  * any locks.
5142  *
5143  * This function returns 0 on success else returns negative error code.
5144  **/
5145 int
5146 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5147 {
5148 	struct lpfc_sli *psli = &phba->sli;
5149 	uint16_t cfg_value;
5150 	int rc = 0;
5151 
5152 	/* Reset HBA */
5153 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5154 			"0295 Reset HBA Data: x%x x%x x%lx\n",
5155 			phba->pport->port_state, psli->sli_flag,
5156 			phba->hba_flag);
5157 
5158 	/* perform board reset */
5159 	phba->fc_eventTag = 0;
5160 	phba->link_events = 0;
5161 	phba->pport->fc_myDID = 0;
5162 	phba->pport->fc_prevDID = 0;
5163 	clear_bit(HBA_SETUP, &phba->hba_flag);
5164 
5165 	spin_lock_irq(&phba->hbalock);
5166 	psli->sli_flag &= ~(LPFC_PROCESS_LA);
5167 	phba->fcf.fcf_flag = 0;
5168 	spin_unlock_irq(&phba->hbalock);
5169 
5170 	/* Now physically reset the device */
5171 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5172 			"0389 Performing PCI function reset!\n");
5173 
5174 	/* Turn off parity checking and serr during the physical reset */
5175 	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5176 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5177 				"3205 PCI read Config failed\n");
5178 		return -EIO;
5179 	}
5180 
5181 	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5182 			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5183 
5184 	/* Perform FCoE PCI function reset before freeing queue memory */
5185 	rc = lpfc_pci_function_reset(phba);
5186 
5187 	/* Restore PCI cmd register */
5188 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5189 
5190 	return rc;
5191 }
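
/*
 * Worked example for the PCI_COMMAND masking used above: with a saved
 * cfg_value of, say, 0x0147 (I/O, memory, bus-master, parity and SERR
 * enabled), clearing PCI_COMMAND_PARITY (0x0040) and PCI_COMMAND_SERR
 * (0x0100) writes 0x0007 for the duration of the reset; the original
 * 0x0147 is written back afterwards.  The 0x0147 figure is illustrative.
 */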
5192 
5193 /**
5194  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5195  * @phba: Pointer to HBA context object.
5196  *
5197  * This function is called in the SLI initialization code path to
5198  * restart the HBA. The caller is not required to hold any lock.
5199  * This function writes MBX_RESTART mailbox command to the SLIM and
5200  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5201  * function to free any pending commands. The function enables
5202  * POST only during the first initialization and returns zero.
5203  * It does not guarantee that the MBX_RESTART mailbox command has
5204  * completed before it returns.
5205  **/
5206 static int
5207 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5208 {
5209 	volatile struct MAILBOX_word0 mb;
5210 	struct lpfc_sli *psli;
5211 	void __iomem *to_slim;
5212 
5213 	spin_lock_irq(&phba->hbalock);
5214 
5215 	psli = &phba->sli;
5216 
5217 	/* Restart HBA */
5218 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5219 			"0337 Restart HBA Data: x%x x%x\n",
5220 			(phba->pport) ? phba->pport->port_state : 0,
5221 			psli->sli_flag);
5222 
5223 	mb.word0 = 0;
5224 	mb.mbxCommand = MBX_RESTART;
5225 	mb.mbxHc = 1;
5226 
5227 	lpfc_reset_barrier(phba);
5228 
5229 	to_slim = phba->MBslimaddr;
5230 	writel(mb.word0, to_slim);
5231 	readl(to_slim); /* flush */
5232 
5233 	/* Only skip post after fc_ffinit is completed */
5234 	if (phba->pport && phba->pport->port_state)
5235 		mb.word0 = 1;	/* This is really setting up word1 */
5236 	else
5237 		mb.word0 = 0;	/* This is really setting up word1 */
5238 	to_slim = phba->MBslimaddr + sizeof (uint32_t);
5239 	writel(mb.word0, to_slim);
5240 	readl(to_slim); /* flush */
5241 
5242 	lpfc_sli_brdreset(phba);
5243 	if (phba->pport)
5244 		phba->pport->stopped = 0;
5245 	phba->link_state = LPFC_INIT_START;
5246 	phba->hba_flag = 0;
5247 	spin_unlock_irq(&phba->hbalock);
5248 
5249 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5250 	psli->stats_start = ktime_get_seconds();
5251 
5252 	/* Give the INITFF and Post time to settle. */
5253 	mdelay(100);
5254 
5255 	lpfc_hba_down_post(phba);
5256 
5257 	return 0;
5258 }
5259 
5260 /**
5261  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5262  * @phba: Pointer to HBA context object.
5263  *
5264  * This function is called in the SLI initialization code path to restart
5265  * a SLI4 HBA. The caller is not required to hold any lock.
5266  * At the end of the function, it calls lpfc_hba_down_post function to
5267  * free any pending commands.
5268  **/
5269 static int
5270 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5271 {
5272 	struct lpfc_sli *psli = &phba->sli;
5273 	int rc;
5274 
5275 	/* Restart HBA */
5276 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5277 			"0296 Restart HBA Data: x%x x%x\n",
5278 			phba->pport->port_state, psli->sli_flag);
5279 
5280 	rc = lpfc_sli4_brdreset(phba);
5281 	if (rc) {
5282 		phba->link_state = LPFC_HBA_ERROR;
5283 		goto hba_down_queue;
5284 	}
5285 
5286 	spin_lock_irq(&phba->hbalock);
5287 	phba->pport->stopped = 0;
5288 	phba->link_state = LPFC_INIT_START;
5289 	phba->hba_flag = 0;
5290 	/* Preserve FA-PWWN expectation */
5291 	phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
5292 	spin_unlock_irq(&phba->hbalock);
5293 
5294 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5295 	psli->stats_start = ktime_get_seconds();
5296 
5297 hba_down_queue:
5298 	lpfc_hba_down_post(phba);
5299 	lpfc_sli4_queue_destroy(phba);
5300 
5301 	return rc;
5302 }
5303 
5304 /**
5305  * lpfc_sli_brdrestart - Wrapper func for restarting hba
5306  * @phba: Pointer to HBA context object.
5307  *
5308  * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
5309  * API jump table function pointer from the lpfc_hba struct.
5310 **/
5311 int
5312 lpfc_sli_brdrestart(struct lpfc_hba *phba)
5313 {
5314 	return phba->lpfc_sli_brdrestart(phba);
5315 }
5316 
5317 /**
5318  * lpfc_sli_chipset_init - Wait for the HBA to become ready after a restart
5319  * @phba: Pointer to HBA context object.
5320  *
5321  * This function is called after a HBA restart to wait for successful
5322  * restart of the HBA. Successful restart of the HBA is indicated by
5323  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 150
5324  * iterations, the function will restart the HBA again. The function returns
5325  * zero if the HBA restarted successfully, else a negative error code.
5326  **/
5327 int
5328 lpfc_sli_chipset_init(struct lpfc_hba *phba)
5329 {
5330 	uint32_t status, i = 0;
5331 
5332 	/* Read the HBA Host Status Register */
5333 	if (lpfc_readl(phba->HSregaddr, &status))
5334 		return -EIO;
5335 
5336 	/* Check status register to see what current state is */
5337 	i = 0;
5338 	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5339 
5340 		/* Check every 10ms for 10 retries, then every 100ms for 90
5341 		 * retries, then every 1 sec for 50 retries, for a total of
5342 		 * ~60 seconds before resetting the board again, then check
5343 		 * every 1 sec for 50 more retries. The up-to-60-second wait
5344 		 * is required for the Falcon FIPS zeroization to complete;
5345 		 * resetting the board in between would restart zeroization,
5346 		 * further delaying board readiness.
5347 		 */
5348 		if (i++ >= 200) {
5349 			/* Adapter failed to init, timeout, status reg
5350 			   <status> */
5351 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5352 					"0436 Adapter failed to init, "
5353 					"timeout, status reg x%x, "
5354 					"FW Data: A8 x%x AC x%x\n", status,
5355 					readl(phba->MBslimaddr + 0xa8),
5356 					readl(phba->MBslimaddr + 0xac));
5357 			phba->link_state = LPFC_HBA_ERROR;
5358 			return -ETIMEDOUT;
5359 		}
5360 
5361 		/* Check to see if any errors occurred during init */
5362 		if (status & HS_FFERM) {
5363 			/* ERROR: During chipset initialization */
5364 			/* Adapter failed to init, chipset, status reg
5365 			   <status> */
5366 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5367 					"0437 Adapter failed to init, "
5368 					"chipset, status reg x%x, "
5369 					"FW Data: A8 x%x AC x%x\n", status,
5370 					readl(phba->MBslimaddr + 0xa8),
5371 					readl(phba->MBslimaddr + 0xac));
5372 			phba->link_state = LPFC_HBA_ERROR;
5373 			return -EIO;
5374 		}
5375 
5376 		if (i <= 10)
5377 			msleep(10);
5378 		else if (i <= 100)
5379 			msleep(100);
5380 		else
5381 			msleep(1000);
5382 
5383 		if (i == 150) {
5384 			/* Do post */
5385 			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5386 			lpfc_sli_brdrestart(phba);
5387 		}
5388 		/* Read the HBA Host Status Register */
5389 		if (lpfc_readl(phba->HSregaddr, &status))
5390 			return -EIO;
5391 	}
5392 
5393 	/* Check to see if any errors occurred during init */
5394 	if (status & HS_FFERM) {
5395 		/* ERROR: During chipset initialization */
5396 		/* Adapter failed to init, chipset, status reg <status> */
5397 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5398 				"0438 Adapter failed to init, chipset, "
5399 				"status reg x%x, "
5400 				"FW Data: A8 x%x AC x%x\n", status,
5401 				readl(phba->MBslimaddr + 0xa8),
5402 				readl(phba->MBslimaddr + 0xac));
5403 		phba->link_state = LPFC_HBA_ERROR;
5404 		return -EIO;
5405 	}
5406 
5407 	set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
5408 
5409 	/* Clear all interrupt enable conditions */
5410 	writel(0, phba->HCregaddr);
5411 	readl(phba->HCregaddr); /* flush */
5412 
5413 	/* setup host attn register */
5414 	writel(0xffffffff, phba->HAregaddr);
5415 	readl(phba->HAregaddr); /* flush */
5416 	return 0;
5417 }
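
/*
 * Worked timing for the poll ladder above: iterations 1-10 sleep 10 ms
 * (~0.1 s total), 11-100 sleep 100 ms (~9 s), and 101-200 sleep 1 s
 * each.  The board restart at i == 150 therefore fires after roughly
 * 0.1 + 9 + 50 = ~59 seconds, which is the "~60 seconds" the FIPS
 * zeroization comment refers to; up to 50 more one-second polls then
 * run before the -ETIMEDOUT return at i >= 200.
 */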
5418 
5419 /**
5420  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5421  *
5422  * This function calculates and returns the number of HBQs required to be
5423  * configured.
5424  **/
5425 int
5426 lpfc_sli_hbq_count(void)
5427 {
5428 	return ARRAY_SIZE(lpfc_hbq_defs);
5429 }
5430 
5431 /**
5432  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5433  *
5434  * This function adds the number of hbq entries in every HBQ to get
5435  * the total number of hbq entries required for the HBA and returns
5436  * the total count.
5437  **/
5438 static int
5439 lpfc_sli_hbq_entry_count(void)
5440 {
5441 	int  hbq_count = lpfc_sli_hbq_count();
5442 	int  count = 0;
5443 	int  i;
5444 
5445 	for (i = 0; i < hbq_count; ++i)
5446 		count += lpfc_hbq_defs[i]->entry_count;
5447 	return count;
5448 }
5449 
5450 /**
5451  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5452  *
5453  * This function calculates amount of memory required for all hbq entries
5454  * to be configured and returns the total memory required.
5455  **/
5456 int
5457 lpfc_sli_hbq_size(void)
5458 {
5459 	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5460 }
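
/*
 * Illustrative sketch, not part of the driver: a caller sizing one
 * contiguous DMA region for every configured HBQ entry would combine
 * lpfc_sli_hbq_size() with a coherent allocation, roughly as below.
 * The helper name is hypothetical.
 */
static inline void *lpfc_example_alloc_hbq_region(struct lpfc_hba *phba,
						  dma_addr_t *phys)
{
	/* One buffer large enough for all HBQ entries of all HBQs */
	return dma_alloc_coherent(&phba->pcidev->dev, lpfc_sli_hbq_size(),
				  phys, GFP_KERNEL);
}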
5461 
5462 /**
5463  * lpfc_sli_hbq_setup - configure and initialize HBQs
5464  * @phba: Pointer to HBA context object.
5465  *
5466  * This function is called during the SLI initialization to configure
5467  * all the HBQs and post buffers to the HBQ. The caller is not
5468  * required to hold any locks. This function will return zero if successful
5469  * else it will return negative error code.
5470  **/
5471 static int
5472 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5473 {
5474 	int  hbq_count = lpfc_sli_hbq_count();
5475 	LPFC_MBOXQ_t *pmb;
5476 	MAILBOX_t *pmbox;
5477 	uint32_t hbqno;
5478 	uint32_t hbq_entry_index;
5479 
5480 	/* Get a Mailbox buffer to set up mailbox
5481 	 * commands for HBA initialization.
5482 	 */
5483 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5484 
5485 	if (!pmb)
5486 		return -ENOMEM;
5487 
5488 	pmbox = &pmb->u.mb;
5489 
5490 	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
5491 	phba->link_state = LPFC_INIT_MBX_CMDS;
5492 	phba->hbq_in_use = 1;
5493 
5494 	hbq_entry_index = 0;
5495 	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5496 		phba->hbqs[hbqno].next_hbqPutIdx = 0;
5497 		phba->hbqs[hbqno].hbqPutIdx      = 0;
5498 		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
5499 		phba->hbqs[hbqno].entry_count =
5500 			lpfc_hbq_defs[hbqno]->entry_count;
5501 		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5502 			hbq_entry_index, pmb);
5503 		hbq_entry_index += phba->hbqs[hbqno].entry_count;
5504 
5505 		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5506 			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5507 			   mbxStatus <status>, ring <num> */
5508 
5509 			lpfc_printf_log(phba, KERN_ERR,
5510 					LOG_SLI | LOG_VPORT,
5511 					"1805 Adapter failed to init. "
5512 					"Data: x%x x%x x%x\n",
5513 					pmbox->mbxCommand,
5514 					pmbox->mbxStatus, hbqno);
5515 
5516 			phba->link_state = LPFC_HBA_ERROR;
5517 			mempool_free(pmb, phba->mbox_mem_pool);
5518 			return -ENXIO;
5519 		}
5520 	}
5521 	phba->hbq_count = hbq_count;
5522 
5523 	mempool_free(pmb, phba->mbox_mem_pool);
5524 
5525 	/* Initially populate or replenish the HBQs */
5526 	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5527 		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5528 	return 0;
5529 }
5530 
5531 /**
5532  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5533  * @phba: Pointer to HBA context object.
5534  *
5535  * This function is called during SLI4 initialization to configure
5536  * the receive buffer queue and post receive buffers to it. The caller
5537  * is not required to hold any locks. This function currently always
5538  * returns zero.
5539  **/
5540 static int
5541 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5542 {
5543 	phba->hbq_in_use = 1;
5544 	/*
5545 	 * Specific case when MDS diagnostics are enabled and supported.
5546 	 * The receive buffer count is halved to manage the incoming
5547 	 * traffic.
5548 	 */
5549 	if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5550 		phba->hbqs[LPFC_ELS_HBQ].entry_count =
5551 			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5552 	else
5553 		phba->hbqs[LPFC_ELS_HBQ].entry_count =
5554 			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5555 	phba->hbq_count = 1;
5556 	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5557 	/* Initially populate or replenish the HBQs */
5558 	return 0;
5559 }
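
/*
 * Worked example for the MDS truncation above: with a hypothetical
 * default LPFC_ELS_HBQ entry count of 256, enabling MDS diagnostics
 * posts only 256 >> 1 = 128 receive buffers, halving the amount of
 * incoming traffic the queue will accept at once.
 */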
5560 
5561 /**
5562  * lpfc_sli_config_port - Issue config port mailbox command
5563  * @phba: Pointer to HBA context object.
5564  * @sli_mode: sli mode - 2/3
5565  *
5566  * This function is called by the SLI initialization code path. It
5567  * restarts the HBA firmware and issues a config_port mailbox command
5568  * to configure the SLI interface in the SLI mode specified by the
5569  * sli_mode variable. The caller is not required to hold any
5570  * locks.
5571  * The function returns 0 if successful, else returns negative error
5572  * code.
5573  **/
5574 int
5575 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5576 {
5577 	LPFC_MBOXQ_t *pmb;
5578 	uint32_t resetcount = 0, rc = 0, done = 0;
5579 
5580 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5581 	if (!pmb) {
5582 		phba->link_state = LPFC_HBA_ERROR;
5583 		return -ENOMEM;
5584 	}
5585 
5586 	phba->sli_rev = sli_mode;
5587 	while (resetcount < 2 && !done) {
5588 		spin_lock_irq(&phba->hbalock);
5589 		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5590 		spin_unlock_irq(&phba->hbalock);
5591 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5592 		lpfc_sli_brdrestart(phba);
5593 		rc = lpfc_sli_chipset_init(phba);
5594 		if (rc)
5595 			break;
5596 
5597 		spin_lock_irq(&phba->hbalock);
5598 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5599 		spin_unlock_irq(&phba->hbalock);
5600 		resetcount++;
5601 
5602 		/* Call pre CONFIG_PORT mailbox command initialization.  A
5603 		 * value of 0 means the call was successful.  Any other
5604 		 * nonzero value is a failure, but if ERESTART is returned,
5605 		 * the driver may reset the HBA and try again.
5606 		 */
5607 		rc = lpfc_config_port_prep(phba);
5608 		if (rc == -ERESTART) {
5609 			phba->link_state = LPFC_LINK_UNKNOWN;
5610 			continue;
5611 		} else if (rc)
5612 			break;
5613 
5614 		phba->link_state = LPFC_INIT_MBX_CMDS;
5615 		lpfc_config_port(phba, pmb);
5616 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5617 		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5618 					LPFC_SLI3_HBQ_ENABLED |
5619 					LPFC_SLI3_CRP_ENABLED |
5620 					LPFC_SLI3_DSS_ENABLED);
5621 		if (rc != MBX_SUCCESS) {
5622 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5623 				"0442 Adapter failed to init, mbxCmd x%x "
5624 				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5625 				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5626 			spin_lock_irq(&phba->hbalock);
5627 			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5628 			spin_unlock_irq(&phba->hbalock);
5629 			rc = -ENXIO;
5630 		} else {
5631 			/* Allow asynchronous mailbox command to go through */
5632 			spin_lock_irq(&phba->hbalock);
5633 			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5634 			spin_unlock_irq(&phba->hbalock);
5635 			done = 1;
5636 
5637 			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5638 			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
5639 				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5640 					"3110 Port did not grant ASABT\n");
5641 		}
5642 	}
5643 	if (!done) {
5644 		rc = -EINVAL;
5645 		goto do_prep_failed;
5646 	}
5647 	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5648 		if (!pmb->u.mb.un.varCfgPort.cMA) {
5649 			rc = -ENXIO;
5650 			goto do_prep_failed;
5651 		}
5652 		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5653 			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5654 			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5655 			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5656 				phba->max_vpi : phba->max_vports;
5657 
5658 		} else
5659 			phba->max_vpi = 0;
5660 		if (pmb->u.mb.un.varCfgPort.gerbm)
5661 			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5662 		if (pmb->u.mb.un.varCfgPort.gcrp)
5663 			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5664 
5665 		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5666 		phba->port_gp = phba->mbox->us.s3_pgp.port;
5667 
5668 		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5669 			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5670 				phba->cfg_enable_bg = 0;
5671 				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5672 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5673 						"0443 Adapter did not grant "
5674 						"BlockGuard\n");
5675 			}
5676 		}
5677 	} else {
5678 		phba->hbq_get = NULL;
5679 		phba->port_gp = phba->mbox->us.s2.port;
5680 		phba->max_vpi = 0;
5681 	}
5682 do_prep_failed:
5683 	mempool_free(pmb, phba->mbox_mem_pool);
5684 	return rc;
5685 }
5686 
5687 
5688 /**
5689  * lpfc_sli_hba_setup - SLI initialization function
5690  * @phba: Pointer to HBA context object.
5691  *
5692  * This function is the main SLI initialization function. This function
5693  * is called by the HBA initialization code, HBA reset code and HBA
5694  * error attention handler code. Caller is not required to hold any
5695  * locks. This function issues config_port mailbox command to configure
5696  * the SLI, setup iocb rings and HBQ rings. In the end the function
5697  * calls the config_port_post function to issue init_link mailbox
5698  * command and to start the discovery. The function will return zero
5699  * if successful, else it will return negative error code.
5700  **/
5701 int
5702 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5703 {
5704 	uint32_t rc;
5705 	int  i;
5706 	int longs;
5707 
5708 	/* Enable ISR already does config_port because of config_msi mbx */
5709 	if (test_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag)) {
5710 		rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5711 		if (rc)
5712 			return -EIO;
5713 		clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
5714 	}
5715 	phba->fcp_embed_io = 0;	/* SLI4 FC support only */
5716 
5717 	if (phba->sli_rev == 3) {
5718 		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5719 		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5720 	} else {
5721 		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5722 		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5723 		phba->sli3_options = 0;
5724 	}
5725 
5726 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5727 			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
5728 			phba->sli_rev, phba->max_vpi);
5729 	rc = lpfc_sli_ring_map(phba);
5730 
5731 	if (rc)
5732 		goto lpfc_sli_hba_setup_error;
5733 
5734 	/* Initialize VPIs. */
5735 	if (phba->sli_rev == LPFC_SLI_REV3) {
5736 		/*
5737 		 * The VPI bitmask and physical ID array are allocated
5738 		 * and initialized once only - at driver load.  A port
5739 		 * reset doesn't need to reinitialize this memory.
5740 		 */
5741 		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5742 			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5743 			phba->vpi_bmask = kcalloc(longs,
5744 						  sizeof(unsigned long),
5745 						  GFP_KERNEL);
5746 			if (!phba->vpi_bmask) {
5747 				rc = -ENOMEM;
5748 				goto lpfc_sli_hba_setup_error;
5749 			}
5750 
5751 			phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5752 						sizeof(uint16_t),
5753 						GFP_KERNEL);
5754 			if (!phba->vpi_ids) {
5755 				kfree(phba->vpi_bmask);
5756 				rc = -ENOMEM;
5757 				goto lpfc_sli_hba_setup_error;
5758 			}
5759 			for (i = 0; i < phba->max_vpi; i++)
5760 				phba->vpi_ids[i] = i;
5761 		}
5762 	}
5763 
5764 	/* Init HBQs */
5765 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5766 		rc = lpfc_sli_hbq_setup(phba);
5767 		if (rc)
5768 			goto lpfc_sli_hba_setup_error;
5769 	}
5770 	spin_lock_irq(&phba->hbalock);
5771 	phba->sli.sli_flag |= LPFC_PROCESS_LA;
5772 	spin_unlock_irq(&phba->hbalock);
5773 
5774 	rc = lpfc_config_port_post(phba);
5775 	if (rc)
5776 		goto lpfc_sli_hba_setup_error;
5777 
5778 	return rc;
5779 
5780 lpfc_sli_hba_setup_error:
5781 	phba->link_state = LPFC_HBA_ERROR;
5782 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5783 			"0445 Firmware initialization failed\n");
5784 	return rc;
5785 }
5786 
5787 /**
5788  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5789  * @phba: Pointer to HBA context object.
5790  *
5791  * This function issues a dump mailbox command to read config region
5792  * 23, parses the records in the region, and populates the driver's
5793  * data structures.
5794  **/
5795 static int
5796 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5797 {
5798 	LPFC_MBOXQ_t *mboxq;
5799 	struct lpfc_dmabuf *mp;
5800 	struct lpfc_mqe *mqe;
5801 	uint32_t data_length;
5802 	int rc;
5803 
5804 	/* Program the default value of vlan_id and fc_map */
5805 	phba->valid_vlan = 0;
5806 	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5807 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5808 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5809 
5810 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5811 	if (!mboxq)
5812 		return -ENOMEM;
5813 
5814 	mqe = &mboxq->u.mqe;
5815 	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5816 		rc = -ENOMEM;
5817 		goto out_free_mboxq;
5818 	}
5819 
5820 	mp = mboxq->ctx_buf;
5821 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5822 
5823 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5824 			"(%d):2571 Mailbox cmd x%x Status x%x "
5825 			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5826 			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5827 			"CQ: x%x x%x x%x x%x\n",
5828 			mboxq->vport ? mboxq->vport->vpi : 0,
5829 			bf_get(lpfc_mqe_command, mqe),
5830 			bf_get(lpfc_mqe_status, mqe),
5831 			mqe->un.mb_words[0], mqe->un.mb_words[1],
5832 			mqe->un.mb_words[2], mqe->un.mb_words[3],
5833 			mqe->un.mb_words[4], mqe->un.mb_words[5],
5834 			mqe->un.mb_words[6], mqe->un.mb_words[7],
5835 			mqe->un.mb_words[8], mqe->un.mb_words[9],
5836 			mqe->un.mb_words[10], mqe->un.mb_words[11],
5837 			mqe->un.mb_words[12], mqe->un.mb_words[13],
5838 			mqe->un.mb_words[14], mqe->un.mb_words[15],
5839 			mqe->un.mb_words[16], mqe->un.mb_words[50],
5840 			mboxq->mcqe.word0,
5841 			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
5842 			mboxq->mcqe.trailer);
5843 
5844 	if (rc) {
5845 		rc = -EIO;
5846 		goto out_free_mboxq;
5847 	}
5848 	data_length = mqe->un.mb_words[5];
5849 	if (data_length > DMP_RGN23_SIZE) {
5850 		rc = -EIO;
5851 		goto out_free_mboxq;
5852 	}
5853 
5854 	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5855 	rc = 0;
5856 
5857 out_free_mboxq:
5858 	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
5859 	return rc;
5860 }
5861 
5862 /**
5863  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5864  * @phba: pointer to lpfc hba data structure.
5865  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5866  * @vpd: pointer to the memory to hold resulting port vpd data.
5867  * @vpd_size: On input, the number of bytes allocated to @vpd.
5868  *	      On output, the number of data bytes in @vpd.
5869  *
5870  * This routine executes a READ_REV SLI4 mailbox command.  In
5871  * addition, this routine gets the port vpd data.
5872  *
5873  * Return codes
5874  * 	0 - successful
5875  * 	-ENOMEM - could not allocate memory.
5876  **/
5877 static int
5878 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5879 		    uint8_t *vpd, uint32_t *vpd_size)
5880 {
5881 	int rc = 0;
5882 	uint32_t dma_size;
5883 	struct lpfc_dmabuf *dmabuf;
5884 	struct lpfc_mqe *mqe;
5885 
5886 	dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
5887 	if (!dmabuf)
5888 		return -ENOMEM;
5889 
5890 	/*
5891 	 * Get a DMA buffer for the vpd data resulting from the READ_REV
5892 	 * mailbox command.
5893 	 */
5894 	dma_size = *vpd_size;
5895 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5896 					  &dmabuf->phys, GFP_KERNEL);
5897 	if (!dmabuf->virt) {
5898 		kfree(dmabuf);
5899 		return -ENOMEM;
5900 	}
5901 
5902 	/*
5903 	 * The SLI4 implementation of READ_REV conflicts at word1,
5904 	 * bits 31:16 and SLI4 adds vpd functionality not present
5905 	 * in SLI3.  This code corrects the conflicts.
5906 	 */
5907 	lpfc_read_rev(phba, mboxq);
5908 	mqe = &mboxq->u.mqe;
5909 	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5910 	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5911 	mqe->un.read_rev.word1 &= 0x0000FFFF;
5912 	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5913 	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5914 
5915 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5916 	if (rc) {
5917 		dma_free_coherent(&phba->pcidev->dev, dma_size,
5918 				  dmabuf->virt, dmabuf->phys);
5919 		kfree(dmabuf);
5920 		return -EIO;
5921 	}
5922 
5923 	/*
5924 	 * The available vpd length cannot be bigger than the
5925 	 * DMA buffer passed to the port.  Catch the less than
5926 	 * case and update the caller's size.
5927 	 */
5928 	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5929 		*vpd_size = mqe->un.read_rev.avail_vpd_len;
5930 
5931 	memcpy(vpd, dmabuf->virt, *vpd_size);
5932 
5933 	dma_free_coherent(&phba->pcidev->dev, dma_size,
5934 			  dmabuf->virt, dmabuf->phys);
5935 	kfree(dmabuf);
5936 	return 0;
5937 }
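
/*
 * Illustrative sketch, not part of the driver: a typical caller hands
 * lpfc_sli4_read_rev() a scratch buffer and lets the routine shrink
 * *vpd_size down to the number of valid bytes.  The helper name and
 * the 1024-byte scratch size are hypothetical.
 */
static inline int lpfc_example_fetch_vpd(struct lpfc_hba *phba,
					 LPFC_MBOXQ_t *mboxq)
{
	uint32_t vpd_size = 1024;
	uint8_t *vpd;
	int rc;

	vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd)
		return -ENOMEM;
	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
	/* On success, vpd_size now holds the count of valid vpd bytes */
	kfree(vpd);
	return rc;
}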
5938 
5939 /**
5940  * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5941  * @phba: pointer to lpfc hba data structure.
5942  *
5943  * This routine retrieves the controller attributes of the SLI4 device
5944  * this PCI function is attached to.
5945  *
5946  * Return codes
5947  *      0 - successful
5948  *      otherwise - failed to retrieve controller attributes
5949  **/
5950 static int
5951 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5952 {
5953 	LPFC_MBOXQ_t *mboxq;
5954 	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5955 	struct lpfc_controller_attribute *cntl_attr;
5956 	void *virtaddr = NULL;
5957 	uint32_t alloclen, reqlen;
5958 	uint32_t shdr_status, shdr_add_status;
5959 	union lpfc_sli4_cfg_shdr *shdr;
5960 	int rc;
5961 
5962 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5963 	if (!mboxq)
5964 		return -ENOMEM;
5965 
5966 	/* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5967 	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5968 	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5969 			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5970 			LPFC_SLI4_MBX_NEMBED);
5971 
5972 	if (alloclen < reqlen) {
5973 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5974 				"3084 Allocated DMA memory size (%d) is "
5975 				"less than the requested DMA memory size "
5976 				"(%d)\n", alloclen, reqlen);
5977 		rc = -ENOMEM;
5978 		goto out_free_mboxq;
5979 	}
5980 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5981 	virtaddr = mboxq->sge_array->addr[0];
5982 	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5983 	shdr = &mbx_cntl_attr->cfg_shdr;
5984 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5985 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5986 	if (shdr_status || shdr_add_status || rc) {
5987 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5988 				"3085 Mailbox x%x (x%x/x%x) failed, "
5989 				"rc:x%x, status:x%x, add_status:x%x\n",
5990 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5991 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5992 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5993 				rc, shdr_status, shdr_add_status);
5994 		rc = -ENXIO;
5995 		goto out_free_mboxq;
5996 	}
5997 
5998 	cntl_attr = &mbx_cntl_attr->cntl_attr;
5999 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
6000 	phba->sli4_hba.lnk_info.lnk_tp =
6001 		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
6002 	phba->sli4_hba.lnk_info.lnk_no =
6003 		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
6004 	phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
6005 	phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
6006 
6007 	memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
6008 	strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
6009 		sizeof(phba->BIOSVersion));
6010 
6011 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6012 			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
6013 			"flash_id: x%02x, asic_rev: x%02x\n",
6014 			phba->sli4_hba.lnk_info.lnk_tp,
6015 			phba->sli4_hba.lnk_info.lnk_no,
6016 			phba->BIOSVersion, phba->sli4_hba.flash_id,
6017 			phba->sli4_hba.asic_rev);
6018 out_free_mboxq:
6019 	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6020 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
6021 	else
6022 		mempool_free(mboxq, phba->mbox_mem_pool);
6023 	return rc;
6024 }
6025 
6026 /**
6027  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
6028  * @phba: pointer to lpfc hba data structure.
6029  *
6030  * This routine retrieves the physical port name of the SLI4 device
6031  * this PCI function is attached to.
6032  *
6033  * Return codes
6034  *      0 - successful
6035  *      otherwise - failed to retrieve physical port name
6036  **/
6037 static int
6038 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
6039 {
6040 	LPFC_MBOXQ_t *mboxq;
6041 	struct lpfc_mbx_get_port_name *get_port_name;
6042 	uint32_t shdr_status, shdr_add_status;
6043 	union lpfc_sli4_cfg_shdr *shdr;
6044 	char cport_name = 0;
6045 	int rc;
6046 
6047 	/* We assume nothing at this point */
6048 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6049 	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
6050 
6051 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6052 	if (!mboxq)
6053 		return -ENOMEM;
6054 	/* obtain link type and link number via READ_CONFIG */
6055 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6056 	lpfc_sli4_read_config(phba);
6057 
6058 	if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
6059 		phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
6060 
6061 	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
6062 		goto retrieve_ppname;
6063 
6064 	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
6065 	rc = lpfc_sli4_get_ctl_attr(phba);
6066 	if (rc)
6067 		goto out_free_mboxq;
6068 
6069 retrieve_ppname:
6070 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6071 		LPFC_MBOX_OPCODE_GET_PORT_NAME,
6072 		sizeof(struct lpfc_mbx_get_port_name) -
6073 		sizeof(struct lpfc_sli4_cfg_mhdr),
6074 		LPFC_SLI4_MBX_EMBED);
6075 	get_port_name = &mboxq->u.mqe.un.get_port_name;
6076 	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
6077 	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
6078 	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
6079 		phba->sli4_hba.lnk_info.lnk_tp);
6080 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6081 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6082 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6083 	if (shdr_status || shdr_add_status || rc) {
6084 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6085 				"3087 Mailbox x%x (x%x/x%x) failed: "
6086 				"rc:x%x, status:x%x, add_status:x%x\n",
6087 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6088 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6089 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6090 				rc, shdr_status, shdr_add_status);
6091 		rc = -ENXIO;
6092 		goto out_free_mboxq;
6093 	}
6094 	switch (phba->sli4_hba.lnk_info.lnk_no) {
6095 	case LPFC_LINK_NUMBER_0:
6096 		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6097 				&get_port_name->u.response);
6098 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6099 		break;
6100 	case LPFC_LINK_NUMBER_1:
6101 		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6102 				&get_port_name->u.response);
6103 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6104 		break;
6105 	case LPFC_LINK_NUMBER_2:
6106 		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6107 				&get_port_name->u.response);
6108 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6109 		break;
6110 	case LPFC_LINK_NUMBER_3:
6111 		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6112 				&get_port_name->u.response);
6113 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6114 		break;
6115 	default:
6116 		break;
6117 	}
6118 
6119 	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6120 		phba->Port[0] = cport_name;
6121 		phba->Port[1] = '\0';
6122 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6123 				"3091 SLI get port name: %s\n", phba->Port);
6124 	}
6125 
6126 out_free_mboxq:
6127 	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6128 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
6129 	else
6130 		mempool_free(mboxq, phba->mbox_mem_pool);
6131 	return rc;
6132 }
6133 
6134 /**
6135  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6136  * @phba: pointer to lpfc hba data structure.
6137  *
6138  * This routine is called to explicitly arm the SLI4 device's completion and
6139  * event queues
6140  **/
6141 static void
6142 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6143 {
6144 	int qidx;
6145 	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6146 	struct lpfc_sli4_hdw_queue *qp;
6147 	struct lpfc_queue *eq;
6148 
6149 	sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6150 	sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6151 	if (sli4_hba->nvmels_cq)
6152 		sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6153 					   LPFC_QUEUE_REARM);
6154 
6155 	if (sli4_hba->hdwq) {
6156 		/* Loop thru all Hardware Queues */
6157 		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6158 			qp = &sli4_hba->hdwq[qidx];
6159 			/* ARM the corresponding CQ */
6160 			sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6161 						LPFC_QUEUE_REARM);
6162 		}
6163 
6164 		/* Loop thru all IRQ vectors */
6165 		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6166 			eq = sli4_hba->hba_eq_hdl[qidx].eq;
6167 			/* ARM the corresponding EQ */
6168 			sli4_hba->sli4_write_eq_db(phba, eq,
6169 						   0, LPFC_QUEUE_REARM);
6170 		}
6171 	}
6172 
6173 	if (phba->nvmet_support) {
6174 		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6175 			sli4_hba->sli4_write_cq_db(phba,
6176 				sli4_hba->nvmet_cqset[qidx], 0,
6177 				LPFC_QUEUE_REARM);
6178 		}
6179 	}
6180 }
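
/*
 * Illustrative sketch, not part of the driver: the same doorbell write
 * used above also rearms a single EQ after its events have been
 * serviced; popping zero entries with LPFC_QUEUE_REARM simply re-enables
 * the interrupt.  The helper name is hypothetical.
 */
static inline void lpfc_example_rearm_eq(struct lpfc_hba *phba,
					 struct lpfc_queue *eq)
{
	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
}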
6181 
6182 /**
6183  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6184  * @phba: Pointer to HBA context object.
6185  * @type: The resource extent type.
6186  * @extnt_count: buffer to hold port available extent count.
6187  * @extnt_size: buffer to hold element count per extent.
6188  *
6189  * This function calls the port and retrieves the number of available
6190  * extents and their size for a particular extent type.
6191  *
6192  * Returns: 0 if successful.  Nonzero otherwise.
6193  **/
6194 int
6195 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6196 			       uint16_t *extnt_count, uint16_t *extnt_size)
6197 {
6198 	int rc = 0;
6199 	uint32_t length;
6200 	uint32_t mbox_tmo;
6201 	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6202 	LPFC_MBOXQ_t *mbox;
6203 
6204 	*extnt_count = 0;
6205 	*extnt_size = 0;
6206 
6207 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6208 	if (!mbox)
6209 		return -ENOMEM;
6210 
6211 	/* Find out how many extents are available for this resource type */
6212 	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6213 		  sizeof(struct lpfc_sli4_cfg_mhdr));
6214 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6215 			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6216 			 length, LPFC_SLI4_MBX_EMBED);
6217 
6218 	/* Send an extents count of 0 - the GET doesn't use it. */
6219 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6220 					LPFC_SLI4_MBX_EMBED);
6221 	if (unlikely(rc)) {
6222 		rc = -EIO;
6223 		goto err_exit;
6224 	}
6225 
6226 	if (!phba->sli4_hba.intr_enable)
6227 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6228 	else {
6229 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6230 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6231 	}
6232 	if (unlikely(rc)) {
6233 		rc = -EIO;
6234 		goto err_exit;
6235 	}
6236 
6237 	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6238 	if (bf_get(lpfc_mbox_hdr_status,
6239 		   &rsrc_info->header.cfg_shdr.response)) {
6240 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6241 				"2930 Failed to get resource extents "
6242 				"Status 0x%x Add'l Status 0x%x\n",
6243 				bf_get(lpfc_mbox_hdr_status,
6244 				       &rsrc_info->header.cfg_shdr.response),
6245 				bf_get(lpfc_mbox_hdr_add_status,
6246 				       &rsrc_info->header.cfg_shdr.response));
6247 		rc = -EIO;
6248 		goto err_exit;
6249 	}
6250 
6251 	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6252 			      &rsrc_info->u.rsp);
6253 	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6254 			     &rsrc_info->u.rsp);
6255 
6256 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6257 			"3162 Retrieved extents type-%d from port: count:%d, "
6258 			"size:%d\n", type, *extnt_count, *extnt_size);
6259 
6260 err_exit:
6261 	mempool_free(mbox, phba->mbox_mem_pool);
6262 	return rc;
6263 }
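
/*
 * Illustrative sketch, not part of the driver: a caller sizing an XRI
 * provisioning request would query the port and multiply the two
 * outputs.  The helper name is hypothetical.
 */
static inline int lpfc_example_avail_xri_ids(struct lpfc_hba *phba)
{
	uint16_t cnt, size;
	int rc;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_XRI,
					    &cnt, &size);
	if (rc)
		return rc;
	/* Total XRI ids this PCI function could be provisioned with */
	return cnt * size;
}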
6264 
6265 /**
6266  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6267  * @phba: Pointer to HBA context object.
6268  * @type: The extent type to check.
6269  *
6270  * This function reads the current available extents from the port and checks
6271  * if the extent count or extent size has changed since the last access.
6272  * Callers use this routine post port reset to understand if there is a
6273  * extent reprovisioning requirement.
6274  *
6275  * Returns:
6276  *   -Error: error indicates problem.
6277  *   1: Extent count or size has changed.
6278  *   0: No changes.
6279  **/
6280 static int
6281 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6282 {
6283 	uint16_t curr_ext_cnt, rsrc_ext_cnt;
6284 	uint16_t size_diff, rsrc_ext_size;
6285 	int rc = 0;
6286 	struct lpfc_rsrc_blks *rsrc_entry;
6287 	struct list_head *rsrc_blk_list = NULL;
6288 
6289 	size_diff = 0;
6290 	curr_ext_cnt = 0;
6291 	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6292 					    &rsrc_ext_cnt,
6293 					    &rsrc_ext_size);
6294 	if (unlikely(rc))
6295 		return -EIO;
6296 
6297 	switch (type) {
6298 	case LPFC_RSC_TYPE_FCOE_RPI:
6299 		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6300 		break;
6301 	case LPFC_RSC_TYPE_FCOE_VPI:
6302 		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6303 		break;
6304 	case LPFC_RSC_TYPE_FCOE_XRI:
6305 		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6306 		break;
6307 	case LPFC_RSC_TYPE_FCOE_VFI:
6308 		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6309 		break;
6310 	default:
6311 		break;
6312 	}
6313 
6314 	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6315 		curr_ext_cnt++;
6316 		if (rsrc_entry->rsrc_size != rsrc_ext_size)
6317 			size_diff++;
6318 	}
6319 
6320 	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6321 		rc = 1;
6322 
6323 	return rc;
6324 }
6325 
6326 /**
6327  * lpfc_sli4_cfg_post_extnts - Post a request to allocate resource extents
6328  * @phba: Pointer to HBA context object.
6329  * @extnt_cnt: number of available extents.
6330  * @type: the extent type (rpi, xri, vfi, vpi).
6331  * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6332  * @mbox: pointer to the caller's allocated mailbox structure.
6333  *
6334  * This function executes the extents allocation request.  It also
6335  * works out the amount of mailbox memory the request needs and selects
6336  * an embedded or non-embedded mailbox accordingly. It is the caller's
6337  * responsibility to evaluate the response.
6338  *
6339  * Returns:
6340  *   -Error:  Error value describes the condition found.
6341  *   0: if successful
6342  **/
6343 static int
6344 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6345 			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6346 {
6347 	int rc = 0;
6348 	uint32_t req_len;
6349 	uint32_t emb_len;
6350 	uint32_t alloc_len, mbox_tmo;
6351 
6352 	/* Calculate the total requested length of the dma memory */
6353 	req_len = extnt_cnt * sizeof(uint16_t);
6354 
6355 	/*
6356 	 * Calculate the size of an embedded mailbox.  The uint32_t
6357 	 * accounts for the extents-specific word.
6358 	 */
6359 	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6360 		sizeof(uint32_t);
6361 
6362 	/*
6363 	 * Presume the allocation and response will fit into an embedded
6364 	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6365 	 */
6366 	*emb = LPFC_SLI4_MBX_EMBED;
6367 	if (req_len > emb_len) {
6368 		req_len = extnt_cnt * sizeof(uint16_t) +
6369 			sizeof(union lpfc_sli4_cfg_shdr) +
6370 			sizeof(uint32_t);
6371 		*emb = LPFC_SLI4_MBX_NEMBED;
6372 	}
6373 
6374 	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6375 				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6376 				     req_len, *emb);
6377 	if (alloc_len < req_len) {
6378 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6379 			"2982 Allocated DMA memory size (x%x) is "
6380 			"less than the requested DMA memory "
6381 			"size (x%x)\n", alloc_len, req_len);
6382 		return -ENOMEM;
6383 	}
6384 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6385 	if (unlikely(rc))
6386 		return -EIO;
6387 
6388 	if (!phba->sli4_hba.intr_enable)
6389 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6390 	else {
6391 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6392 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6393 	}
6394 
6395 	if (unlikely(rc))
6396 		rc = -EIO;
6397 	return rc;
6398 }
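
/*
 * Worked sizing example for the embedded/non-embedded choice above:
 * each requested extent id costs sizeof(uint16_t) == 2 bytes, so a
 * request for N extents stays embedded while 2 * N fits in emb_len
 * (sizeof(MAILBOX_t) minus the mailbox header and the extents word).
 * A larger request is reissued non-embedded, with the cfg_shdr and the
 * extents word added to the requested length.
 */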
6399 
6400 /**
6401  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6402  * @phba: Pointer to HBA context object.
6403  * @type:  The resource extent type to allocate.
6404  *
6405  * This function allocates the number of elements for the specified
6406  * resource type.
6407  **/
6408 static int
6409 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6410 {
6411 	bool emb = false;
6412 	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6413 	uint16_t rsrc_id, rsrc_start, j, k;
6414 	uint16_t *ids;
6415 	int i, rc;
6416 	unsigned long longs;
6417 	unsigned long *bmask;
6418 	struct lpfc_rsrc_blks *rsrc_blks;
6419 	LPFC_MBOXQ_t *mbox;
6420 	uint32_t length;
6421 	struct lpfc_id_range *id_array = NULL;
6422 	void *virtaddr = NULL;
6423 	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6424 	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6425 	struct list_head *ext_blk_list;
6426 
6427 	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6428 					    &rsrc_cnt,
6429 					    &rsrc_size);
6430 	if (unlikely(rc))
6431 		return -EIO;
6432 
6433 	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6434 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6435 			"3009 No available Resource Extents "
6436 			"for resource type 0x%x: Count: 0x%x, "
6437 			"Size 0x%x\n", type, rsrc_cnt,
6438 			rsrc_size);
6439 		return -ENOMEM;
6440 	}
6441 
6442 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6443 			"2903 Post resource extents type-0x%x: "
6444 			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6445 
6446 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6447 	if (!mbox)
6448 		return -ENOMEM;
6449 
6450 	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6451 	if (unlikely(rc)) {
6452 		rc = -EIO;
6453 		goto err_exit;
6454 	}
6455 
6456 	/*
6457 	 * Figure out where the response is located.  Then get local pointers
6458 	 * to the response data.  The port is not guaranteed to honor the
6459 	 * full requested extent count, so update the local variable with
6460 	 * the allocated count from the port.
6461 	 */
6462 	if (emb == LPFC_SLI4_MBX_EMBED) {
6463 		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6464 		id_array = &rsrc_ext->u.rsp.id[0];
6465 		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6466 	} else {
6467 		virtaddr = mbox->sge_array->addr[0];
6468 		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6469 		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6470 		id_array = &n_rsrc->id;
6471 	}
6472 
6473 	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6474 	rsrc_id_cnt = rsrc_cnt * rsrc_size;
6475 
6476 	/*
6477 	 * Based on the resource size and count, correct the base and max
6478 	 * resource values.
6479 	 */
6480 	length = sizeof(struct lpfc_rsrc_blks);
6481 	switch (type) {
6482 	case LPFC_RSC_TYPE_FCOE_RPI:
6483 		phba->sli4_hba.rpi_bmask = kcalloc(longs,
6484 						   sizeof(unsigned long),
6485 						   GFP_KERNEL);
6486 		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6487 			rc = -ENOMEM;
6488 			goto err_exit;
6489 		}
6490 		phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6491 						 sizeof(uint16_t),
6492 						 GFP_KERNEL);
6493 		if (unlikely(!phba->sli4_hba.rpi_ids)) {
6494 			kfree(phba->sli4_hba.rpi_bmask);
6495 			rc = -ENOMEM;
6496 			goto err_exit;
6497 		}
6498 
6499 		/*
6500 		 * The next_rpi was initialized with the maximum available
6501 		 * count but the port may allocate a smaller number.  Catch
6502 		 * that case and update the next_rpi.
6503 		 */
6504 		phba->sli4_hba.next_rpi = rsrc_id_cnt;
6505 
6506 		/* Initialize local ptrs for common extent processing later. */
6507 		bmask = phba->sli4_hba.rpi_bmask;
6508 		ids = phba->sli4_hba.rpi_ids;
6509 		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6510 		break;
6511 	case LPFC_RSC_TYPE_FCOE_VPI:
6512 		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6513 					  GFP_KERNEL);
6514 		if (unlikely(!phba->vpi_bmask)) {
6515 			rc = -ENOMEM;
6516 			goto err_exit;
6517 		}
6518 		phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6519 					 GFP_KERNEL);
6520 		if (unlikely(!phba->vpi_ids)) {
6521 			kfree(phba->vpi_bmask);
6522 			rc = -ENOMEM;
6523 			goto err_exit;
6524 		}
6525 
6526 		/* Initialize local ptrs for common extent processing later. */
6527 		bmask = phba->vpi_bmask;
6528 		ids = phba->vpi_ids;
6529 		ext_blk_list = &phba->lpfc_vpi_blk_list;
6530 		break;
6531 	case LPFC_RSC_TYPE_FCOE_XRI:
6532 		phba->sli4_hba.xri_bmask = kcalloc(longs,
6533 						   sizeof(unsigned long),
6534 						   GFP_KERNEL);
6535 		if (unlikely(!phba->sli4_hba.xri_bmask)) {
6536 			rc = -ENOMEM;
6537 			goto err_exit;
6538 		}
6539 		phba->sli4_hba.max_cfg_param.xri_used = 0;
6540 		phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6541 						 sizeof(uint16_t),
6542 						 GFP_KERNEL);
6543 		if (unlikely(!phba->sli4_hba.xri_ids)) {
6544 			kfree(phba->sli4_hba.xri_bmask);
6545 			rc = -ENOMEM;
6546 			goto err_exit;
6547 		}
6548 
6549 		/* Initialize local ptrs for common extent processing later. */
6550 		bmask = phba->sli4_hba.xri_bmask;
6551 		ids = phba->sli4_hba.xri_ids;
6552 		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6553 		break;
6554 	case LPFC_RSC_TYPE_FCOE_VFI:
6555 		phba->sli4_hba.vfi_bmask = kcalloc(longs,
6556 						   sizeof(unsigned long),
6557 						   GFP_KERNEL);
6558 		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6559 			rc = -ENOMEM;
6560 			goto err_exit;
6561 		}
6562 		phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6563 						 sizeof(uint16_t),
6564 						 GFP_KERNEL);
6565 		if (unlikely(!phba->sli4_hba.vfi_ids)) {
6566 			kfree(phba->sli4_hba.vfi_bmask);
6567 			rc = -ENOMEM;
6568 			goto err_exit;
6569 		}
6570 
6571 		/* Initialize local ptrs for common extent processing later. */
6572 		bmask = phba->sli4_hba.vfi_bmask;
6573 		ids = phba->sli4_hba.vfi_ids;
6574 		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6575 		break;
6576 	default:
6577 		/* Unsupported resource type.  Fail the call. */
6578 		id_array = NULL;
6579 		bmask = NULL;
6580 		ids = NULL;
6581 		ext_blk_list = NULL;
     		rc = -EIO;
6582 		goto err_exit;
6583 	}
6584 
6585 	/*
6586 	 * Complete initializing the extent configuration with the
6587 	 * allocated ids assigned to this function.  The bitmask serves
6588 	 * as an index into the array and manages the available ids.  The
6589 	 * array just stores the ids communicated to the port via the wqes.
6590 	 */
6591 	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6592 		if ((i % 2) == 0)
6593 			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6594 					 &id_array[k]);
6595 		else
6596 			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6597 					 &id_array[k]);
6598 
6599 		rsrc_blks = kzalloc(length, GFP_KERNEL);
6600 		if (unlikely(!rsrc_blks)) {
6601 			rc = -ENOMEM;
6602 			kfree(bmask);
6603 			kfree(ids);
6604 			goto err_exit;
6605 		}
6606 		rsrc_blks->rsrc_start = rsrc_id;
6607 		rsrc_blks->rsrc_size = rsrc_size;
6608 		list_add_tail(&rsrc_blks->list, ext_blk_list);
6609 		rsrc_start = rsrc_id;
6610 		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6611 			phba->sli4_hba.io_xri_start = rsrc_start +
6612 				lpfc_sli4_get_iocb_cnt(phba);
6613 		}
6614 
6615 		while (rsrc_id < (rsrc_start + rsrc_size)) {
6616 			ids[j] = rsrc_id;
6617 			rsrc_id++;
6618 			j++;
6619 		}
6620 		/* Entire word processed.  Get next word. */
6621 		if ((i % 2) == 1)
6622 			k++;
6623 	}
6624  err_exit:
6625 	lpfc_sli4_mbox_cmd_free(phba, mbox);
6626 	return rc;
6627 }
6628 
6629 
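/*
 * A minimal sketch (not driver code) of the id unpacking performed in the
 * loop above: the port packs two 16-bit extent base ids into each 32-bit
 * response word, which is why the word index k only advances on odd i.
 * This assumes lpfc_mbx_rsrc_id_word4_0 maps to the low half of the word
 * and lpfc_mbx_rsrc_id_word4_1 to the high half; the helper name is
 * illustrative only.
 */
static inline uint16_t __maybe_unused
lpfc_extent_base_id_sketch(const uint32_t *id_words, int i)
{
	return (i % 2) ? (id_words[i / 2] >> 16) :
			 (id_words[i / 2] & 0xFFFF);
}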
6630 
6631 /**
6632  * lpfc_sli4_dealloc_extent - Deallocate all SLI4 resource extents of a type.
6633  * @phba: Pointer to HBA context object.
6634  * @type: the extent's type.
6635  *
6636  * This function deallocates all extents of a particular resource type;
6637  * SLI4 does not allow deallocating a subset of an extent range.  The
6638  * associated driver bookkeeping memory is also released here, except for
6639  * RPIs, whose bitmask and id array are freed elsewhere.
6639  **/
6640 static int
6641 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6642 {
6643 	int rc;
6644 	uint32_t length, mbox_tmo = 0;
6645 	LPFC_MBOXQ_t *mbox;
6646 	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6647 	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6648 
6649 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6650 	if (!mbox)
6651 		return -ENOMEM;
6652 
6653 	/*
6654 	 * This function sends an embedded mailbox because it only sends
6655 	 * the resource type.  All extents of this type are released by the
6656 	 * port.
6657 	 */
6658 	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6659 		  sizeof(struct lpfc_sli4_cfg_mhdr));
6660 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6661 			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6662 			 length, LPFC_SLI4_MBX_EMBED);
6663 
6664 	/* Send an extents count of 0 - the dealloc doesn't use it. */
6665 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6666 					LPFC_SLI4_MBX_EMBED);
6667 	if (unlikely(rc)) {
6668 		rc = -EIO;
6669 		goto out_free_mbox;
6670 	}
6671 	if (!phba->sli4_hba.intr_enable)
6672 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6673 	else {
6674 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6675 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6676 	}
6677 	if (unlikely(rc)) {
6678 		rc = -EIO;
6679 		goto out_free_mbox;
6680 	}
6681 
6682 	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6683 	if (bf_get(lpfc_mbox_hdr_status,
6684 		   &dealloc_rsrc->header.cfg_shdr.response)) {
6685 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6686 				"2919 Failed to release resource extents "
6687 				"for type %d - Status 0x%x Add'l Status 0x%x. "
6688 				"Resource memory not released.\n",
6689 				type,
6690 				bf_get(lpfc_mbox_hdr_status,
6691 				    &dealloc_rsrc->header.cfg_shdr.response),
6692 				bf_get(lpfc_mbox_hdr_add_status,
6693 				    &dealloc_rsrc->header.cfg_shdr.response));
6694 		rc = -EIO;
6695 		goto out_free_mbox;
6696 	}
6697 
6698 	/* Release kernel memory resources for the specific type. */
6699 	switch (type) {
6700 	case LPFC_RSC_TYPE_FCOE_VPI:
6701 		kfree(phba->vpi_bmask);
6702 		kfree(phba->vpi_ids);
6703 		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6704 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6705 				    &phba->lpfc_vpi_blk_list, list) {
6706 			list_del_init(&rsrc_blk->list);
6707 			kfree(rsrc_blk);
6708 		}
6709 		phba->sli4_hba.max_cfg_param.vpi_used = 0;
6710 		break;
6711 	case LPFC_RSC_TYPE_FCOE_XRI:
6712 		kfree(phba->sli4_hba.xri_bmask);
6713 		kfree(phba->sli4_hba.xri_ids);
6714 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6715 				    &phba->sli4_hba.lpfc_xri_blk_list, list) {
6716 			list_del_init(&rsrc_blk->list);
6717 			kfree(rsrc_blk);
6718 		}
6719 		break;
6720 	case LPFC_RSC_TYPE_FCOE_VFI:
6721 		kfree(phba->sli4_hba.vfi_bmask);
6722 		kfree(phba->sli4_hba.vfi_ids);
6723 		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6724 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6725 				    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6726 			list_del_init(&rsrc_blk->list);
6727 			kfree(rsrc_blk);
6728 		}
6729 		break;
6730 	case LPFC_RSC_TYPE_FCOE_RPI:
6731 		/* RPI bitmask and physical id array are cleaned up earlier. */
6732 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6733 				    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6734 			list_del_init(&rsrc_blk->list);
6735 			kfree(rsrc_blk);
6736 		}
6737 		break;
6738 	default:
6739 		break;
6740 	}
6741 
6742 	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6743 
6744  out_free_mbox:
6745 	mempool_free(mbox, phba->mbox_mem_pool);
6746 	return rc;
6747 }
6748 
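/*
 * The mailbox is issued above with the same idiom used throughout this
 * file: poll when interrupts are not yet enabled, otherwise sleep until
 * completion or timeout.  A condensed sketch of that idiom (the driver
 * open-codes it at each call site; the helper name is illustrative):
 */
static int __maybe_unused
lpfc_issue_mbox_sync_sketch(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
{
	uint32_t mbox_tmo;

	if (!phba->sli4_hba.intr_enable)
		return lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
	return lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
}
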
6749 static void
6750 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6751 		  uint32_t feature)
6752 {
6753 	uint32_t len;
6754 	u32 sig_freq = 0;
6755 
6756 	len = sizeof(struct lpfc_mbx_set_feature) -
6757 		sizeof(struct lpfc_sli4_cfg_mhdr);
6758 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6759 			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6760 			 LPFC_SLI4_MBX_EMBED);
6761 
6762 	switch (feature) {
6763 	case LPFC_SET_UE_RECOVERY:
6764 		bf_set(lpfc_mbx_set_feature_UER,
6765 		       &mbox->u.mqe.un.set_feature, 1);
6766 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6767 		mbox->u.mqe.un.set_feature.param_len = 8;
6768 		break;
6769 	case LPFC_SET_MDS_DIAGS:
6770 		bf_set(lpfc_mbx_set_feature_mds,
6771 		       &mbox->u.mqe.un.set_feature, 1);
6772 		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6773 		       &mbox->u.mqe.un.set_feature, 1);
6774 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6775 		mbox->u.mqe.un.set_feature.param_len = 8;
6776 		break;
6777 	case LPFC_SET_CGN_SIGNAL:
6778 		if (phba->cmf_active_mode == LPFC_CFG_OFF)
6779 			sig_freq = 0;
6780 		else
6781 			sig_freq = phba->cgn_sig_freq;
6782 
6783 		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6784 			bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6785 			       &mbox->u.mqe.un.set_feature, sig_freq);
6786 			bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6787 			       &mbox->u.mqe.un.set_feature, sig_freq);
6788 		}
6789 
6790 		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6791 			bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6792 			       &mbox->u.mqe.un.set_feature, sig_freq);
6793 
6794 		if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6795 		    phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6796 			sig_freq = 0;
6797 		else
6798 			sig_freq = lpfc_acqe_cgn_frequency;
6799 
6800 		bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6801 		       &mbox->u.mqe.un.set_feature, sig_freq);
6802 
6803 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6804 		mbox->u.mqe.un.set_feature.param_len = 12;
6805 		break;
6806 	case LPFC_SET_DUAL_DUMP:
6807 		bf_set(lpfc_mbx_set_feature_dd,
6808 		       &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6809 		bf_set(lpfc_mbx_set_feature_ddquery,
6810 		       &mbox->u.mqe.un.set_feature, 0);
6811 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6812 		mbox->u.mqe.un.set_feature.param_len = 4;
6813 		break;
6814 	case LPFC_SET_ENABLE_MI:
6815 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6816 		mbox->u.mqe.un.set_feature.param_len = 4;
6817 		bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6818 		       phba->pport->cfg_lun_queue_depth);
6819 		bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6820 		       phba->sli4_hba.pc_sli4_params.mi_ver);
6821 		break;
6822 	case LPFC_SET_LD_SIGNAL:
6823 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
6824 		mbox->u.mqe.un.set_feature.param_len = 16;
6825 		bf_set(lpfc_mbx_set_feature_lds_qry,
6826 		       &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
6827 		break;
6828 	case LPFC_SET_ENABLE_CMF:
6829 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6830 		mbox->u.mqe.un.set_feature.param_len = 4;
6831 		bf_set(lpfc_mbx_set_feature_cmf,
6832 		       &mbox->u.mqe.un.set_feature, 1);
6833 		break;
6834 	}
6835 	return;
6836 }
6837 
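/*
 * Typical use of lpfc_set_features(): allocate a mailbox, let the helper
 * build the SET_FEATURES payload, then issue it either synchronously (as
 * lpfc_cmf_setup does below) or asynchronously with a completion handler
 * (as lpfc_read_lds_params does).  A condensed polled-mode sketch, with
 * an illustrative function name:
 */
static int __maybe_unused
lpfc_set_feature_poll_sketch(struct lpfc_hba *phba, uint32_t feature)
{
	LPFC_MBOXQ_t *mbox;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	lpfc_set_features(phba, mbox, feature);
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mempool_free(mbox, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}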
6838 /**
6839  * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6840  * @phba: Pointer to HBA context object.
6841  *
6842  * Disable FW logging into host memory on the adapter. To
6843  * be done before reading logs from the host memory.
6844  **/
6845 void
6846 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6847 {
6848 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6849 
6850 	spin_lock_irq(&phba->ras_fwlog_lock);
6851 	ras_fwlog->state = INACTIVE;
6852 	spin_unlock_irq(&phba->ras_fwlog_lock);
6853 
6854 	/* Disable FW logging to host memory */
6855 	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6856 	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6857 
6858 	/* Wait 10ms for firmware to stop using DMA buffer */
6859 	usleep_range(10 * 1000, 20 * 1000);
6860 }
6861 
6862 /**
6863  * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6864  * @phba: Pointer to HBA context object.
6865  *
6866  * This function is called to free memory allocated for RAS FW logging
6867  * support in the driver.
6868  **/
6869 void
6870 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6871 {
6872 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6873 	struct lpfc_dmabuf *dmabuf, *next;
6874 
6875 	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6876 		list_for_each_entry_safe(dmabuf, next,
6877 				    &ras_fwlog->fwlog_buff_list,
6878 				    list) {
6879 			list_del(&dmabuf->list);
6880 			dma_free_coherent(&phba->pcidev->dev,
6881 					  LPFC_RAS_MAX_ENTRY_SIZE,
6882 					  dmabuf->virt, dmabuf->phys);
6883 			kfree(dmabuf);
6884 		}
6885 	}
6886 
6887 	if (ras_fwlog->lwpd.virt) {
6888 		dma_free_coherent(&phba->pcidev->dev,
6889 				  sizeof(uint32_t) * 2,
6890 				  ras_fwlog->lwpd.virt,
6891 				  ras_fwlog->lwpd.phys);
6892 		ras_fwlog->lwpd.virt = NULL;
6893 	}
6894 
6895 	spin_lock_irq(&phba->ras_fwlog_lock);
6896 	ras_fwlog->state = INACTIVE;
6897 	spin_unlock_irq(&phba->ras_fwlog_lock);
6898 }
6899 
6900 /**
6901  * lpfc_sli4_ras_dma_alloc: Allocate DMA memory for FW logging support
6902  * @phba: Pointer to HBA context object.
6903  * @fwlog_buff_count: Count of buffers to be created.
6904  *
6905  * This routine allocates DMA memory for the Log Write Position Data (LWPD)
6906  * and for the buffers posted to the adapter to receive the FW log.
6907  * The buffer count is calculated from the module param ras_fwlog_buffsize;
6908  * the size of each buffer posted to FW is 64K.
6909  **/
6910 
6911 static int
6912 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6913 			uint32_t fwlog_buff_count)
6914 {
6915 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6916 	struct lpfc_dmabuf *dmabuf;
6917 	int rc = 0, i = 0;
6918 
6919 	/* Initialize List */
6920 	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6921 
6922 	/* Allocate memory for the LWPD */
6923 	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6924 					    sizeof(uint32_t) * 2,
6925 					    &ras_fwlog->lwpd.phys,
6926 					    GFP_KERNEL);
6927 	if (!ras_fwlog->lwpd.virt) {
6928 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6929 				"6185 LWPD Memory Alloc Failed\n");
6930 
6931 		return -ENOMEM;
6932 	}
6933 
6934 	ras_fwlog->fw_buffcount = fwlog_buff_count;
6935 	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6936 		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf),
6937 				 GFP_KERNEL);
6938 		if (!dmabuf) {
6939 			rc = -ENOMEM;
6940 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6941 					"6186 Memory Alloc failed FW logging");
6942 			goto free_mem;
6943 		}
6944 
6945 		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6946 						  LPFC_RAS_MAX_ENTRY_SIZE,
6947 						  &dmabuf->phys, GFP_KERNEL);
6948 		if (!dmabuf->virt) {
6949 			kfree(dmabuf);
6950 			rc = -ENOMEM;
6951 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6952 					"6187 DMA Alloc Failed FW logging");
6953 			goto free_mem;
6954 		}
6955 		dmabuf->buffer_tag = i;
6956 		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6957 	}
6958 
6959 free_mem:
6960 	if (rc)
6961 		lpfc_sli4_ras_dma_free(phba);
6962 
6963 	return rc;
6964 }
6965 
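/*
 * Sizing example for the allocation above.  The caller,
 * lpfc_sli4_ras_fwlog_init() below, derives the buffer count as
 *
 *	fwlog_buffsize    = LPFC_RAS_MIN_BUFF_POST_SIZE *
 *			    phba->cfg_ras_fwlog_buffsize
 *	fwlog_entry_count = fwlog_buffsize / LPFC_RAS_MAX_ENTRY_SIZE
 *
 * so, assuming the 256K minimum post size and 64K entry size (see the
 * headers for the authoritative values), cfg_ras_fwlog_buffsize = 2
 * yields 512K of log space split into eight 64K DMA buffers, plus the
 * two-word LWPD block.
 */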
6966 /**
6967  * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6968  * @phba: pointer to lpfc hba data structure.
6969  * @pmb: pointer to the driver internal queue element for mailbox command.
6970  *
6971  * Completion handler for driver's RAS MBX command to the device.
6972  **/
6973 static void
6974 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6975 {
6976 	MAILBOX_t *mb;
6977 	union lpfc_sli4_cfg_shdr *shdr;
6978 	uint32_t shdr_status, shdr_add_status;
6979 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6980 
6981 	mb = &pmb->u.mb;
6982 
6983 	shdr = (union lpfc_sli4_cfg_shdr *)
6984 		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6985 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6986 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6987 
6988 	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6989 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6990 				"6188 FW LOG mailbox "
6991 				"completed with status x%x add_status x%x,"
6992 				" mbx status x%x\n",
6993 				shdr_status, shdr_add_status, mb->mbxStatus);
6994 
6995 		ras_fwlog->ras_hwsupport = false;
6996 		goto disable_ras;
6997 	}
6998 
6999 	spin_lock_irq(&phba->ras_fwlog_lock);
7000 	ras_fwlog->state = ACTIVE;
7001 	spin_unlock_irq(&phba->ras_fwlog_lock);
7002 	mempool_free(pmb, phba->mbox_mem_pool);
7003 
7004 	return;
7005 
7006 disable_ras:
7007 	/* Free RAS DMA memory */
7008 	lpfc_sli4_ras_dma_free(phba);
7009 	mempool_free(pmb, phba->mbox_mem_pool);
7010 }
7011 
7012 /**
7013  * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
7014  * @phba: pointer to lpfc hba data structure.
7015  * @fwlog_level: Logging verbosity level.
7016  * @fwlog_enable: Enable/Disable logging.
7017  *
7018  * Initialize memory and post mailbox command to enable FW logging in host
7019  * memory.
7020  **/
7021 int
7022 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
7023 			 uint32_t fwlog_level,
7024 			 uint32_t fwlog_enable)
7025 {
7026 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
7027 	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
7028 	struct lpfc_dmabuf *dmabuf;
7029 	LPFC_MBOXQ_t *mbox;
7030 	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
7031 	int rc = 0;
7032 
7033 	spin_lock_irq(&phba->ras_fwlog_lock);
7034 	ras_fwlog->state = INACTIVE;
7035 	spin_unlock_irq(&phba->ras_fwlog_lock);
7036 
7037 	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
7038 			  phba->cfg_ras_fwlog_buffsize);
7039 	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
7040 
7041 	/*
7042 	 * If re-enabling FW logging support, reuse the previously allocated
7043 	 * DMA buffers when posting the MBX command.
7044 	 */
7045 	if (!ras_fwlog->lwpd.virt) {
7046 		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
7047 		if (rc) {
7048 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7049 					"6189 FW Log Memory Allocation Failed");
7050 			return rc;
7051 		}
7052 	}
7053 
7054 	/* Setup Mailbox command */
7055 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7056 	if (!mbox) {
7057 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7058 				"6190 RAS MBX Alloc Failed");
7059 		rc = -ENOMEM;
7060 		goto mem_free;
7061 	}
7062 
7063 	ras_fwlog->fw_loglevel = fwlog_level;
7064 	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
7065 		sizeof(struct lpfc_sli4_cfg_mhdr));
7066 
7067 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
7068 			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
7069 			 len, LPFC_SLI4_MBX_EMBED);
7070 
7071 	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
7072 	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
7073 	       fwlog_enable);
7074 	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
7075 	       ras_fwlog->fw_loglevel);
7076 	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
7077 	       ras_fwlog->fw_buffcount);
7078 	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
7079 	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
7080 
7081 	/* Update DMA buffer address */
7082 	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
7083 		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
7084 
7085 		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
7086 			putPaddrLow(dmabuf->phys);
7087 
7088 		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7089 			putPaddrHigh(dmabuf->phys);
7090 	}
7091 
7092 	/* Update LWPD address */
7093 	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7094 	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
7095 
7096 	spin_lock_irq(&phba->ras_fwlog_lock);
7097 	ras_fwlog->state = REG_INPROGRESS;
7098 	spin_unlock_irq(&phba->ras_fwlog_lock);
7099 	mbox->vport = phba->pport;
7100 	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7101 
7102 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7103 
7104 	if (rc == MBX_NOT_FINISHED) {
7105 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7106 				"6191 FW-Log Mailbox failed. "
7107 				"status %d mbxStatus : x%x", rc,
7108 				bf_get(lpfc_mqe_status, &mbox->u.mqe));
7109 		mempool_free(mbox, phba->mbox_mem_pool);
7110 		rc = -EIO;
7111 		goto mem_free;
7112 	} else
7113 		rc = 0;
7114 mem_free:
7115 	if (rc)
7116 		lpfc_sli4_ras_dma_free(phba);
7117 
7118 	return rc;
7119 }
7120 
7121 /**
7122  * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7123  * @phba: Pointer to HBA context object.
7124  *
7125  * Check if RAS is supported on the adapter and, if so, initialize it.
7126  **/
7127 void
7128 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7129 {
7130 	/* Check whether RAS FW logging needs to be enabled */
7131 	if (lpfc_check_fwlog_support(phba))
7132 		return;
7133 
7134 	lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7135 				 LPFC_RAS_ENABLE_LOGGING);
7136 }
7137 
7138 /**
7139  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7140  * @phba: Pointer to HBA context object.
7141  *
7142  * This function allocates all SLI4 resource identifiers.
7143  **/
7144 int
7145 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7146 {
7147 	int i, rc, error = 0;
7148 	uint16_t count, base;
7149 	unsigned long longs;
7150 
7151 	if (!phba->sli4_hba.rpi_hdrs_in_use)
7152 		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7153 	if (phba->sli4_hba.extents_in_use) {
7154 		/*
7155 		 * The port supports resource extents. The XRI, VPI, VFI, RPI
7156 		 * resource extent count must be read and allocated before
7157 		 * provisioning the resource id arrays.
7158 		 */
7159 		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7160 		    LPFC_IDX_RSRC_RDY) {
7161 			/*
7162 			 * Extent-based resources are set - the driver could
7163 			 * be in a port reset. Figure out if any corrective
7164 			 * actions need to be taken.
7165 			 */
7166 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7167 						 LPFC_RSC_TYPE_FCOE_VFI);
7168 			if (rc != 0)
7169 				error++;
7170 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7171 						 LPFC_RSC_TYPE_FCOE_VPI);
7172 			if (rc != 0)
7173 				error++;
7174 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7175 						 LPFC_RSC_TYPE_FCOE_XRI);
7176 			if (rc != 0)
7177 				error++;
7178 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7179 						 LPFC_RSC_TYPE_FCOE_RPI);
7180 			if (rc != 0)
7181 				error++;
7182 
7183 			/*
7184 			 * It's possible that the number of resources
7185 			 * provided to this port instance changed between
7186 			 * resets.  Detect this condition and reallocate
7187 			 * resources.  Otherwise, there is no action.
7188 			 */
7189 			if (error) {
7190 				lpfc_printf_log(phba, KERN_INFO,
7191 						LOG_MBOX | LOG_INIT,
7192 						"2931 Detected extent resource "
7193 						"change.  Reallocating all "
7194 						"extents.\n");
7195 				rc = lpfc_sli4_dealloc_extent(phba,
7196 						 LPFC_RSC_TYPE_FCOE_VFI);
7197 				rc = lpfc_sli4_dealloc_extent(phba,
7198 						 LPFC_RSC_TYPE_FCOE_VPI);
7199 				rc = lpfc_sli4_dealloc_extent(phba,
7200 						 LPFC_RSC_TYPE_FCOE_XRI);
7201 				rc = lpfc_sli4_dealloc_extent(phba,
7202 						 LPFC_RSC_TYPE_FCOE_RPI);
7203 			} else
7204 				return 0;
7205 		}
7206 
7207 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7208 		if (unlikely(rc))
7209 			goto err_exit;
7210 
7211 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7212 		if (unlikely(rc))
7213 			goto err_exit;
7214 
7215 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7216 		if (unlikely(rc))
7217 			goto err_exit;
7218 
7219 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7220 		if (unlikely(rc))
7221 			goto err_exit;
7222 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7223 		       LPFC_IDX_RSRC_RDY);
7224 		return rc;
7225 	} else {
7226 		/*
7227 		 * The port does not support resource extents.  The XRI, VPI,
7228 		 * VFI, RPI resource ids were determined from READ_CONFIG.
7229 		 * Just allocate the bitmasks and provision the resource id
7230 		 * arrays.  If a port reset is active, the resources don't
7231 		 * need any action - just exit.
7232 		 */
7233 		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7234 		    LPFC_IDX_RSRC_RDY) {
7235 			lpfc_sli4_dealloc_resource_identifiers(phba);
7236 			lpfc_sli4_remove_rpis(phba);
7237 		}
7238 		/* RPIs. */
7239 		count = phba->sli4_hba.max_cfg_param.max_rpi;
7240 		if (count <= 0) {
7241 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7242 					"3279 Invalid provisioning of "
7243 					"rpi:%d\n", count);
7244 			rc = -EINVAL;
7245 			goto err_exit;
7246 		}
7247 		base = phba->sli4_hba.max_cfg_param.rpi_base;
7248 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7249 		phba->sli4_hba.rpi_bmask = kcalloc(longs,
7250 						   sizeof(unsigned long),
7251 						   GFP_KERNEL);
7252 		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7253 			rc = -ENOMEM;
7254 			goto err_exit;
7255 		}
7256 		phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7257 						 GFP_KERNEL);
7258 		if (unlikely(!phba->sli4_hba.rpi_ids)) {
7259 			rc = -ENOMEM;
7260 			goto free_rpi_bmask;
7261 		}
7262 
7263 		for (i = 0; i < count; i++)
7264 			phba->sli4_hba.rpi_ids[i] = base + i;
7265 
7266 		/* VPIs. */
7267 		count = phba->sli4_hba.max_cfg_param.max_vpi;
7268 		if (count <= 0) {
7269 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7270 					"3280 Invalid provisioning of "
7271 					"vpi:%d\n", count);
7272 			rc = -EINVAL;
7273 			goto free_rpi_ids;
7274 		}
7275 		base = phba->sli4_hba.max_cfg_param.vpi_base;
7276 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7277 		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7278 					  GFP_KERNEL);
7279 		if (unlikely(!phba->vpi_bmask)) {
7280 			rc = -ENOMEM;
7281 			goto free_rpi_ids;
7282 		}
7283 		phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7284 					GFP_KERNEL);
7285 		if (unlikely(!phba->vpi_ids)) {
7286 			rc = -ENOMEM;
7287 			goto free_vpi_bmask;
7288 		}
7289 
7290 		for (i = 0; i < count; i++)
7291 			phba->vpi_ids[i] = base + i;
7292 
7293 		/* XRIs. */
7294 		count = phba->sli4_hba.max_cfg_param.max_xri;
7295 		if (count <= 0) {
7296 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7297 					"3281 Invalid provisioning of "
7298 					"xri:%d\n", count);
7299 			rc = -EINVAL;
7300 			goto free_vpi_ids;
7301 		}
7302 		base = phba->sli4_hba.max_cfg_param.xri_base;
7303 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7304 		phba->sli4_hba.xri_bmask = kcalloc(longs,
7305 						   sizeof(unsigned long),
7306 						   GFP_KERNEL);
7307 		if (unlikely(!phba->sli4_hba.xri_bmask)) {
7308 			rc = -ENOMEM;
7309 			goto free_vpi_ids;
7310 		}
7311 		phba->sli4_hba.max_cfg_param.xri_used = 0;
7312 		phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7313 						 GFP_KERNEL);
7314 		if (unlikely(!phba->sli4_hba.xri_ids)) {
7315 			rc = -ENOMEM;
7316 			goto free_xri_bmask;
7317 		}
7318 
7319 		for (i = 0; i < count; i++)
7320 			phba->sli4_hba.xri_ids[i] = base + i;
7321 
7322 		/* VFIs. */
7323 		count = phba->sli4_hba.max_cfg_param.max_vfi;
7324 		if (count <= 0) {
7325 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7326 					"3282 Invalid provisioning of "
7327 					"vfi:%d\n", count);
7328 			rc = -EINVAL;
7329 			goto free_xri_ids;
7330 		}
7331 		base = phba->sli4_hba.max_cfg_param.vfi_base;
7332 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7333 		phba->sli4_hba.vfi_bmask = kcalloc(longs,
7334 						   sizeof(unsigned long),
7335 						   GFP_KERNEL);
7336 		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7337 			rc = -ENOMEM;
7338 			goto free_xri_ids;
7339 		}
7340 		phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7341 						 GFP_KERNEL);
7342 		if (unlikely(!phba->sli4_hba.vfi_ids)) {
7343 			rc = -ENOMEM;
7344 			goto free_vfi_bmask;
7345 		}
7346 
7347 		for (i = 0; i < count; i++)
7348 			phba->sli4_hba.vfi_ids[i] = base + i;
7349 
7350 		/*
7351 		 * Mark all resources ready.  An HBA reset doesn't need
7352 		 * to reset the initialization.
7353 		 */
7354 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7355 		       LPFC_IDX_RSRC_RDY);
7356 		return 0;
7357 	}
7358 
7359  free_vfi_bmask:
7360 	kfree(phba->sli4_hba.vfi_bmask);
7361 	phba->sli4_hba.vfi_bmask = NULL;
7362  free_xri_ids:
7363 	kfree(phba->sli4_hba.xri_ids);
7364 	phba->sli4_hba.xri_ids = NULL;
7365  free_xri_bmask:
7366 	kfree(phba->sli4_hba.xri_bmask);
7367 	phba->sli4_hba.xri_bmask = NULL;
7368  free_vpi_ids:
7369 	kfree(phba->vpi_ids);
7370 	phba->vpi_ids = NULL;
7371  free_vpi_bmask:
7372 	kfree(phba->vpi_bmask);
7373 	phba->vpi_bmask = NULL;
7374  free_rpi_ids:
7375 	kfree(phba->sli4_hba.rpi_ids);
7376 	phba->sli4_hba.rpi_ids = NULL;
7377  free_rpi_bmask:
7378 	kfree(phba->sli4_hba.rpi_bmask);
7379 	phba->sli4_hba.rpi_bmask = NULL;
7380  err_exit:
7381 	return rc;
7382 }
7383 
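/*
 * How the bmask/ids pairs built above are consumed, in sketch form: a free
 * logical index is found in the bitmask, marked in use, and the physical
 * id stored at that index is handed to the port.  The driver's real
 * allocators (e.g. lpfc_sli4_alloc_rpi) add locking and header-page
 * handling; the helper below is illustrative only.
 */
static inline int __maybe_unused
lpfc_alloc_rsrc_id_sketch(unsigned long *bmask, uint16_t *ids, int max_ids)
{
	int idx;

	idx = find_first_zero_bit(bmask, max_ids);
	if (idx >= max_ids)
		return -ENOSPC;	/* every logical index is in use */
	set_bit(idx, bmask);
	return ids[idx];	/* physical id communicated to the port */
}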
7384 /**
7385  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7386  * @phba: Pointer to HBA context object.
7387  *
7388  * This function releases all SLI4 resource identifiers that were
7389  * allocated by the driver, for both the extent and non-extent cases.
7390  **/
7391 int
7392 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7393 {
7394 	if (phba->sli4_hba.extents_in_use) {
7395 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7396 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7397 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7398 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7399 	} else {
7400 		kfree(phba->vpi_bmask);
7401 		phba->sli4_hba.max_cfg_param.vpi_used = 0;
7402 		kfree(phba->vpi_ids);
7403 		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7404 		kfree(phba->sli4_hba.xri_bmask);
7405 		kfree(phba->sli4_hba.xri_ids);
7406 		kfree(phba->sli4_hba.vfi_bmask);
7407 		kfree(phba->sli4_hba.vfi_ids);
7408 		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7409 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7410 	}
7411 
7412 	return 0;
7413 }
7414 
7415 /**
7416  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7417  * @phba: Pointer to HBA context object.
7418  * @type: The resource extent type.
7419  * @extnt_cnt: buffer to hold port extent count response
7420  * @extnt_size: buffer to hold port extent size response.
7421  *
7422  * This function calls the port to read the host allocated extents
7423  * for a particular type.
7424  **/
7425 int
7426 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7427 			       uint16_t *extnt_cnt, uint16_t *extnt_size)
7428 {
7429 	bool emb;
7430 	int rc = 0;
7431 	uint16_t curr_blks = 0;
7432 	uint32_t req_len, emb_len;
7433 	uint32_t alloc_len, mbox_tmo;
7434 	struct list_head *blk_list_head;
7435 	struct lpfc_rsrc_blks *rsrc_blk;
7436 	LPFC_MBOXQ_t *mbox;
7437 	void *virtaddr = NULL;
7438 	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7439 	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7440 	union  lpfc_sli4_cfg_shdr *shdr;
7441 
7442 	switch (type) {
7443 	case LPFC_RSC_TYPE_FCOE_VPI:
7444 		blk_list_head = &phba->lpfc_vpi_blk_list;
7445 		break;
7446 	case LPFC_RSC_TYPE_FCOE_XRI:
7447 		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7448 		break;
7449 	case LPFC_RSC_TYPE_FCOE_VFI:
7450 		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7451 		break;
7452 	case LPFC_RSC_TYPE_FCOE_RPI:
7453 		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7454 		break;
7455 	default:
7456 		return -EIO;
7457 	}
7458 
7459 	/* Count the number of extents currently allocated for this type. */
7460 	list_for_each_entry(rsrc_blk, blk_list_head, list) {
7461 		if (curr_blks == 0) {
7462 			/*
7463 			 * The GET_ALLOCATED mailbox returns only the count,
7464 			 * not the size.  All extents of a given type have
7465 			 * the same size, so take the size from the first
7466 			 * allocated block and set the return value now.
7468 			 */
7469 			*extnt_size = rsrc_blk->rsrc_size;
7470 		}
7471 		curr_blks++;
7472 	}
7473 
7474 	/*
7475 	 * Calculate the size of an embedded mailbox.  The uint32_t
7476 	 * accounts for the extents-specific word.
7477 	 */
7478 	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7479 		sizeof(uint32_t);
7480 
7481 	/*
7482 	 * Presume the allocation and response will fit into an embedded
7483 	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
7484 	 */
7485 	emb = LPFC_SLI4_MBX_EMBED;
7486 	req_len = curr_blks * sizeof(uint16_t);
7487 	if (req_len > emb_len) {
7488 		req_len = curr_blks * sizeof(uint16_t) +
7489 			sizeof(union lpfc_sli4_cfg_shdr) +
7490 			sizeof(uint32_t);
7491 		emb = LPFC_SLI4_MBX_NEMBED;
7492 	}
7493 
7494 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7495 	if (!mbox)
7496 		return -ENOMEM;
7497 	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7498 
7499 	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7500 				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7501 				     req_len, emb);
7502 	if (alloc_len < req_len) {
7503 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7504 			"2983 Allocated DMA memory size (x%x) is "
7505 			"less than the requested DMA memory "
7506 			"size (x%x)\n", alloc_len, req_len);
7507 		rc = -ENOMEM;
7508 		goto err_exit;
7509 	}
7510 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7511 	if (unlikely(rc)) {
7512 		rc = -EIO;
7513 		goto err_exit;
7514 	}
7515 
7516 	if (!phba->sli4_hba.intr_enable)
7517 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7518 	else {
7519 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7520 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7521 	}
7522 
7523 	if (unlikely(rc)) {
7524 		rc = -EIO;
7525 		goto err_exit;
7526 	}
7527 
7528 	/*
7529 	 * Figure out where the response is located.  Then get local pointers
7530 	 * to the response data.  The port is not guaranteed to honor the
7531 	 * full extent count requested, so update the local count with the
7532 	 * count actually allocated by the port.
7533 	 */
7534 	if (emb == LPFC_SLI4_MBX_EMBED) {
7535 		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7536 		shdr = &rsrc_ext->header.cfg_shdr;
7537 		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7538 	} else {
7539 		virtaddr = mbox->sge_array->addr[0];
7540 		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7541 		shdr = &n_rsrc->cfg_shdr;
7542 		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7543 	}
7544 
7545 	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7546 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7547 			"2984 Failed to read allocated resources "
7548 			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
7549 			type,
7550 			bf_get(lpfc_mbox_hdr_status, &shdr->response),
7551 			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7552 		rc = -EIO;
7553 		goto err_exit;
7554 	}
7555  err_exit:
7556 	lpfc_sli4_mbox_cmd_free(phba, mbox);
7557 	return rc;
7558 }
7559 
7560 /**
7561  * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
7562  * @phba: pointer to lpfc hba data structure.
7563  * @sgl_list: linked list of sgl buffers to post
7564  * @cnt: number of linked list buffers
7565  *
7566  * This routine walks the list of buffers that have been allocated and
7567  * reposts them to the port by using SGL block post. This is needed after a
7568  * pci_function_reset/warm_start or start. It attempts to construct blocks
7569  * of buffer sgls which contain contiguous xris and uses the non-embedded
7570  * SGL block post mailbox commands to post them to the port. For a single
7571  * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
7572  * post mailbox command for posting.
7573  *
7574  * Returns: 0 = success, non-zero failure.
7575  **/
7576 static int
7577 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7578 			  struct list_head *sgl_list, int cnt)
7579 {
7580 	struct lpfc_sglq *sglq_entry = NULL;
7581 	struct lpfc_sglq *sglq_entry_next = NULL;
7582 	struct lpfc_sglq *sglq_entry_first = NULL;
7583 	int status = 0, total_cnt;
7584 	int post_cnt = 0, num_posted = 0, block_cnt = 0;
7585 	int last_xritag = NO_XRI;
7586 	LIST_HEAD(prep_sgl_list);
7587 	LIST_HEAD(blck_sgl_list);
7588 	LIST_HEAD(allc_sgl_list);
7589 	LIST_HEAD(post_sgl_list);
7590 	LIST_HEAD(free_sgl_list);
7591 
7592 	spin_lock_irq(&phba->hbalock);
7593 	spin_lock(&phba->sli4_hba.sgl_list_lock);
7594 	list_splice_init(sgl_list, &allc_sgl_list);
7595 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
7596 	spin_unlock_irq(&phba->hbalock);
7597 
7598 	total_cnt = cnt;
7599 	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7600 				 &allc_sgl_list, list) {
7601 		list_del_init(&sglq_entry->list);
7602 		block_cnt++;
7603 		if ((last_xritag != NO_XRI) &&
7604 		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
7605 			/* a hole in xri block, form a sgl posting block */
7606 			list_splice_init(&prep_sgl_list, &blck_sgl_list);
7607 			post_cnt = block_cnt - 1;
7608 			/* prepare list for next posting block */
7609 			list_add_tail(&sglq_entry->list, &prep_sgl_list);
7610 			block_cnt = 1;
7611 		} else {
7612 			/* prepare list for next posting block */
7613 			list_add_tail(&sglq_entry->list, &prep_sgl_list);
7614 			/* enough sgls for non-embed sgl mbox command */
7615 			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7616 				list_splice_init(&prep_sgl_list,
7617 						 &blck_sgl_list);
7618 				post_cnt = block_cnt;
7619 				block_cnt = 0;
7620 			}
7621 		}
7622 		num_posted++;
7623 
7624 		/* keep track of last sgl's xritag */
7625 		last_xritag = sglq_entry->sli4_xritag;
7626 
7627 		/* end of repost sgl list condition for buffers */
7628 		if (num_posted == total_cnt) {
7629 			if (post_cnt == 0) {
7630 				list_splice_init(&prep_sgl_list,
7631 						 &blck_sgl_list);
7632 				post_cnt = block_cnt;
7633 			} else if (block_cnt == 1) {
7634 				status = lpfc_sli4_post_sgl(phba,
7635 						sglq_entry->phys, 0,
7636 						sglq_entry->sli4_xritag);
7637 				if (!status) {
7638 					/* successful, put sgl to posted list */
7639 					list_add_tail(&sglq_entry->list,
7640 						      &post_sgl_list);
7641 				} else {
7642 					/* Failure, put sgl to free list */
7643 					lpfc_printf_log(phba, KERN_WARNING,
7644 						LOG_SLI,
7645 						"3159 Failed to post "
7646 						"sgl, xritag:x%x\n",
7647 						sglq_entry->sli4_xritag);
7648 					list_add_tail(&sglq_entry->list,
7649 						      &free_sgl_list);
7650 					total_cnt--;
7651 				}
7652 			}
7653 		}
7654 
7655 		/* continue until a nembed page worth of sgls */
7656 		if (post_cnt == 0)
7657 			continue;
7658 
7659 		/* post the buffer list sgls as a block */
7660 		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7661 						 post_cnt);
7662 
7663 		if (!status) {
7664 			/* success, put sgl list to posted sgl list */
7665 			list_splice_init(&blck_sgl_list, &post_sgl_list);
7666 		} else {
7667 			/* Failure, put sgl list to free sgl list */
7668 			sglq_entry_first = list_first_entry(&blck_sgl_list,
7669 							    struct lpfc_sglq,
7670 							    list);
7671 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7672 					"3160 Failed to post sgl-list, "
7673 					"xritag:x%x-x%x\n",
7674 					sglq_entry_first->sli4_xritag,
7675 					(sglq_entry_first->sli4_xritag +
7676 					 post_cnt - 1));
7677 			list_splice_init(&blck_sgl_list, &free_sgl_list);
7678 			total_cnt -= post_cnt;
7679 		}
7680 
7681 		/* don't reset xritag due to hole in xri block */
7682 		if (block_cnt == 0)
7683 			last_xritag = NO_XRI;
7684 
7685 		/* reset sgl post count for next round of posting */
7686 		post_cnt = 0;
7687 	}
7688 
7689 	/* free the sgls failed to post */
7690 	lpfc_free_sgl_list(phba, &free_sgl_list);
7691 
7692 	/* push sgls posted to the available list */
7693 	if (!list_empty(&post_sgl_list)) {
7694 		spin_lock_irq(&phba->hbalock);
7695 		spin_lock(&phba->sli4_hba.sgl_list_lock);
7696 		list_splice_init(&post_sgl_list, sgl_list);
7697 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
7698 		spin_unlock_irq(&phba->hbalock);
7699 	} else {
7700 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7701 				"3161 Failure to post sgl to port,status %x "
7702 				"blkcnt %d totalcnt %d postcnt %d\n",
7703 				status, block_cnt, total_cnt, post_cnt);
7704 		return -EIO;
7705 	}
7706 
7707 	/* return the number of XRIs actually posted */
7708 	return total_cnt;
7709 }
7710 
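/*
 * The batching rule used above is easier to see on a plain array of tags:
 * contiguous xritags accumulate into a block of at most
 * LPFC_NEMBED_MBOX_SGL_CNT entries, and either a gap in the sequence or a
 * full block flushes what has been gathered.  A standalone sketch, where
 * flush() stands in for the block-post step and is illustrative only:
 */
static void __maybe_unused
lpfc_batch_xritags_sketch(const u16 *tags, int cnt,
			  void (*flush)(const u16 *blk, int n))
{
	int start = 0, i;

	for (i = 1; i <= cnt; i++) {
		bool gap = (i == cnt) || (tags[i] != tags[i - 1] + 1);
		bool full = (i - start) == LPFC_NEMBED_MBOX_SGL_CNT;

		if (gap || full) {
			flush(&tags[start], i - start);
			start = i;
		}
	}
}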
7711 /**
7712  * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7713  * @phba: pointer to lpfc hba data structure.
7714  *
7715  * This routine walks the list of nvme buffers that have been allocated and
7716  * reposts them to the port by using SGL block post. This is needed after a
7717  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7718  * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7719  * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7720  *
7721  * Returns: 0 = success, non-zero failure.
7722  **/
7723 static int
7724 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7725 {
7726 	LIST_HEAD(post_nblist);
7727 	int num_posted, rc = 0;
7728 
7729 	/* move all NVME buffers that need reposting onto a local list */
7730 	lpfc_io_buf_flush(phba, &post_nblist);
7731 
7732 	/* post the list of nvme buffer sgls to port if available */
7733 	if (!list_empty(&post_nblist)) {
7734 		num_posted = lpfc_sli4_post_io_sgl_list(
7735 			phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7736 		/* failed to post any nvme buffer, return error */
7737 		if (num_posted == 0)
7738 			rc = -EIO;
7739 	}
7740 	return rc;
7741 }
7742 
7743 static void
7744 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7745 {
7746 	uint32_t len;
7747 
7748 	len = sizeof(struct lpfc_mbx_set_host_data) -
7749 		sizeof(struct lpfc_sli4_cfg_mhdr);
7750 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7751 			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7752 			 LPFC_SLI4_MBX_EMBED);
7753 
7754 	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7755 	mbox->u.mqe.un.set_host_data.param_len =
7756 					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7757 	snprintf(mbox->u.mqe.un.set_host_data.un.data,
7758 		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7759 		 "Linux %s v"LPFC_DRIVER_VERSION,
7760 		 test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? "FCoE" : "FC");
7761 }
7762 
7763 int
7764 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7765 		    struct lpfc_queue *drq, int count, int idx)
7766 {
7767 	int rc, i;
7768 	struct lpfc_rqe hrqe;
7769 	struct lpfc_rqe drqe;
7770 	struct lpfc_rqb *rqbp;
7771 	unsigned long flags;
7772 	struct rqb_dmabuf *rqb_buffer;
7773 	LIST_HEAD(rqb_buf_list);
7774 
7775 	rqbp = hrq->rqbp;
7776 	for (i = 0; i < count; i++) {
7777 		spin_lock_irqsave(&phba->hbalock, flags);
7778 		/* If the RQ is already full, don't bother */
7779 		if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7780 			spin_unlock_irqrestore(&phba->hbalock, flags);
7781 			break;
7782 		}
7783 		spin_unlock_irqrestore(&phba->hbalock, flags);
7784 
7785 		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7786 		if (!rqb_buffer)
7787 			break;
7788 		rqb_buffer->hrq = hrq;
7789 		rqb_buffer->drq = drq;
7790 		rqb_buffer->idx = idx;
7791 		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7792 	}
7793 
7794 	spin_lock_irqsave(&phba->hbalock, flags);
7795 	while (!list_empty(&rqb_buf_list)) {
7796 		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7797 				 hbuf.list);
7798 
7799 		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7800 		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7801 		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7802 		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7803 		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7804 		if (rc < 0) {
7805 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7806 					"6421 Cannot post to HRQ %d: %x %x %x "
7807 					"DRQ %x %x\n",
7808 					hrq->queue_id,
7809 					hrq->host_index,
7810 					hrq->hba_index,
7811 					hrq->entry_count,
7812 					drq->host_index,
7813 					drq->hba_index);
7814 			rqbp->rqb_free_buffer(phba, rqb_buffer);
7815 		} else {
7816 			list_add_tail(&rqb_buffer->hbuf.list,
7817 				      &rqbp->rqb_buffer_list);
7818 			rqbp->buffer_count++;
7819 		}
7820 	}
7821 	spin_unlock_irqrestore(&phba->hbalock, flags);
7822 	return 1;
7823 }
7824 
7825 static void
7826 lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7827 {
7828 	union lpfc_sli4_cfg_shdr *shdr;
7829 	u32 shdr_status, shdr_add_status;
7830 
7831 	shdr = (union lpfc_sli4_cfg_shdr *)
7832 		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7833 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7834 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7835 	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7836 		lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
7837 				"4622 SET_FEATURE (x%x) mbox failed, "
7838 				"status x%x add_status x%x, mbx status x%x\n",
7839 				LPFC_SET_LD_SIGNAL, shdr_status,
7840 				shdr_add_status, pmb->u.mb.mbxStatus);
7841 		phba->degrade_activate_threshold = 0;
7842 		phba->degrade_deactivate_threshold = 0;
7843 		phba->fec_degrade_interval = 0;
7844 		goto out;
7845 	}
7846 
7847 	phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
7848 	phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
7849 	phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
7850 
7851 	lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
7852 			"4624 Success: da x%x dd x%x interval x%x\n",
7853 			phba->degrade_activate_threshold,
7854 			phba->degrade_deactivate_threshold,
7855 			phba->fec_degrade_interval);
7856 out:
7857 	mempool_free(pmb, phba->mbox_mem_pool);
7858 }
7859 
7860 int
7861 lpfc_read_lds_params(struct lpfc_hba *phba)
7862 {
7863 	LPFC_MBOXQ_t *mboxq;
7864 	int rc;
7865 
7866 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7867 	if (!mboxq)
7868 		return -ENOMEM;
7869 
7870 	lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
7871 	mboxq->vport = phba->pport;
7872 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
7873 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7874 	if (rc == MBX_NOT_FINISHED) {
7875 		mempool_free(mboxq, phba->mbox_mem_pool);
7876 		return -EIO;
7877 	}
7878 	return 0;
7879 }
7880 
7881 static void
7882 lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7883 {
7884 	struct lpfc_vport *vport = pmb->vport;
7885 	union lpfc_sli4_cfg_shdr *shdr;
7886 	u32 shdr_status, shdr_add_status;
7887 	u32 sig, acqe;
7888 
7889 	/* Two outcomes. (1) Set features was successful and EDC negotiation
7890 	 * is done. (2) The mailbox failed, so fall back to FPIN support only.
7891 	 */
7892 	shdr = (union lpfc_sli4_cfg_shdr *)
7893 		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7894 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7895 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7896 	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7897 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7898 				"2516 CGN SET_FEATURE mbox failed with "
7899 				"status x%x add_status x%x, mbx status x%x "
7900 				"Reset Congestion to FPINs only\n",
7901 				shdr_status, shdr_add_status,
7902 				pmb->u.mb.mbxStatus);
7903 		/* If there is a mbox error, move on to RDF */
7904 		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7905 		phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7906 		goto out;
7907 	}
7908 
7909 	/* Zero out Congestion Signal ACQE counter */
7910 	phba->cgn_acqe_cnt = 0;
7911 
7912 	acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7913 		      &pmb->u.mqe.un.set_feature);
7914 	sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7915 		     &pmb->u.mqe.un.set_feature);
7916 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7917 			"4620 SET_FEATURES Success: Freq: %ds %dms "
7918 			"Reg: x%x x%x\n", acqe, sig,
7919 			phba->cgn_reg_signal, phba->cgn_reg_fpin);
7920 out:
7921 	mempool_free(pmb, phba->mbox_mem_pool);
7922 
7923 	/* Register for FPIN events from the fabric now that the
7924 	 * EDC common_set_features has completed.
7925 	 */
7926 	lpfc_issue_els_rdf(vport, 0);
7927 }
7928 
7929 int
7930 lpfc_config_cgn_signal(struct lpfc_hba *phba)
7931 {
7932 	LPFC_MBOXQ_t *mboxq;
7933 	u32 rc;
7934 
7935 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7936 	if (!mboxq)
7937 		goto out_rdf;
7938 
7939 	lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7940 	mboxq->vport = phba->pport;
7941 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7942 
7943 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7944 			"4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7945 			"Reg: x%x x%x\n",
7946 			phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7947 			phba->cgn_reg_signal, phba->cgn_reg_fpin);
7948 
7949 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7950 	if (rc == MBX_NOT_FINISHED)
7951 		goto out;
7952 	return 0;
7953 
7954 out:
7955 	mempool_free(mboxq, phba->mbox_mem_pool);
7956 out_rdf:
7957 	/* If there is a mbox error, move on to RDF */
7958 	phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7959 	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7960 	lpfc_issue_els_rdf(phba->pport, 0);
7961 	return -EIO;
7962 }
7963 
7964 /**
7965  * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7966  * @phba: pointer to lpfc hba data structure.
7967  *
7968  * This routine initializes the per-eq idle_stat to dynamically dictate
7969  * polling decisions.
7970  *
7971  * Return codes:
7972  *   None
7973  **/
7974 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7975 {
7976 	int i;
7977 	struct lpfc_sli4_hdw_queue *hdwq;
7978 	struct lpfc_queue *eq;
7979 	struct lpfc_idle_stat *idle_stat;
7980 	u64 wall;
7981 
7982 	for_each_present_cpu(i) {
7983 		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7984 		eq = hdwq->hba_eq;
7985 
7986 		/* Skip if we've already handled this eq's primary CPU */
7987 		if (eq->chann != i)
7988 			continue;
7989 
7990 		idle_stat = &phba->sli4_hba.idle_stat[i];
7991 
7992 		idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7993 		idle_stat->prev_wall = wall;
7994 
7995 		if (phba->nvmet_support ||
7996 		    phba->cmf_active_mode != LPFC_CFG_OFF ||
7997 		    phba->intr_type != MSIX)
7998 			eq->poll_mode = LPFC_QUEUE_WORK;
7999 		else
8000 			eq->poll_mode = LPFC_THREADED_IRQ;
8001 	}
8002 
8003 	if (!phba->nvmet_support && phba->intr_type == MSIX)
8004 		schedule_delayed_work(&phba->idle_stat_delay_work,
8005 				      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
8006 }
8007 
8008 static void lpfc_sli4_dip(struct lpfc_hba *phba)
8009 {
8010 	uint32_t if_type;
8011 
8012 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
8013 	if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
8014 	    if_type == LPFC_SLI_INTF_IF_TYPE_6) {
8015 		struct lpfc_register reg_data;
8016 
8017 		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8018 			       &reg_data.word0))
8019 			return;
8020 
8021 		if (bf_get(lpfc_sliport_status_dip, &reg_data))
8022 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8023 					"2904 Firmware Dump Image Present"
8024 					" on Adapter");
8025 	}
8026 }
8027 
8028 /**
8029  * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
8030  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8031  * @entries: Number of rx_info_entry objects to allocate in ring
8032  *
8033  * Return:
8034  * 0 - Success
8035  * -ENOMEM - Failure to allocate ring memory
8036  **/
8037 int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
8038 				u32 entries)
8039 {
8040 	rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
8041 					 GFP_KERNEL);
8042 	if (!rx_monitor->ring)
8043 		return -ENOMEM;
8044 
8045 	rx_monitor->head_idx = 0;
8046 	rx_monitor->tail_idx = 0;
8047 	spin_lock_init(&rx_monitor->lock);
8048 	rx_monitor->entries = entries;
8049 
8050 	return 0;
8051 }
8052 
8053 /**
8054  * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
8055  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8056  *
8057  * Called after cancellation of cmf_timer.
8058  **/
8059 void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
8060 {
8061 	kfree(rx_monitor->ring);
8062 	rx_monitor->ring = NULL;
8063 	rx_monitor->entries = 0;
8064 	rx_monitor->head_idx = 0;
8065 	rx_monitor->tail_idx = 0;
8066 }
8067 
8068 /**
8069  * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
8070  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8071  * @entry: Pointer to rx_info_entry
8072  *
8073  * Used to insert an rx_info_entry into rx_monitor's ring.  Note that this
8074  * makes a deep copy of the rx_info_entry, not a shallow copy of the ptr.
8075  *
8076  * This is called from lpfc_cmf_timer, which is in timer/softirq context.
8077  *
8078  * In cases of old data overflow, we do a best effort of FIFO order.
8079  **/
8080 void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
8081 			    struct rx_info_entry *entry)
8082 {
8083 	struct rx_info_entry *ring = rx_monitor->ring;
8084 	u32 *head_idx = &rx_monitor->head_idx;
8085 	u32 *tail_idx = &rx_monitor->tail_idx;
8086 	spinlock_t *ring_lock = &rx_monitor->lock;
8087 	u32 ring_size = rx_monitor->entries;
8088 
8089 	spin_lock(ring_lock);
8090 	memcpy(&ring[*tail_idx], entry, sizeof(*entry));
8091 	*tail_idx = (*tail_idx + 1) % ring_size;
8092 
8093 	/* Best effort of FIFO saved data */
8094 	if (*tail_idx == *head_idx)
8095 		*head_idx = (*head_idx + 1) % ring_size;
8096 
8097 	spin_unlock(ring_lock);
8098 }
8099 
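/*
 * Ring semantics, by example: head == tail means empty, so the ring holds
 * at most (entries - 1) records.  With entries = 4, writing A B C leaves
 * the ring [A B C -] with head 0 and tail 3; writing D stores it in slot
 * 3, wraps tail to 0, and bumps head to 1, so B C D survive and A is the
 * record dropped - the "best effort" FIFO noted above.
 */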
8100 /**
8101  * lpfc_rx_monitor_report - Read out rx_monitor's ring
8102  * @phba: Pointer to lpfc_hba object
8103  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8104  * @buf: Pointer to char buffer that will contain rx monitor info data
8105  * @buf_len: Length of buf, including the terminating null char
8106  * @max_read_entries: Maximum number of entries to read out of ring
8107  *
8108  * Used to dump/read what's in rx_monitor's ring buffer.
8109  *
8110  * If buf is NULL || buf_len == 0, then it is implied that we want to log the
8111  * information to kmsg instead of filling out buf.
8112  *
8113  * Return:
8114  * Number of entries read out of the ring
8115  **/
8116 u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
8117 			   struct lpfc_rx_info_monitor *rx_monitor, char *buf,
8118 			   u32 buf_len, u32 max_read_entries)
8119 {
8120 	struct rx_info_entry *ring = rx_monitor->ring;
8121 	struct rx_info_entry *entry;
8122 	u32 *head_idx = &rx_monitor->head_idx;
8123 	u32 *tail_idx = &rx_monitor->tail_idx;
8124 	spinlock_t *ring_lock = &rx_monitor->lock;
8125 	u32 ring_size = rx_monitor->entries;
8126 	u32 cnt = 0;
8127 	char tmp[DBG_LOG_STR_SZ] = {0};
8128 	bool log_to_kmsg = !buf || !buf_len;
8129 
8130 	if (!log_to_kmsg) {
8131 		/* clear the buffer to be sure */
8132 		memset(buf, 0, buf_len);
8133 
8134 		scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
8135 					"%-8s%-8s%-8s%-16s\n",
8136 					"MaxBPI", "Tot_Data_CMF",
8137 					"Tot_Data_Cmd", "Tot_Data_Cmpl",
8138 					"Lat(us)", "Avg_IO", "Max_IO", "Bsy",
8139 					"IO_cnt", "Info", "BWutil(ms)");
8140 	}
8141 
8142 	/* Needs to be _irq because record is called from timer interrupt
8143 	 * context
8144 	 */
8145 	spin_lock_irq(ring_lock);
8146 	while (*head_idx != *tail_idx) {
8147 		entry = &ring[*head_idx];
8148 
8149 		/* Read out this entry's data. */
8150 		if (!log_to_kmsg) {
8151 			/* If !log_to_kmsg, then store to buf. */
8152 			scnprintf(tmp, sizeof(tmp),
8153 				  "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
8154 				  "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
8155 				  *head_idx, entry->max_bytes_per_interval,
8156 				  entry->cmf_bytes, entry->total_bytes,
8157 				  entry->rcv_bytes, entry->avg_io_latency,
8158 				  entry->avg_io_size, entry->max_read_cnt,
8159 				  entry->cmf_busy, entry->io_cnt,
8160 				  entry->cmf_info, entry->timer_utilization,
8161 				  entry->timer_interval);
8162 
8163 			/* Check for buffer overflow */
8164 			if ((strlen(buf) + strlen(tmp)) >= buf_len)
8165 				break;
8166 
8167 			/* Append entry's data to buffer */
8168 			strlcat(buf, tmp, buf_len);
8169 		} else {
8170 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
8171 					"4410 %02u: MBPI %llu Xmit %llu "
8172 					"Cmpl %llu Lat %llu ASz %llu Info %02u "
8173 					"BWUtil %u Int %u slot %u\n",
8174 					cnt, entry->max_bytes_per_interval,
8175 					entry->total_bytes, entry->rcv_bytes,
8176 					entry->avg_io_latency,
8177 					entry->avg_io_size, entry->cmf_info,
8178 					entry->timer_utilization,
8179 					entry->timer_interval, *head_idx);
8180 		}
8181 
8182 		*head_idx = (*head_idx + 1) % ring_size;
8183 
8184 		/* Don't feed more than max_read_entries */
8185 		cnt++;
8186 		if (cnt >= max_read_entries)
8187 			break;
8188 	}
8189 	spin_unlock_irq(ring_lock);
8190 
8191 	return cnt;
8192 }
8193 
8194 /**
8195  * lpfc_cmf_setup - Initialize idle_stat tracking
8196  * @phba: Pointer to HBA context object.
8197  *
8198  * This is called from HBA setup during driver load or when the HBA
8199  * comes online. This does all the initialization to support CMF and MI.
8200  **/
8201 static int
8202 lpfc_cmf_setup(struct lpfc_hba *phba)
8203 {
8204 	LPFC_MBOXQ_t *mboxq;
8205 	struct lpfc_dmabuf *mp;
8206 	struct lpfc_pc_sli4_params *sli4_params;
8207 	int rc, cmf, mi_ver;
8208 
8209 	rc = lpfc_sli4_refresh_params(phba);
8210 	if (unlikely(rc))
8211 		return rc;
8212 
8213 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8214 	if (!mboxq)
8215 		return -ENOMEM;
8216 
8217 	sli4_params = &phba->sli4_hba.pc_sli4_params;
8218 
8219 	/* Always try to enable MI feature if we can */
8220 	if (sli4_params->mi_ver) {
8221 		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
8222 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8223 		mi_ver = bf_get(lpfc_mbx_set_feature_mi,
8224 				 &mboxq->u.mqe.un.set_feature);
8225 
8226 		if (rc == MBX_SUCCESS) {
8227 			if (mi_ver) {
8228 				lpfc_printf_log(phba,
8229 						KERN_WARNING, LOG_CGN_MGMT,
8230 						"6215 MI is enabled\n");
8231 				sli4_params->mi_ver = mi_ver;
8232 			} else {
8233 				lpfc_printf_log(phba,
8234 						KERN_WARNING, LOG_CGN_MGMT,
8235 						"6338 MI is disabled\n");
8236 				sli4_params->mi_ver = 0;
8237 			}
8238 		} else {
8239 			/* mi_ver is already set from GET_SLI4_PARAMETERS */
8240 			lpfc_printf_log(phba, KERN_INFO,
8241 					LOG_CGN_MGMT | LOG_INIT,
8242 					"6245 Enable MI Mailbox x%x (x%x/x%x) "
8243 					"failed, rc:x%x mi:x%x\n",
8244 					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8245 					lpfc_sli_config_mbox_subsys_get
8246 						(phba, mboxq),
8247 					lpfc_sli_config_mbox_opcode_get
8248 						(phba, mboxq),
8249 					rc, sli4_params->mi_ver);
8250 		}
8251 	} else {
8252 		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8253 				"6217 MI is disabled\n");
8254 	}
8255 
8256 	/* Ensure FDMI is enabled for MI if enable_mi is set */
8257 	if (sli4_params->mi_ver)
8258 		phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
8259 
8260 	/* Always try to enable CMF feature if we can */
8261 	if (sli4_params->cmf) {
8262 		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
8263 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8264 		cmf = bf_get(lpfc_mbx_set_feature_cmf,
8265 			     &mboxq->u.mqe.un.set_feature);
8266 		if (rc == MBX_SUCCESS && cmf) {
8267 			lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8268 					"6218 CMF is enabled: mode %d\n",
8269 					phba->cmf_active_mode);
8270 		} else {
8271 			lpfc_printf_log(phba, KERN_WARNING,
8272 					LOG_CGN_MGMT | LOG_INIT,
8273 					"6219 Enable CMF Mailbox x%x (x%x/x%x) "
8274 					"failed, rc:x%x dd:x%x\n",
8275 					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8276 					lpfc_sli_config_mbox_subsys_get
8277 						(phba, mboxq),
8278 					lpfc_sli_config_mbox_opcode_get
8279 						(phba, mboxq),
8280 					rc, cmf);
8281 			sli4_params->cmf = 0;
8282 			phba->cmf_active_mode = LPFC_CFG_OFF;
8283 			goto no_cmf;
8284 		}
8285 
8286 		/* Allocate Congestion Information Buffer */
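		/* The buffer is allocated once and deliberately kept across
		 * later calls (e.g. after an HBA reset), which is why the
		 * allocation is guarded by the !cgn_i check.
		 */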
8287 		if (!phba->cgn_i) {
8288 			mp = kmalloc(sizeof(*mp), GFP_KERNEL);
8289 			if (mp)
8290 				mp->virt = dma_alloc_coherent
8291 						(&phba->pcidev->dev,
8292 						sizeof(struct lpfc_cgn_info),
8293 						&mp->phys, GFP_KERNEL);
8294 			if (!mp || !mp->virt) {
8295 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8296 						"2640 Failed to alloc memory "
8297 						"for Congestion Info\n");
8298 				kfree(mp);
8299 				sli4_params->cmf = 0;
8300 				phba->cmf_active_mode = LPFC_CFG_OFF;
8301 				goto no_cmf;
8302 			}
8303 			phba->cgn_i = mp;
8304 
8305 			/* initialize congestion buffer info */
8306 			lpfc_init_congestion_buf(phba);
8307 			lpfc_init_congestion_stat(phba);
8308 
8309 			/* Zero out Congestion Signal counters */
8310 			atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
8311 			atomic64_set(&phba->cgn_acqe_stat.warn, 0);
8312 		}
8313 
8314 		rc = lpfc_sli4_cgn_params_read(phba);
8315 		if (rc < 0) {
8316 			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8317 					"6242 Error reading Cgn Params (%d)\n",
8318 					rc);
8319 			/* Ensure CGN Mode is off */
8320 			sli4_params->cmf = 0;
8321 		} else if (!rc) {
8322 			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8323 					"6243 CGN Event empty object.\n");
8324 			/* Ensure CGN Mode is off */
8325 			sli4_params->cmf = 0;
8326 		}
8327 	} else {
8328 no_cmf:
8329 		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8330 				"6220 CMF is disabled\n");
8331 	}
8332 
8333 	/* Only register congestion buffer with firmware if BOTH
8334 	 * CMF and E2E are enabled.
8335 	 */
8336 	if (sli4_params->cmf && sli4_params->mi_ver) {
8337 		rc = lpfc_reg_congestion_buf(phba);
8338 		if (rc) {
8339 			dma_free_coherent(&phba->pcidev->dev,
8340 					  sizeof(struct lpfc_cgn_info),
8341 					  phba->cgn_i->virt, phba->cgn_i->phys);
8342 			kfree(phba->cgn_i);
8343 			phba->cgn_i = NULL;
8344 			/* Ensure CGN Mode is off */
8345 			phba->cmf_active_mode = LPFC_CFG_OFF;
8346 			sli4_params->cmf = 0;
			mempool_free(mboxq, phba->mbox_mem_pool);
8347 			return 0;
8348 		}
8349 	}
8350 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8351 			"6470 Setup MI version %d CMF %d mode %d\n",
8352 			sli4_params->mi_ver, sli4_params->cmf,
8353 			phba->cmf_active_mode);
8354 
8355 	mempool_free(mboxq, phba->mbox_mem_pool);
8356 
8357 	/* Initialize atomic counters */
8358 	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8359 	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8360 	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8361 	atomic_set(&phba->cgn_sync_warn_cnt, 0);
8362 	atomic_set(&phba->cgn_driver_evt_cnt, 0);
8363 	atomic_set(&phba->cgn_latency_evt_cnt, 0);
8364 	atomic64_set(&phba->cgn_latency_evt, 0);
8365 
8366 	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8367 
8368 	/* Allocate RX Monitor Buffer */
8369 	if (!phba->rx_monitor) {
8370 		phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
8371 					   GFP_KERNEL);
8372 
8373 		if (!phba->rx_monitor) {
8374 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8375 					"2644 Failed to alloc memory "
8376 					"for RX Monitor Buffer\n");
8377 			return -ENOMEM;
8378 		}
8379 
8380 		/* Instruct the rx_monitor object to instantiate its ring */
8381 		if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
8382 						LPFC_MAX_RXMONITOR_ENTRY)) {
8383 			kfree(phba->rx_monitor);
8384 			phba->rx_monitor = NULL;
8385 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8386 					"2645 Failed to alloc memory "
8387 					"for RX Monitor's Ring\n");
8388 			return -ENOMEM;
8389 		}
8390 	}
8391 
8392 	return 0;
8393 }
8394 
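/**
 * lpfc_set_host_tm - Set the host date and time on the adapter
 * @phba: Pointer to HBA context object.
 *
 * Issues a SET_HOST_DATA mailbox command in polling mode to push the
 * current wall-clock time to the firmware.
 *
 * Return: mailbox status from lpfc_sli_issue_mbox(), or -ENOMEM if a
 * mailbox could not be allocated.
 **/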
8395 static int
8396 lpfc_set_host_tm(struct lpfc_hba *phba)
8397 {
8398 	LPFC_MBOXQ_t *mboxq;
8399 	uint32_t len, rc;
8400 	struct timespec64 cur_time;
8401 	struct tm broken;
8402 	uint32_t month, day, year;
8403 	uint32_t hour, minute, second;
8404 	struct lpfc_mbx_set_host_date_time *tm;
8405 
8406 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8407 	if (!mboxq)
8408 		return -ENOMEM;
8409 
8410 	len = sizeof(struct lpfc_mbx_set_host_data) -
8411 		sizeof(struct lpfc_sli4_cfg_mhdr);
8412 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8413 			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8414 			 LPFC_SLI4_MBX_EMBED);
8415 
8416 	mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8417 	mboxq->u.mqe.un.set_host_data.param_len =
8418 			sizeof(struct lpfc_mbx_set_host_date_time);
8419 	tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8420 	ktime_get_real_ts64(&cur_time);
8421 	time64_to_tm(cur_time.tv_sec, 0, &broken);
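	/* struct tm uses a 0-based month and counts years from 1900; the
	 * mailbox expects a 1-based month and a two-digit year relative to
	 * 2000, hence the +1 and -100 adjustments below.
	 */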
8422 	month = broken.tm_mon + 1;
8423 	day = broken.tm_mday;
8424 	year = broken.tm_year - 100;
8425 	hour = broken.tm_hour;
8426 	minute = broken.tm_min;
8427 	second = broken.tm_sec;
8428 	bf_set(lpfc_mbx_set_host_month, tm, month);
8429 	bf_set(lpfc_mbx_set_host_day, tm, day);
8430 	bf_set(lpfc_mbx_set_host_year, tm, year);
8431 	bf_set(lpfc_mbx_set_host_hour, tm, hour);
8432 	bf_set(lpfc_mbx_set_host_min, tm, minute);
8433 	bf_set(lpfc_mbx_set_host_sec, tm, second);
8434 
8435 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8436 	mempool_free(mboxq, phba->mbox_mem_pool);
8437 	return rc;
8438 }
8439 
8440 /**
8441  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8442  * @phba: Pointer to HBA context object.
8443  *
8444  * This function is the main SLI4 device initialization PCI function. This
8445  * function is called by the HBA initialization code, HBA reset code and
8446  * HBA error attention handler code. Caller is not required to hold any
8447  * locks.
8448  **/
8449 int
8450 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8451 {
8452 	int rc, i, cnt, len, dd;
8453 	LPFC_MBOXQ_t *mboxq;
8454 	struct lpfc_mqe *mqe;
8455 	uint8_t *vpd;
8456 	uint32_t vpd_size;
8457 	uint32_t ftr_rsp = 0;
8458 	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8459 	struct lpfc_vport *vport = phba->pport;
8460 	struct lpfc_dmabuf *mp;
8461 	struct lpfc_rqb *rqbp;
8462 	u32 flg;
8463 
8464 	/* Perform a PCI function reset to start from clean */
8465 	rc = lpfc_pci_function_reset(phba);
8466 	if (unlikely(rc))
8467 		return -ENODEV;
8468 
8469 	/* Check the HBA Host Status Register for readiness */
8470 	rc = lpfc_sli4_post_status_check(phba);
8471 	if (unlikely(rc))
8472 		return -ENODEV;
8473 	else {
8474 		spin_lock_irq(&phba->hbalock);
8475 		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8476 		flg = phba->sli.sli_flag;
8477 		spin_unlock_irq(&phba->hbalock);
8478 		/* Allow a little time after setting SLI_ACTIVE for any polled
8479 		 * MBX commands to complete via BSG.
8480 		 */
8481 		for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8482 			msleep(20);
8483 			spin_lock_irq(&phba->hbalock);
8484 			flg = phba->sli.sli_flag;
8485 			spin_unlock_irq(&phba->hbalock);
8486 		}
8487 	}
8488 	clear_bit(HBA_SETUP, &phba->hba_flag);
8489 
8490 	lpfc_sli4_dip(phba);
8491 
8492 	/*
8493 	 * Allocate a single mailbox container for initializing the
8494 	 * port.
8495 	 */
8496 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8497 	if (!mboxq)
8498 		return -ENOMEM;
8499 
8500 	/* Issue READ_REV to collect vpd and FW information. */
8501 	vpd_size = SLI4_PAGE_SIZE;
8502 	vpd = kzalloc(vpd_size, GFP_KERNEL);
8503 	if (!vpd) {
8504 		rc = -ENOMEM;
8505 		goto out_free_mbox;
8506 	}
8507 
8508 	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8509 	if (unlikely(rc)) {
8510 		kfree(vpd);
8511 		goto out_free_mbox;
8512 	}
8513 
8514 	mqe = &mboxq->u.mqe;
8515 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8516 	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8517 		set_bit(HBA_FCOE_MODE, &phba->hba_flag);
8518 		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
8519 	} else {
8520 		clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
8521 	}
8522 
8523 	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8524 		LPFC_DCBX_CEE_MODE)
8525 		set_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
8526 	else
8527 		clear_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
8528 
8529 	clear_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
8530 
8531 	if (phba->sli_rev != LPFC_SLI_REV4) {
8532 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8533 			"0376 READ_REV Error. SLI Level %d "
8534 			"FCoE enabled %d\n",
8535 			phba->sli_rev,
8536 			test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 1 : 0);
8537 		rc = -EIO;
8538 		kfree(vpd);
8539 		goto out_free_mbox;
8540 	}
8541 
8542 	rc = lpfc_set_host_tm(phba);
8543 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8544 			"6468 Set host date / time: Status x%x:\n", rc);
8545 
8546 	/*
8547 	 * Continue initialization with default values even if the driver
8548 	 * failed to read FCoE param config regions. Only read the parameters
8549 	 * if the board is FCoE.
8550 	 */
8551 	if (test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
8552 	    lpfc_sli4_read_fcoe_params(phba))
8553 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8554 			"2570 Failed to read FCoE parameters\n");
8555 
8556 	/*
8557 	 * Retrieve the SLI4 device physical port name; failure to do so
8558 	 * is considered non-fatal.
8559 	 */
8560 	rc = lpfc_sli4_retrieve_pport_name(phba);
8561 	if (!rc)
8562 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8563 				"3080 Successful retrieving SLI4 device "
8564 				"physical port name: %s.\n", phba->Port);
8565 
8566 	rc = lpfc_sli4_get_ctl_attr(phba);
8567 	if (!rc)
8568 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8569 				"8351 Successful retrieving SLI4 device "
8570 				"CTL ATTR\n");
8571 
8572 	/*
8573 	 * Evaluate the read rev and vpd data. Populate the driver
8574 	 * state with the results. If this routine fails, the failure
8575 	 * is not fatal as the driver will use generic values.
8576 	 */
8577 	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8578 	if (unlikely(!rc))
8579 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8580 				"0377 Error %d parsing vpd. "
8581 				"Using defaults.\n", rc);
8582 	kfree(vpd);
8583 
8584 	/* Save information as VPD data */
8585 	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8586 	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8587 
8588 	/*
8589 	 * This is because the first G7 ASIC doesn't support the standard
8590 	 * 0x5a NVME cmd descriptor type/subtype.
8591 	 */
8592 	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8593 			LPFC_SLI_INTF_IF_TYPE_6) &&
8594 	    (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8595 	    (phba->vpd.rev.smRev == 0) &&
8596 	    (phba->cfg_nvme_embed_cmd == 1))
8597 		phba->cfg_nvme_embed_cmd = 0;
8598 
8599 	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8600 	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8601 					 &mqe->un.read_rev);
8602 	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8603 				       &mqe->un.read_rev);
8604 	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8605 					    &mqe->un.read_rev);
8606 	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8607 					   &mqe->un.read_rev);
8608 	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8609 	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8610 	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8611 	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8612 	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8613 	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8614 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8615 			"(%d):0380 READ_REV Status x%x "
8616 			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8617 			mboxq->vport ? mboxq->vport->vpi : 0,
8618 			bf_get(lpfc_mqe_status, mqe),
8619 			phba->vpd.rev.opFwName,
8620 			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8621 			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8622 
8623 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8624 	    LPFC_SLI_INTF_IF_TYPE_0) {
8625 		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8626 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8627 		if (rc == MBX_SUCCESS) {
8628 			set_bit(HBA_RECOVERABLE_UE, &phba->hba_flag);
8629 			/* Set 1Sec interval to detect UE */
8630 			phba->eratt_poll_interval = 1;
8631 			phba->sli4_hba.ue_to_sr = bf_get(
8632 					lpfc_mbx_set_feature_UESR,
8633 					&mboxq->u.mqe.un.set_feature);
8634 			phba->sli4_hba.ue_to_rp = bf_get(
8635 					lpfc_mbx_set_feature_UERP,
8636 					&mboxq->u.mqe.un.set_feature);
8637 		}
8638 	}
8639 
8640 	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8641 		/* Enable MDS Diagnostics only if the SLI Port supports it */
8642 		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8643 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8644 		if (rc != MBX_SUCCESS)
8645 			phba->mds_diags_support = 0;
8646 	}
8647 
8648 	/*
8649 	 * Discover the port's supported feature set and match it against the
8650 	 * host's requests.
8651 	 */
8652 	lpfc_request_features(phba, mboxq);
8653 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8654 	if (unlikely(rc)) {
8655 		rc = -EIO;
8656 		goto out_free_mbox;
8657 	}
8658 
8659 	/* Disable VMID if app header is not supported */
8660 	if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8661 						  &mqe->un.req_ftrs))) {
8662 		bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8663 		phba->cfg_vmid_app_header = 0;
8664 		lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8665 				"1242 vmid feature not supported\n");
8666 	}
8667 
8668 	/*
8669 	 * The port must support FCP initiator mode as this is the
8670 	 * only mode running in the host.
8671 	 */
8672 	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8673 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8674 				"0378 No support for fcpi mode.\n");
8675 		ftr_rsp++;
8676 	}
8677 
8678 	/* Performance Hints are ONLY for FCoE */
8679 	if (test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
8680 		if (bf_get(lpfc_mbx_rq_ftr_rsp_perfh, &mqe->un.req_ftrs))
8681 			phba->sli3_options |= LPFC_SLI4_PERFH_ENABLED;
8682 		else
8683 			phba->sli3_options &= ~LPFC_SLI4_PERFH_ENABLED;
8684 	}
8685 
8686 	/*
8687 	 * If the port cannot support the host's requested features
8688 	 * then turn off the global config parameters to disable the
8689 	 * feature in the driver.  This is not a fatal error.
8690 	 */
8691 	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8692 		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8693 			phba->cfg_enable_bg = 0;
8694 			phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8695 			ftr_rsp++;
8696 		}
8697 	}
8698 
8699 	if (phba->max_vpi && phba->cfg_enable_npiv &&
8700 	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8701 		ftr_rsp++;
8702 
8703 	if (ftr_rsp) {
8704 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8705 				"0379 Feature Mismatch Data: x%08x %08x "
8706 				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8707 				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8708 				phba->cfg_enable_npiv, phba->max_vpi);
8709 		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8710 			phba->cfg_enable_bg = 0;
8711 		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8712 			phba->cfg_enable_npiv = 0;
8713 	}
8714 
8715 	/* These SLI3 features are assumed in SLI4 */
8716 	spin_lock_irq(&phba->hbalock);
8717 	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8718 	spin_unlock_irq(&phba->hbalock);
8719 
8720 	/* Always try to enable dual dump feature if we can */
8721 	lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8722 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8723 	dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8724 	if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8725 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8726 				"6448 Dual Dump is enabled\n");
8727 	else
8728 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8729 				"6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8730 				"rc:x%x dd:x%x\n",
8731 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8732 				lpfc_sli_config_mbox_subsys_get(
8733 					phba, mboxq),
8734 				lpfc_sli_config_mbox_opcode_get(
8735 					phba, mboxq),
8736 				rc, dd);
8737 	/*
8738 	 * Allocate all resources (xri, rpi, vpi, vfi) now.  Subsequent
8739 	 * calls depend on these resources to complete port setup.
8740 	 */
8741 	rc = lpfc_sli4_alloc_resource_identifiers(phba);
8742 	if (rc) {
8743 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8744 				"2920 Failed to alloc Resource IDs "
8745 				"rc = x%x\n", rc);
8746 		goto out_free_mbox;
8747 	}
8748 
8749 	lpfc_set_host_data(phba, mboxq);
8750 
8751 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8752 	if (rc) {
8753 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8754 				"2134 Failed to set host os driver version %x\n",
8755 				rc);
8756 	}
8757 
8758 	/* Read the port's service parameters. */
8759 	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8760 	if (rc) {
8761 		phba->link_state = LPFC_HBA_ERROR;
8762 		rc = -ENOMEM;
8763 		goto out_free_mbox;
8764 	}
8765 
8766 	mboxq->vport = vport;
8767 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8768 	mp = mboxq->ctx_buf;
8769 	if (rc == MBX_SUCCESS) {
8770 		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8771 		rc = 0;
8772 	}
8773 
8774 	/*
8775 	 * This memory was allocated by the lpfc_read_sparam routine but is
8776 	 * no longer needed.  It is released and ctx_buf NULLed to prevent
8777 	 * unintended pointer access as the mbox is reused.
8778 	 */
8779 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
8780 	kfree(mp);
8781 	mboxq->ctx_buf = NULL;
8782 	if (unlikely(rc)) {
8783 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8784 				"0382 READ_SPARAM command failed "
8785 				"status %d, mbxStatus x%x\n",
8786 				rc, bf_get(lpfc_mqe_status, mqe));
8787 		phba->link_state = LPFC_HBA_ERROR;
8788 		rc = -EIO;
8789 		goto out_free_mbox;
8790 	}
8791 
8792 	lpfc_update_vport_wwn(vport);
8793 
8794 	/* Update the fc_host data structures with new wwn. */
8795 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8796 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8797 
8798 	/* Create all the SLI4 queues */
8799 	rc = lpfc_sli4_queue_create(phba);
8800 	if (rc) {
8801 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8802 				"3089 Failed to allocate queues\n");
8803 		rc = -ENODEV;
8804 		goto out_free_mbox;
8805 	}
8806 	/* Set up all the queues to the device */
8807 	rc = lpfc_sli4_queue_setup(phba);
8808 	if (unlikely(rc)) {
8809 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8810 				"0381 Error %d during queue setup.\n", rc);
8811 		goto out_stop_timers;
8812 	}
8813 	/* Initialize the driver internal SLI layer lists. */
8814 	lpfc_sli4_setup(phba);
8815 	lpfc_sli4_queue_init(phba);
8816 
8817 	/* update host els xri-sgl sizes and mappings */
8818 	rc = lpfc_sli4_els_sgl_update(phba);
8819 	if (unlikely(rc)) {
8820 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8821 				"1400 Failed to update xri-sgl size and "
8822 				"mapping: %d\n", rc);
8823 		goto out_destroy_queue;
8824 	}
8825 
8826 	/* register the els sgl pool to the port */
8827 	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8828 				       phba->sli4_hba.els_xri_cnt);
8829 	if (unlikely(rc < 0)) {
8830 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8831 				"0582 Error %d during els sgl post "
8832 				"operation\n", rc);
8833 		rc = -ENODEV;
8834 		goto out_destroy_queue;
8835 	}
8836 	phba->sli4_hba.els_xri_cnt = rc;
8837 
8838 	if (phba->nvmet_support) {
8839 		/* update host nvmet xri-sgl sizes and mappings */
8840 		rc = lpfc_sli4_nvmet_sgl_update(phba);
8841 		if (unlikely(rc)) {
8842 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8843 					"6308 Failed to update nvmet-sgl size "
8844 					"and mapping: %d\n", rc);
8845 			goto out_destroy_queue;
8846 		}
8847 
8848 		/* register the nvmet sgl pool to the port */
8849 		rc = lpfc_sli4_repost_sgl_list(
8850 			phba,
8851 			&phba->sli4_hba.lpfc_nvmet_sgl_list,
8852 			phba->sli4_hba.nvmet_xri_cnt);
8853 		if (unlikely(rc < 0)) {
8854 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8855 					"3117 Error %d during nvmet "
8856 					"sgl post\n", rc);
8857 			rc = -ENODEV;
8858 			goto out_destroy_queue;
8859 		}
8860 		phba->sli4_hba.nvmet_xri_cnt = rc;
8861 
8862 		/* We allocate an iocbq for every receive context SGL.
8863 		 * The additional allocation is for abort and ls handling.
8864 		 */
8865 		cnt = phba->sli4_hba.nvmet_xri_cnt +
8866 			phba->sli4_hba.max_cfg_param.max_xri;
8867 	} else {
8868 		/* update host common xri-sgl sizes and mappings */
8869 		rc = lpfc_sli4_io_sgl_update(phba);
8870 		if (unlikely(rc)) {
8871 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8872 					"6082 Failed to update nvme-sgl size "
8873 					"and mapping: %d\n", rc);
8874 			goto out_destroy_queue;
8875 		}
8876 
8877 		/* register the allocated common sgl pool to the port */
8878 		rc = lpfc_sli4_repost_io_sgl_list(phba);
8879 		if (unlikely(rc)) {
8880 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8881 					"6116 Error %d during nvme sgl post "
8882 					"operation\n", rc);
8883 			/* Some NVME buffers were moved to abort nvme list */
8884 			/* A pci function reset will repost them */
8885 			rc = -ENODEV;
8886 			goto out_destroy_queue;
8887 		}
8888 		/* Each lpfc_io_buf job structure has an iocbq element.
8889 		 * This cnt provides for abort, els, ct and ls requests.
8890 		 */
8891 		cnt = phba->sli4_hba.max_cfg_param.max_xri;
8892 	}
8893 
8894 	if (!phba->sli.iocbq_lookup) {
8895 		/* Initialize and populate the iocb list per host */
8896 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8897 				"2821 initialize iocb list with %d entries\n",
8898 				cnt);
8899 		rc = lpfc_init_iocb_list(phba, cnt);
8900 		if (rc) {
8901 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8902 					"1413 Failed to init iocb list.\n");
8903 			goto out_destroy_queue;
8904 		}
8905 	}
8906 
8907 	if (phba->nvmet_support)
8908 		lpfc_nvmet_create_targetport(phba);
8909 
8910 	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8911 		/* Post initial buffers to all RQs created */
8912 		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8913 			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8914 			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8915 			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8916 			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8917 			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8918 			rqbp->buffer_count = 0;
8919 
8920 			lpfc_post_rq_buffer(
8921 				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8922 				phba->sli4_hba.nvmet_mrq_data[i],
8923 				phba->cfg_nvmet_mrq_post, i);
8924 		}
8925 	}
8926 
8927 	/* Post the rpi header region to the device. */
8928 	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8929 	if (unlikely(rc)) {
8930 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8931 				"0393 Error %d during rpi post operation\n",
8932 				rc);
8933 		rc = -ENODEV;
8934 		goto out_free_iocblist;
8935 	}
8936 	lpfc_sli4_node_prep(phba);
8937 
8938 	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
8939 		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8940 			/*
8941 			 * The FC Port needs to register FCFI (index 0)
8942 			 */
8943 			lpfc_reg_fcfi(phba, mboxq);
8944 			mboxq->vport = phba->pport;
8945 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8946 			if (rc != MBX_SUCCESS)
8947 				goto out_unset_queue;
8948 			rc = 0;
8949 			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8950 						&mboxq->u.mqe.un.reg_fcfi);
8951 		} else {
8952 			/* We are a NVME Target mode with MRQ > 1 */
8953 
8954 			/* First register the FCFI */
8955 			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
8956 			mboxq->vport = phba->pport;
8957 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8958 			if (rc != MBX_SUCCESS)
8959 				goto out_unset_queue;
8960 			rc = 0;
8961 			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
8962 						&mboxq->u.mqe.un.reg_fcfi_mrq);
8963 
8964 			/* Next register the MRQs */
8965 			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
8966 			mboxq->vport = phba->pport;
8967 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8968 			if (rc != MBX_SUCCESS)
8969 				goto out_unset_queue;
8970 			rc = 0;
8971 		}
8972 		/* Check if the port is configured to be disabled */
8973 		lpfc_sli_read_link_ste(phba);
8974 	}
8975 
8976 	/* Don't post more new bufs if repost already recovered
8977 	 * the nvme sgls.
8978 	 */
8979 	if (phba->nvmet_support == 0) {
8980 		if (phba->sli4_hba.io_xri_cnt == 0) {
8981 			len = lpfc_new_io_buf(
8982 					      phba, phba->sli4_hba.io_xri_max);
8983 			if (len == 0) {
8984 				rc = -ENOMEM;
8985 				goto out_unset_queue;
8986 			}
8987 
8988 			if (phba->cfg_xri_rebalancing)
8989 				lpfc_create_multixri_pools(phba);
8990 		}
8991 	} else {
8992 		phba->cfg_xri_rebalancing = 0;
8993 	}
8994 
8995 	/* Allow asynchronous mailbox command to go through */
8996 	spin_lock_irq(&phba->hbalock);
8997 	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
8998 	spin_unlock_irq(&phba->hbalock);
8999 
9000 	/* Post receive buffers to the device */
9001 	lpfc_sli4_rb_setup(phba);
9002 
9003 	/* Reset HBA FCF states after HBA reset */
9004 	phba->fcf.fcf_flag = 0;
9005 	phba->fcf.current_rec.flag = 0;
9006 
9007 	/* Start the ELS watchdog timer */
9008 	mod_timer(&vport->els_tmofunc,
9009 		  jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2)));
9010 
9011 	/* Start heart beat timer */
9012 	mod_timer(&phba->hb_tmofunc,
9013 		  jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
9014 	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
9015 	clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
9016 	phba->last_completion_time = jiffies;
9017 
9018 	/* start eq_delay heartbeat */
9019 	if (phba->cfg_auto_imax)
9020 		queue_delayed_work(phba->wq, &phba->eq_delay_work,
9021 				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
9022 
9023 	/* start per phba idle_stat_delay heartbeat */
9024 	lpfc_init_idle_stat_hb(phba);
9025 
9026 	/* Start error attention (ERATT) polling timer */
9027 	mod_timer(&phba->eratt_poll,
9028 		  jiffies + msecs_to_jiffies(1000 * phba->eratt_poll_interval));
9029 
9030 	/*
9031 	 * The port is ready, set the host's link state to LINK_DOWN
9032 	 * in preparation for link interrupts.
9033 	 */
9034 	spin_lock_irq(&phba->hbalock);
9035 	phba->link_state = LPFC_LINK_DOWN;
9036 
9037 	/* Check if physical ports are trunked */
9038 	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
9039 		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
9040 	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
9041 		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
9042 	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
9043 		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
9044 	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
9045 		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
9046 	spin_unlock_irq(&phba->hbalock);
9047 
9048 	/* Arm the CQs and then EQs on device */
9049 	lpfc_sli4_arm_cqeq_intr(phba);
9050 
9051 	/* Indicate device interrupt mode */
9052 	phba->sli4_hba.intr_enable = 1;
9053 
9054 	/* Setup CMF after HBA is initialized */
9055 	lpfc_cmf_setup(phba);
9056 
9057 	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
9058 	    test_bit(LINK_DISABLED, &phba->hba_flag)) {
9059 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9060 				"3103 Adapter Link is disabled.\n");
9061 		lpfc_down_link(phba, mboxq);
9062 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9063 		if (rc != MBX_SUCCESS) {
9064 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9065 					"3104 Adapter failed to issue "
9066 					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
9067 			goto out_io_buff_free;
9068 		}
9069 	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
9070 		/* don't perform init_link on SLI4 FC port loopback test */
9071 		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
9072 			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
9073 			if (rc)
9074 				goto out_io_buff_free;
9075 		}
9076 	}
9077 	mempool_free(mboxq, phba->mbox_mem_pool);
9078 
9079 	/* Enable RAS FW log support */
9080 	lpfc_sli4_ras_setup(phba);
9081 
9082 	set_bit(HBA_SETUP, &phba->hba_flag);
9083 	return rc;
9084 
9085 out_io_buff_free:
9086 	/* Free allocated IO Buffers */
9087 	lpfc_io_free(phba);
9088 out_unset_queue:
9089 	/* Unset all the queues set up in this routine when error out */
9090 	lpfc_sli4_queue_unset(phba);
9091 out_free_iocblist:
9092 	lpfc_free_iocb_list(phba);
9093 out_destroy_queue:
9094 	lpfc_sli4_queue_destroy(phba);
9095 out_stop_timers:
9096 	lpfc_stop_hba_timers(phba);
9097 out_free_mbox:
9098 	mempool_free(mboxq, phba->mbox_mem_pool);
9099 	return rc;
9100 }
9101 
9102 /**
9103  * lpfc_mbox_timeout - Timeout call back function for mbox timer
9104  * @t: Context to fetch pointer to hba structure from.
9105  *
9106  * This is the callback function for mailbox timer. The mailbox
9107  * timer is armed when a new mailbox command is issued and the timer
9108  * is deleted when the mailbox completes. The function is called by
9109  * the kernel timer code when a mailbox does not complete within
9110  * expected time. This function wakes up the worker thread to
9111  * process the mailbox timeout and returns. All the processing is
9112  * done by the worker thread function lpfc_mbox_timeout_handler.
9113  **/
9114 void
9115 lpfc_mbox_timeout(struct timer_list *t)
9116 {
9117 	struct lpfc_hba  *phba = from_timer(phba, t, sli.mbox_tmo);
9118 	unsigned long iflag;
9119 	uint32_t tmo_posted;
9120 
9121 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
9122 	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
9123 	if (!tmo_posted)
9124 		phba->pport->work_port_events |= WORKER_MBOX_TMO;
9125 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
9126 
9127 	if (!tmo_posted)
9128 		lpfc_worker_wake_up(phba);
9130 }
9131 
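/*
 * For reference, the timer serviced above is armed when a non-polled
 * mailbox command is issued, e.g. in lpfc_sli_issue_mbox_s3() below:
 *
 *	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) * 1000);
 *	mod_timer(&psli->mbox_tmo, jiffies + timeout);
 */
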
9132 /**
9133  * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
9134  *                                    are pending
9135  * @phba: Pointer to HBA context object.
9136  *
9137  * This function checks if any mailbox completions are present on the mailbox
9138  * completion queue.
9139  **/
9140 static bool
9141 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
9142 {
9144 	uint32_t idx;
9145 	struct lpfc_queue *mcq;
9146 	struct lpfc_mcqe *mcqe;
9147 	bool pending_completions = false;
9148 	uint8_t	qe_valid;
9149 
9150 	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9151 		return false;
9152 
9153 	/* Check for completions on mailbox completion queue */
9154 
9155 	mcq = phba->sli4_hba.mbx_cq;
9156 	idx = mcq->hba_index;
9157 	qe_valid = mcq->qe_valid;
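	/* Each CQE carries a valid (phase) bit; an entry belongs to the
	 * current pass over the queue only while its valid bit matches the
	 * phase the driver expects, which toggles on every wrap (see the
	 * cqav handling below).
	 */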
9158 	while (bf_get_le32(lpfc_cqe_valid,
9159 	       (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
9160 		mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
9161 		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
9162 		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
9163 			pending_completions = true;
9164 			break;
9165 		}
9166 		idx = (idx + 1) % mcq->entry_count;
9167 		if (mcq->hba_index == idx)
9168 			break;
9169 
9170 		/* if the index wrapped around, toggle the valid bit */
9171 		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
9172 			qe_valid = (qe_valid) ? 0 : 1;
9173 	}
9174 	return pending_completions;
9176 }
9177 
9178 /**
9179  * lpfc_sli4_process_missed_mbox_completions - process mbox completions
9180  *					      that were missed.
9181  * @phba: Pointer to HBA context object.
9182  *
9183  * For sli4, it is possible to miss an interrupt. As such mbox completions
9184  * may be missed, causing erroneous mailbox timeouts to occur. This function
9185  * checks to see if mbox completions are on the mailbox completion queue
9186  * and will process all the completions associated with the eq for the
9187  * mailbox completion queue.
9188  **/
9189 static bool
9190 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
9191 {
9192 	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
9193 	uint32_t eqidx;
9194 	struct lpfc_queue *fpeq = NULL;
9195 	struct lpfc_queue *eq;
9196 	bool mbox_pending;
9197 
9198 	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9199 		return false;
9200 
9201 	/* Find the EQ associated with the mbox CQ */
9202 	if (sli4_hba->hdwq) {
9203 		for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
9204 			eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
9205 			if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
9206 				fpeq = eq;
9207 				break;
9208 			}
9209 		}
9210 	}
9211 	if (!fpeq)
9212 		return false;
9213 
9214 	/* Turn off interrupts from this EQ */
9215 
9216 	sli4_hba->sli4_eq_clr_intr(fpeq);
9217 
9218 	/* Check to see if a mbox completion is pending */
9219 
9220 	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
9221 
9222 	/*
9223 	 * If a mbox completion is pending, process all the events on EQ
9224 	 * associated with the mbox completion queue (this could include
9225 	 * mailbox commands, async events, els commands, receive queue data
9226 	 * and fcp commands)
9227 	 */
9228 
9229 	if (mbox_pending)
9230 		/* process and rearm the EQ */
9231 		lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
9232 				     LPFC_QUEUE_WORK);
9233 	else
9234 		/* Always clear and re-arm the EQ */
9235 		sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
9236 
9237 	return mbox_pending;
9239 }
9240 
9241 /**
9242  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
9243  * @phba: Pointer to HBA context object.
9244  *
9245  * This function is called from worker thread when a mailbox command times out.
9246  * The caller is not required to hold any locks. This function will reset the
9247  * HBA and recover all the pending commands.
9248  **/
9249 void
9250 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
9251 {
9252 	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
9253 	MAILBOX_t *mb = NULL;
9254 
9255 	struct lpfc_sli *psli = &phba->sli;
9256 
9257 	/* If the mailbox completed, process the completion */
9258 	lpfc_sli4_process_missed_mbox_completions(phba);
9259 
9260 	if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
9261 		return;
9262 
9263 	if (pmbox != NULL)
9264 		mb = &pmbox->u.mb;
9265 	/* Check the pmbox pointer first.  There is a race condition
9266 	 * between the mbox timeout handler getting executed in the
9267 	 * worklist and the mailbox actually completing. When this
9268 	 * race condition occurs, the mbox_active will be NULL.
9269 	 */
9270 	spin_lock_irq(&phba->hbalock);
9271 	if (pmbox == NULL) {
9272 		lpfc_printf_log(phba, KERN_WARNING,
9273 				LOG_MBOX | LOG_SLI,
9274 				"0353 Active Mailbox cleared - mailbox timeout "
9275 				"exiting\n");
9276 		spin_unlock_irq(&phba->hbalock);
9277 		return;
9278 	}
9279 
9280 	/* Mbox cmd <mbxCommand> timeout */
9281 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9282 			"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
9283 			mb->mbxCommand,
9284 			phba->pport->port_state,
9285 			phba->sli.sli_flag,
9286 			phba->sli.mbox_active);
9287 	spin_unlock_irq(&phba->hbalock);
9288 
9289 	/* Setting state unknown so lpfc_sli_abort_iocb_ring
9290 	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
9291 	 * it to fail all outstanding SCSI IO.
9292 	 */
9293 	set_bit(MBX_TMO_ERR, &phba->bit_flags);
9294 	spin_lock_irq(&phba->pport->work_port_lock);
9295 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9296 	spin_unlock_irq(&phba->pport->work_port_lock);
9297 	spin_lock_irq(&phba->hbalock);
9298 	phba->link_state = LPFC_LINK_UNKNOWN;
9299 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9300 	spin_unlock_irq(&phba->hbalock);
9301 
9302 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9303 			"0345 Resetting board due to mailbox timeout\n");
9304 
9305 	/* Reset the HBA device */
9306 	lpfc_reset_hba(phba);
9307 }
9308 
9309 /**
9310  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9311  * @phba: Pointer to HBA context object.
9312  * @pmbox: Pointer to mailbox object.
9313  * @flag: Flag indicating how the mailbox need to be processed.
9314  *
9315  * This function is called by discovery code and HBA management code
9316  * to submit a mailbox command to firmware with SLI-3 interface spec. This
9317  * function gets the hbalock to protect the data structures.
9318  * The mailbox command can be submitted in polling mode, in which case
9319  * this function will wait in a polling loop for the completion of the
9320  * mailbox.
9321  * If the mailbox is submitted in no_wait mode (not polling) the
9322  * function will submit the command and return immediately without waiting
9323  * for the mailbox completion. The no_wait mode is supported only when
9324  * the HBA is in SLI2/SLI3 mode, with interrupts enabled.
9325  * The SLI interface allows only one mailbox pending at a time. If the
9326  * mailbox is issued in polling mode and there is already a mailbox
9327  * pending, then the function will return an error. If the mailbox is issued
9328  * in NO_WAIT mode and there is a mailbox pending already, the function
9329  * will return MBX_BUSY after queuing the mailbox into mailbox queue.
9330  * The sli layer owns the mailbox object until the completion of mailbox
9331  * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
9332  * return codes the caller owns the mailbox command after the return of
9333  * the function.
9334  **/
9335 static int
9336 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9337 		       uint32_t flag)
9338 {
9339 	MAILBOX_t *mbx;
9340 	struct lpfc_sli *psli = &phba->sli;
9341 	uint32_t status, evtctr;
9342 	uint32_t ha_copy, hc_copy;
9343 	int i;
9344 	unsigned long timeout;
9345 	unsigned long drvr_flag = 0;
9346 	uint32_t word0, ldata;
9347 	void __iomem *to_slim;
9348 	int processing_queue = 0;
9349 
9350 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
9351 	if (!pmbox) {
9352 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9353 		/* processing mbox queue from intr_handler */
9354 		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9355 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9356 			return MBX_SUCCESS;
9357 		}
9358 		processing_queue = 1;
9359 		pmbox = lpfc_mbox_get(phba);
9360 		if (!pmbox) {
9361 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9362 			return MBX_SUCCESS;
9363 		}
9364 	}
9365 
9366 	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9367 		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9368 		if (!pmbox->vport) {
9369 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9370 			lpfc_printf_log(phba, KERN_ERR,
9371 					LOG_MBOX | LOG_VPORT,
9372 					"1806 Mbox x%x failed. No vport\n",
9373 					pmbox->u.mb.mbxCommand);
9374 			dump_stack();
9375 			goto out_not_finished;
9376 		}
9377 	}
9378 
9379 	/* If the PCI channel is in offline state, do not post mbox. */
9380 	if (unlikely(pci_channel_offline(phba->pcidev))) {
9381 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9382 		goto out_not_finished;
9383 	}
9384 
9385 	/* If the HBA has a deferred error attention, fail the mailbox command. */
9386 	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
9387 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9388 		goto out_not_finished;
9389 	}
9390 
9391 	psli = &phba->sli;
9392 
9393 	mbx = &pmbox->u.mb;
9394 	status = MBX_SUCCESS;
9395 
9396 	if (phba->link_state == LPFC_HBA_ERROR) {
9397 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9398 
9399 		/* Mbox command <mbxCommand> cannot issue */
9400 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9401 				"(%d):0311 Mailbox command x%x cannot "
9402 				"issue Data: x%x x%x\n",
9403 				pmbox->vport ? pmbox->vport->vpi : 0,
9404 				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9405 		goto out_not_finished;
9406 	}
9407 
9408 	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9409 		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9410 			!(hc_copy & HC_MBINT_ENA)) {
9411 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9412 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9413 				"(%d):2528 Mailbox command x%x cannot "
9414 				"issue Data: x%x x%x\n",
9415 				pmbox->vport ? pmbox->vport->vpi : 0,
9416 				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9417 			goto out_not_finished;
9418 		}
9419 	}
9420 
9421 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9422 		/* Polling for a mbox command when another one is already active
9423 		 * is not allowed in SLI. Also, the driver must have established
9424 		 * SLI2 mode to queue and process multiple mbox commands.
9425 		 */
9426 
9427 		if (flag & MBX_POLL) {
9428 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9429 
9430 			/* Mbox command <mbxCommand> cannot issue */
9431 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9432 					"(%d):2529 Mailbox command x%x "
9433 					"cannot issue Data: x%x x%x\n",
9434 					pmbox->vport ? pmbox->vport->vpi : 0,
9435 					pmbox->u.mb.mbxCommand,
9436 					psli->sli_flag, flag);
9437 			goto out_not_finished;
9438 		}
9439 
9440 		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9441 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9442 			/* Mbox command <mbxCommand> cannot issue */
9443 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9444 					"(%d):2530 Mailbox command x%x "
9445 					"cannot issue Data: x%x x%x\n",
9446 					pmbox->vport ? pmbox->vport->vpi : 0,
9447 					pmbox->u.mb.mbxCommand,
9448 					psli->sli_flag, flag);
9449 			goto out_not_finished;
9450 		}
9451 
9452 		/* Another mailbox command is still being processed, queue this
9453 		 * command to be processed later.
9454 		 */
9455 		lpfc_mbox_put(phba, pmbox);
9456 
9457 		/* Mbox cmd issue - BUSY */
9458 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9459 				"(%d):0308 Mbox cmd issue - BUSY Data: "
9460 				"x%x x%x x%x x%x\n",
9461 				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9462 				mbx->mbxCommand,
9463 				phba->pport ? phba->pport->port_state : 0xff,
9464 				psli->sli_flag, flag);
9465 
9466 		psli->slistat.mbox_busy++;
9467 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9468 
9469 		if (pmbox->vport) {
9470 			lpfc_debugfs_disc_trc(pmbox->vport,
9471 				LPFC_DISC_TRC_MBOX_VPORT,
9472 				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
9473 				(uint32_t)mbx->mbxCommand,
9474 				mbx->un.varWords[0], mbx->un.varWords[1]);
9475 		} else {
9477 			lpfc_debugfs_disc_trc(phba->pport,
9478 				LPFC_DISC_TRC_MBOX,
9479 				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
9480 				(uint32_t)mbx->mbxCommand,
9481 				mbx->un.varWords[0], mbx->un.varWords[1]);
9482 		}
9483 
9484 		return MBX_BUSY;
9485 	}
9486 
9487 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9488 
9489 	/* If we are not polling, we MUST be in SLI2 mode */
9490 	if (flag != MBX_POLL) {
9491 		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9492 		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
9493 			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9494 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9495 			/* Mbox command <mbxCommand> cannot issue */
9496 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9497 					"(%d):2531 Mailbox command x%x "
9498 					"cannot issue Data: x%x x%x\n",
9499 					pmbox->vport ? pmbox->vport->vpi : 0,
9500 					pmbox->u.mb.mbxCommand,
9501 					psli->sli_flag, flag);
9502 			goto out_not_finished;
9503 		}
9504 		/* timeout active mbox command */
9505 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9506 					   1000);
9507 		mod_timer(&psli->mbox_tmo, jiffies + timeout);
9508 	}
9509 
9510 	/* Mailbox cmd <cmd> issue */
9511 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9512 			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9513 			"x%x\n",
9514 			pmbox->vport ? pmbox->vport->vpi : 0,
9515 			mbx->mbxCommand,
9516 			phba->pport ? phba->pport->port_state : 0xff,
9517 			psli->sli_flag, flag);
9518 
9519 	if (mbx->mbxCommand != MBX_HEARTBEAT) {
9520 		if (pmbox->vport) {
9521 			lpfc_debugfs_disc_trc(pmbox->vport,
9522 				LPFC_DISC_TRC_MBOX_VPORT,
9523 				"MBOX Send vport: cmd:x%x mb:x%x x%x",
9524 				(uint32_t)mbx->mbxCommand,
9525 				mbx->un.varWords[0], mbx->un.varWords[1]);
9526 		} else {
9528 			lpfc_debugfs_disc_trc(phba->pport,
9529 				LPFC_DISC_TRC_MBOX,
9530 				"MBOX Send:       cmd:x%x mb:x%x x%x",
9531 				(uint32_t)mbx->mbxCommand,
9532 				mbx->un.varWords[0], mbx->un.varWords[1]);
9533 		}
9534 	}
9535 
9536 	psli->slistat.mbox_cmd++;
9537 	evtctr = psli->slistat.mbox_event;
9538 
9539 	/* next set own bit for the adapter and copy over command word */
9540 	mbx->mbxOwner = OWN_CHIP;
9541 
9542 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9543 		/* Populate mbox extension offset word. */
9544 		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9545 			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9546 				= (uint8_t *)phba->mbox_ext
9547 				  - (uint8_t *)phba->mbox;
9548 		}
9549 
9550 		/* Copy the mailbox extension data */
9551 		if (pmbox->in_ext_byte_len && pmbox->ext_buf) {
9552 			lpfc_sli_pcimem_bcopy(pmbox->ext_buf,
9553 					      (uint8_t *)phba->mbox_ext,
9554 					      pmbox->in_ext_byte_len);
9555 		}
9556 		/* Copy command data to host SLIM area */
9557 		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9558 	} else {
9559 		/* Populate mbox extension offset word. */
9560 		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9561 			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9562 				= MAILBOX_HBA_EXT_OFFSET;
9563 
9564 		/* Copy the mailbox extension data */
9565 		if (pmbox->in_ext_byte_len && pmbox->ext_buf)
9566 			lpfc_memcpy_to_slim(phba->MBslimaddr +
9567 				MAILBOX_HBA_EXT_OFFSET,
9568 				pmbox->ext_buf, pmbox->in_ext_byte_len);
9569 
9570 		if (mbx->mbxCommand == MBX_CONFIG_PORT)
9571 			/* copy command data into host mbox for cmpl */
9572 			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9573 					      MAILBOX_CMD_SIZE);
9574 
9575 		/* First copy mbox command data to HBA SLIM, skipping past
9576 		 * the first word */
9577 		to_slim = phba->MBslimaddr + sizeof(uint32_t);
9578 		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9579 			    MAILBOX_CMD_SIZE - sizeof(uint32_t));
9580 
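		/* Ordering matters here: the first mailbox word holds the
		 * mbxOwner bit, so it is written last; the adapter takes
		 * ownership only after the rest of the payload is in SLIM.
		 */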
9581 		/* Next copy over first word, with mbxOwner set */
9582 		ldata = *((uint32_t *)mbx);
9583 		to_slim = phba->MBslimaddr;
9584 		writel(ldata, to_slim);
9585 		readl(to_slim); /* flush */
9586 
9587 		if (mbx->mbxCommand == MBX_CONFIG_PORT)
9588 			/* switch over to host mailbox */
9589 			psli->sli_flag |= LPFC_SLI_ACTIVE;
9590 	}
9591 
9592 	wmb();
9593 
9594 	switch (flag) {
9595 	case MBX_NOWAIT:
9596 		/* Set up reference to mailbox command */
9597 		psli->mbox_active = pmbox;
9598 		/* Interrupt board to do it */
9599 		writel(CA_MBATT, phba->CAregaddr);
9600 		readl(phba->CAregaddr); /* flush */
9601 		/* Don't wait for it to finish, just return */
9602 		break;
9603 
9604 	case MBX_POLL:
9605 		/* Set up null reference to mailbox command */
9606 		psli->mbox_active = NULL;
9607 		/* Interrupt board to do it */
9608 		writel(CA_MBATT, phba->CAregaddr);
9609 		readl(phba->CAregaddr); /* flush */
9610 
9611 		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9612 			/* First read mbox status word */
9613 			word0 = *((uint32_t *)phba->mbox);
9614 			word0 = le32_to_cpu(word0);
9615 		} else {
9616 			/* First read mbox status word */
9617 			if (lpfc_readl(phba->MBslimaddr, &word0)) {
9618 				spin_unlock_irqrestore(&phba->hbalock,
9619 						       drvr_flag);
9620 				goto out_not_finished;
9621 			}
9622 		}
9623 
9624 		/* Read the HBA Host Attention Register */
9625 		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9626 			spin_unlock_irqrestore(&phba->hbalock,
9627 						       drvr_flag);
9628 			goto out_not_finished;
9629 		}
9630 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox) *
9631 							1000) + jiffies;
9632 		i = 0;
9633 		/* Wait for command to complete */
9634 		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9635 		       (!(ha_copy & HA_MBATT) &&
9636 			(phba->link_state > LPFC_WARM_START))) {
9637 			if (time_after(jiffies, timeout)) {
9638 				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9639 				spin_unlock_irqrestore(&phba->hbalock,
9640 						       drvr_flag);
9641 				goto out_not_finished;
9642 			}
9643 
9644 			/* Check if we took a mbox interrupt while we were
9645 			   polling */
9646 			if (((word0 & OWN_CHIP) != OWN_CHIP)
9647 			    && (evtctr != psli->slistat.mbox_event))
9648 				break;
9649 
9650 			if (i++ > 10) {
9651 				spin_unlock_irqrestore(&phba->hbalock,
9652 						       drvr_flag);
9653 				msleep(1);
9654 				spin_lock_irqsave(&phba->hbalock, drvr_flag);
9655 			}
9656 
9657 			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9658 				/* First copy command data */
9659 				word0 = *((uint32_t *)phba->mbox);
9660 				word0 = le32_to_cpu(word0);
9661 				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9662 					MAILBOX_t *slimmb;
9663 					uint32_t slimword0;
9664 					/* Check real SLIM for any errors */
9665 					slimword0 = readl(phba->MBslimaddr);
9666 					slimmb = (MAILBOX_t *)&slimword0;
9667 					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9668 					    && slimmb->mbxStatus) {
9669 						psli->sli_flag &=
9670 						    ~LPFC_SLI_ACTIVE;
9671 						word0 = slimword0;
9672 					}
9673 				}
9674 			} else {
9675 				/* First copy command data */
9676 				word0 = readl(phba->MBslimaddr);
9677 			}
9678 			/* Read the HBA Host Attention Register */
9679 			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9680 				spin_unlock_irqrestore(&phba->hbalock,
9681 						       drvr_flag);
9682 				goto out_not_finished;
9683 			}
9684 		}
9685 
9686 		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9687 			/* copy results back to user */
9688 			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9689 						MAILBOX_CMD_SIZE);
9690 			/* Copy the mailbox extension data */
9691 			if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
9692 				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9693 						      pmbox->ext_buf,
9694 						      pmbox->out_ext_byte_len);
9695 			}
9696 		} else {
9697 			/* First copy command data */
9698 			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9699 						MAILBOX_CMD_SIZE);
9700 			/* Copy the mailbox extension data */
9701 			if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
9702 				lpfc_memcpy_from_slim(
9703 					pmbox->ext_buf,
9704 					phba->MBslimaddr +
9705 					MAILBOX_HBA_EXT_OFFSET,
9706 					pmbox->out_ext_byte_len);
9707 			}
9708 		}
9709 
9710 		writel(HA_MBATT, phba->HAregaddr);
9711 		readl(phba->HAregaddr); /* flush */
9712 
9713 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9714 		status = mbx->mbxStatus;
9715 	}
9716 
9717 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9718 	return status;
9719 
9720 out_not_finished:
9721 	if (processing_queue) {
9722 		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9723 		lpfc_mbox_cmpl_put(phba, pmbox);
9724 	}
9725 	return MBX_NOT_FINISHED;
9726 }
9727 
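/*
 * Illustrative sketch only: the ownership contract documented above.
 * On MBX_BUSY or MBX_SUCCESS the SLI layer owns the mailbox until
 * completion; on any other return code ownership reverts to the caller,
 * which must reclaim the mailbox. The function name is an assumption.
 */
#if 0
static int example_issue_and_own(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
{
	int rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);

	if (rc != MBX_BUSY && rc != MBX_SUCCESS) {
		/* Ownership reverted to the caller; release the mailbox */
		mempool_free(pmb, phba->mbox_mem_pool);
		return -EIO;
	}
	/* The mbox_cmpl handler is now responsible for freeing pmb */
	return 0;
}
#endif
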
9728 /**
9729  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9730  * @phba: Pointer to HBA context object.
9731  *
9732  * The function blocks the posting of SLI4 asynchronous mailbox commands from
9733  * the driver internal pending mailbox queue. It will then try to wait out the
9734  * possible outstanding mailbox command before return.
9735  *
9736  * Returns:
9737  * 	0 - the outstanding mailbox command completed.
9738  * 	1 - the wait for the outstanding mailbox command timed out.
9739  **/
9740 static int
9741 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9742 {
9743 	struct lpfc_sli *psli = &phba->sli;
9744 	LPFC_MBOXQ_t *mboxq;
9745 	int rc = 0;
9746 	unsigned long timeout = 0;
9747 	u32 sli_flag;
9748 	u8 cmd, subsys, opcode;
9749 
9750 	/* Mark the asynchronous mailbox command posting as blocked */
9751 	spin_lock_irq(&phba->hbalock);
9752 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9753 	/* Determine how long we might wait for the active mailbox
9754 	 * command to be gracefully completed by firmware.
9755 	 */
9756 	if (phba->sli.mbox_active)
9757 		timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
9758 						phba->sli.mbox_active) *
9759 						1000) + jiffies;
9760 	spin_unlock_irq(&phba->hbalock);
9761 
9762 	/* Make sure the mailbox is really active */
9763 	if (timeout)
9764 		lpfc_sli4_process_missed_mbox_completions(phba);
9765 
9766 	/* Wait for the outstanding mailbox command to complete */
9767 	while (phba->sli.mbox_active) {
9768 		/* Check active mailbox complete status every 2ms */
9769 		msleep(2);
9770 		if (time_after(jiffies, timeout)) {
9771 			/* Timeout, mark the outstanding cmd not complete */
9772 
9773 			/* Sanity check that sli.mbox_active has not completed
9774 			 * or been cancelled from another context during the
9775 			 * last 2ms sleep, so take the hbalock before logging.
9776 			 */
9777 			spin_lock_irq(&phba->hbalock);
9778 			if (phba->sli.mbox_active) {
9779 				mboxq = phba->sli.mbox_active;
9780 				cmd = mboxq->u.mb.mbxCommand;
9781 				subsys = lpfc_sli_config_mbox_subsys_get(phba,
9782 									 mboxq);
9783 				opcode = lpfc_sli_config_mbox_opcode_get(phba,
9784 									 mboxq);
9785 				sli_flag = psli->sli_flag;
9786 				spin_unlock_irq(&phba->hbalock);
9787 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9788 						"2352 Mailbox command x%x "
9789 						"(x%x/x%x) sli_flag x%x could "
9790 						"not complete\n",
9791 						cmd, subsys, opcode,
9792 						sli_flag);
9793 			} else {
9794 				spin_unlock_irq(&phba->hbalock);
9795 			}
9796 
9797 			rc = 1;
9798 			break;
9799 		}
9800 	}
9801 
9802 	/* Cannot cleanly block the async mailbox command, fail it */
9803 	if (rc) {
9804 		spin_lock_irq(&phba->hbalock);
9805 		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9806 		spin_unlock_irq(&phba->hbalock);
9807 	}
9808 	return rc;
9809 }
9810 
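/*
 * Illustrative sketch only: block/unblock are used as a bracket around
 * work that must quiesce asynchronous mailbox posting, e.g.:
 *
 *	if (lpfc_sli4_async_mbox_block(phba))
 *		return -EAGAIN;	 (the active mailbox never completed)
 *	... perform the synchronous mailbox operation ...
 *	lpfc_sli4_async_mbox_unblock(phba);
 */
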
9811 /**
9812  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
9813  * @phba: Pointer to HBA context object.
9814  *
9815  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9816  * commands from the driver's internal pending mailbox queue. It makes sure
9817  * that there is no outstanding mailbox command before resuming posting of
9818  * asynchronous mailbox commands. If, for any reason, there is an outstanding
9819  * mailbox command, it will try to wait it out before resuming asynchronous
9820  * mailbox command posting.
9821  **/
9822 static void
9823 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9824 {
9825 	struct lpfc_sli *psli = &phba->sli;
9826 
9827 	spin_lock_irq(&phba->hbalock);
9828 	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9829 		/* Asynchronous mailbox posting is not blocked, do nothing */
9830 		spin_unlock_irq(&phba->hbalock);
9831 		return;
9832 	}
9833 
9834 	/* The outstanding synchronous mailbox command is guaranteed to finish,
9835 	 * either successfully or by timeout, and a timed-out command is always
9836 	 * removed, so just unblock posting of async mailbox commands and
9837 	 * resume.
9838 	 */
9839 	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9840 	spin_unlock_irq(&phba->hbalock);
9841 
9842 	/* wake up worker thread to post asynchronous mailbox command */
9843 	lpfc_worker_wake_up(phba);
9844 }
9845 
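/*
 * Editorial sketch (not driver code): the block/unblock pair above is
 * meant to bracket a synchronous mailbox submission while interrupts
 * are enabled, roughly:
 *
 *	if (lpfc_sli4_async_mbox_block(phba) == 0) {
 *		rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *		lpfc_sli4_async_mbox_unblock(phba);
 *	}
 *
 * lpfc_sli_issue_mbox_s4() later in this file follows exactly this
 * pattern for MBX_POLL requests.
 */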
9846 /**
9847  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9848  * @phba: Pointer to HBA context object.
9849  * @mboxq: Pointer to mailbox object.
9850  *
9851  * The function waits for the bootstrap mailbox register ready bit from
9852  * the port for the regular mailbox command timeout value.
9853  * Returns:
9854  *      0 - no timeout on waiting for bootstrap mailbox register ready.
9855  *      MBXERR_ERROR - wait for bootstrap mailbox register timed out or port
9856  *                     is in an unrecoverable state.
9857  **/
9858 static int
9859 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9860 {
9861 	uint32_t db_ready;
9862 	unsigned long timeout;
9863 	struct lpfc_register bmbx_reg;
9864 	struct lpfc_register portstat_reg = {-1};
9865 
9866 	/* Sanity check - there is no point to wait if the port is in an
9867 	 * unrecoverable state.
9868 	 */
9869 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
9870 	    LPFC_SLI_INTF_IF_TYPE_2) {
9871 		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9872 			       &portstat_reg.word0) ||
9873 		    lpfc_sli4_unrecoverable_port(&portstat_reg)) {
9874 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9875 					"3858 Skipping bmbx ready because "
9876 					"Port Status x%x\n",
9877 					portstat_reg.word0);
9878 			return MBXERR_ERROR;
9879 		}
9880 	}
9881 
9882 	timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)
9883 				   * 1000) + jiffies;
9884 
9885 	do {
9886 		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9887 		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9888 		if (!db_ready)
9889 			mdelay(2);
9890 
9891 		if (time_after(jiffies, timeout))
9892 			return MBXERR_ERROR;
9893 	} while (!db_ready);
9894 
9895 	return 0;
9896 }
9897 
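/*
 * Editorial sketch: the ready-wait above is the common jiffies-deadline
 * polling idiom. In a minimal generic form (regaddr, READY_BIT and
 * tmo_ms are illustrative names, not driver symbols) it reads:
 *
 *	unsigned long deadline = jiffies + msecs_to_jiffies(tmo_ms);
 *	u32 val;
 *
 *	do {
 *		val = readl(regaddr);
 *		if (!(val & READY_BIT))
 *			mdelay(2);
 *		if (time_after(jiffies, deadline))
 *			return -ETIMEDOUT;
 *	} while (!(val & READY_BIT));
 *	return 0;
 */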
9898 /**
9899  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9900  * @phba: Pointer to HBA context object.
9901  * @mboxq: Pointer to mailbox object.
9902  *
9903  * The function posts a mailbox to the port.  The mailbox is expected
9904  * to be completely filled in and ready for the port to operate on it.
9905  * This routine executes a synchronous completion operation on the
9906  * mailbox by polling for its completion.
9907  *
9908  * The caller must not be holding any locks when calling this routine.
9909  *
9910  * Returns:
9911  *	MBX_SUCCESS - mailbox posted successfully
9912  *	Any of the MBX error values.
9913  **/
9914 static int
9915 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9916 {
9917 	int rc = MBX_SUCCESS;
9918 	unsigned long iflag;
9919 	uint32_t mcqe_status;
9920 	uint32_t mbx_cmnd;
9921 	struct lpfc_sli *psli = &phba->sli;
9922 	struct lpfc_mqe *mb = &mboxq->u.mqe;
9923 	struct lpfc_bmbx_create *mbox_rgn;
9924 	struct dma_address *dma_address;
9925 
9926 	/*
9927 	 * Only one mailbox can be active to the bootstrap mailbox region
9928 	 * at a time and there is no queueing provided.
9929 	 */
9930 	spin_lock_irqsave(&phba->hbalock, iflag);
9931 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9932 		spin_unlock_irqrestore(&phba->hbalock, iflag);
9933 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9934 				"(%d):2532 Mailbox command x%x (x%x/x%x) "
9935 				"cannot issue Data: x%x x%x\n",
9936 				mboxq->vport ? mboxq->vport->vpi : 0,
9937 				mboxq->u.mb.mbxCommand,
9938 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9939 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9940 				psli->sli_flag, MBX_POLL);
9941 		return MBXERR_ERROR;
9942 	}
9943 	/* The server grabs the token and owns it until release */
9944 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9945 	phba->sli.mbox_active = mboxq;
9946 	spin_unlock_irqrestore(&phba->hbalock, iflag);
9947 
9948 	/* wait for the bootstrap mbox register to become ready */
9949 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9950 	if (rc)
9951 		goto exit;
9952 	/*
9953 	 * Initialize the bootstrap memory region to avoid stale data areas
9954 	 * in the mailbox post.  Then copy the caller's mailbox contents to
9955 	 * the bmbx mailbox region.
9956 	 */
9957 	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9958 	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9959 	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9960 			       sizeof(struct lpfc_mqe));
9961 
9962 	/* Post the high mailbox dma address to the port and wait for ready. */
9963 	dma_address = &phba->sli4_hba.bmbx.dma_address;
9964 	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
9965 
9966 	/* wait for the bootstrap mbox register to ack the hi-address write */
9967 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9968 	if (rc)
9969 		goto exit;
9970 
9971 	/* Post the low mailbox dma address to the port. */
9972 	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
9973 
9974 	/* wait for the bootstrap mbox register to ack the low-address write */
9975 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9976 	if (rc)
9977 		goto exit;
9978 
9979 	/*
9980 	 * Read the CQ to ensure the mailbox has completed.
9981 	 * If so, update the mailbox status so that the upper layers
9982 	 * can complete the request normally.
9983 	 */
9984 	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
9985 			       sizeof(struct lpfc_mqe));
9986 	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
9987 	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
9988 			       sizeof(struct lpfc_mcqe));
9989 	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
9990 	/*
9991 	 * When the CQE status indicates a failure and the mailbox status
9992 	 * indicates success then copy the CQE status into the mailbox status
9993 	 * (and prefix it with x4000).
9994 	 */
9995 	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
9996 		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
9997 			bf_set(lpfc_mqe_status, mb,
9998 			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
9999 		rc = MBXERR_ERROR;
10000 	} else
10001 		lpfc_sli4_swap_str(phba, mboxq);
10002 
10003 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10004 			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
10005 			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
10006 			" x%x x%x CQ: x%x x%x x%x x%x\n",
10007 			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10008 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10009 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10010 			bf_get(lpfc_mqe_status, mb),
10011 			mb->un.mb_words[0], mb->un.mb_words[1],
10012 			mb->un.mb_words[2], mb->un.mb_words[3],
10013 			mb->un.mb_words[4], mb->un.mb_words[5],
10014 			mb->un.mb_words[6], mb->un.mb_words[7],
10015 			mb->un.mb_words[8], mb->un.mb_words[9],
10016 			mb->un.mb_words[10], mb->un.mb_words[11],
10017 			mb->un.mb_words[12], mboxq->mcqe.word0,
10018 			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
10019 			mboxq->mcqe.trailer);
10020 exit:
10021 	/* We hold the token; take the lock only to update the shared flags */
10022 	spin_lock_irqsave(&phba->hbalock, iflag);
10023 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10024 	phba->sli.mbox_active = NULL;
10025 	spin_unlock_irqrestore(&phba->hbalock, iflag);
10026 	return rc;
10027 }
10028 
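/*
 * Editorial note: the bootstrap (bmbx) handshake above is strictly
 * ordered; each doorbell write must be acknowledged via the ready bit
 * before the next step:
 *
 *	1. wait ready, then memset/copy the mailbox into bmbx.avirt
 *	2. writel(addr_hi, BMBXregaddr); wait ready
 *	3. writel(addr_lo, BMBXregaddr); wait ready
 *	4. copy the MQE and MCQE back out of the bootstrap region and
 *	   fold a failing MCQE status into the mailbox status
 *	   (LPFC_MBX_ERROR_RANGE | mcqe_status)
 */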
10029 /**
10030  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
10031  * @phba: Pointer to HBA context object.
10032  * @mboxq: Pointer to mailbox object.
10033  * @flag: Flag indicating how the mailbox need to be processed.
10034  *
10035  * This function is called by discovery code and HBA management code to submit
10036  * a mailbox command to firmware with SLI-4 interface spec.
10037  *
10038  * Return codes: the caller owns the mailbox command after the
10039  * function returns.
10040  **/
10041 static int
10042 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
10043 		       uint32_t flag)
10044 {
10045 	struct lpfc_sli *psli = &phba->sli;
10046 	unsigned long iflags;
10047 	int rc;
10048 
10049 	/* Dump the mailbox command at issue time if the idiag dump is set up */
10050 	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
10051 
10052 	rc = lpfc_mbox_dev_check(phba);
10053 	if (unlikely(rc)) {
10054 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10055 				"(%d):2544 Mailbox command x%x (x%x/x%x) "
10056 				"cannot issue Data: x%x x%x\n",
10057 				mboxq->vport ? mboxq->vport->vpi : 0,
10058 				mboxq->u.mb.mbxCommand,
10059 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10060 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10061 				psli->sli_flag, flag);
10062 		goto out_not_finished;
10063 	}
10064 
10065 	/* Detect polling mode and jump to a handler */
10066 	if (!phba->sli4_hba.intr_enable) {
10067 		if (flag == MBX_POLL)
10068 			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10069 		else
10070 			rc = -EIO;
10071 		if (rc != MBX_SUCCESS)
10072 			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10073 					"(%d):2541 Mailbox command x%x "
10074 					"(x%x/x%x) failure: "
10075 					"mqe_sta: x%x mcqe_sta: x%x/x%x "
10076 					"Data: x%x x%x\n",
10077 					mboxq->vport ? mboxq->vport->vpi : 0,
10078 					mboxq->u.mb.mbxCommand,
10079 					lpfc_sli_config_mbox_subsys_get(phba,
10080 									mboxq),
10081 					lpfc_sli_config_mbox_opcode_get(phba,
10082 									mboxq),
10083 					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10084 					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10085 					bf_get(lpfc_mcqe_ext_status,
10086 					       &mboxq->mcqe),
10087 					psli->sli_flag, flag);
10088 		return rc;
10089 	} else if (flag == MBX_POLL) {
10090 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10091 				"(%d):2542 Try to issue mailbox command "
10092 				"x%x (x%x/x%x) synchronously ahead of async "
10093 				"mailbox command queue: x%x x%x\n",
10094 				mboxq->vport ? mboxq->vport->vpi : 0,
10095 				mboxq->u.mb.mbxCommand,
10096 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10097 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10098 				psli->sli_flag, flag);
10099 		/* Try to block the asynchronous mailbox posting */
10100 		rc = lpfc_sli4_async_mbox_block(phba);
10101 		if (!rc) {
10102 			/* Successfully blocked, now issue sync mbox cmd */
10103 			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10104 			if (rc != MBX_SUCCESS)
10105 				lpfc_printf_log(phba, KERN_WARNING,
10106 					LOG_MBOX | LOG_SLI,
10107 					"(%d):2597 Sync Mailbox command "
10108 					"x%x (x%x/x%x) failure: "
10109 					"mqe_sta: x%x mcqe_sta: x%x/x%x "
10110 					"Data: x%x x%x\n",
10111 					mboxq->vport ? mboxq->vport->vpi : 0,
10112 					mboxq->u.mb.mbxCommand,
10113 					lpfc_sli_config_mbox_subsys_get(phba,
10114 									mboxq),
10115 					lpfc_sli_config_mbox_opcode_get(phba,
10116 									mboxq),
10117 					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10118 					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10119 					bf_get(lpfc_mcqe_ext_status,
10120 					       &mboxq->mcqe),
10121 					psli->sli_flag, flag);
10122 			/* Unblock the async mailbox posting afterward */
10123 			lpfc_sli4_async_mbox_unblock(phba);
10124 		}
10125 		return rc;
10126 	}
10127 
10128 	/* Now, interrupt mode asynchronous mailbox command */
10129 	rc = lpfc_mbox_cmd_check(phba, mboxq);
10130 	if (rc) {
10131 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10132 				"(%d):2543 Mailbox command x%x (x%x/x%x) "
10133 				"cannot issue Data: x%x x%x\n",
10134 				mboxq->vport ? mboxq->vport->vpi : 0,
10135 				mboxq->u.mb.mbxCommand,
10136 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10137 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10138 				psli->sli_flag, flag);
10139 		goto out_not_finished;
10140 	}
10141 
10142 	/* Put the mailbox command into the driver's internal FIFO */
10143 	psli->slistat.mbox_busy++;
10144 	spin_lock_irqsave(&phba->hbalock, iflags);
10145 	lpfc_mbox_put(phba, mboxq);
10146 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10147 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10148 			"(%d):0354 Mbox cmd issue - Enqueue Data: "
10149 			"x%x (x%x/x%x) x%x x%x x%x x%x\n",
10150 			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
10151 			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
10152 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10153 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10154 			mboxq->u.mb.un.varUnregLogin.rpi,
10155 			phba->pport->port_state,
10156 			psli->sli_flag, MBX_NOWAIT);
10157 	/* Wake up worker thread to transport mailbox command from head */
10158 	lpfc_worker_wake_up(phba);
10159 
10160 	return MBX_BUSY;
10161 
10162 out_not_finished:
10163 	return MBX_NOT_FINISHED;
10164 }
10165 
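/*
 * Editorial summary of the dispatch above:
 *
 *	intr_enable  flag        action
 *	-----------  ----------  ------------------------------------------
 *	false        MBX_POLL    lpfc_sli4_post_sync_mbox() directly
 *	false        other       -EIO (async posting needs interrupts)
 *	true         MBX_POLL    block async queue, sync post, unblock
 *	true         other       enqueue via lpfc_mbox_put(), wake worker,
 *	                         return MBX_BUSY
 */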
10166 /**
10167  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
10168  * @phba: Pointer to HBA context object.
10169  *
10170  * This function is called by the worker thread to send a mailbox command
10171  * to the SLI4 HBA firmware.
10172  *
10173  **/
10174 int
10175 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
10176 {
10177 	struct lpfc_sli *psli = &phba->sli;
10178 	LPFC_MBOXQ_t *mboxq;
10179 	int rc = MBX_SUCCESS;
10180 	unsigned long iflags;
10181 	struct lpfc_mqe *mqe;
10182 	uint32_t mbx_cmnd;
10183 
10184 	/* Check interrupt mode before posting an async mailbox command */
10185 	if (unlikely(!phba->sli4_hba.intr_enable))
10186 		return MBX_NOT_FINISHED;
10187 
10188 	/* Check for mailbox command service token */
10189 	spin_lock_irqsave(&phba->hbalock, iflags);
10190 	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
10191 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10192 		return MBX_NOT_FINISHED;
10193 	}
10194 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10195 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10196 		return MBX_NOT_FINISHED;
10197 	}
10198 	if (unlikely(phba->sli.mbox_active)) {
10199 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10200 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10201 				"0384 There is pending active mailbox cmd\n");
10202 		return MBX_NOT_FINISHED;
10203 	}
10204 	/* Take the mailbox command service token */
10205 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
10206 
10207 	/* Get the next mailbox command from head of queue */
10208 	mboxq = lpfc_mbox_get(phba);
10209 
10210 	/* If no more mailbox commands are waiting to post, we're done */
10211 	if (!mboxq) {
10212 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10213 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10214 		return MBX_SUCCESS;
10215 	}
10216 	phba->sli.mbox_active = mboxq;
10217 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10218 
10219 	/* Check device readiness for posting mailbox command */
10220 	rc = lpfc_mbox_dev_check(phba);
10221 	if (unlikely(rc))
10222 		/* Driver clean routine will clean up pending mailbox */
10223 		goto out_not_finished;
10224 
10225 	/* Prepare the mbox command to be posted */
10226 	mqe = &mboxq->u.mqe;
10227 	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
10228 
10229 	/* Start timer for the mbox_tmo and log some mailbox post messages */
10230 	mod_timer(&psli->mbox_tmo, (jiffies +
10231 		  msecs_to_jiffies(1000 * lpfc_mbox_tmo_val(phba, mboxq))));
10232 
10233 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10234 			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
10235 			"x%x x%x\n",
10236 			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10237 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10238 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10239 			phba->pport->port_state, psli->sli_flag);
10240 
10241 	if (mbx_cmnd != MBX_HEARTBEAT) {
10242 		if (mboxq->vport) {
10243 			lpfc_debugfs_disc_trc(mboxq->vport,
10244 				LPFC_DISC_TRC_MBOX_VPORT,
10245 				"MBOX Send vport: cmd:x%x mb:x%x x%x",
10246 				mbx_cmnd, mqe->un.mb_words[0],
10247 				mqe->un.mb_words[1]);
10248 		} else {
10249 			lpfc_debugfs_disc_trc(phba->pport,
10250 				LPFC_DISC_TRC_MBOX,
10251 				"MBOX Send: cmd:x%x mb:x%x x%x",
10252 				mbx_cmnd, mqe->un.mb_words[0],
10253 				mqe->un.mb_words[1]);
10254 		}
10255 	}
10256 	psli->slistat.mbox_cmd++;
10257 
10258 	/* Post the mailbox command to the port */
10259 	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
10260 	if (rc != MBX_SUCCESS) {
10261 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10262 				"(%d):2533 Mailbox command x%x (x%x/x%x) "
10263 				"cannot issue Data: x%x x%x\n",
10264 				mboxq->vport ? mboxq->vport->vpi : 0,
10265 				mboxq->u.mb.mbxCommand,
10266 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10267 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10268 				psli->sli_flag, MBX_NOWAIT);
10269 		goto out_not_finished;
10270 	}
10271 
10272 	return rc;
10273 
10274 out_not_finished:
10275 	spin_lock_irqsave(&phba->hbalock, iflags);
10276 	if (phba->sli.mbox_active) {
10277 		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10278 		__lpfc_mbox_cmpl_put(phba, mboxq);
10279 		/* Release the token */
10280 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10281 		phba->sli.mbox_active = NULL;
10282 	}
10283 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10284 
10285 	return MBX_NOT_FINISHED;
10286 }
10287 
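/*
 * Editorial summary: the async post path above is token-gated. The
 * worker takes LPFC_SLI_MBOX_ACTIVE, dequeues one command with
 * lpfc_mbox_get(), arms psli->mbox_tmo, and posts the MQE via
 * lpfc_sli4_mq_put(). The token is released on error here; on success
 * it is presumably released by the mailbox completion handling (not
 * shown in this excerpt).
 */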
10288 /**
10289  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
10290  * @phba: Pointer to HBA context object.
10291  * @pmbox: Pointer to mailbox object.
10292  * @flag: Flag indicating how the mailbox need to be processed.
10293  *
10294  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
10295  * the API jump table function pointer from the lpfc_hba struct.
10296  *
10297  * Return codes: the caller owns the mailbox command after the
10298  * function returns.
10299  **/
10300 int
10301 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
10302 {
10303 	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
10304 }
10305 
10306 /**
10307  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
10308  * @phba: The hba struct for which this call is being executed.
10309  * @dev_grp: The HBA PCI-Device group number.
10310  *
10311  * This routine sets up the mbox interface API function jump table in @phba
10312  * struct.
10313  * Returns: 0 - success, -ENODEV - failure.
10314  **/
10315 int
10316 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10317 {
10318 
10319 	switch (dev_grp) {
10320 	case LPFC_PCI_DEV_LP:
10321 		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10322 		phba->lpfc_sli_handle_slow_ring_event =
10323 				lpfc_sli_handle_slow_ring_event_s3;
10324 		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10325 		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10326 		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10327 		break;
10328 	case LPFC_PCI_DEV_OC:
10329 		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10330 		phba->lpfc_sli_handle_slow_ring_event =
10331 				lpfc_sli_handle_slow_ring_event_s4;
10332 		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10333 		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10334 		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10335 		break;
10336 	default:
10337 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10338 				"1420 Invalid HBA PCI-device group: 0x%x\n",
10339 				dev_grp);
10340 		return -ENODEV;
10341 	}
10342 	return 0;
10343 }
10344 
10345 /**
10346  * __lpfc_sli_ringtx_put - Add an iocb to the txq
10347  * @phba: Pointer to HBA context object.
10348  * @pring: Pointer to driver SLI ring object.
10349  * @piocb: Pointer to address of newly added command iocb.
10350  *
10351  * This function is called with hbalock held for SLI3 ports or
10352  * the ring lock held for SLI4 ports to add a command
10353  * iocb to the txq when SLI layer cannot submit the command iocb
10354  * to the ring.
10355  **/
10356 void
10357 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10358 		    struct lpfc_iocbq *piocb)
10359 {
10360 	if (phba->sli_rev == LPFC_SLI_REV4)
10361 		lockdep_assert_held(&pring->ring_lock);
10362 	else
10363 		lockdep_assert_held(&phba->hbalock);
10364 	/* Insert the caller's iocb in the txq tail for later processing. */
10365 	list_add_tail(&piocb->list, &pring->txq);
10366 }
10367 
10368 /**
10369  * lpfc_sli_next_iocb - Get the next iocb in the txq
10370  * @phba: Pointer to HBA context object.
10371  * @pring: Pointer to driver SLI ring object.
10372  * @piocb: Pointer to address of newly added command iocb.
10373  *
10374  * This function is called with hbalock held before a new
10375  * iocb is submitted to the firmware. It checks the
10376  * txq so that any iocbs queued there are flushed to the
10377  * firmware before new iocbs are submitted.
10378  * If there are iocbs in the txq which need to be submitted
10379  * to the firmware, lpfc_sli_next_iocb dequeues the first
10380  * element of the txq and returns it.
10381  * If the txq is empty, the function returns
10382  * *piocb and sets *piocb to NULL. The caller checks
10383  * *piocb to find out whether more commands remain in the txq.
10384  **/
10385 static struct lpfc_iocbq *
10386 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10387 		   struct lpfc_iocbq **piocb)
10388 {
10389 	struct lpfc_iocbq * nextiocb;
10390 
10391 	lockdep_assert_held(&phba->hbalock);
10392 
10393 	nextiocb = lpfc_sli_ringtx_get(phba, pring);
10394 	if (!nextiocb) {
10395 		nextiocb = *piocb;
10396 		*piocb = NULL;
10397 	}
10398 
10399 	return nextiocb;
10400 }
10401 
10402 /**
10403  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10404  * @phba: Pointer to HBA context object.
10405  * @ring_number: SLI ring number to issue iocb on.
10406  * @piocb: Pointer to command iocb.
10407  * @flag: Flag indicating if this command can be put into txq.
10408  *
10409  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10410  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10411  * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
10412  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
10413  * this function allows only iocbs for posting buffers. This function finds
10414  * next available slot in the command ring and posts the command to the
10415  * available slot and writes the port attention register to request HBA start
10416  * processing new iocb. If there is no slot available in the ring and
10417  * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
10418  * the function returns IOCB_BUSY.
10419  *
10420  * This function is called with hbalock held. The function will return success
10421  * after it successfully submits the iocb to the firmware or adds it to the
10422  * txq.
10423  **/
10424 static int
10425 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10426 		    struct lpfc_iocbq *piocb, uint32_t flag)
10427 {
10428 	struct lpfc_iocbq *nextiocb;
10429 	IOCB_t *iocb;
10430 	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10431 
10432 	lockdep_assert_held(&phba->hbalock);
10433 
10434 	if (piocb->cmd_cmpl && (!piocb->vport) &&
10435 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10436 	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10437 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10438 				"1807 IOCB x%x failed. No vport\n",
10439 				piocb->iocb.ulpCommand);
10440 		dump_stack();
10441 		return IOCB_ERROR;
10442 	}
10443 
10444 
10445 	/* If the PCI channel is in offline state, do not post iocbs. */
10446 	if (unlikely(pci_channel_offline(phba->pcidev)))
10447 		return IOCB_ERROR;
10448 
10449 	/* If HBA has a deferred error attention, fail the iocb. */
10450 	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
10451 		return IOCB_ERROR;
10452 
10453 	/*
10454 	 * We should never get an IOCB if we are in a < LINK_DOWN state
10455 	 */
10456 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10457 		return IOCB_ERROR;
10458 
10459 	/*
10460 	 * Check to see if we are blocking IOCB processing because of an
10461 	 * outstanding event.
10462 	 */
10463 	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10464 		goto iocb_busy;
10465 
10466 	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10467 		/*
10468 		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10469 		 * can be issued if the link is not up.
10470 		 */
10471 		switch (piocb->iocb.ulpCommand) {
10472 		case CMD_QUE_RING_BUF_CN:
10473 		case CMD_QUE_RING_BUF64_CN:
10474 			/*
10475 			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10476 			 * completion, cmd_cmpl MUST be 0.
10477 			 */
10478 			if (piocb->cmd_cmpl)
10479 				piocb->cmd_cmpl = NULL;
10480 			fallthrough;
10481 		case CMD_CREATE_XRI_CR:
10482 		case CMD_CLOSE_XRI_CN:
10483 		case CMD_CLOSE_XRI_CX:
10484 			break;
10485 		default:
10486 			goto iocb_busy;
10487 		}
10488 
10489 	/*
10490 	 * For FCP commands, we must be in a state where we can process link
10491 	 * attention events.
10492 	 */
10493 	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10494 			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10495 		goto iocb_busy;
10496 	}
10497 
10498 	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10499 	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10500 		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10501 
10502 	if (iocb)
10503 		lpfc_sli_update_ring(phba, pring);
10504 	else
10505 		lpfc_sli_update_full_ring(phba, pring);
10506 
10507 	if (!piocb)
10508 		return IOCB_SUCCESS;
10509 
10510 	goto out_busy;
10511 
10512  iocb_busy:
10513 	pring->stats.iocb_cmd_delay++;
10514 
10515  out_busy:
10516 
10517 	if (!(flag & SLI_IOCB_RET_IOCB)) {
10518 		__lpfc_sli_ringtx_put(phba, pring, piocb);
10519 		return IOCB_SUCCESS;
10520 	}
10521 
10522 	return IOCB_BUSY;
10523 }
10524 
10525 /**
10526  * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10527  * @phba: Pointer to HBA context object.
10528  * @ring_number: SLI ring number to issue wqe on.
10529  * @piocb: Pointer to command iocb.
10530  * @flag: Flag indicating if this command can be put into txq.
10531  *
10532  * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10533  * function to send an iocb command to an HBA with SLI-3 interface spec.
10534  *
10535  * This function takes the hbalock before invoking the lockless version.
10536  * The function returns success after it successfully submits the iocb to the
10537  * firmware or adds it to the txq.
10538  **/
10539 static int
10540 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10541 			   struct lpfc_iocbq *piocb, uint32_t flag)
10542 {
10543 	unsigned long iflags;
10544 	int rc;
10545 
10546 	spin_lock_irqsave(&phba->hbalock, iflags);
10547 	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10548 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10549 
10550 	return rc;
10551 }
10552 
10553 /**
10554  * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10555  * @phba: Pointer to HBA context object.
10556  * @ring_number: SLI ring number to issue wqe on.
10557  * @piocb: Pointer to command iocb.
10558  * @flag: Flag indicating if this command can be put into txq.
10559  *
10560  * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10561  * a wqe command to an HBA with SLI-4 interface spec.
10562  *
10563  * This function is a lockless version. The function returns success
10564  * after it successfully submits the wqe to the firmware or adds it to the
10565  * txq.
10566  **/
10567 static int
10568 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10569 			   struct lpfc_iocbq *piocb, uint32_t flag)
10570 {
10571 	struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
10572 
10573 	lpfc_prep_embed_io(phba, lpfc_cmd);
10574 	return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10575 }
10576 
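/**
 * lpfc_prep_embed_io - Build the BDE words of an FCP WQE
 * @phba: Pointer to HBA context object.
 * @lpfc_cmd: Pointer to the IO buffer whose cur_iocbq WQE is prepared.
 *
 * Editorial description of the routine below: when fcp_embed_io is
 * enabled, the FCP_CMND payload is copied inline into words 18-29 of
 * the 128-byte WQE as an immediate BDE; otherwise words 0-2 are set up
 * as a 64-bit BDE pointing at the external sgl. VMID tags are then
 * added when the IO carries LPFC_IO_VMID.
 **/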
10577 void
10578 lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10579 {
10580 	struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10581 	union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10582 	struct sli4_sge *sgl;
10583 
10584 	/* 128 byte wqe support here */
10585 	sgl = (struct sli4_sge *)lpfc_cmd->dma_sgl;
10586 
10587 	if (phba->fcp_embed_io) {
10588 		struct fcp_cmnd *fcp_cmnd;
10589 		u32 *ptr;
10590 
10591 		fcp_cmnd = lpfc_cmd->fcp_cmnd;
10592 
10593 		/* Word 0-2 - FCP_CMND */
10594 		wqe->generic.bde.tus.f.bdeFlags =
10595 			BUFF_TYPE_BDE_IMMED;
10596 		wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10597 		wqe->generic.bde.addrHigh = 0;
10598 		wqe->generic.bde.addrLow =  72;  /* Word 18 */
10599 
10600 		bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10601 		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10602 
10603 		/* Word 18-29  FCP CMND Payload */
10604 		ptr = &wqe->words[18];
10605 		memcpy(ptr, fcp_cmnd, sgl->sge_len);
10606 	} else {
10607 		/* Word 0-2 - Inline BDE */
10608 		wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
10609 		wqe->generic.bde.tus.f.bdeSize = sgl->sge_len;
10610 		wqe->generic.bde.addrHigh = sgl->addr_hi;
10611 		wqe->generic.bde.addrLow =  sgl->addr_lo;
10612 
10613 		/* Word 10 */
10614 		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10615 		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10616 	}
10617 
10618 	/* add the VMID tags as per switch response */
10619 	if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
10620 		if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
10621 			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10622 			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10623 					(piocb->vmid_tag.cs_ctl_vmid));
10624 		} else if (phba->cfg_vmid_app_header) {
10625 			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10626 			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10627 			wqe->words[31] = piocb->vmid_tag.app_id;
10628 		}
10629 	}
10630 }
10631 
10632 /**
10633  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10634  * @phba: Pointer to HBA context object.
10635  * @ring_number: SLI ring number to issue iocb on.
10636  * @piocb: Pointer to command iocb.
10637  * @flag: Flag indicating if this command can be put into txq.
10638  *
10639  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10640  * an iocb command to an HBA with SLI-4 interface spec.
10641  *
10642  * This function is called with ringlock held. The function will return success
10643  * after it successfully submits the iocb to the firmware or adds it to the
10644  * txq.
10645  **/
10646 static int
10647 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10648 			 struct lpfc_iocbq *piocb, uint32_t flag)
10649 {
10650 	struct lpfc_sglq *sglq;
10651 	union lpfc_wqe128 *wqe;
10652 	struct lpfc_queue *wq;
10653 	struct lpfc_sli_ring *pring;
10654 	u32 ulp_command = get_job_cmnd(phba, piocb);
10655 
10656 	/* Get the WQ */
10657 	if ((piocb->cmd_flag & LPFC_IO_FCP) ||
10658 	    (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10659 		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10660 	} else {
10661 		wq = phba->sli4_hba.els_wq;
10662 	}
10663 
10664 	/* Get corresponding ring */
10665 	pring = wq->pring;
10666 
10667 	/*
10668 	 * The WQE can be either 64 or 128 bytes.
10669 	 */
10670 
10671 	lockdep_assert_held(&pring->ring_lock);
10672 	wqe = &piocb->wqe;
10673 	if (piocb->sli4_xritag == NO_XRI) {
10674 		if (ulp_command == CMD_ABORT_XRI_CX)
10675 			sglq = NULL;
10676 		else {
10677 			sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10678 			if (!sglq) {
10679 				if (!(flag & SLI_IOCB_RET_IOCB)) {
10680 					__lpfc_sli_ringtx_put(phba,
10681 							pring,
10682 							piocb);
10683 					return IOCB_SUCCESS;
10684 				} else {
10685 					return IOCB_BUSY;
10686 				}
10687 			}
10688 		}
10689 	} else if (piocb->cmd_flag &  LPFC_IO_FCP) {
10690 		/* These IO's already have an XRI and a mapped sgl. */
10691 		sglq = NULL;
10692 	}
10693 	else {
10694 		/*
10695 		 * This is a continuation of a command (CX), so this
10696 		 * sglq is on the active list.
10697 		 */
10698 		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10699 		if (!sglq)
10700 			return IOCB_ERROR;
10701 	}
10702 
10703 	if (sglq) {
10704 		piocb->sli4_lxritag = sglq->sli4_lxritag;
10705 		piocb->sli4_xritag = sglq->sli4_xritag;
10706 
10707 		/* ABTS sent by initiator to CT exchange, the
10708 		 * RX_ID field will be filled with the newly
10709 		 * allocated responder XRI.
10710 		 */
10711 		if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
10712 		    piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
10713 			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10714 			       piocb->sli4_xritag);
10715 
10716 		bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
10717 		       piocb->sli4_xritag);
10718 
10719 		if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10720 			return IOCB_ERROR;
10721 	}
10722 
10723 	if (lpfc_sli4_wq_put(wq, wqe))
10724 		return IOCB_ERROR;
10725 
10726 	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10727 
10728 	return 0;
10729 }
10730 
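/*
 * Editorial summary of the sglq selection above:
 *
 *	sli4_xritag == NO_XRI, abort (CMD_ABORT_XRI_CX)  no sglq needed
 *	sli4_xritag == NO_XRI, other                     __lpfc_sli_get_els_sglq()
 *	                                                 (txq or IOCB_BUSY if none)
 *	LPFC_IO_FCP                                      XRI/sgl already mapped
 *	otherwise (CX continuation)                      __lpfc_get_active_sglq()
 */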
10731 /*
10732  * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10733  *
10734  * This routine wraps the actual fcp i/o function for issuing a WQE for SLI-4
10735  * or an IOCB for SLI-3, dispatching through the function
10736  * pointer from the lpfc_hba struct.
10737  *
10738  * Return codes:
10739  * IOCB_ERROR - Error
10740  * IOCB_SUCCESS - Success
10741  * IOCB_BUSY - Busy
10742  **/
10743 int
10744 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10745 		      struct lpfc_iocbq *piocb, uint32_t flag)
10746 {
10747 	return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10748 }
10749 
10750 /*
10751  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10752  *
10753  * This routine wraps the actual lockless IOCB issue function, dispatching
10754  * through the function pointer from the lpfc_hba struct.
10755  *
10756  * Return codes:
10757  * IOCB_ERROR - Error
10758  * IOCB_SUCCESS - Success
10759  * IOCB_BUSY - Busy
10760  **/
10761 int
10762 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10763 		struct lpfc_iocbq *piocb, uint32_t flag)
10764 {
10765 	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10766 }
10767 
10768 static void
10769 __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
10770 			       struct lpfc_vport *vport,
10771 			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10772 			       u32 elscmd, u8 tmo, u8 expect_rsp)
10773 {
10774 	struct lpfc_hba *phba = vport->phba;
10775 	IOCB_t *cmd;
10776 
10777 	cmd = &cmdiocbq->iocb;
10778 	memset(cmd, 0, sizeof(*cmd));
10779 
10780 	cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10781 	cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10782 	cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10783 
10784 	if (expect_rsp) {
10785 		cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
10786 		cmd->un.elsreq64.remoteID = did; /* DID */
10787 		cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
10788 		cmd->ulpTimeout = tmo;
10789 	} else {
10790 		cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
10791 		cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
10792 		cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
10793 		cmd->ulpPU = PARM_NPIV_DID;
10794 	}
10795 	cmd->ulpBdeCount = 1;
10796 	cmd->ulpLe = 1;
10797 	cmd->ulpClass = CLASS3;
10798 
10799 	/* If we have NPIV enabled, we want to send ELS traffic by VPI. */
10800 	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
10801 		if (expect_rsp) {
10802 			cmd->un.elsreq64.myID = vport->fc_myDID;
10803 
10804 			/* For ELS_REQUEST64_CR, use the VPI by default */
10805 			cmd->ulpContext = phba->vpi_ids[vport->vpi];
10806 		}
10807 
10808 		cmd->ulpCt_h = 0;
10809 		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10810 		if (elscmd == ELS_CMD_ECHO)
10811 			cmd->ulpCt_l = 0; /* context = invalid RPI */
10812 		else
10813 			cmd->ulpCt_l = 1; /* context = VPI */
10814 	}
10815 }
10816 
10817 static void
10818 __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
10819 			       struct lpfc_vport *vport,
10820 			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10821 			       u32 elscmd, u8 tmo, u8 expect_rsp)
10822 {
10823 	struct lpfc_hba  *phba = vport->phba;
10824 	union lpfc_wqe128 *wqe;
10825 	struct ulp_bde64_le *bde;
10826 	u8 els_id;
10827 
10828 	wqe = &cmdiocbq->wqe;
10829 	memset(wqe, 0, sizeof(*wqe));
10830 
10831 	/* Word 0 - 2 BDE */
10832 	bde = (struct ulp_bde64_le *)&wqe->generic.bde;
10833 	bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10834 	bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10835 	bde->type_size = cpu_to_le32(cmd_size);
10836 	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10837 
10838 	if (expect_rsp) {
10839 		bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
10840 
10841 		/* Transfer length */
10842 		wqe->els_req.payload_len = cmd_size;
10843 		wqe->els_req.max_response_payload_len = FCELSSIZE;
10844 
10845 		/* DID */
10846 		bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
10847 
10848 		/* Word 11 - ELS_ID */
10849 		switch (elscmd) {
10850 		case ELS_CMD_PLOGI:
10851 			els_id = LPFC_ELS_ID_PLOGI;
10852 			break;
10853 		case ELS_CMD_FLOGI:
10854 			els_id = LPFC_ELS_ID_FLOGI;
10855 			break;
10856 		case ELS_CMD_LOGO:
10857 			els_id = LPFC_ELS_ID_LOGO;
10858 			break;
10859 		case ELS_CMD_FDISC:
10860 			if (!vport->fc_myDID) {
10861 				els_id = LPFC_ELS_ID_FDISC;
10862 				break;
10863 			}
10864 			fallthrough;
10865 		default:
10866 			els_id = LPFC_ELS_ID_DEFAULT;
10867 			break;
10868 		}
10869 
10870 		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10871 	} else {
10872 		/* DID */
10873 		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
10874 
10875 		/* Transfer length */
10876 		wqe->xmit_els_rsp.response_payload_len = cmd_size;
10877 
10878 		bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
10879 		       CMD_XMIT_ELS_RSP64_WQE);
10880 	}
10881 
10882 	bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
10883 	bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
10884 	bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
10885 
10886 	/* If we have NPIV enabled, we want to send ELS traffic by VPI.
10887 	 * For SLI4, since the driver controls VPIs we also want to include
10888 	 * all ELS pt2pt protocol traffic as well.
10889 	 */
10890 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
10891 	    test_bit(FC_PT2PT, &vport->fc_flag)) {
10892 		if (expect_rsp) {
10893 			bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
10894 
10895 			/* For ELS_REQUEST64_WQE, use the VPI by default */
10896 			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10897 			       phba->vpi_ids[vport->vpi]);
10898 		}
10899 
10900 		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10901 		if (elscmd == ELS_CMD_ECHO)
10902 			bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
10903 		else
10904 			bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
10905 	}
10906 }
10907 
10908 void
10909 lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10910 			  struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
10911 			  u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
10912 			  u8 expect_rsp)
10913 {
10914 	phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
10915 					  elscmd, tmo, expect_rsp);
10916 }
10917 
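/*
 * Editorial sketch: a hypothetical caller of the prep wrapper above,
 * assuming cmdiocbq and the buffer list bmp were allocated elsewhere
 * (payload_len and tmo are illustrative values):
 *
 *	lpfc_sli_prep_els_req_rsp(phba, cmdiocbq, vport, bmp, payload_len,
 *				  did, ELS_CMD_PLOGI, tmo, 1);
 *
 * The final argument (expect_rsp = 1) selects the ELS_REQUEST path;
 * 0 would build an XMIT_ELS_RSP response instead.
 */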
10918 static void
10919 __lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10920 			   u16 rpi, u32 num_entry, u8 tmo)
10921 {
10922 	IOCB_t *cmd;
10923 
10924 	cmd = &cmdiocbq->iocb;
10925 	memset(cmd, 0, sizeof(*cmd));
10926 
10927 	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10928 	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10929 	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10930 	cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
10931 
10932 	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
10933 	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
10934 	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
10935 
10936 	cmd->ulpContext = rpi;
10937 	cmd->ulpClass = CLASS3;
10938 	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
10939 	cmd->ulpBdeCount = 1;
10940 	cmd->ulpLe = 1;
10941 	cmd->ulpOwner = OWN_CHIP;
10942 	cmd->ulpTimeout = tmo;
10943 }
10944 
10945 static void
10946 __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10947 			   u16 rpi, u32 num_entry, u8 tmo)
10948 {
10949 	union lpfc_wqe128 *cmdwqe;
10950 	struct ulp_bde64_le *bde, *bpl;
10951 	u32 xmit_len = 0, total_len = 0, size, type, i;
10952 
10953 	cmdwqe = &cmdiocbq->wqe;
10954 	memset(cmdwqe, 0, sizeof(*cmdwqe));
10955 
10956 	/* Calculate total_len and xmit_len */
10957 	bpl = (struct ulp_bde64_le *)bmp->virt;
10958 	for (i = 0; i < num_entry; i++) {
10959 		size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10960 		total_len += size;
10961 	}
10962 	for (i = 0; i < num_entry; i++) {
10963 		size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
10964 		type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
10965 		if (type != ULP_BDE64_TYPE_BDE_64)
10966 			break;
10967 		xmit_len += size;
10968 	}
10969 
10970 	/* Words 0 - 2 */
10971 	bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
10972 	bde->addr_low = bpl->addr_low;
10973 	bde->addr_high = bpl->addr_high;
10974 	bde->type_size = cpu_to_le32(xmit_len);
10975 	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10976 
10977 	/* Word 3 */
10978 	cmdwqe->gen_req.request_payload_len = xmit_len;
10979 
10980 	/* Word 5 */
10981 	bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
10982 	bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
10983 	bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
10984 	bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
10985 
10986 	/* Word 6 */
10987 	bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
10988 
10989 	/* Word 7 */
10990 	bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
10991 	bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
10992 	bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
10993 	bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
10994 
10995 	/* Word 12 */
10996 	cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
10997 }
10998 
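/*
 * Editorial note on the length math above: the BPL appears to lay out
 * the request BDEs (type BDE_64) first, followed by the response BDEs,
 * so that
 *
 *	xmit_len  = sum of the leading BDE_64 entry sizes  (request)
 *	total_len = sum of all entry sizes                 (request + response)
 *	max_response_payload_len = total_len - xmit_len
 */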
10999 void
11000 lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11001 		      struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
11002 {
11003 	phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
11004 }
11005 
11006 static void
11007 __lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
11008 			      struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11009 			      u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11010 {
11011 	IOCB_t *icmd;
11012 
11013 	icmd = &cmdiocbq->iocb;
11014 	memset(icmd, 0, sizeof(*icmd));
11015 
11016 	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
11017 	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
11018 	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
11019 	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
11020 	icmd->un.xseq64.w5.hcsw.Fctl = LA;
11021 	if (last_seq)
11022 		icmd->un.xseq64.w5.hcsw.Fctl |= LS;
11023 	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
11024 	icmd->un.xseq64.w5.hcsw.Rctl = rctl;
11025 	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
11026 
11027 	icmd->ulpBdeCount = 1;
11028 	icmd->ulpLe = 1;
11029 	icmd->ulpClass = CLASS3;
11030 
11031 	switch (cr_cx_cmd) {
11032 	case CMD_XMIT_SEQUENCE64_CR:
11033 		icmd->ulpContext = rpi;
11034 		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
11035 		break;
11036 	case CMD_XMIT_SEQUENCE64_CX:
11037 		icmd->ulpContext = ox_id;
11038 		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
11039 		break;
11040 	default:
11041 		break;
11042 	}
11043 }
11044 
11045 static void
11046 __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
11047 			      struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11048 			      u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11049 {
11050 	union lpfc_wqe128 *wqe;
11051 	struct ulp_bde64 *bpl;
11052 
11053 	wqe = &cmdiocbq->wqe;
11054 	memset(wqe, 0, sizeof(*wqe));
11055 
11056 	/* Words 0 - 2 */
11057 	bpl = (struct ulp_bde64 *)bmp->virt;
11058 	wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
11059 	wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
11060 	wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
11061 
11062 	/* Word 5 */
11063 	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
11064 	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
11065 	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
11066 	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
11067 	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
11068 
11069 	/* Word 6 */
11070 	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
11071 
11072 	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
11073 	       CMD_XMIT_SEQUENCE64_WQE);
11074 
11075 	/* Word 7 */
11076 	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
11077 
11078 	/* Word 9 */
11079 	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
11080 
11081 	/* Word 12 */
11082 	if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK))
11083 		wqe->xmit_sequence.xmit_len = full_size;
11084 	else
11085 		wqe->xmit_sequence.xmit_len =
11086 			wqe->xmit_sequence.bde.tus.f.bdeSize;
11087 }
11088 
11089 void
11090 lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11091 			 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11092 			 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11093 {
11094 	phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
11095 					 rctl, last_seq, cr_cx_cmd);
11096 }
11097 
11098 static void
11099 __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11100 			     u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11101 			     bool wqec)
11102 {
11103 	IOCB_t *icmd = NULL;
11104 
11105 	icmd = &cmdiocbq->iocb;
11106 	memset(icmd, 0, sizeof(*icmd));
11107 
11108 	/* Word 5 */
11109 	icmd->un.acxri.abortContextTag = ulp_context;
11110 	icmd->un.acxri.abortIoTag = iotag;
11111 
11112 	if (ia) {
11113 		/* Word 7 */
11114 		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
11115 	} else {
11116 		/* Word 3 */
11117 		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
11118 
11119 		/* Word 7 */
11120 		icmd->ulpClass = ulp_class;
11121 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
11122 	}
11123 
11124 	/* Word 7 */
11125 	icmd->ulpLe = 1;
11126 }
11127 
11128 static void
11129 __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11130 			     u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11131 			     bool wqec)
11132 {
11133 	union lpfc_wqe128 *wqe;
11134 
11135 	wqe = &cmdiocbq->wqe;
11136 	memset(wqe, 0, sizeof(*wqe));
11137 
11138 	/* Word 3 */
11139 	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
11140 	if (ia)
11141 		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
11142 	else
11143 		bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
11144 
11145 	/* Word 7 */
11146 	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
11147 
11148 	/* Word 8 */
11149 	wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
11150 
11151 	/* Word 9 */
11152 	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
11153 
11154 	/* Word 10 */
11155 	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
11156 
11157 	/* Word 11 */
11158 	if (wqec)
11159 		bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
11160 	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
11161 	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11162 }
11163 
11164 void
11165 lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11166 			u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
11167 			bool ia, bool wqec)
11168 {
11169 	phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
11170 					cqid, ia, wqec);
11171 }
11172 
11173 /**
11174  * lpfc_sli_api_table_setup - Set up sli api function jump table
11175  * @phba: The hba struct for which this call is being executed.
11176  * @dev_grp: The HBA PCI-Device group number.
11177  *
11178  * This routine sets up the SLI interface API function jump table in @phba
11179  * struct.
11180  * Returns: 0 - success, -ENODEV - failure.
11181  **/
11182 int
11183 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
11184 {
11185 
11186 	switch (dev_grp) {
11187 	case LPFC_PCI_DEV_LP:
11188 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
11189 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
11190 		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
11191 		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
11192 		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
11193 		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
11194 		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
11195 		break;
11196 	case LPFC_PCI_DEV_OC:
11197 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
11198 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
11199 		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
11200 		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
11201 		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
11202 		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
11203 		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
11204 		break;
11205 	default:
11206 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11207 				"1419 Invalid HBA PCI-device group: 0x%x\n",
11208 				dev_grp);
11209 		return -ENODEV;
11210 	}
11211 	return 0;
11212 }
11213 
11214 /**
11215  * lpfc_sli4_calc_ring - Calculates which ring to use
11216  * @phba: Pointer to HBA context object.
11217  * @piocb: Pointer to command iocb.
11218  *
11219  * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
11220  * hba_wqidx; thus we need to calculate the corresponding ring.
11221  * Since ABORTs must go on the same WQ as the command they are
11222  * aborting, we use the command's hba_wqidx.
11223  */
11224 struct lpfc_sli_ring *
11225 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
11226 {
11227 	struct lpfc_io_buf *lpfc_cmd;
11228 
11229 	if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
11230 		if (unlikely(!phba->sli4_hba.hdwq))
11231 			return NULL;
11232 		/*
11233 		 * for abort iocb hba_wqidx should already
11234 		 * be setup based on what work queue we used.
11235 		 */
11236 		if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
11237 			lpfc_cmd = piocb->io_buf;
11238 			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
11239 		}
11240 		return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
11241 	} else {
11242 		if (unlikely(!phba->sli4_hba.els_wq))
11243 			return NULL;
11244 		piocb->hba_wqidx = 0;
11245 		return phba->sli4_hba.els_wq->pring;
11246 	}
11247 }
11248 
11249 inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
11250 {
11251 	struct lpfc_hba *phba = eq->phba;
11252 
11253 	/*
11254 	 * Unlocking an irq is one of the entry points to check
11255 	 * for re-schedule, but we are fine on the io submission
11256 	 * path as the midlayer does a get_cpu to glue us in. Flush
11257 	 * out the invalidate queue so we can see the updated
11258 	 * value for the flag.
11259 	 */
11260 	smp_rmb();
11261 
11262 	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
11263 		/* We will not likely get the completion for the caller
11264 		 * during this iteration, but that's fine.
11265 		 * Future io's coming on this eq should be able to
11266 		 * pick it up.  As for the case of single io's, they
11267 		 * will be handled through a sched from the polling timer
11268 		 * function, which is currently triggered every 1 msec.
11269 		 */
11270 		lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
11271 				     LPFC_QUEUE_WORK);
11272 }
11273 
11274 /**
11275  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11276  * @phba: Pointer to HBA context object.
11277  * @ring_number: Ring number
11278  * @piocb: Pointer to command iocb.
11279  * @flag: Flag indicating if this command can be put into txq.
11280  *
11281  * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
11282  * function. This function gets the hbalock and calls
11283  * __lpfc_sli_issue_iocb function and will return the error returned
11284  * by __lpfc_sli_issue_iocb function. This wrapper is used by
11285  * functions which do not hold hbalock.
11286  **/
11287 int
11288 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11289 		    struct lpfc_iocbq *piocb, uint32_t flag)
11290 {
11291 	struct lpfc_sli_ring *pring;
11292 	struct lpfc_queue *eq;
11293 	unsigned long iflags;
11294 	int rc;
11295 
11296 	/* If the PCI channel is in offline state, do not post iocbs. */
11297 	if (unlikely(pci_channel_offline(phba->pcidev)))
11298 		return IOCB_ERROR;
11299 
11300 	if (phba->sli_rev == LPFC_SLI_REV4) {
11301 		lpfc_sli_prep_wqe(phba, piocb);
11302 
11303 		eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11304 
11305 		pring = lpfc_sli4_calc_ring(phba, piocb);
11306 		if (unlikely(pring == NULL))
11307 			return IOCB_ERROR;
11308 
11309 		spin_lock_irqsave(&pring->ring_lock, iflags);
11310 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11311 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
11312 
11313 		lpfc_sli4_poll_eq(eq);
11314 	} else {
11315 		/* For now, SLI2/3 will still use hbalock */
11316 		spin_lock_irqsave(&phba->hbalock, iflags);
11317 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11318 		spin_unlock_irqrestore(&phba->hbalock, iflags);
11319 	}
11320 	return rc;
11321 }
11322 
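/*
 * Editorial sketch: a typical lock-free caller of the wrapper above
 * (elsiocb is an illustrative, already-prepared iocb):
 *
 *	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 *	if (rc == IOCB_ERROR)
 *		release elsiocb and fail the request
 *
 * With SLI_IOCB_RET_IOCB clear in @flag, a full ring queues the iocb
 * on the txq and IOCB_SUCCESS is still returned.
 */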
11323 /**
11324  * lpfc_extra_ring_setup - Extra ring setup function
11325  * @phba: Pointer to HBA context object.
11326  *
11327  * This function is called while the driver attaches to the
11328  * HBA to set up the extra ring. The extra ring is used
11329  * only when the driver needs to support target mode
11330  * or IP over FC functionality.
11331  *
11332  * This function is called with no lock held. SLI3 only.
11333  **/
11334 static int
11335 lpfc_extra_ring_setup( struct lpfc_hba *phba)
11336 {
11337 	struct lpfc_sli *psli;
11338 	struct lpfc_sli_ring *pring;
11339 
11340 	psli = &phba->sli;
11341 
11342 	/* Adjust cmd/rsp ring iocb entries more evenly */
11343 
11344 	/* Take some away from the FCP ring */
11345 	pring = &psli->sli3_ring[LPFC_FCP_RING];
11346 	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11347 	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11348 	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11349 	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11350 
11351 	/* and give them to the extra ring */
11352 	pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11353 
11354 	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11355 	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11356 	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11357 	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11358 
11359 	/* Setup default profile for this ring */
11360 	pring->iotag_max = 4096;
11361 	pring->num_mask = 1;
11362 	pring->prt[0].profile = 0;      /* Mask 0 */
11363 	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11364 	pring->prt[0].type = phba->cfg_multi_ring_type;
11365 	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11366 	return 0;
11367 }
11368 
11369 static void
11370 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11371 			     struct lpfc_nodelist *ndlp)
11372 {
11373 	unsigned long iflags;
11374 	struct lpfc_work_evt  *evtp = &ndlp->recovery_evt;
11375 
11376 	/* Hold a node reference for outstanding queued work */
11377 	if (!lpfc_nlp_get(ndlp))
11378 		return;
11379 
11380 	spin_lock_irqsave(&phba->hbalock, iflags);
11381 	if (!list_empty(&evtp->evt_listp)) {
11382 		spin_unlock_irqrestore(&phba->hbalock, iflags);
11383 		lpfc_nlp_put(ndlp);
11384 		return;
11385 	}
11386 
11387 	evtp->evt_arg1 = ndlp;
11388 	evtp->evt = LPFC_EVT_RECOVER_PORT;
11389 	list_add_tail(&evtp->evt_listp, &phba->work_list);
11390 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11391 
11392 	lpfc_worker_wake_up(phba);
11393 }
11394 
11395 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11396  * @phba: Pointer to HBA context object.
11397  * @iocbq: Pointer to iocb object.
11398  *
11399  * The async_event handler calls this routine when it receives
11400  * an ASYNC_STATUS_CN event from the port.  The port generates
11401  * this event when an Abort Sequence request to an rport fails
11402  * twice in succession.  The abort could be originated by the
11403  * driver or by the port.  The ABTS could have been for an ELS
11404  * or FCP IO.  The port only generates this event when an ABTS
11405  * fails to complete after one retry.
11406  */
11407 static void
11408 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11409 			  struct lpfc_iocbq *iocbq)
11410 {
11411 	struct lpfc_nodelist *ndlp = NULL;
11412 	uint16_t rpi = 0, vpi = 0;
11413 	struct lpfc_vport *vport = NULL;
11414 
11415 	/* The rpi in the ulpContext is vport-sensitive. */
11416 	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11417 	rpi = iocbq->iocb.ulpContext;
11418 
11419 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11420 			"3092 Port generated ABTS async event "
11421 			"on vpi %d rpi %d status 0x%x\n",
11422 			vpi, rpi, iocbq->iocb.ulpStatus);
11423 
11424 	vport = lpfc_find_vport_by_vpid(phba, vpi);
11425 	if (!vport)
11426 		goto err_exit;
11427 	ndlp = lpfc_findnode_rpi(vport, rpi);
11428 	if (!ndlp)
11429 		goto err_exit;
11430 
11431 	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11432 		lpfc_sli_abts_recover_port(vport, ndlp);
11433 	return;
11434 
11435  err_exit:
11436 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11437 			"3095 Event Context not found, no "
11438 			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11439 			vpi, rpi, iocbq->iocb.ulpStatus,
11440 			iocbq->iocb.ulpContext);
11441 }
11442 
11443 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11444  * @phba: pointer to HBA context object.
11445  * @ndlp: nodelist pointer for the impacted rport.
11446  * @axri: pointer to the wcqe containing the failed exchange.
11447  *
11448  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11449  * port.  The port generates this event when an abort exchange request to an
11450  * rport fails twice in succession with no reply.  The abort could be originated
11451  * by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
11452  */
11453 void
11454 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11455 			   struct lpfc_nodelist *ndlp,
11456 			   struct sli4_wcqe_xri_aborted *axri)
11457 {
11458 	uint32_t ext_status = 0;
11459 
11460 	if (!ndlp) {
11461 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11462 				"3115 Node Context not found, driver "
11463 				"ignoring abts err event\n");
11464 		return;
11465 	}
11466 
11467 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11468 			"3116 Port generated FCP XRI ABORT event on "
11469 			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11470 			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11471 			bf_get(lpfc_wcqe_xa_xri, axri),
11472 			bf_get(lpfc_wcqe_xa_status, axri),
11473 			axri->parameter);
11474 
11475 	/*
11476 	 * Catch the ABTS protocol failure case.  Older OCe FW releases returned
11477 	 * LOCAL_REJECT and 0 for a failed ABTS exchange; later OCe and
11478 	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11479 	 */
11480 	ext_status = axri->parameter & IOERR_PARAM_MASK;
11481 	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11482 	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11483 		lpfc_sli_post_recovery_event(phba, ndlp);
11484 }
11485 
11486 /**
11487  * lpfc_sli_async_event_handler - ASYNC iocb handler function
11488  * @phba: Pointer to HBA context object.
11489  * @pring: Pointer to driver SLI ring object.
11490  * @iocbq: Pointer to iocb object.
11491  *
11492  * This function is called by the slow ring event handler
11493  * function when there is an ASYNC event iocb in the ring.
11494  * This function is called with no lock held.
11495  * Currently this function handles only temperature related
11496  * ASYNC events. The function decodes the temperature sensor
11497  * event message and posts events for the management applications.
11498  **/
11499 static void
11500 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
11501 	struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
11502 {
11503 	IOCB_t *icmd;
11504 	uint16_t evt_code;
11505 	struct temp_event temp_event_data;
11506 	struct Scsi_Host *shost;
11507 	uint32_t *iocb_w;
11508 
11509 	icmd = &iocbq->iocb;
11510 	evt_code = icmd->un.asyncstat.evt_code;
11511 
11512 	switch (evt_code) {
11513 	case ASYNC_TEMP_WARN:
11514 	case ASYNC_TEMP_SAFE:
11515 		temp_event_data.data = (uint32_t) icmd->ulpContext;
11516 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11517 		if (evt_code == ASYNC_TEMP_WARN) {
11518 			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11519 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11520 				"0347 Adapter is very hot, please take "
11521 				"corrective action. temperature : %d Celsius\n",
11522 				(uint32_t) icmd->ulpContext);
11523 		} else {
11524 			temp_event_data.event_code = LPFC_NORMAL_TEMP;
11525 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11526 				"0340 Adapter temperature is OK now. "
11527 				"temperature : %d Celsius\n",
11528 				(uint32_t) icmd->ulpContext);
11529 		}
11530 
11531 		/* Send temperature change event to applications */
11532 		shost = lpfc_shost_from_vport(phba->pport);
11533 		fc_host_post_vendor_event(shost, fc_get_event_number(),
11534 			sizeof(temp_event_data), (char *) &temp_event_data,
11535 			LPFC_NL_VENDOR_ID);
11536 		break;
11537 	case ASYNC_STATUS_CN:
11538 		lpfc_sli_abts_err_handler(phba, iocbq);
11539 		break;
11540 	default:
11541 		iocb_w = (uint32_t *) icmd;
11542 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11543 			"0346 Ring %d handler: unexpected ASYNC_STATUS"
11544 			" evt_code 0x%x\n"
11545 			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
11546 			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
11547 			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
11548 			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11549 			pring->ringno, icmd->un.asyncstat.evt_code,
11550 			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11551 			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11552 			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11553 			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11554 
11555 		break;
11556 	}
11557 }
11558 
11559 
11560 /**
11561  * lpfc_sli4_setup - SLI ring setup function
11562  * @phba: Pointer to HBA context object.
11563  *
11564  * lpfc_sli4_setup sets up rings of the SLI interface with
11565  * the number of iocbs per ring and iotags. This function is
11566  * called while the driver attaches to the HBA and before the
11567  * interrupts are enabled, so there is no need for locking.
11568  *
11569  * This function always returns 0.
11570  **/
11571 int
11572 lpfc_sli4_setup(struct lpfc_hba *phba)
11573 {
11574 	struct lpfc_sli_ring *pring;
11575 
11576 	pring = phba->sli4_hba.els_wq->pring;
11577 	pring->num_mask = LPFC_MAX_RING_MASK;
11578 	pring->prt[0].profile = 0;	/* Mask 0 */
11579 	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11580 	pring->prt[0].type = FC_TYPE_ELS;
11581 	pring->prt[0].lpfc_sli_rcv_unsol_event =
11582 	    lpfc_els_unsol_event;
11583 	pring->prt[1].profile = 0;	/* Mask 1 */
11584 	pring->prt[1].rctl = FC_RCTL_ELS_REP;
11585 	pring->prt[1].type = FC_TYPE_ELS;
11586 	pring->prt[1].lpfc_sli_rcv_unsol_event =
11587 	    lpfc_els_unsol_event;
11588 	pring->prt[2].profile = 0;	/* Mask 2 */
11589 	/* NameServer Inquiry */
11590 	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11591 	/* NameServer */
11592 	pring->prt[2].type = FC_TYPE_CT;
11593 	pring->prt[2].lpfc_sli_rcv_unsol_event =
11594 	    lpfc_ct_unsol_event;
11595 	pring->prt[3].profile = 0;	/* Mask 3 */
11596 	/* NameServer response */
11597 	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11598 	/* NameServer */
11599 	pring->prt[3].type = FC_TYPE_CT;
11600 	pring->prt[3].lpfc_sli_rcv_unsol_event =
11601 	    lpfc_ct_unsol_event;
11602 	return 0;
11603 }
11604 
11605 /**
11606  * lpfc_sli_setup - SLI ring setup function
11607  * @phba: Pointer to HBA context object.
11608  *
11609  * lpfc_sli_setup sets up rings of the SLI interface with
11610  * the number of iocbs per ring and iotags. This function is
11611  * called while the driver attaches to the HBA and before the
11612  * interrupts are enabled, so there is no need for locking.
11613  *
11614  * This function always returns 0. SLI3 only.
11615  **/
11616 int
11617 lpfc_sli_setup(struct lpfc_hba *phba)
11618 {
11619 	int i, totiocbsize = 0;
11620 	struct lpfc_sli *psli = &phba->sli;
11621 	struct lpfc_sli_ring *pring;
11622 
11623 	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11624 	psli->sli_flag = 0;
11625 
11626 	psli->iocbq_lookup = NULL;
11627 	psli->iocbq_lookup_len = 0;
11628 	psli->last_iotag = 0;
11629 
11630 	for (i = 0; i < psli->num_rings; i++) {
11631 		pring = &psli->sli3_ring[i];
11632 		switch (i) {
11633 		case LPFC_FCP_RING:	/* ring 0 - FCP */
11634 			/* numCiocb and numRiocb are used in config_port */
11635 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11636 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11637 			pring->sli.sli3.numCiocb +=
11638 				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11639 			pring->sli.sli3.numRiocb +=
11640 				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11641 			pring->sli.sli3.numCiocb +=
11642 				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11643 			pring->sli.sli3.numRiocb +=
11644 				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11645 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11646 							SLI3_IOCB_CMD_SIZE :
11647 							SLI2_IOCB_CMD_SIZE;
11648 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11649 							SLI3_IOCB_RSP_SIZE :
11650 							SLI2_IOCB_RSP_SIZE;
11651 			pring->iotag_ctr = 0;
11652 			pring->iotag_max =
11653 			    (phba->cfg_hba_queue_depth * 2);
11654 			pring->fast_iotag = pring->iotag_max;
11655 			pring->num_mask = 0;
11656 			break;
11657 		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
11658 			/* numCiocb and numRiocb are used in config_port */
11659 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11660 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11661 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11662 							SLI3_IOCB_CMD_SIZE :
11663 							SLI2_IOCB_CMD_SIZE;
11664 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11665 							SLI3_IOCB_RSP_SIZE :
11666 							SLI2_IOCB_RSP_SIZE;
11667 			pring->iotag_max = phba->cfg_hba_queue_depth;
11668 			pring->num_mask = 0;
11669 			break;
11670 		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
11671 			/* numCiocb and numRiocb are used in config_port */
11672 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11673 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11674 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11675 							SLI3_IOCB_CMD_SIZE :
11676 							SLI2_IOCB_CMD_SIZE;
11677 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11678 							SLI3_IOCB_RSP_SIZE :
11679 							SLI2_IOCB_RSP_SIZE;
11680 			pring->fast_iotag = 0;
11681 			pring->iotag_ctr = 0;
11682 			pring->iotag_max = 4096;
11683 			pring->lpfc_sli_rcv_async_status =
11684 				lpfc_sli_async_event_handler;
11685 			pring->num_mask = LPFC_MAX_RING_MASK;
11686 			pring->prt[0].profile = 0;	/* Mask 0 */
11687 			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11688 			pring->prt[0].type = FC_TYPE_ELS;
11689 			pring->prt[0].lpfc_sli_rcv_unsol_event =
11690 			    lpfc_els_unsol_event;
11691 			pring->prt[1].profile = 0;	/* Mask 1 */
11692 			pring->prt[1].rctl = FC_RCTL_ELS_REP;
11693 			pring->prt[1].type = FC_TYPE_ELS;
11694 			pring->prt[1].lpfc_sli_rcv_unsol_event =
11695 			    lpfc_els_unsol_event;
11696 			pring->prt[2].profile = 0;	/* Mask 2 */
11697 			/* NameServer Inquiry */
11698 			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11699 			/* NameServer */
11700 			pring->prt[2].type = FC_TYPE_CT;
11701 			pring->prt[2].lpfc_sli_rcv_unsol_event =
11702 			    lpfc_ct_unsol_event;
11703 			pring->prt[3].profile = 0;	/* Mask 3 */
11704 			/* NameServer response */
11705 			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11706 			/* NameServer */
11707 			pring->prt[3].type = FC_TYPE_CT;
11708 			pring->prt[3].lpfc_sli_rcv_unsol_event =
11709 			    lpfc_ct_unsol_event;
11710 			break;
11711 		}
11712 		totiocbsize += (pring->sli.sli3.numCiocb *
11713 			pring->sli.sli3.sizeCiocb) +
11714 			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11715 	}
11716 	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11717 		/* Too many cmd / rsp ring entries in SLI2 SLIM */
11718 		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11719 		       "SLI2 SLIM Data: x%x x%lx\n",
11720 		       phba->brd_no, totiocbsize,
11721 		       (unsigned long) MAX_SLIM_IOCB_SIZE);
11722 	}
11723 	if (phba->cfg_multi_ring_support == 2)
11724 		lpfc_extra_ring_setup(phba);
11725 
11726 	return 0;
11727 }
11728 
11729 /**
11730  * lpfc_sli4_queue_init - Queue initialization function
11731  * @phba: Pointer to HBA context object.
11732  *
11733  * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11734  * ring. This function also initializes ring indices of each ring.
11735  * This function is called during the initialization of the SLI
11736  * interface of an HBA.
11737  * This function is called with no lock held and does not
11738  * return a value.
11739  **/
11740 void
11741 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11742 {
11743 	struct lpfc_sli *psli;
11744 	struct lpfc_sli_ring *pring;
11745 	int i;
11746 
11747 	psli = &phba->sli;
11748 	spin_lock_irq(&phba->hbalock);
11749 	INIT_LIST_HEAD(&psli->mboxq);
11750 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
11751 	/* Initialize list headers for txq and txcmplq as doubly linked lists */
11752 	for (i = 0; i < phba->cfg_hdw_queue; i++) {
11753 		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11754 		pring->flag = 0;
11755 		pring->ringno = LPFC_FCP_RING;
11756 		pring->txcmplq_cnt = 0;
11757 		INIT_LIST_HEAD(&pring->txq);
11758 		INIT_LIST_HEAD(&pring->txcmplq);
11759 		INIT_LIST_HEAD(&pring->iocb_continueq);
11760 		spin_lock_init(&pring->ring_lock);
11761 	}
11762 	pring = phba->sli4_hba.els_wq->pring;
11763 	pring->flag = 0;
11764 	pring->ringno = LPFC_ELS_RING;
11765 	pring->txcmplq_cnt = 0;
11766 	INIT_LIST_HEAD(&pring->txq);
11767 	INIT_LIST_HEAD(&pring->txcmplq);
11768 	INIT_LIST_HEAD(&pring->iocb_continueq);
11769 	spin_lock_init(&pring->ring_lock);
11770 
11771 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11772 		pring = phba->sli4_hba.nvmels_wq->pring;
11773 		pring->flag = 0;
11774 		pring->ringno = LPFC_ELS_RING;
11775 		pring->txcmplq_cnt = 0;
11776 		INIT_LIST_HEAD(&pring->txq);
11777 		INIT_LIST_HEAD(&pring->txcmplq);
11778 		INIT_LIST_HEAD(&pring->iocb_continueq);
11779 		spin_lock_init(&pring->ring_lock);
11780 	}
11781 
11782 	spin_unlock_irq(&phba->hbalock);
11783 }
11784 
11785 /**
11786  * lpfc_sli_queue_init - Queue initialization function
11787  * @phba: Pointer to HBA context object.
11788  *
11789  * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11790  * ring. This function also initializes ring indices of each ring.
11791  * This function is called during the initialization of the SLI
11792  * interface of an HBA.
11793  * This function is called with no lock held and does not
11794  * return a value.
11795  **/
11796 void
11797 lpfc_sli_queue_init(struct lpfc_hba *phba)
11798 {
11799 	struct lpfc_sli *psli;
11800 	struct lpfc_sli_ring *pring;
11801 	int i;
11802 
11803 	psli = &phba->sli;
11804 	spin_lock_irq(&phba->hbalock);
11805 	INIT_LIST_HEAD(&psli->mboxq);
11806 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
11807 	/* Initialize list headers for txq and txcmplq as doubly linked lists */
11808 	for (i = 0; i < psli->num_rings; i++) {
11809 		pring = &psli->sli3_ring[i];
11810 		pring->ringno = i;
11811 		pring->sli.sli3.next_cmdidx  = 0;
11812 		pring->sli.sli3.local_getidx = 0;
11813 		pring->sli.sli3.cmdidx = 0;
11814 		INIT_LIST_HEAD(&pring->iocb_continueq);
11815 		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11816 		INIT_LIST_HEAD(&pring->postbufq);
11817 		pring->flag = 0;
11818 		INIT_LIST_HEAD(&pring->txq);
11819 		INIT_LIST_HEAD(&pring->txcmplq);
11820 		spin_lock_init(&pring->ring_lock);
11821 	}
11822 	spin_unlock_irq(&phba->hbalock);
11823 }
11824 
11825 /**
11826  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11827  * @phba: Pointer to HBA context object.
11828  *
11829  * This routine flushes the mailbox command subsystem. It will unconditionally
11830  * flush all the mailbox commands in the three possible stages in the mailbox
11831  * command sub-system: pending mailbox command queue; the outstanding mailbox
11832  * command; and completed mailbox command queue. It is the caller's responsibility
11833  * to make sure that the driver is in the proper state to flush the mailbox
11834  * command sub-system. Namely, the posting of mailbox commands into the
11835  * pending mailbox command queue from the various clients must be stopped;
11836  * either the HBA is in a state in which it will never work on the outstanding
11837  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11838  * mailbox command has been completed.
11839  **/
11840 static void
11841 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11842 {
11843 	LIST_HEAD(completions);
11844 	struct lpfc_sli *psli = &phba->sli;
11845 	LPFC_MBOXQ_t *pmb;
11846 	unsigned long iflag;
11847 
11848 	/* Disable softirqs, including timers from obtaining phba->hbalock */
11849 	local_bh_disable();
11850 
11851 	/* Flush all the mailbox commands in the mbox system */
11852 	spin_lock_irqsave(&phba->hbalock, iflag);
11853 
11854 	/* The pending mailbox command queue */
11855 	list_splice_init(&phba->sli.mboxq, &completions);
11856 	/* The outstanding active mailbox command */
11857 	if (psli->mbox_active) {
11858 		list_add_tail(&psli->mbox_active->list, &completions);
11859 		psli->mbox_active = NULL;
11860 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11861 	}
11862 	/* The completed mailbox command queue */
11863 	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11864 	spin_unlock_irqrestore(&phba->hbalock, iflag);
11865 
11866 	/* Enable softirqs again, done with phba->hbalock */
11867 	local_bh_enable();
11868 
11869 	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11870 	while (!list_empty(&completions)) {
11871 		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11872 		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11873 		if (pmb->mbox_cmpl)
11874 			pmb->mbox_cmpl(phba, pmb);
11875 	}
11876 }
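
/*
 * Note (illustrative, not part of the driver): a mailbox completion
 * handler invoked from this flush path can tell a flushed command from a
 * normally completed one by its status, since the flush unconditionally
 * stamps MBX_NOT_FINISHED:
 *
 *	if (pmb->u.mb.mbxStatus == MBX_NOT_FINISHED)
 *		... treat the command as cancelled rather than failed ...
 */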
11877 
11878 /**
11879  * lpfc_sli_host_down - Vport cleanup function
11880  * @vport: Pointer to virtual port object.
11881  *
11882  * lpfc_sli_host_down is called to clean up the resources
11883  * associated with a vport before destroying virtual
11884  * port data structures.
11885  * This function does the following operations:
11886  * - Free discovery resources associated with this virtual
11887  *   port.
11888  * - Free iocbs associated with this virtual port in
11889  *   the txq.
11890  * - Send abort for all iocb commands associated with this
11891  *   vport in txcmplq.
11892  *
11893  * This function is called with no lock held and always returns 1.
11894  **/
11895 int
11896 lpfc_sli_host_down(struct lpfc_vport *vport)
11897 {
11898 	LIST_HEAD(completions);
11899 	struct lpfc_hba *phba = vport->phba;
11900 	struct lpfc_sli *psli = &phba->sli;
11901 	struct lpfc_queue *qp = NULL;
11902 	struct lpfc_sli_ring *pring;
11903 	struct lpfc_iocbq *iocb, *next_iocb;
11904 	int i;
11905 	unsigned long flags = 0;
11906 	uint16_t prev_pring_flag;
11907 
11908 	lpfc_cleanup_discovery_resources(vport);
11909 
11910 	spin_lock_irqsave(&phba->hbalock, flags);
11911 
11912 	/*
11913 	 * Error everything on the txq since these iocbs
11914 	 * have not been given to the FW yet.
11915 	 * Also issue ABTS for everything on the txcmplq
11916 	 */
11917 	if (phba->sli_rev != LPFC_SLI_REV4) {
11918 		for (i = 0; i < psli->num_rings; i++) {
11919 			pring = &psli->sli3_ring[i];
11920 			prev_pring_flag = pring->flag;
11921 			/* Only slow rings */
11922 			if (pring->ringno == LPFC_ELS_RING) {
11923 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
11924 				/* Set the lpfc data pending flag */
11925 				set_bit(LPFC_DATA_READY, &phba->data_flags);
11926 			}
11927 			list_for_each_entry_safe(iocb, next_iocb,
11928 						 &pring->txq, list) {
11929 				if (iocb->vport != vport)
11930 					continue;
11931 				list_move_tail(&iocb->list, &completions);
11932 			}
11933 			list_for_each_entry_safe(iocb, next_iocb,
11934 						 &pring->txcmplq, list) {
11935 				if (iocb->vport != vport)
11936 					continue;
11937 				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11938 							   NULL);
11939 			}
11940 			pring->flag = prev_pring_flag;
11941 		}
11942 	} else {
11943 		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11944 			pring = qp->pring;
11945 			if (!pring)
11946 				continue;
11947 			if (pring == phba->sli4_hba.els_wq->pring) {
11948 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
11949 				/* Set the lpfc data pending flag */
11950 				set_bit(LPFC_DATA_READY, &phba->data_flags);
11951 			}
11952 			prev_pring_flag = pring->flag;
11953 			spin_lock(&pring->ring_lock);
11954 			list_for_each_entry_safe(iocb, next_iocb,
11955 						 &pring->txq, list) {
11956 				if (iocb->vport != vport)
11957 					continue;
11958 				list_move_tail(&iocb->list, &completions);
11959 			}
11960 			spin_unlock(&pring->ring_lock);
11961 			list_for_each_entry_safe(iocb, next_iocb,
11962 						 &pring->txcmplq, list) {
11963 				if (iocb->vport != vport)
11964 					continue;
11965 				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11966 							   NULL);
11967 			}
11968 			pring->flag = prev_pring_flag;
11969 		}
11970 	}
11971 	spin_unlock_irqrestore(&phba->hbalock, flags);
11972 
11973 	/* Make sure HBA is alive */
11974 	lpfc_issue_hb_tmo(phba);
11975 
11976 	/* Cancel all the IOCBs from the completions list */
11977 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
11978 			      IOERR_SLI_DOWN);
11979 	return 1;
11980 }
11981 
11982 /**
11983  * lpfc_sli_hba_down - Resource cleanup function for the HBA
11984  * @phba: Pointer to HBA context object.
11985  *
11986  * This function cleans up all iocb, buffers, mailbox commands
11987  * while shutting down the HBA. This function is called with no
11988  * lock held and always returns 1.
11989  * This function does the following to cleanup driver resources:
11990  * - Free discovery resources for each virtual port
11991  * - Cleanup any pending fabric iocbs
11992  * - Iterate through the iocb txq and free each entry
11993  *   in the list.
11994  * - Free up any buffer posted to the HBA
11995  * - Free mailbox commands in the mailbox queue.
11996  **/
11997 int
11998 lpfc_sli_hba_down(struct lpfc_hba *phba)
11999 {
12000 	LIST_HEAD(completions);
12001 	struct lpfc_sli *psli = &phba->sli;
12002 	struct lpfc_queue *qp = NULL;
12003 	struct lpfc_sli_ring *pring;
12004 	struct lpfc_dmabuf *buf_ptr;
12005 	unsigned long flags = 0;
12006 	int i;
12007 
12008 	/* Shutdown the mailbox command sub-system */
12009 	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
12010 
12011 	lpfc_hba_down_prep(phba);
12012 
12013 	/* Disable softirqs, including timers from obtaining phba->hbalock */
12014 	local_bh_disable();
12015 
12016 	lpfc_fabric_abort_hba(phba);
12017 
12018 	spin_lock_irqsave(&phba->hbalock, flags);
12019 
12020 	/*
12021 	 * Error everything on the txq since these iocbs
12022 	 * have not been given to the FW yet.
12023 	 */
12024 	if (phba->sli_rev != LPFC_SLI_REV4) {
12025 		for (i = 0; i < psli->num_rings; i++) {
12026 			pring = &psli->sli3_ring[i];
12027 			/* Only slow rings */
12028 			if (pring->ringno == LPFC_ELS_RING) {
12029 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
12030 				/* Set the lpfc data pending flag */
12031 				set_bit(LPFC_DATA_READY, &phba->data_flags);
12032 			}
12033 			list_splice_init(&pring->txq, &completions);
12034 		}
12035 	} else {
12036 		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12037 			pring = qp->pring;
12038 			if (!pring)
12039 				continue;
12040 			spin_lock(&pring->ring_lock);
12041 			list_splice_init(&pring->txq, &completions);
12042 			spin_unlock(&pring->ring_lock);
12043 			if (pring == phba->sli4_hba.els_wq->pring) {
12044 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
12045 				/* Set the lpfc data pending flag */
12046 				set_bit(LPFC_DATA_READY, &phba->data_flags);
12047 			}
12048 		}
12049 	}
12050 	spin_unlock_irqrestore(&phba->hbalock, flags);
12051 
12052 	/* Cancel all the IOCBs from the completions list */
12053 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12054 			      IOERR_SLI_DOWN);
12055 
12056 	spin_lock_irqsave(&phba->hbalock, flags);
12057 	list_splice_init(&phba->elsbuf, &completions);
12058 	phba->elsbuf_cnt = 0;
12059 	phba->elsbuf_prev_cnt = 0;
12060 	spin_unlock_irqrestore(&phba->hbalock, flags);
12061 
12062 	while (!list_empty(&completions)) {
12063 		list_remove_head(&completions, buf_ptr,
12064 			struct lpfc_dmabuf, list);
12065 		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
12066 		kfree(buf_ptr);
12067 	}
12068 
12069 	/* Enable softirqs again, done with phba->hbalock */
12070 	local_bh_enable();
12071 
12072 	/* Return any active mbox cmds */
12073 	del_timer_sync(&psli->mbox_tmo);
12074 
12075 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
12076 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12077 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
12078 
12079 	return 1;
12080 }
12081 
12082 /**
12083  * lpfc_sli_pcimem_bcopy - SLI memory copy function
12084  * @srcp: Source memory pointer.
12085  * @destp: Destination memory pointer.
12086  * @cnt: Number of bytes to copy (copied one 32-bit word at a time).
12087  *
12088  * This function is used for copying data between driver memory
12089  * and the SLI memory. This function also changes the endianness
12090  * of each word if native endianness is different from SLI
12091  * endianness. This function can be called with or without
12092  * a lock held.
12093  **/
12094 void
12095 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
12096 {
12097 	uint32_t *src = srcp;
12098 	uint32_t *dest = destp;
12099 	uint32_t ldata;
12100 	int i;
12101 
12102 	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
12103 		ldata = *src;
12104 		ldata = le32_to_cpu(ldata);
12105 		*dest = ldata;
12106 		src++;
12107 		dest++;
12108 	}
12109 }
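
/*
 * Example (illustrative sketch, not part of the driver): copying a 64-byte
 * little-endian SLI region into a host-order buffer.  "slim_region" is a
 * stand-in for a pointer into SLI memory; note that the count is in bytes
 * and should be a multiple of sizeof(uint32_t):
 *
 *	uint32_t host_copy[16];
 *
 *	lpfc_sli_pcimem_bcopy(slim_region, host_copy, sizeof(host_copy));
 */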
12110 
12111 
12112 /**
12113  * lpfc_sli_bemem_bcopy - SLI memory copy function
12114  * @srcp: Source memory pointer.
12115  * @destp: Destination memory pointer.
12116  * @cnt: Number of bytes to copy (copied one 32-bit word at a time).
12117  *
12118  * This function is used for copying data from a data structure
12119  * with big-endian representation to local CPU endianness.
12120  * This function can be called with or without a lock held.
12121  **/
12122 void
12123 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
12124 {
12125 	uint32_t *src = srcp;
12126 	uint32_t *dest = destp;
12127 	uint32_t ldata;
12128 	int i;
12129 
12130 	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12131 		ldata = *src;
12132 		ldata = be32_to_cpu(ldata);
12133 		*dest = ldata;
12134 		src++;
12135 		dest++;
12136 	}
12137 }
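
/*
 * Example (illustrative sketch, not part of the driver): converting a
 * big-endian object read from the port into CPU endianness ("be_obj",
 * "host_buf" and "obj_len" are stand-ins); the length is again in bytes:
 *
 *	lpfc_sli_bemem_bcopy(be_obj, host_buf, obj_len);
 */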
12138 
12139 /**
12140  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
12141  * @phba: Pointer to HBA context object.
12142  * @pring: Pointer to driver SLI ring object.
12143  * @mp: Pointer to driver buffer object.
12144  *
12145  * This function is called with no lock held.
12146  * It always returns zero after adding the buffer to the postbufq
12147  * buffer list.
12148  **/
12149 int
12150 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12151 			 struct lpfc_dmabuf *mp)
12152 {
12153 	/* Stick struct lpfc_dmabuf at the end of postbufq so the
12154 	 * driver can look it up later. */
12155 	spin_lock_irq(&phba->hbalock);
12156 	list_add_tail(&mp->list, &pring->postbufq);
12157 	pring->postbufq_cnt++;
12158 	spin_unlock_irq(&phba->hbalock);
12159 	return 0;
12160 }
12161 
12162 /**
12163  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
12164  * @phba: Pointer to HBA context object.
12165  *
12166  * When HBQ is enabled, buffers are searched based on tags. This function
12167  * allocates a tag for buffer posted using CMD_QUE_XRI64_CX iocb. The
12168  * tag is bit wise or-ed with QUE_BUFTAG_BIT to make sure that the tag
12169  * does not conflict with tags of buffer posted for unsolicited events.
12170  * The function returns the allocated tag. The function is called with
12171  * no locks held.
12172  **/
12173 uint32_t
12174 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
12175 {
12176 	spin_lock_irq(&phba->hbalock);
12177 	phba->buffer_tag_count++;
12178 	/*
12179 	 * Always set the QUE_BUFTAG_BIT to distinguish this tag from
12180 	 * tags assigned by the HBQ.
12181 	 */
12182 	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
12183 	spin_unlock_irq(&phba->hbalock);
12184 	return phba->buffer_tag_count;
12185 }
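
/*
 * Example (illustrative sketch, not part of the driver): tagging a DMA
 * buffer before posting it with a CMD_QUE_XRI64_CX iocb, so it can later
 * be looked up with lpfc_sli_ring_taggedbuf_get().  "mp" is a stand-in
 * for a previously allocated struct lpfc_dmabuf:
 *
 *	mp->buffer_tag = lpfc_sli_get_buffer_tag(phba);
 *
 * The tag then rides in the CMD_QUE_XRI64_CX iocb that posts the buffer.
 */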
12186 
12187 /**
12188  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
12189  * @phba: Pointer to HBA context object.
12190  * @pring: Pointer to driver SLI ring object.
12191  * @tag: Buffer tag.
12192  *
12193  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
12194  * list. After the HBA DMAs data into these buffers, a CMD_IOCB_RET_XRI64_CX
12195  * iocb is posted to the response ring with the tag of the buffer.
12196  * This function searches the pring->postbufq list using the tag
12197  * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
12198  * iocb. If the buffer is found, the lpfc_dmabuf object of the
12199  * buffer is returned to the caller; otherwise NULL is returned.
12200  * This function is called with no lock held.
12201  **/
12202 struct lpfc_dmabuf *
12203 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12204 			uint32_t tag)
12205 {
12206 	struct lpfc_dmabuf *mp, *next_mp;
12207 	struct list_head *slp = &pring->postbufq;
12208 
12209 	/* Search postbufq, from the beginning, looking for a match on tag */
12210 	spin_lock_irq(&phba->hbalock);
12211 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12212 		if (mp->buffer_tag == tag) {
12213 			list_del_init(&mp->list);
12214 			pring->postbufq_cnt--;
12215 			spin_unlock_irq(&phba->hbalock);
12216 			return mp;
12217 		}
12218 	}
12219 
12220 	spin_unlock_irq(&phba->hbalock);
12221 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12222 			"0402 Cannot find virtual addr for buffer tag on "
12223 			"ring %d Data x%lx x%px x%px x%x\n",
12224 			pring->ringno, (unsigned long) tag,
12225 			slp->next, slp->prev, pring->postbufq_cnt);
12226 
12227 	return NULL;
12228 }
12229 
12230 /**
12231  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
12232  * @phba: Pointer to HBA context object.
12233  * @pring: Pointer to driver SLI ring object.
12234  * @phys: DMA address of the buffer.
12235  *
12236  * This function searches the buffer list using the dma_address
12237  * of the unsolicited event to find the driver's lpfc_dmabuf object
12238  * corresponding to the dma_address. The function returns the
12239  * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
12240  * This function is called by the ct and els unsolicited event
12241  * handlers to get the buffer associated with the unsolicited
12242  * event.
12243  *
12244  * This function is called with no lock held.
12245  **/
12246 struct lpfc_dmabuf *
12247 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12248 			 dma_addr_t phys)
12249 {
12250 	struct lpfc_dmabuf *mp, *next_mp;
12251 	struct list_head *slp = &pring->postbufq;
12252 
12253 	/* Search postbufq, from the beginning, looking for a match on phys */
12254 	spin_lock_irq(&phba->hbalock);
12255 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12256 		if (mp->phys == phys) {
12257 			list_del_init(&mp->list);
12258 			pring->postbufq_cnt--;
12259 			spin_unlock_irq(&phba->hbalock);
12260 			return mp;
12261 		}
12262 	}
12263 
12264 	spin_unlock_irq(&phba->hbalock);
12265 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12266 			"0410 Cannot find virtual addr for mapped buf on "
12267 			"ring %d Data x%llx x%px x%px x%x\n",
12268 			pring->ringno, (unsigned long long)phys,
12269 			slp->next, slp->prev, pring->postbufq_cnt);
12270 	return NULL;
12271 }
12272 
12273 /**
12274  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
12275  * @phba: Pointer to HBA context object.
12276  * @cmdiocb: Pointer to driver command iocb object.
12277  * @rspiocb: Pointer to driver response iocb object.
12278  *
12279  * This function is the completion handler for the abort iocbs for
12280  * ELS commands. This function is called from the ELS ring event
12281  * handler with no lock held. This function frees memory resources
12282  * associated with the abort iocb.
12283  **/
12284 static void
12285 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12286 			struct lpfc_iocbq *rspiocb)
12287 {
12288 	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12289 	u32 ulp_word4 = get_job_word4(phba, rspiocb);
12290 	u8 cmnd = get_job_cmnd(phba, cmdiocb);
12291 
12292 	if (ulp_status) {
12293 		/*
12294 		 * Assume that the port already completed and returned, or
12295 		 * will return the iocb. Just log the message.
12296 		 */
12297 		if (phba->sli_rev < LPFC_SLI_REV4) {
12298 			if (cmnd == CMD_ABORT_XRI_CX &&
12299 			    ulp_status == IOSTAT_LOCAL_REJECT &&
12300 			    ulp_word4 == IOERR_ABORT_REQUESTED) {
12301 				goto release_iocb;
12302 			}
12303 		}
12304 
12305 		lpfc_printf_log(phba, KERN_WARNING, LOG_ELS | LOG_SLI,
12306 				"0327 Cannot abort els iocb x%px "
12307 				"with io cmd xri %x abort tag : x%x, "
12308 				"abort status %x abort code %x\n",
12309 				cmdiocb, get_job_abtsiotag(phba, cmdiocb),
12310 				(phba->sli_rev == LPFC_SLI_REV4) ?
12311 				get_wqe_reqtag(cmdiocb) :
12312 				cmdiocb->iocb.un.acxri.abortContextTag,
12313 				ulp_status, ulp_word4);
12314 
12315 	}
12316 release_iocb:
12317 	lpfc_sli_release_iocbq(phba, cmdiocb);
12318 	return;
12319 }
12320 
12321 /**
12322  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12323  * @phba: Pointer to HBA context object.
12324  * @cmdiocb: Pointer to driver command iocb object.
12325  * @rspiocb: Pointer to driver response iocb object.
12326  *
12327  * The function is called from SLI ring event handler with no
12328  * lock held. This function is the completion handler for ELS commands
12329  * which are aborted. The function frees memory resources used for
12330  * the aborted ELS commands.
12331  **/
12332 void
12333 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12334 		     struct lpfc_iocbq *rspiocb)
12335 {
12336 	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12337 	IOCB_t *irsp;
12338 	LPFC_MBOXQ_t *mbox;
12339 	u32 ulp_command, ulp_status, ulp_word4, iotag;
12340 
12341 	ulp_command = get_job_cmnd(phba, cmdiocb);
12342 	ulp_status = get_job_ulpstatus(phba, rspiocb);
12343 	ulp_word4 = get_job_word4(phba, rspiocb);
12344 
12345 	if (phba->sli_rev == LPFC_SLI_REV4) {
12346 		iotag = get_wqe_reqtag(cmdiocb);
12347 	} else {
12348 		irsp = &rspiocb->iocb;
12349 		iotag = irsp->ulpIoTag;
12350 
12351 		/* It is possible for a PLOGI_RJT on NPIV ports to get aborted.
12352 		 * The MBX_REG_LOGIN64 mbox command is freed back to the
12353 		 * mbox_mem_pool here.
12354 		 */
12355 		if (cmdiocb->context_un.mbox) {
12356 			mbox = cmdiocb->context_un.mbox;
12357 			lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
12358 			cmdiocb->context_un.mbox = NULL;
12359 		}
12360 	}
12361 
12362 	/* ELS cmd tag <ulpIoTag> completes */
12363 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12364 			"0139 Ignoring ELS cmd code x%x ref cnt x%x Data: "
12365 			"x%x x%x x%x x%px\n",
12366 			ulp_command, kref_read(&cmdiocb->ndlp->kref),
12367 			ulp_status, ulp_word4, iotag, cmdiocb->ndlp);
12368 	/*
12369 	 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12370 	 * if exchange is busy.
12371 	 */
12372 	if (ulp_command == CMD_GEN_REQUEST64_CR)
12373 		lpfc_ct_free_iocb(phba, cmdiocb);
12374 	else
12375 		lpfc_els_free_iocb(phba, cmdiocb);
12376 
12377 	lpfc_nlp_put(ndlp);
12378 }
12379 
12380 /**
12381  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12382  * @phba: Pointer to HBA context object.
12383  * @pring: Pointer to driver SLI ring object.
12384  * @cmdiocb: Pointer to driver command iocb object.
12385  * @cmpl: completion function.
12386  *
12387  * This function issues an abort iocb for the provided command iocb. In case
12388  * of unloading, the abort iocb will not be issued to commands on the ELS
12389  * ring. Instead, the completion callback of those commands is changed so
12390  * that nothing happens when they finish. This function is called with the
12391  * hbalock held and no ring_lock held (SLI4). The function returns
12392  * IOCB_ABORTING when the command iocb is itself an abort or is already
12393  * being aborted.
12393  *
12394  **/
12395 int
12396 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12397 			   struct lpfc_iocbq *cmdiocb, void *cmpl)
12398 {
12399 	struct lpfc_vport *vport = cmdiocb->vport;
12400 	struct lpfc_iocbq *abtsiocbp;
12401 	int retval = IOCB_ERROR;
12402 	unsigned long iflags;
12403 	struct lpfc_nodelist *ndlp = NULL;
12404 	u32 ulp_command = get_job_cmnd(phba, cmdiocb);
12405 	u16 ulp_context, iotag;
12406 	bool ia;
12407 
12408 	/*
12409 	 * There are certain command types we don't want to abort.  And we
12410 	 * don't want to abort commands that are already in the process of
12411 	 * being aborted.
12412 	 */
12413 	if (ulp_command == CMD_ABORT_XRI_WQE ||
12414 	    ulp_command == CMD_ABORT_XRI_CN ||
12415 	    ulp_command == CMD_CLOSE_XRI_CN ||
12416 	    cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12417 		return IOCB_ABORTING;
12418 
12419 	if (!pring) {
12420 		if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12421 			cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12422 		else
12423 			cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12424 		return retval;
12425 	}
12426 
12427 	/*
12428 	 * If we're unloading, don't abort iocb on the ELS ring, but change
12429 	 * the callback so that nothing happens when it finishes.
12430 	 */
12431 	if (test_bit(FC_UNLOADING, &vport->load_flag) &&
12432 	    pring->ringno == LPFC_ELS_RING) {
12433 		if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12434 			cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12435 		else
12436 			cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12437 		return retval;
12438 	}
12439 
12440 	/* issue ABTS for this IOCB based on iotag */
12441 	abtsiocbp = __lpfc_sli_get_iocbq(phba);
12442 	if (abtsiocbp == NULL)
12443 		return IOCB_NORESOURCE;
12444 
12445 	/* This signals the response to set the correct status
12446 	 * before calling the completion handler
12447 	 */
12448 	cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12449 
12450 	if (phba->sli_rev == LPFC_SLI_REV4) {
12451 		ulp_context = cmdiocb->sli4_xritag;
12452 		iotag = abtsiocbp->iotag;
12453 	} else {
12454 		iotag = cmdiocb->iocb.ulpIoTag;
12455 		if (pring->ringno == LPFC_ELS_RING) {
12456 			ndlp = cmdiocb->ndlp;
12457 			ulp_context = ndlp->nlp_rpi;
12458 		} else {
12459 			ulp_context = cmdiocb->iocb.ulpContext;
12460 		}
12461 	}
12462 
12463 	/* Just close the exchange under certain conditions. */
12464 	if (test_bit(FC_UNLOADING, &vport->load_flag) ||
12465 	    phba->link_state < LPFC_LINK_UP ||
12466 	    (phba->sli_rev == LPFC_SLI_REV4 &&
12467 	     phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
12468 	    (phba->link_flag & LS_EXTERNAL_LOOPBACK))
12469 		ia = true;
12470 	else
12471 		ia = false;
12472 
12473 	lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
12474 				cmdiocb->iocb.ulpClass,
12475 				LPFC_WQE_CQ_ID_DEFAULT, ia, false);
12476 
12477 	abtsiocbp->vport = vport;
12478 
12479 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
12480 	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12481 	if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12482 		abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12483 
12484 	if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12485 		abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12486 
12487 	if (cmpl)
12488 		abtsiocbp->cmd_cmpl = cmpl;
12489 	else
12490 		abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12492 
12493 	if (phba->sli_rev == LPFC_SLI_REV4) {
12494 		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12495 		if (unlikely(pring == NULL))
12496 			goto abort_iotag_exit;
12497 		/* Note: both hbalock and ring_lock need to be set here */
12498 		spin_lock_irqsave(&pring->ring_lock, iflags);
12499 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12500 			abtsiocbp, 0);
12501 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
12502 	} else {
12503 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12504 			abtsiocbp, 0);
12505 	}
12506 
12507 abort_iotag_exit:
12508 
12509 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12510 			 "0339 Abort IO XRI x%x, Original iotag x%x, "
12511 			 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
12512 			 "retval x%x : IA %d\n",
12513 			 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
12514 			 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
12515 			 retval, ia);
12516 	if (retval) {
12517 		cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12518 		__lpfc_sli_release_iocbq(phba, abtsiocbp);
12519 	}
12520 
12521 	/*
12522 	 * Caller to this routine should check for IOCB_ERROR
12523 	 * and handle it properly.  This routine no longer removes
12524 	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
12525 	 */
12526 	return retval;
12527 }
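
/*
 * Example (illustrative sketch, not part of the driver): aborting an
 * outstanding iocb from a context that already holds the hbalock, using
 * the default lpfc_sli_abort_els_cmpl completion by passing a NULL cmpl:
 *
 *	retval = lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
 *
 * The caller must check retval itself; on error this routine neither
 * removes the iocb from the txcmplq nor calls its completion handler.
 */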
12528 
12529 /**
12530  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12531  * @phba: pointer to lpfc HBA data structure.
12532  *
12533  * This routine will abort all pending and outstanding iocbs to an HBA.
12534  **/
12535 void
12536 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12537 {
12538 	struct lpfc_sli *psli = &phba->sli;
12539 	struct lpfc_sli_ring *pring;
12540 	struct lpfc_queue *qp = NULL;
12541 	int i;
12542 
12543 	if (phba->sli_rev != LPFC_SLI_REV4) {
12544 		for (i = 0; i < psli->num_rings; i++) {
12545 			pring = &psli->sli3_ring[i];
12546 			lpfc_sli_abort_iocb_ring(phba, pring);
12547 		}
12548 		return;
12549 	}
12550 	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12551 		pring = qp->pring;
12552 		if (!pring)
12553 			continue;
12554 		lpfc_sli_abort_iocb_ring(phba, pring);
12555 	}
12556 }
12557 
12558 /**
12559  * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12560  * @iocbq: Pointer to iocb object.
12561  * @vport: Pointer to driver virtual port object.
12562  *
12563  * This function acts as an iocb filter for functions which abort FCP iocbs.
12564  *
12565  * Return values
12566  * -ENODEV, if a null iocb or vport ptr is encountered
12567  * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
12568  *          already marked as aborted by the driver, or is an abort iocb itself
12569  * 0, passes criteria for aborting the FCP I/O iocb
12570  **/
12571 static int
12572 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12573 				     struct lpfc_vport *vport)
12574 {
12575 	u8 ulp_command;
12576 
12577 	/* No null ptr vports */
12578 	if (!iocbq || iocbq->vport != vport)
12579 		return -ENODEV;
12580 
12581 	/* iocb must be for FCP IO, already exists on the TX cmpl queue,
12582 	 * can't be premarked as driver aborted, nor be an ABORT iocb itself
12583 	 */
12584 	ulp_command = get_job_cmnd(vport->phba, iocbq);
12585 	if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12586 	    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12587 	    (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12588 	    (ulp_command == CMD_ABORT_XRI_CN ||
12589 	     ulp_command == CMD_CLOSE_XRI_CN ||
12590 	     ulp_command == CMD_ABORT_XRI_WQE))
12591 		return -EINVAL;
12592 
12593 	return 0;
12594 }
12595 
12596 /**
12597  * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12598  * @iocbq: Pointer to driver iocb object.
12599  * @vport: Pointer to driver virtual port object.
12600  * @tgt_id: SCSI ID of the target.
12601  * @lun_id: LUN ID of the scsi device.
12602  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12603  *
12604  * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12605  * host.
12606  *
12607  * It will return
12608  * 0 if the filtering criteria are met for the given iocb and will return
12609  * 1 if the filtering criteria are not met.
12610  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12611  * given iocb is for the SCSI device specified by vport, tgt_id and
12612  * lun_id parameter.
12613  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
12614  * given iocb is for the SCSI target specified by vport and tgt_id
12615  * parameters.
12616  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12617  * given iocb is for the SCSI host associated with the given vport.
12618  * This function is called with no locks held.
12619  **/
12620 static int
12621 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12622 			   uint16_t tgt_id, uint64_t lun_id,
12623 			   lpfc_ctx_cmd ctx_cmd)
12624 {
12625 	struct lpfc_io_buf *lpfc_cmd;
12626 	int rc = 1;
12627 
12628 	lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12629 
12630 	if (lpfc_cmd->pCmd == NULL)
12631 		return rc;
12632 
12633 	switch (ctx_cmd) {
12634 	case LPFC_CTX_LUN:
12635 		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12636 		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12637 		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12638 			rc = 0;
12639 		break;
12640 	case LPFC_CTX_TGT:
12641 		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12642 		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12643 			rc = 0;
12644 		break;
12645 	case LPFC_CTX_HOST:
12646 		rc = 0;
12647 		break;
12648 	default:
12649 		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12650 			__func__, ctx_cmd);
12651 		break;
12652 	}
12653 
12654 	return rc;
12655 }
12656 
12657 /**
12658  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12659  * @vport: Pointer to virtual port.
12660  * @tgt_id: SCSI ID of the target.
12661  * @lun_id: LUN ID of the scsi device.
12662  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12663  *
12664  * This function returns the number of FCP commands pending for the vport.
12665  * When ctx_cmd == LPFC_CTX_LUN, the function returns the number of FCP
12666  * commands pending on the vport for the SCSI device specified by the
12667  * tgt_id and lun_id parameters.
12668  * When ctx_cmd == LPFC_CTX_TGT, the function returns the number of FCP
12669  * commands pending on the vport for the SCSI target specified by the
12670  * tgt_id parameter.
12671  * When ctx_cmd == LPFC_CTX_HOST, the function returns the number of FCP
12672  * commands pending on the vport.
12673  * This function returns the number of iocbs which satisfy the filter.
12674  * This function is called without any lock held.
12675  **/
12676 int
12677 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12678 		  lpfc_ctx_cmd ctx_cmd)
12679 {
12680 	struct lpfc_hba *phba = vport->phba;
12681 	struct lpfc_iocbq *iocbq;
12682 	int sum, i;
12683 	unsigned long iflags;
12684 	u8 ulp_command;
12685 
12686 	spin_lock_irqsave(&phba->hbalock, iflags);
12687 	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12688 		iocbq = phba->sli.iocbq_lookup[i];
12689 
12690 		if (!iocbq || iocbq->vport != vport)
12691 			continue;
12692 		if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12693 		    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12694 			continue;
12695 
12696 		/* Include counting outstanding aborts */
12697 		ulp_command = get_job_cmnd(phba, iocbq);
12698 		if (ulp_command == CMD_ABORT_XRI_CN ||
12699 		    ulp_command == CMD_CLOSE_XRI_CN ||
12700 		    ulp_command == CMD_ABORT_XRI_WQE) {
12701 			sum++;
12702 			continue;
12703 		}
12704 
12705 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12706 					       ctx_cmd) == 0)
12707 			sum++;
12708 	}
12709 	spin_unlock_irqrestore(&phba->hbalock, iflags);
12710 
12711 	return sum;
12712 }
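
/*
 * Example (illustrative sketch, not part of the driver): polling the
 * outstanding FCP I/O count for one SCSI target, as an error handler
 * might do after issuing a target reset.  lun_id is not consulted for
 * LPFC_CTX_TGT, so 0 is passed as a placeholder:
 *
 *	cnt = lpfc_sli_sum_iocb(vport, tgt_id, 0, LPFC_CTX_TGT);
 */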
12713 
12714 /**
12715  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12716  * @phba: Pointer to HBA context object
12717  * @cmdiocb: Pointer to command iocb object.
12718  * @rspiocb: Pointer to response iocb object.
12719  *
12720  * This function is called when an aborted FCP iocb completes. This
12721  * function is called by the ring event handler with no lock held.
12722  * This function frees the iocb.
12723  **/
12724 void
12725 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12726 			struct lpfc_iocbq *rspiocb)
12727 {
12728 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12729 			"3096 ABORT_XRI_CX completing on rpi x%x "
12730 			"original iotag x%x, abort cmd iotag x%x "
12731 			"status 0x%x, reason 0x%x\n",
12732 			(phba->sli_rev == LPFC_SLI_REV4) ?
12733 			cmdiocb->sli4_xritag :
12734 			cmdiocb->iocb.un.acxri.abortContextTag,
12735 			get_job_abtsiotag(phba, cmdiocb),
12736 			cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
12737 			get_job_word4(phba, rspiocb));
12738 	lpfc_sli_release_iocbq(phba, cmdiocb);
12739 	return;
12740 }
12741 
12742 /**
12743  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12744  * @vport: Pointer to virtual port.
12745  * @tgt_id: SCSI ID of the target.
12746  * @lun_id: LUN ID of the scsi device.
12747  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12748  *
12749  * This function sends an abort command for every SCSI command
12750  * associated with the given virtual port pending on the ring that
12751  * passes the lpfc_sli_validate_fcp_iocb_for_abort filter and then
12752  * the lpfc_sli_validate_fcp_iocb filter; validation must be done
12753  * in that order before an abort iocb is submitted.
12755  *
12756  * When abort_cmd == LPFC_CTX_LUN, the function sends an abort only to
12757  * the FCP iocbs associated with the LUN specified by the tgt_id and
12758  * lun_id parameters.
12759  * When abort_cmd == LPFC_CTX_TGT, the function sends an abort only to the
12760  * FCP iocbs associated with the SCSI target specified by the tgt_id
12761  * parameter.
12762  * When abort_cmd == LPFC_CTX_HOST, the function sends an abort to all
12763  * FCP iocbs associated with the virtual port.
12764  * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING]; for SLI4,
12765  * lpfc_sli4_calc_ring is used.
12766  * This function returns the number of iocbs it failed to abort.
12766  * This function is called with no locks held.
12767  **/
12768 int
12769 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12770 		    lpfc_ctx_cmd abort_cmd)
12771 {
12772 	struct lpfc_hba *phba = vport->phba;
12773 	struct lpfc_sli_ring *pring = NULL;
12774 	struct lpfc_iocbq *iocbq;
12775 	int errcnt = 0, ret_val = 0;
12776 	unsigned long iflags;
12777 	int i;
12778 
12779 	/* all I/Os are in process of being flushed */
12780 	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
12781 		return errcnt;
12782 
12783 	for (i = 1; i <= phba->sli.last_iotag; i++) {
12784 		iocbq = phba->sli.iocbq_lookup[i];
12785 
12786 		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12787 			continue;
12788 
12789 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12790 					       abort_cmd) != 0)
12791 			continue;
12792 
12793 		spin_lock_irqsave(&phba->hbalock, iflags);
12794 		if (phba->sli_rev == LPFC_SLI_REV3) {
12795 			pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12796 		} else if (phba->sli_rev == LPFC_SLI_REV4) {
12797 			pring = lpfc_sli4_calc_ring(phba, iocbq);
12798 		}
12799 		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12800 						     lpfc_sli_abort_fcp_cmpl);
12801 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12802 		if (ret_val != IOCB_SUCCESS)
12803 			errcnt++;
12804 	}
12805 
12806 	return errcnt;
12807 }
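
/*
 * Example (illustrative sketch, not part of the driver): a LUN-wide abort
 * sweep as an error handler might issue it; a nonzero return means that
 * many abort requests could not be queued:
 *
 *	errcnt = lpfc_sli_abort_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 */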
12808 
12809 /**
12810  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12811  * @vport: Pointer to virtual port.
12812  * @pring: Pointer to driver SLI ring object.
12813  * @tgt_id: SCSI ID of the target.
12814  * @lun_id: LUN ID of the scsi device.
12815  * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12816  *
12817  * This function sends an abort command for every SCSI command
12818  * associated with the given virtual port pending on the ring that
12819  * passes the lpfc_sli_validate_fcp_iocb_for_abort filter and then
12820  * the lpfc_sli_validate_fcp_iocb filter; validation must be done
12821  * in that order before an abort iocb is submitted.
12823  *
12824  * When cmd == LPFC_CTX_LUN, the function sends an abort only to the
12825  * FCP iocbs associated with the LUN specified by the tgt_id and lun_id
12826  * parameters.
12827  * When cmd == LPFC_CTX_TGT, the function sends an abort only to the
12828  * FCP iocbs associated with the SCSI target specified by the tgt_id
12829  * parameter.
12830  * When cmd == LPFC_CTX_HOST, the function sends an abort to all
12831  * FCP iocbs associated with the virtual port.
12832  * This function returns the number of iocbs it aborted.
12833  * This function is called with no locks held, right after a task
12834  * management command is sent.
12834  **/
12835 int
12836 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12837 			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12838 {
12839 	struct lpfc_hba *phba = vport->phba;
12840 	struct lpfc_io_buf *lpfc_cmd;
12841 	struct lpfc_iocbq *abtsiocbq;
12842 	struct lpfc_nodelist *ndlp = NULL;
12843 	struct lpfc_iocbq *iocbq;
12844 	int sum, i, ret_val;
12845 	unsigned long iflags;
12846 	struct lpfc_sli_ring *pring_s4 = NULL;
12847 	u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
12848 	bool ia;
12849 
12850 	/* all I/Os are in process of being flushed */
12851 	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
12852 		return 0;
12853 
12854 	sum = 0;
12855 
12856 	spin_lock_irqsave(&phba->hbalock, iflags);
12857 	for (i = 1; i <= phba->sli.last_iotag; i++) {
12858 		iocbq = phba->sli.iocbq_lookup[i];
12859 
12860 		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12861 			continue;
12862 
12863 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12864 					       cmd) != 0)
12865 			continue;
12866 
12867 		/* Guard against IO completion being called at the same time */
12868 		lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12869 		spin_lock(&lpfc_cmd->buf_lock);
12870 
12871 		if (!lpfc_cmd->pCmd) {
12872 			spin_unlock(&lpfc_cmd->buf_lock);
12873 			continue;
12874 		}
12875 
12876 		if (phba->sli_rev == LPFC_SLI_REV4) {
12877 			pring_s4 =
12878 			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12879 			if (!pring_s4) {
12880 				spin_unlock(&lpfc_cmd->buf_lock);
12881 				continue;
12882 			}
12883 			/* Note: both hbalock and ring_lock must be set here */
12884 			spin_lock(&pring_s4->ring_lock);
12885 		}
12886 
12887 		/*
12888 		 * If the iocbq is already being aborted, don't take a
12889 		 * second action; just skip it.
12890 		 */
12891 		if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12892 		    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12893 			if (phba->sli_rev == LPFC_SLI_REV4)
12894 				spin_unlock(&pring_s4->ring_lock);
12895 			spin_unlock(&lpfc_cmd->buf_lock);
12896 			continue;
12897 		}
12898 
12899 		/* issue ABTS for this IOCB based on iotag */
12900 		abtsiocbq = __lpfc_sli_get_iocbq(phba);
12901 		if (!abtsiocbq) {
12902 			if (phba->sli_rev == LPFC_SLI_REV4)
12903 				spin_unlock(&pring_s4->ring_lock);
12904 			spin_unlock(&lpfc_cmd->buf_lock);
12905 			continue;
12906 		}
12907 
12908 		if (phba->sli_rev == LPFC_SLI_REV4) {
12909 			iotag = abtsiocbq->iotag;
12910 			ulp_context = iocbq->sli4_xritag;
12911 			cqid = lpfc_cmd->hdwq->io_cq_map;
12912 		} else {
12913 			iotag = iocbq->iocb.ulpIoTag;
12914 			if (pring->ringno == LPFC_ELS_RING) {
12915 				ndlp = iocbq->ndlp;
12916 				ulp_context = ndlp->nlp_rpi;
12917 			} else {
12918 				ulp_context = iocbq->iocb.ulpContext;
12919 			}
12920 		}
12921 
12922 		ndlp = lpfc_cmd->rdata->pnode;
12923 
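		/*
		 * ia selects a local exchange cleanup instead of sending an
		 * ABTS on the wire; an on-wire ABTS only makes sense while
		 * the link is up, the node is mapped, and the port is not
		 * in external loopback.
		 */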
12924 		if (lpfc_is_link_up(phba) &&
12925 		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
12926 		    !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
12927 			ia = false;
12928 		else
12929 			ia = true;
12930 
12931 		lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
12932 					iocbq->iocb.ulpClass, cqid,
12933 					ia, false);
12934 
12935 		abtsiocbq->vport = vport;
12936 
12937 		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
12938 		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12939 		if (iocbq->cmd_flag & LPFC_IO_FCP)
12940 			abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12941 		if (iocbq->cmd_flag & LPFC_IO_FOF)
12942 			abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12943 
12944 		/* Setup callback routine and issue the command. */
12945 		abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12946 
		/*
		 * Mark the IO as being aborted by the driver before
		 * issuing the abort.
		 */
12951 		iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12952 
12953 		if (phba->sli_rev == LPFC_SLI_REV4) {
12954 			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12955 							abtsiocbq, 0);
12956 			spin_unlock(&pring_s4->ring_lock);
12957 		} else {
12958 			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12959 							abtsiocbq, 0);
12960 		}
12961 
12962 		spin_unlock(&lpfc_cmd->buf_lock);
12963 
12964 		if (ret_val == IOCB_ERROR)
12965 			__lpfc_sli_release_iocbq(phba, abtsiocbq);
12966 		else
12967 			sum++;
12968 	}
12969 	spin_unlock_irqrestore(&phba->hbalock, iflags);
12970 	return sum;
12971 }
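
/*
 * Illustrative usage sketch (not driver code): a task-management path
 * would typically send the TMF first and then abort every outstanding
 * I/O in the affected scope, for example for a LUN reset:
 *
 *	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
 *	aborted = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
 *					  LPFC_CTX_LUN);
 *
 * The return value only counts aborts that were successfully issued;
 * each abort still completes asynchronously via lpfc_sli_abort_fcp_cmpl.
 */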
12972 
12973 /**
12974  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
12975  * @phba: Pointer to HBA context object.
12976  * @cmdiocbq: Pointer to command iocb.
12977  * @rspiocbq: Pointer to response iocb.
12978  *
 * This function is the completion handler for iocbs issued using
 * the lpfc_sli_issue_iocb_wait function. It is called by the
 * ring event handler function without any lock held. It can be
 * called from both worker thread context and interrupt context,
 * and also from any other thread that cleans up SLI layer objects.
 * This function copies the contents of the response iocb to the
 * response iocb memory object provided by the caller of
 * lpfc_sli_issue_iocb_wait and then wakes up the thread that
 * sleeps waiting for the iocb completion.
12989  **/
12990 static void
12991 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
12992 			struct lpfc_iocbq *cmdiocbq,
12993 			struct lpfc_iocbq *rspiocbq)
12994 {
12995 	wait_queue_head_t *pdone_q;
12996 	unsigned long iflags;
12997 	struct lpfc_io_buf *lpfc_cmd;
12998 	size_t offset = offsetof(struct lpfc_iocbq, wqe);
12999 
13000 	spin_lock_irqsave(&phba->hbalock, iflags);
13001 	if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
13002 
13003 		/*
13004 		 * A time out has occurred for the iocb.  If a time out
13005 		 * completion handler has been supplied, call it.  Otherwise,
13006 		 * just free the iocbq.
13007 		 */
13008 
13009 		spin_unlock_irqrestore(&phba->hbalock, iflags);
13010 		cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
13011 		cmdiocbq->wait_cmd_cmpl = NULL;
13012 		if (cmdiocbq->cmd_cmpl)
13013 			cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
13014 		else
13015 			lpfc_sli_release_iocbq(phba, cmdiocbq);
13016 		return;
13017 	}
13018 
	/*
	 * Copy the contents of the local rspiocb into the caller's buffer,
	 * starting at the wqe member so that the list linkage and driver
	 * bookkeeping fields ahead of it in the caller's iocbq are
	 * preserved.
	 */
13020 	cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
13021 	if (cmdiocbq->rsp_iocb && rspiocbq)
13022 		memcpy((char *)cmdiocbq->rsp_iocb + offset,
13023 		       (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
13024 
13025 	/* Set the exchange busy flag for task management commands */
13026 	if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
13027 	    !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
13028 		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
13029 					cur_iocbq);
13030 		if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
13031 			lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
13032 		else
13033 			lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
13034 	}
13035 
13036 	pdone_q = cmdiocbq->context_un.wait_queue;
13037 	if (pdone_q)
13038 		wake_up(pdone_q);
13039 	spin_unlock_irqrestore(&phba->hbalock, iflags);
13040 	return;
13041 }
13042 
13043 /**
13044  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
 * @phba: Pointer to HBA context object.
13046  * @piocbq: Pointer to command iocb.
13047  * @flag: Flag to test.
13048  *
 * This routine grabs the hbalock and then tests the cmd_flag to
 * see if the passed-in flag is set.
13051  * Returns:
13052  * 1 if flag is set.
13053  * 0 if flag is not set.
13054  **/
13055 static int
13056 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
13057 		 struct lpfc_iocbq *piocbq, uint32_t flag)
13058 {
13059 	unsigned long iflags;
13060 	int ret;
13061 
13062 	spin_lock_irqsave(&phba->hbalock, iflags);
13063 	ret = piocbq->cmd_flag & flag;
13064 	spin_unlock_irqrestore(&phba->hbalock, iflags);
13065 	return ret;
13066 
13067 }
13068 
13069 /**
13070  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
 * @phba: Pointer to HBA context object.
13072  * @ring_number: Ring number
13073  * @piocb: Pointer to command iocb.
13074  * @prspiocbq: Pointer to response iocb.
13075  * @timeout: Timeout in number of seconds.
13076  *
 * This function issues the iocb to the firmware and waits for the
 * iocb to complete. The cmd_cmpl field of the iocb shall be used
 * to handle iocbs that time out. If the field is NULL, the
 * function shall free the iocbq structure.  If more clean-up is
 * needed, the caller is expected to provide a completion function
 * that performs the needed clean-up.  If the iocb command is
 * not completed within timeout seconds, the function will either
 * free the iocbq structure (if cmd_cmpl == NULL) or execute the
 * completion function set in the cmd_cmpl field and then return
 * a status of IOCB_TIMEDOUT.  The caller must not free the iocb
 * resources if this function returns IOCB_TIMEDOUT.
 * The function waits for the iocb completion using a
 * non-interruptible wait.
 * This function will sleep while waiting for the iocb completion,
 * so it must not be called from any context that does not allow
 * sleeping. For the same reason, it cannot be called with
 * interrupts disabled.
 * This function assumes that iocb completions occur while this
 * function sleeps, so it cannot be called from the thread that
 * processes iocb completions for this ring.
 * This function clears the cmd_flag of the iocb object before
 * issuing the iocb, and the iocb completion handler sets this
 * flag and wakes this thread when the iocb completes.
 * The contents of the response iocb will be copied to prspiocbq
 * by the completion handler when the command completes.
 * This function returns IOCB_SUCCESS on success.
 * It is called with no lock held.
13104  **/
13105 int
13106 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
13107 			 uint32_t ring_number,
13108 			 struct lpfc_iocbq *piocb,
13109 			 struct lpfc_iocbq *prspiocbq,
13110 			 uint32_t timeout)
13111 {
13112 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
13113 	long timeleft, timeout_req = 0;
13114 	int retval = IOCB_SUCCESS;
13115 	uint32_t creg_val;
13116 	struct lpfc_iocbq *iocb;
13117 	int txq_cnt = 0;
13118 	int txcmplq_cnt = 0;
13119 	struct lpfc_sli_ring *pring;
13120 	unsigned long iflags;
13121 	bool iocb_completed = true;
13122 
13123 	if (phba->sli_rev >= LPFC_SLI_REV4) {
13124 		lpfc_sli_prep_wqe(phba, piocb);
13125 
13126 		pring = lpfc_sli4_calc_ring(phba, piocb);
13127 	} else
13128 		pring = &phba->sli.sli3_ring[ring_number];
	/*
	 * If the caller has provided a response iocbq buffer, then the
	 * iocb's rsp_iocb must currently be NULL; otherwise it is an error.
	 */
13133 	if (prspiocbq) {
13134 		if (piocb->rsp_iocb)
13135 			return IOCB_ERROR;
13136 		piocb->rsp_iocb = prspiocbq;
13137 	}
13138 
13139 	piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
13140 	piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
13141 	piocb->context_un.wait_queue = &done_q;
13142 	piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
13143 
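	/*
	 * In polled-completion mode the FCP ring interrupt is normally
	 * masked; enable it for the duration of this synchronous wait so
	 * the completion can wake this thread, and restore it below.
	 */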
13144 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13145 		if (lpfc_readl(phba->HCregaddr, &creg_val))
13146 			return IOCB_ERROR;
13147 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
13148 		writel(creg_val, phba->HCregaddr);
13149 		readl(phba->HCregaddr); /* flush */
13150 	}
13151 
13152 	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
13153 				     SLI_IOCB_RET_IOCB);
13154 	if (retval == IOCB_SUCCESS) {
13155 		timeout_req = msecs_to_jiffies(timeout * 1000);
13156 		timeleft = wait_event_timeout(done_q,
13157 				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
13158 				timeout_req);
13159 		spin_lock_irqsave(&phba->hbalock, iflags);
13160 		if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
13161 
13162 			/*
13163 			 * IOCB timed out.  Inform the wake iocb wait
13164 			 * completion function and set local status
13165 			 */
13166 
13167 			iocb_completed = false;
13168 			piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
13169 		}
13170 		spin_unlock_irqrestore(&phba->hbalock, iflags);
13171 		if (iocb_completed) {
13172 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13173 					"0331 IOCB wake signaled\n");
			/* Note: we are not indicating whether the IOCB has a
			 * success status or not - that's for the caller to
			 * check. IOCB_SUCCESS means only that the command was
			 * sent and completed, not that it completed
			 * successfully.
			 */
13179 		} else if (timeleft == 0) {
13180 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13181 					"0338 IOCB wait timeout error - no "
13182 					"wake response Data x%x\n", timeout);
13183 			retval = IOCB_TIMEDOUT;
13184 		} else {
13185 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13186 					"0330 IOCB wake NOT set, "
13187 					"Data x%x x%lx\n",
					timeout, timeleft);
13189 			retval = IOCB_TIMEDOUT;
13190 		}
13191 	} else if (retval == IOCB_BUSY) {
13192 		if (phba->cfg_log_verbose & LOG_SLI) {
13193 			list_for_each_entry(iocb, &pring->txq, list) {
13194 				txq_cnt++;
13195 			}
13196 			list_for_each_entry(iocb, &pring->txcmplq, list) {
13197 				txcmplq_cnt++;
13198 			}
13199 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13200 				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
13201 				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
13202 		}
13203 		return retval;
13204 	} else {
13205 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13206 				"0332 IOCB wait issue failed, Data x%x\n",
13207 				retval);
13208 		retval = IOCB_ERROR;
13209 	}
13210 
13211 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13212 		if (lpfc_readl(phba->HCregaddr, &creg_val))
13213 			return IOCB_ERROR;
13214 		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
13215 		writel(creg_val, phba->HCregaddr);
13216 		readl(phba->HCregaddr); /* flush */
13217 	}
13218 
13219 	if (prspiocbq)
13220 		piocb->rsp_iocb = NULL;
13221 
13222 	piocb->context_un.wait_queue = NULL;
13223 	piocb->cmd_cmpl = NULL;
13224 	return retval;
13225 }
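
/*
 * Illustrative usage sketch (not driver code): a synchronous ELS-style
 * caller, assuming cmdiocb and rspiocb came from lpfc_sli_get_iocbq():
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocb,
 *				      rspiocb, timeout);
 *	if (rc == IOCB_SUCCESS)
 *		inspect rspiocb for the command's own completion status;
 *	else if (rc == IOCB_TIMEDOUT)
 *		do not free cmdiocb - cleanup happens via cmd_cmpl later;
 *	else
 *		the iocb was never issued (IOCB_ERROR or IOCB_BUSY).
 */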
13226 
13227 /**
13228  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
13229  * @phba: Pointer to HBA context object.
13230  * @pmboxq: Pointer to driver mailbox object.
13231  * @timeout: Timeout in number of seconds.
13232  *
 * This function issues the mailbox to the firmware and waits for the
 * mailbox command to complete. If the mailbox command is not
 * completed within timeout seconds, it returns MBX_TIMEOUT.
 * The function waits for the mailbox completion using a
 * non-interruptible wait. The caller must not free the mailbox
 * resources if this function returns MBX_TIMEOUT; in that case the
 * deferred completion handler releases them when the command
 * eventually completes.
 * This function will sleep while waiting for the mailbox completion,
 * so it must not be called from any context that does not allow
 * sleeping. For the same reason, it cannot be called with
 * interrupts disabled.
 * This function assumes that the mailbox completion occurs while
 * this function sleeps, so it cannot be called from the worker
 * thread that processes mailbox completions.
 * This function is called in the context of HBA management
 * applications.
 * This function returns MBX_SUCCESS when successful.
 * It is called with no lock held.
13252  **/
13253 int
13254 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
13255 			 uint32_t timeout)
13256 {
13257 	struct completion mbox_done;
13258 	int retval;
13259 	unsigned long flag;
13260 
13261 	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
	/* Set up the wake routine as the mailbox completion callback */
13263 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
13264 
	/* Set up the ctx_u field to pass the completion to the wake function */
13266 	init_completion(&mbox_done);
13267 	pmboxq->ctx_u.mbox_wait = &mbox_done;
13268 	/* now issue the command */
13269 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13270 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13271 		wait_for_completion_timeout(&mbox_done,
13272 					    msecs_to_jiffies(timeout * 1000));
13273 
13274 		spin_lock_irqsave(&phba->hbalock, flag);
13275 		pmboxq->ctx_u.mbox_wait = NULL;
		/*
		 * If the LPFC_MBX_WAKE flag is set, the mailbox completed;
		 * otherwise, do not free the resources here.
		 */
13280 		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13281 			retval = MBX_SUCCESS;
13282 		} else {
13283 			retval = MBX_TIMEOUT;
13284 			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13285 		}
13286 		spin_unlock_irqrestore(&phba->hbalock, flag);
13287 	}
13288 	return retval;
13289 }
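
/*
 * Illustrative usage sketch (not driver code): a typical synchronous
 * mailbox caller allocates from the mbox mempool, builds the command
 * (lpfc_read_rev is just one example builder), and must leave the
 * mailbox alone on MBX_TIMEOUT:
 *
 *	pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_read_rev(phba, pmboxq);
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmboxq, phba->mbox_mem_pool);
 *
 * On MBX_TIMEOUT the deferred lpfc_sli_def_mbox_cmpl releases pmboxq
 * once the command finally completes or the port is reset.
 */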
13290 
13291 /**
13292  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13293  * @phba: Pointer to HBA context.
13294  * @mbx_action: Mailbox shutdown options.
13295  *
 * This function is called to shut down the driver's mailbox sub-system.
 * It first marks the mailbox sub-system as blocked to prevent further
 * asynchronous mailbox commands from being issued off the pending
 * mailbox command queue. If the mailbox sub-system shutdown is due to
 * an HBA error condition such as EEH or ERATT, this routine invokes
 * the mailbox sub-system flush routine to forcefully bring down the
 * mailbox sub-system. Otherwise, if it is due to a normal condition
 * (such as offline or HBA function reset), this routine waits for the
 * outstanding mailbox command to complete before invoking the mailbox
 * sub-system flush routine to gracefully bring down the mailbox
 * sub-system.
13306  **/
13307 void
13308 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13309 {
13310 	struct lpfc_sli *psli = &phba->sli;
13311 	unsigned long timeout;
13312 
13313 	if (mbx_action == LPFC_MBX_NO_WAIT) {
13314 		/* delay 100ms for port state */
13315 		msleep(100);
13316 		lpfc_sli_mbox_sys_flush(phba);
13317 		return;
13318 	}
13319 	timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
13320 
13321 	/* Disable softirqs, including timers from obtaining phba->hbalock */
13322 	local_bh_disable();
13323 
13324 	spin_lock_irq(&phba->hbalock);
13325 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13326 
13327 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13328 		/* Determine how long we might wait for the active mailbox
13329 		 * command to be gracefully completed by firmware.
13330 		 */
13331 		if (phba->sli.mbox_active)
13332 			timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba,
13333 						phba->sli.mbox_active) *
13334 						1000) + jiffies;
13335 		spin_unlock_irq(&phba->hbalock);
13336 
13337 		/* Enable softirqs again, done with phba->hbalock */
13338 		local_bh_enable();
13339 
13340 		while (phba->sli.mbox_active) {
13341 			/* Check active mailbox complete status every 2ms */
13342 			msleep(2);
13343 			if (time_after(jiffies, timeout))
				/* Timeout: let the mailbox flush routine
				 * forcefully release the active mailbox
				 * command
				 */
13347 				break;
13348 		}
13349 	} else {
13350 		spin_unlock_irq(&phba->hbalock);
13351 
13352 		/* Enable softirqs again, done with phba->hbalock */
13353 		local_bh_enable();
13354 	}
13355 
13356 	lpfc_sli_mbox_sys_flush(phba);
13357 }
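
/*
 * Illustrative usage sketch (not driver code): error-recovery paths
 * pass LPFC_MBX_NO_WAIT to flush immediately, while a graceful offline
 * or function reset uses LPFC_MBX_WAIT so any active mailbox command
 * can drain first:
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
 */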
13358 
13359 /**
13360  * lpfc_sli_eratt_read - read sli-3 error attention events
13361  * @phba: Pointer to HBA context.
13362  *
13363  * This function is called to read the SLI3 device error attention registers
 * for possible error attention events. The caller must hold the hbalock
 * with spin_lock_irq().
13366  *
13367  * This function returns 1 when there is Error Attention in the Host Attention
13368  * Register and returns 0 otherwise.
13369  **/
13370 static int
13371 lpfc_sli_eratt_read(struct lpfc_hba *phba)
13372 {
13373 	uint32_t ha_copy;
13374 
13375 	/* Read chip Host Attention (HA) register */
13376 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
13377 		goto unplug_err;
13378 
13379 	if (ha_copy & HA_ERATT) {
13380 		/* Read host status register to retrieve error event */
13381 		if (lpfc_sli_read_hs(phba))
13382 			goto unplug_err;
13383 
		/* Check if a deferred error condition is active */
13385 		if ((HS_FFER1 & phba->work_hs) &&
13386 		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13387 		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13388 			set_bit(DEFER_ERATT, &phba->hba_flag);
13389 			/* Clear all interrupt enable conditions */
13390 			writel(0, phba->HCregaddr);
13391 			readl(phba->HCregaddr);
13392 		}
13393 
13394 		/* Set the driver HA work bitmap */
13395 		phba->work_ha |= HA_ERATT;
13396 		/* Indicate polling handles this ERATT */
13397 		set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13398 		return 1;
13399 	}
13400 	return 0;
13401 
13402 unplug_err:
13403 	/* Set the driver HS work bitmap */
13404 	phba->work_hs |= UNPLUG_ERR;
13405 	/* Set the driver HA work bitmap */
13406 	phba->work_ha |= HA_ERATT;
13407 	/* Indicate polling handles this ERATT */
13408 	set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13409 	return 1;
13410 }
13411 
13412 /**
13413  * lpfc_sli4_eratt_read - read sli-4 error attention events
13414  * @phba: Pointer to HBA context.
13415  *
13416  * This function is called to read the SLI4 device error attention registers
 * for possible error attention events. The caller must hold the hbalock
 * with spin_lock_irq().
13419  *
13420  * This function returns 1 when there is Error Attention in the Host Attention
13421  * Register and returns 0 otherwise.
13422  **/
13423 static int
13424 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13425 {
13426 	uint32_t uerr_sta_hi, uerr_sta_lo;
13427 	uint32_t if_type, portsmphr;
13428 	struct lpfc_register portstat_reg;
13429 	u32 logmask;
13430 
13431 	/*
13432 	 * For now, use the SLI4 device internal unrecoverable error
13433 	 * registers for error attention. This can be changed later.
13434 	 */
13435 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13436 	switch (if_type) {
13437 	case LPFC_SLI_INTF_IF_TYPE_0:
13438 		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13439 			&uerr_sta_lo) ||
13440 			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13441 			&uerr_sta_hi)) {
13442 			phba->work_hs |= UNPLUG_ERR;
13443 			phba->work_ha |= HA_ERATT;
13444 			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13445 			return 1;
13446 		}
13447 		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13448 		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13449 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13450 					"1423 HBA Unrecoverable error: "
13451 					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13452 					"ue_mask_lo_reg=0x%x, "
13453 					"ue_mask_hi_reg=0x%x\n",
13454 					uerr_sta_lo, uerr_sta_hi,
13455 					phba->sli4_hba.ue_mask_lo,
13456 					phba->sli4_hba.ue_mask_hi);
13457 			phba->work_status[0] = uerr_sta_lo;
13458 			phba->work_status[1] = uerr_sta_hi;
13459 			phba->work_ha |= HA_ERATT;
13460 			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13461 			return 1;
13462 		}
13463 		break;
13464 	case LPFC_SLI_INTF_IF_TYPE_2:
13465 	case LPFC_SLI_INTF_IF_TYPE_6:
13466 		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13467 			&portstat_reg.word0) ||
13468 			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13469 			&portsmphr)){
13470 			phba->work_hs |= UNPLUG_ERR;
13471 			phba->work_ha |= HA_ERATT;
13472 			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13473 			return 1;
13474 		}
13475 		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13476 			phba->work_status[0] =
13477 				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13478 			phba->work_status[1] =
13479 				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13480 			logmask = LOG_TRACE_EVENT;
13481 			if (phba->work_status[0] ==
13482 				SLIPORT_ERR1_REG_ERR_CODE_2 &&
13483 			    phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13484 				logmask = LOG_SLI;
13485 			lpfc_printf_log(phba, KERN_ERR, logmask,
13486 					"2885 Port Status Event: "
13487 					"port status reg 0x%x, "
13488 					"port smphr reg 0x%x, "
13489 					"error 1=0x%x, error 2=0x%x\n",
13490 					portstat_reg.word0,
13491 					portsmphr,
13492 					phba->work_status[0],
13493 					phba->work_status[1]);
13494 			phba->work_ha |= HA_ERATT;
13495 			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13496 			return 1;
13497 		}
13498 		break;
13499 	case LPFC_SLI_INTF_IF_TYPE_1:
13500 	default:
13501 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13502 				"2886 HBA Error Attention on unsupported "
13503 				"if type %d.", if_type);
13504 		return 1;
13505 	}
13506 
13507 	return 0;
13508 }
13509 
13510 /**
13511  * lpfc_sli_check_eratt - check error attention events
13512  * @phba: Pointer to HBA context.
13513  *
13514  * This function is called from timer soft interrupt context to check HBA's
13515  * error attention register bit for error attention events.
13516  *
13517  * This function returns 1 when there is Error Attention in the Host Attention
13518  * Register and returns 0 otherwise.
13519  **/
13520 int
13521 lpfc_sli_check_eratt(struct lpfc_hba *phba)
13522 {
13523 	uint32_t ha_copy;
13524 
13525 	/* If somebody is waiting to handle an eratt, don't process it
13526 	 * here. The brdkill function will do this.
13527 	 */
13528 	if (phba->link_flag & LS_IGNORE_ERATT)
13529 		return 0;
13530 
13531 	/* Check if interrupt handler handles this ERATT */
13532 	if (test_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
13533 		/* Interrupt handler has handled ERATT */
13534 		return 0;
13535 
13536 	/*
13537 	 * If there is deferred error attention, do not check for error
13538 	 * attention
13539 	 */
13540 	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
13541 		return 0;
13542 
13543 	spin_lock_irq(&phba->hbalock);
13544 	/* If PCI channel is offline, don't process it */
13545 	if (unlikely(pci_channel_offline(phba->pcidev))) {
13546 		spin_unlock_irq(&phba->hbalock);
13547 		return 0;
13548 	}
13549 
13550 	switch (phba->sli_rev) {
13551 	case LPFC_SLI_REV2:
13552 	case LPFC_SLI_REV3:
13553 		/* Read chip Host Attention (HA) register */
13554 		ha_copy = lpfc_sli_eratt_read(phba);
13555 		break;
13556 	case LPFC_SLI_REV4:
		/* Read device Unrecoverable Error (UERR) registers */
13558 		ha_copy = lpfc_sli4_eratt_read(phba);
13559 		break;
13560 	default:
13561 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13562 				"0299 Invalid SLI revision (%d)\n",
13563 				phba->sli_rev);
13564 		ha_copy = 0;
13565 		break;
13566 	}
13567 	spin_unlock_irq(&phba->hbalock);
13568 
13569 	return ha_copy;
13570 }
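
/*
 * Illustrative usage sketch (not driver code): the error-attention poll
 * timer is the expected caller, waking the worker thread whenever an
 * event has been latched:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */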
13571 
13572 /**
13573  * lpfc_intr_state_check - Check device state for interrupt handling
13574  * @phba: Pointer to HBA context.
13575  *
 * This inline routine checks whether the device or its PCI slot is in a
 * state in which the interrupt should be handled.
 *
 * This function returns 0 if the device and the PCI slot are in a state
 * in which the interrupt should be handled, otherwise -EIO.
 **/
13582 static inline int
13583 lpfc_intr_state_check(struct lpfc_hba *phba)
13584 {
13585 	/* If the pci channel is offline, ignore all the interrupts */
13586 	if (unlikely(pci_channel_offline(phba->pcidev)))
13587 		return -EIO;
13588 
13589 	/* Update device level interrupt statistics */
13590 	phba->sli.slistat.sli_intr++;
13591 
13592 	/* Ignore all interrupts during initialization. */
13593 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13594 		return -EIO;
13595 
13596 	return 0;
13597 }
13598 
13599 /**
13600  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13601  * @irq: Interrupt number.
13602  * @dev_id: The device context pointer.
13603  *
13604  * This function is directly called from the PCI layer as an interrupt
13605  * service routine when device with SLI-3 interface spec is enabled with
13606  * MSI-X multi-message interrupt mode and there are slow-path events in
13607  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13608  * interrupt mode, this function is called as part of the device-level
13609  * interrupt handler. When the PCI slot is in error recovery or the HBA
13610  * is undergoing initialization, the interrupt handler will not process
13611  * the interrupt. The link attention and ELS ring attention events are
13612  * handled by the worker thread. The interrupt handler signals the worker
13613  * thread and returns for these events. This function is called without
13614  * any lock held. It gets the hbalock to access and update SLI data
13615  * structures.
13616  *
13617  * This function returns IRQ_HANDLED when interrupt is handled else it
13618  * returns IRQ_NONE.
13619  **/
13620 irqreturn_t
13621 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13622 {
13623 	struct lpfc_hba  *phba;
13624 	uint32_t ha_copy, hc_copy;
13625 	uint32_t work_ha_copy;
13626 	unsigned long status;
13627 	unsigned long iflag;
13628 	uint32_t control;
13629 
13630 	MAILBOX_t *mbox, *pmbox;
13631 	struct lpfc_vport *vport;
13632 	struct lpfc_nodelist *ndlp;
13633 	struct lpfc_dmabuf *mp;
13634 	LPFC_MBOXQ_t *pmb;
13635 	int rc;
13636 
13637 	/*
13638 	 * Get the driver's phba structure from the dev_id and
13639 	 * assume the HBA is not interrupting.
13640 	 */
13641 	phba = (struct lpfc_hba *)dev_id;
13642 
13643 	if (unlikely(!phba))
13644 		return IRQ_NONE;
13645 
13646 	/*
	 * Extra handling is needed when this function is invoked as an
13648 	 * individual interrupt handler in MSI-X multi-message interrupt mode
13649 	 */
13650 	if (phba->intr_type == MSIX) {
13651 		/* Check device state for handling interrupt */
13652 		if (lpfc_intr_state_check(phba))
13653 			return IRQ_NONE;
13654 		/* Need to read HA REG for slow-path events */
13655 		spin_lock_irqsave(&phba->hbalock, iflag);
13656 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
13657 			goto unplug_error;
13658 		/* If somebody is waiting to handle an eratt don't process it
13659 		 * here. The brdkill function will do this.
13660 		 */
13661 		if (phba->link_flag & LS_IGNORE_ERATT)
13662 			ha_copy &= ~HA_ERATT;
13663 		/* Check the need for handling ERATT in interrupt handler */
13664 		if (ha_copy & HA_ERATT) {
13665 			if (test_and_set_bit(HBA_ERATT_HANDLED,
13666 					     &phba->hba_flag))
13667 				/* ERATT polling has handled ERATT */
13668 				ha_copy &= ~HA_ERATT;
13669 		}
13670 
13671 		/*
13672 		 * If there is deferred error attention, do not check for any
13673 		 * interrupt.
13674 		 */
13675 		if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
13676 			spin_unlock_irqrestore(&phba->hbalock, iflag);
13677 			return IRQ_NONE;
13678 		}
13679 
		/* Clear only attention sources related to the slow path */
13681 		if (lpfc_readl(phba->HCregaddr, &hc_copy))
13682 			goto unplug_error;
13683 
13684 		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13685 			HC_LAINT_ENA | HC_ERINT_ENA),
13686 			phba->HCregaddr);
13687 		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13688 			phba->HAregaddr);
13689 		writel(hc_copy, phba->HCregaddr);
13690 		readl(phba->HAregaddr); /* flush */
13691 		spin_unlock_irqrestore(&phba->hbalock, iflag);
13692 	} else
13693 		ha_copy = phba->ha_copy;
13694 
13695 	work_ha_copy = ha_copy & phba->work_ha_mask;
13696 
13697 	if (work_ha_copy) {
13698 		if (work_ha_copy & HA_LATT) {
13699 			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13700 				/*
13701 				 * Turn off Link Attention interrupts
13702 				 * until CLEAR_LA done
13703 				 */
13704 				spin_lock_irqsave(&phba->hbalock, iflag);
13705 				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13706 				if (lpfc_readl(phba->HCregaddr, &control))
13707 					goto unplug_error;
13708 				control &= ~HC_LAINT_ENA;
13709 				writel(control, phba->HCregaddr);
13710 				readl(phba->HCregaddr); /* flush */
13711 				spin_unlock_irqrestore(&phba->hbalock, iflag);
			} else
13714 				work_ha_copy &= ~HA_LATT;
13715 		}
13716 
13717 		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13718 			/*
13719 			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13720 			 * the only slow ring.
13721 			 */
13722 			status = (work_ha_copy &
13723 				(HA_RXMASK  << (4*LPFC_ELS_RING)));
13724 			status >>= (4*LPFC_ELS_RING);
13725 			if (status & HA_RXMASK) {
13726 				spin_lock_irqsave(&phba->hbalock, iflag);
13727 				if (lpfc_readl(phba->HCregaddr, &control))
13728 					goto unplug_error;
13729 
13730 				lpfc_debugfs_slow_ring_trc(phba,
13731 				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
13732 				control, status,
13733 				(uint32_t)phba->sli.slistat.sli_intr);
13734 
13735 				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13736 					lpfc_debugfs_slow_ring_trc(phba,
13737 						"ISR Disable ring:"
13738 						"pwork:x%x hawork:x%x wait:x%x",
13739 						phba->work_ha, work_ha_copy,
13740 						(uint32_t)((unsigned long)
13741 						&phba->work_waitq));
13742 
13743 					control &=
13744 					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
13745 					writel(control, phba->HCregaddr);
13746 					readl(phba->HCregaddr); /* flush */
				} else {
13749 					lpfc_debugfs_slow_ring_trc(phba,
13750 						"ISR slow ring:   pwork:"
13751 						"x%x hawork:x%x wait:x%x",
13752 						phba->work_ha, work_ha_copy,
13753 						(uint32_t)((unsigned long)
13754 						&phba->work_waitq));
13755 				}
13756 				spin_unlock_irqrestore(&phba->hbalock, iflag);
13757 			}
13758 		}
13759 		spin_lock_irqsave(&phba->hbalock, iflag);
13760 		if (work_ha_copy & HA_ERATT) {
13761 			if (lpfc_sli_read_hs(phba))
13762 				goto unplug_error;
			/*
			 * Check if a deferred error condition is active
			 */
13767 			if ((HS_FFER1 & phba->work_hs) &&
13768 				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13769 				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
13770 				  phba->work_hs)) {
13771 				set_bit(DEFER_ERATT, &phba->hba_flag);
13772 				/* Clear all interrupt enable conditions */
13773 				writel(0, phba->HCregaddr);
13774 				readl(phba->HCregaddr);
13775 			}
13776 		}
13777 
13778 		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13779 			pmb = phba->sli.mbox_active;
13780 			pmbox = &pmb->u.mb;
13781 			mbox = phba->mbox;
13782 			vport = pmb->vport;
13783 
13784 			/* First check out the status word */
13785 			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13786 			if (pmbox->mbxOwner != OWN_HOST) {
13787 				spin_unlock_irqrestore(&phba->hbalock, iflag);
13788 				/*
13789 				 * Stray Mailbox Interrupt, mbxCommand <cmd>
13790 				 * mbxStatus <status>
13791 				 */
13792 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13793 						"(%d):0304 Stray Mailbox "
13794 						"Interrupt mbxCommand x%x "
13795 						"mbxStatus x%x\n",
13796 						(vport ? vport->vpi : 0),
13797 						pmbox->mbxCommand,
13798 						pmbox->mbxStatus);
13799 				/* clear mailbox attention bit */
13800 				work_ha_copy &= ~HA_MBATT;
13801 			} else {
13802 				phba->sli.mbox_active = NULL;
13803 				spin_unlock_irqrestore(&phba->hbalock, iflag);
13804 				phba->last_completion_time = jiffies;
13805 				del_timer(&phba->sli.mbox_tmo);
13806 				if (pmb->mbox_cmpl) {
13807 					lpfc_sli_pcimem_bcopy(mbox, pmbox,
13808 							MAILBOX_CMD_SIZE);
13809 					if (pmb->out_ext_byte_len &&
13810 						pmb->ext_buf)
13811 						lpfc_sli_pcimem_bcopy(
13812 						phba->mbox_ext,
13813 						pmb->ext_buf,
13814 						pmb->out_ext_byte_len);
13815 				}
13816 				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13817 					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13818 
13819 					lpfc_debugfs_disc_trc(vport,
13820 						LPFC_DISC_TRC_MBOX_VPORT,
13821 						"MBOX dflt rpi: : "
13822 						"status:x%x rpi:x%x",
13823 						(uint32_t)pmbox->mbxStatus,
13824 						pmbox->un.varWords[0], 0);
13825 
13826 					if (!pmbox->mbxStatus) {
13827 						mp = pmb->ctx_buf;
13828 						ndlp = pmb->ctx_ndlp;
13829 
13830 						/* Reg_LOGIN of dflt RPI was
						 * successful. Now let's get
13832 						 * rid of the RPI using the
13833 						 * same mbox buffer.
13834 						 */
13835 						lpfc_unreg_login(phba,
13836 							vport->vpi,
13837 							pmbox->un.varWords[0],
13838 							pmb);
13839 						pmb->mbox_cmpl =
13840 							lpfc_mbx_cmpl_dflt_rpi;
13841 						pmb->ctx_buf = mp;
13842 						pmb->ctx_ndlp = ndlp;
13843 						pmb->vport = vport;
13844 						rc = lpfc_sli_issue_mbox(phba,
13845 								pmb,
13846 								MBX_NOWAIT);
13847 						if (rc != MBX_BUSY)
13848 							lpfc_printf_log(phba,
13849 							KERN_ERR,
13850 							LOG_TRACE_EVENT,
13851 							"0350 rc should have"
13852 							"been MBX_BUSY\n");
13853 						if (rc != MBX_NOT_FINISHED)
13854 							goto send_current_mbox;
13855 					}
13856 				}
13857 				spin_lock_irqsave(
13858 						&phba->pport->work_port_lock,
13859 						iflag);
13860 				phba->pport->work_port_events &=
13861 					~WORKER_MBOX_TMO;
13862 				spin_unlock_irqrestore(
13863 						&phba->pport->work_port_lock,
13864 						iflag);
13865 
13866 				/* Do NOT queue MBX_HEARTBEAT to the worker
13867 				 * thread for processing.
13868 				 */
13869 				if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13870 					/* Process mbox now */
13871 					phba->sli.mbox_active = NULL;
13872 					phba->sli.sli_flag &=
13873 						~LPFC_SLI_MBOX_ACTIVE;
13874 					if (pmb->mbox_cmpl)
13875 						pmb->mbox_cmpl(phba, pmb);
13876 				} else {
13877 					/* Queue to worker thread to process */
13878 					lpfc_mbox_cmpl_put(phba, pmb);
13879 				}
13880 			}
13881 		} else
13882 			spin_unlock_irqrestore(&phba->hbalock, iflag);
13883 
13884 		if ((work_ha_copy & HA_MBATT) &&
13885 		    (phba->sli.mbox_active == NULL)) {
13886 send_current_mbox:
13887 			/* Process next mailbox command if there is one */
13888 			do {
13889 				rc = lpfc_sli_issue_mbox(phba, NULL,
13890 							 MBX_NOWAIT);
13891 			} while (rc == MBX_NOT_FINISHED);
13892 			if (rc != MBX_SUCCESS)
13893 				lpfc_printf_log(phba, KERN_ERR,
13894 						LOG_TRACE_EVENT,
13895 						"0349 rc should be "
13896 						"MBX_SUCCESS\n");
13897 		}
13898 
13899 		spin_lock_irqsave(&phba->hbalock, iflag);
13900 		phba->work_ha |= work_ha_copy;
13901 		spin_unlock_irqrestore(&phba->hbalock, iflag);
13902 		lpfc_worker_wake_up(phba);
13903 	}
13904 	return IRQ_HANDLED;
13905 unplug_error:
13906 	spin_unlock_irqrestore(&phba->hbalock, iflag);
13907 	return IRQ_HANDLED;
13908 
13909 } /* lpfc_sli_sp_intr_handler */
13910 
13911 /**
13912  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13913  * @irq: Interrupt number.
13914  * @dev_id: The device context pointer.
13915  *
13916  * This function is directly called from the PCI layer as an interrupt
13917  * service routine when device with SLI-3 interface spec is enabled with
13918  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13919  * ring event in the HBA. However, when the device is enabled with either
13920  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13921  * device-level interrupt handler. When the PCI slot is in error recovery
13922  * or the HBA is undergoing initialization, the interrupt handler will not
 * process the interrupt. SCSI FCP fast-path ring events are handled in
 * interrupt context. This function is called without any lock held.
13925  * It gets the hbalock to access and update SLI data structures.
13926  *
13927  * This function returns IRQ_HANDLED when interrupt is handled else it
13928  * returns IRQ_NONE.
13929  **/
13930 irqreturn_t
13931 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13932 {
13933 	struct lpfc_hba  *phba;
13934 	uint32_t ha_copy;
13935 	unsigned long status;
13936 	unsigned long iflag;
13937 	struct lpfc_sli_ring *pring;
13938 
13939 	/* Get the driver's phba structure from the dev_id and
13940 	 * assume the HBA is not interrupting.
13941 	 */
13942 	phba = (struct lpfc_hba *) dev_id;
13943 
13944 	if (unlikely(!phba))
13945 		return IRQ_NONE;
13946 
13947 	/*
	 * Extra handling is needed when this function is invoked as an
13949 	 * individual interrupt handler in MSI-X multi-message interrupt mode
13950 	 */
13951 	if (phba->intr_type == MSIX) {
13952 		/* Check device state for handling interrupt */
13953 		if (lpfc_intr_state_check(phba))
13954 			return IRQ_NONE;
13955 		/* Need to read HA REG for FCP ring and other ring events */
13956 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
13957 			return IRQ_HANDLED;
13958 
13959 		/*
13960 		 * If there is deferred error attention, do not check for
13961 		 * any interrupt.
13962 		 */
13963 		if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
13964 			return IRQ_NONE;
13965 
		/* Clear only attention sources related to the fast path */
13967 		spin_lock_irqsave(&phba->hbalock, iflag);
13968 		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
13969 			phba->HAregaddr);
13970 		readl(phba->HAregaddr); /* flush */
13971 		spin_unlock_irqrestore(&phba->hbalock, iflag);
13972 	} else
13973 		ha_copy = phba->ha_copy;
13974 
13975 	/*
13976 	 * Process all events on FCP ring. Take the optimized path for FCP IO.
13977 	 */
13978 	ha_copy &= ~(phba->work_ha_mask);
13979 
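	/*
	 * Each ring owns a 4-bit attention nibble in the HA register, so
	 * ring N's RX/TX attention bits live at bits [4N+3:4N]; isolate
	 * the nibble with HA_RXMASK shifted into place, then shift it
	 * back down for testing.
	 */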
13980 	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
13981 	status >>= (4*LPFC_FCP_RING);
13982 	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
13983 	if (status & HA_RXMASK)
13984 		lpfc_sli_handle_fast_ring_event(phba, pring, status);
13985 
13986 	if (phba->cfg_multi_ring_support == 2) {
13987 		/*
13988 		 * Process all events on extra ring. Take the optimized path
13989 		 * for extra ring IO.
13990 		 */
13991 		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
13992 		status >>= (4*LPFC_EXTRA_RING);
13993 		if (status & HA_RXMASK) {
13994 			lpfc_sli_handle_fast_ring_event(phba,
13995 					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
13996 					status);
13997 		}
13998 	}
13999 	return IRQ_HANDLED;
14000 }  /* lpfc_sli_fp_intr_handler */
14001 
14002 /**
14003  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
14004  * @irq: Interrupt number.
14005  * @dev_id: The device context pointer.
14006  *
14007  * This function is the HBA device-level interrupt handler to device with
14008  * SLI-3 interface spec, called from the PCI layer when either MSI or
14009  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
14010  * requires driver attention. This function invokes the slow-path interrupt
14011  * attention handling function and fast-path interrupt attention handling
14012  * function in turn to process the relevant HBA attention events. This
14013  * function is called without any lock held. It gets the hbalock to access
14014  * and update SLI data structures.
14015  *
14016  * This function returns IRQ_HANDLED when interrupt is handled, else it
14017  * returns IRQ_NONE.
14018  **/
14019 irqreturn_t
14020 lpfc_sli_intr_handler(int irq, void *dev_id)
14021 {
14022 	struct lpfc_hba  *phba;
14023 	irqreturn_t sp_irq_rc, fp_irq_rc;
14024 	unsigned long status1, status2;
14025 	uint32_t hc_copy;
14026 
14027 	/*
14028 	 * Get the driver's phba structure from the dev_id and
14029 	 * assume the HBA is not interrupting.
14030 	 */
14031 	phba = (struct lpfc_hba *) dev_id;
14032 
14033 	if (unlikely(!phba))
14034 		return IRQ_NONE;
14035 
14036 	/* Check device state for handling interrupt */
14037 	if (lpfc_intr_state_check(phba))
14038 		return IRQ_NONE;
14039 
14040 	spin_lock(&phba->hbalock);
14041 	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
14042 		spin_unlock(&phba->hbalock);
14043 		return IRQ_HANDLED;
14044 	}
14045 
14046 	if (unlikely(!phba->ha_copy)) {
14047 		spin_unlock(&phba->hbalock);
14048 		return IRQ_NONE;
14049 	} else if (phba->ha_copy & HA_ERATT) {
14050 		if (test_and_set_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
14051 			/* ERATT polling has handled ERATT */
14052 			phba->ha_copy &= ~HA_ERATT;
14053 	}
14054 
14055 	/*
14056 	 * If there is deferred error attention, do not check for any interrupt.
14057 	 */
14058 	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
14059 		spin_unlock(&phba->hbalock);
14060 		return IRQ_NONE;
14061 	}
14062 
14063 	/* Clear attention sources except link and error attentions */
14064 	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
14065 		spin_unlock(&phba->hbalock);
14066 		return IRQ_HANDLED;
14067 	}
14068 	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
14069 		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
14070 		phba->HCregaddr);
14071 	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
14072 	writel(hc_copy, phba->HCregaddr);
14073 	readl(phba->HAregaddr); /* flush */
14074 	spin_unlock(&phba->hbalock);
14075 
14076 	/*
14077 	 * Invokes slow-path host attention interrupt handling as appropriate.
14078 	 */
14079 
14080 	/* status of events with mailbox and link attention */
14081 	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
14082 
14083 	/* status of events with ELS ring */
14084 	status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
14085 	status2 >>= (4*LPFC_ELS_RING);
14086 
14087 	if (status1 || (status2 & HA_RXMASK))
14088 		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
14089 	else
14090 		sp_irq_rc = IRQ_NONE;
14091 
14092 	/*
14093 	 * Invoke fast-path host attention interrupt handling as appropriate.
14094 	 */
14095 
14096 	/* status of events with FCP ring */
14097 	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
14098 	status1 >>= (4*LPFC_FCP_RING);
14099 
14100 	/* status of events with extra ring */
14101 	if (phba->cfg_multi_ring_support == 2) {
14102 		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
14103 		status2 >>= (4*LPFC_EXTRA_RING);
14104 	} else
14105 		status2 = 0;
14106 
14107 	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
14108 		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
14109 	else
14110 		fp_irq_rc = IRQ_NONE;
14111 
14112 	/* Return device-level interrupt handling status */
14113 	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
14114 }  /* lpfc_sli_intr_handler */
14115 
14116 /**
14117  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
14118  * @phba: pointer to lpfc hba data structure.
14119  *
 * This routine is invoked by the worker thread to process all the pending
 * SLI4 els xri abort events.
14122  **/
14123 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
14124 {
14125 	struct lpfc_cq_event *cq_event;
14126 	unsigned long iflags;
14127 
	/* First, declare that the els xri abort event has been handled */
14129 	clear_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
14130 
14131 	/* Now, handle all the els xri abort events */
14132 	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14133 	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
14134 		/* Get the first event from the head of the event queue */
14135 		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
14136 				 cq_event, struct lpfc_cq_event, list);
14137 		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14138 				       iflags);
14139 		/* Notify aborted XRI for ELS work queue */
14140 		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
14141 
14142 		/* Free the event processed back to the free pool */
14143 		lpfc_sli4_cq_event_release(phba, cq_event);
14144 		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14145 				  iflags);
14146 	}
14147 	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14148 }
14149 
14150 /**
14151  * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
14152  * @phba: Pointer to HBA context object.
14153  * @irspiocbq: Pointer to work-queue completion queue entry.
14154  *
 * This routine handles an ELS work-queue completion event and constructs
 * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
14157  * discovery engine to handle.
14158  *
14159  * Return: Pointer to the receive IOCBQ, NULL otherwise.
14160  **/
14161 static struct lpfc_iocbq *
14162 lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
14163 				  struct lpfc_iocbq *irspiocbq)
14164 {
14165 	struct lpfc_sli_ring *pring;
14166 	struct lpfc_iocbq *cmdiocbq;
14167 	struct lpfc_wcqe_complete *wcqe;
14168 	unsigned long iflags;
14169 
14170 	pring = lpfc_phba_elsring(phba);
14171 	if (unlikely(!pring))
14172 		return NULL;
14173 
14174 	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
14175 	spin_lock_irqsave(&pring->ring_lock, iflags);
14176 	pring->stats.iocb_event++;
14177 	/* Look up the ELS command IOCB and create pseudo response IOCB */
14178 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14179 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
14180 	if (unlikely(!cmdiocbq)) {
14181 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
14182 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14183 				"0386 ELS complete with no corresponding "
14184 				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
14185 				wcqe->word0, wcqe->total_data_placed,
14186 				wcqe->parameter, wcqe->word3);
14187 		lpfc_sli_release_iocbq(phba, irspiocbq);
14188 		return NULL;
14189 	}
14190 
14191 	memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
14192 	memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
14193 
14194 	/* Put the iocb back on the txcmplq */
14195 	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
14196 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
14197 
14198 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14199 		spin_lock_irqsave(&phba->hbalock, iflags);
14200 		irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14201 		spin_unlock_irqrestore(&phba->hbalock, iflags);
14202 	}
14203 
14204 	return irspiocbq;
14205 }
14206 
14207 inline struct lpfc_cq_event *
14208 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
14209 {
14210 	struct lpfc_cq_event *cq_event;
14211 
14212 	/* Allocate a new internal CQ_EVENT entry */
14213 	cq_event = lpfc_sli4_cq_event_alloc(phba);
14214 	if (!cq_event) {
14215 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14216 				"0602 Failed to alloc CQ_EVENT entry\n");
14217 		return NULL;
14218 	}
14219 
14220 	/* Move the CQE into the event */
14221 	memcpy(&cq_event->cqe, entry, size);
14222 	return cq_event;
14223 }
14224 
14225 /**
14226  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
14227  * @phba: Pointer to HBA context object.
14228  * @mcqe: Pointer to mailbox completion queue entry.
14229  *
 * This routine processes a mailbox completion queue entry carrying an
 * asynchronous event.
14232  *
14233  * Return: true if work posted to worker thread, otherwise false.
14234  **/
14235 static bool
14236 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14237 {
14238 	struct lpfc_cq_event *cq_event;
14239 	unsigned long iflags;
14240 
14241 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14242 			"0392 Async Event: word0:x%x, word1:x%x, "
14243 			"word2:x%x, word3:x%x\n", mcqe->word0,
14244 			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
14245 
14246 	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
14247 	if (!cq_event)
14248 		return false;
14249 
14250 	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
14251 	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
14252 	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
14253 
14254 	/* Set the async event flag */
14255 	set_bit(ASYNC_EVENT, &phba->hba_flag);
14256 
14257 	return true;
14258 }
14259 
14260 /**
14261  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
14262  * @phba: Pointer to HBA context object.
14263  * @mcqe: Pointer to mailbox completion queue entry.
14264  *
 * This routine processes a mailbox completion queue entry carrying a
 * mailbox completion event.
14267  *
14268  * Return: true if work posted to worker thread, otherwise false.
14269  **/
14270 static bool
14271 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14272 {
14273 	uint32_t mcqe_status;
14274 	MAILBOX_t *mbox, *pmbox;
14275 	struct lpfc_mqe *mqe;
14276 	struct lpfc_vport *vport;
14277 	struct lpfc_nodelist *ndlp;
14278 	struct lpfc_dmabuf *mp;
14279 	unsigned long iflags;
14280 	LPFC_MBOXQ_t *pmb;
14281 	bool workposted = false;
14282 	int rc;
14283 
	/* If not a mailbox completion MCQE, just handle mailbox consume */
14285 	if (!bf_get(lpfc_trailer_completed, mcqe))
14286 		goto out_no_mqe_complete;
14287 
14288 	/* Get the reference to the active mbox command */
14289 	spin_lock_irqsave(&phba->hbalock, iflags);
14290 	pmb = phba->sli.mbox_active;
14291 	if (unlikely(!pmb)) {
14292 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14293 				"1832 No pending MBOX command to handle\n");
14294 		spin_unlock_irqrestore(&phba->hbalock, iflags);
14295 		goto out_no_mqe_complete;
14296 	}
14297 	spin_unlock_irqrestore(&phba->hbalock, iflags);
14298 	mqe = &pmb->u.mqe;
14299 	pmbox = (MAILBOX_t *)&pmb->u.mqe;
14300 	mbox = phba->mbox;
14301 	vport = pmb->vport;
14302 
14303 	/* Reset heartbeat timer */
14304 	phba->last_completion_time = jiffies;
14305 	del_timer(&phba->sli.mbox_tmo);
14306 
14307 	/* Move mbox data to caller's mailbox region, do endian swapping */
14308 	if (pmb->mbox_cmpl && mbox)
14309 		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14310 
14311 	/*
14312 	 * For mcqe errors, conditionally move a modified error code to
14313 	 * the mbox so that the error will not be missed.
14314 	 */
14315 	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14316 	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14317 		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14318 			bf_set(lpfc_mqe_status, mqe,
14319 			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
14320 	}
14321 	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14322 		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14323 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14324 				      "MBOX dflt rpi: status:x%x rpi:x%x",
14325 				      mcqe_status,
14326 				      pmbox->un.varWords[0], 0);
14327 		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14328 			mp = pmb->ctx_buf;
14329 			ndlp = pmb->ctx_ndlp;
14330 
14331 			/* Reg_LOGIN of dflt RPI was successful. Mark the
14332 			 * node as having an UNREG_LOGIN in progress to stop
14333 			 * an unsolicited PLOGI from the same NPortId from
14334 			 * starting another mailbox transaction.
14335 			 */
14336 			spin_lock_irqsave(&ndlp->lock, iflags);
14337 			ndlp->nlp_flag |= NLP_UNREG_INP;
14338 			spin_unlock_irqrestore(&ndlp->lock, iflags);
14339 			lpfc_unreg_login(phba, vport->vpi,
14340 					 pmbox->un.varWords[0], pmb);
14341 			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14342 			pmb->ctx_buf = mp;
14343 
14344 			/* No reference taken here.  This is a default
14345 			 * RPI reg/immediate unreg cycle. The reference was
14346 			 * taken in the reg rpi path and is released when
14347 			 * this mailbox completes.
14348 			 */
14349 			pmb->ctx_ndlp = ndlp;
14350 			pmb->vport = vport;
14351 			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14352 			if (rc != MBX_BUSY)
14353 				lpfc_printf_log(phba, KERN_ERR,
14354 						LOG_TRACE_EVENT,
14355 						"0385 rc should "
14356 						"have been MBX_BUSY\n");
14357 			if (rc != MBX_NOT_FINISHED)
14358 				goto send_current_mbox;
14359 		}
14360 	}
14361 	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14362 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14363 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14364 
14365 	/* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14366 	if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14367 		spin_lock_irqsave(&phba->hbalock, iflags);
14368 		/* Release the mailbox command posting token */
14369 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14370 		phba->sli.mbox_active = NULL;
14371 		if (bf_get(lpfc_trailer_consumed, mcqe))
14372 			lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14373 		spin_unlock_irqrestore(&phba->hbalock, iflags);
14374 
14375 		/* Post the next mbox command, if there is one */
14376 		lpfc_sli4_post_async_mbox(phba);
14377 
14378 		/* Process cmpl now */
14379 		if (pmb->mbox_cmpl)
14380 			pmb->mbox_cmpl(phba, pmb);
14381 		return false;
14382 	}
14383 
14384 	/* There is mailbox completion work to queue to the worker thread */
14385 	spin_lock_irqsave(&phba->hbalock, iflags);
14386 	__lpfc_mbox_cmpl_put(phba, pmb);
14387 	phba->work_ha |= HA_MBATT;
14388 	spin_unlock_irqrestore(&phba->hbalock, iflags);
14389 	workposted = true;
14390 
14391 send_current_mbox:
14392 	spin_lock_irqsave(&phba->hbalock, iflags);
14393 	/* Release the mailbox command posting token */
14394 	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	/* Setting the active mailbox pointer must be in sync with the flag clear */
14396 	phba->sli.mbox_active = NULL;
14397 	if (bf_get(lpfc_trailer_consumed, mcqe))
14398 		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14399 	spin_unlock_irqrestore(&phba->hbalock, iflags);
14400 	/* Wake up worker thread to post the next pending mailbox command */
14401 	lpfc_worker_wake_up(phba);
14402 	return workposted;
14403 
14404 out_no_mqe_complete:
14405 	spin_lock_irqsave(&phba->hbalock, iflags);
14406 	if (bf_get(lpfc_trailer_consumed, mcqe))
14407 		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14408 	spin_unlock_irqrestore(&phba->hbalock, iflags);
14409 	return false;
14410 }
14411 
14412 /**
14413  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14414  * @phba: Pointer to HBA context object.
14415  * @cq: Pointer to associated CQ
14416  * @cqe: Pointer to mailbox completion queue entry.
14417  *
 * This routine processes a mailbox completion queue entry; it invokes the
 * proper mailbox completion handling or asynchronous event handling routine
14420  * according to the MCQE's async bit.
14421  *
14422  * Return: true if work posted to worker thread, otherwise false.
14423  **/
14424 static bool
14425 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14426 			 struct lpfc_cqe *cqe)
14427 {
14428 	struct lpfc_mcqe mcqe;
14429 	bool workposted;
14430 
14431 	cq->CQ_mbox++;
14432 
14433 	/* Copy the mailbox MCQE and convert endian order as needed */
14434 	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14435 
14436 	/* Invoke the proper event handling routine */
14437 	if (!bf_get(lpfc_trailer_async, &mcqe))
14438 		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14439 	else
14440 		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14441 	return workposted;
14442 }
14443 
14444 /**
14445  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14446  * @phba: Pointer to HBA context object.
14447  * @cq: Pointer to associated CQ
14448  * @wcqe: Pointer to work-queue completion queue entry.
14449  *
14450  * This routine handles an ELS work-queue completion event.
14451  *
14452  * Return: true if work posted to worker thread, otherwise false.
14453  **/
14454 static bool
14455 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14456 			     struct lpfc_wcqe_complete *wcqe)
14457 {
14458 	struct lpfc_iocbq *irspiocbq;
14459 	unsigned long iflags;
14460 	struct lpfc_sli_ring *pring = cq->pring;
14461 	int txq_cnt = 0;
14462 	int txcmplq_cnt = 0;
14463 
14464 	/* Check for response status */
14465 	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14466 		/* Log the error status */
14467 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14468 				"0357 ELS CQE error: status=x%x: "
14469 				"CQE: %08x %08x %08x %08x\n",
14470 				bf_get(lpfc_wcqe_c_status, wcqe),
14471 				wcqe->word0, wcqe->total_data_placed,
14472 				wcqe->parameter, wcqe->word3);
14473 	}
14474 
14475 	/* Get an irspiocbq for later ELS response processing use */
14476 	irspiocbq = lpfc_sli_get_iocbq(phba);
14477 	if (!irspiocbq) {
14478 		if (!list_empty(&pring->txq))
14479 			txq_cnt++;
14480 		if (!list_empty(&pring->txcmplq))
14481 			txcmplq_cnt++;
14482 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14483 			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14484 			"els_txcmplq_cnt=%d\n",
14485 			txq_cnt, phba->iocb_cnt,
14486 			txcmplq_cnt);
14487 		return false;
14488 	}
14489 
14490 	/* Save off the slow-path queue event for work thread to process */
14491 	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14492 	spin_lock_irqsave(&phba->hbalock, iflags);
14493 	list_add_tail(&irspiocbq->cq_event.list,
14494 		      &phba->sli4_hba.sp_queue_event);
14495 	spin_unlock_irqrestore(&phba->hbalock, iflags);
14496 	set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
14497 
14498 	return true;
14499 }
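/*
 * Editor's note: a minimal sketch (not driver code) of the deferral
 * pattern used above.  The WCQE lives in completion-ring memory that
 * the port will reuse, so it is snapshotted into a driver-owned iocbq
 * before the event is queued and the worker thread is signalled via
 * the HBA_SP_QUEUE_EVT flag bit:
 *
 *	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	list_add_tail(&irspiocbq->cq_event.list,
 *		      &phba->sli4_hba.sp_queue_event);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
 */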
14500 
14501 /**
14502  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14503  * @phba: Pointer to HBA context object.
14504  * @wcqe: Pointer to work-queue completion queue entry.
14505  *
14506  * This routine handles a slow-path WQ entry consumed event by invoking
14507  * the proper WQ release routine on the slow-path WQ.
14508  **/
14509 static void
14510 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14511 			     struct lpfc_wcqe_release *wcqe)
14512 {
14513 	/* sanity check on queue memory */
14514 	if (unlikely(!phba->sli4_hba.els_wq))
14515 		return;
14516 	/* Check for the slow-path ELS work queue */
14517 	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14518 		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14519 				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14520 	else
14521 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14522 				"2579 Slow-path wqe consume event carries "
14523 				"mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14524 				bf_get(lpfc_wcqe_r_wq_id, wcqe),
14525 				phba->sli4_hba.els_wq->queue_id);
14526 }
14527 
14528 /**
14529  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
14530  * @phba: Pointer to HBA context object.
14531  * @cq: Pointer to a WQ completion queue.
14532  * @wcqe: Pointer to work-queue completion queue entry.
14533  *
14534  * This routine handles an XRI abort event.
14535  *
14536  * Return: true if work posted to worker thread, otherwise false.
14537  **/
14538 static bool
14539 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14540 				   struct lpfc_queue *cq,
14541 				   struct sli4_wcqe_xri_aborted *wcqe)
14542 {
14543 	bool workposted = false;
14544 	struct lpfc_cq_event *cq_event;
14545 	unsigned long iflags;
14546 
14547 	switch (cq->subtype) {
14548 	case LPFC_IO:
14549 		lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14550 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14551 			/* Notify aborted XRI for NVME work queue */
14552 			if (phba->nvmet_support)
14553 				lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14554 		}
14555 		workposted = false;
14556 		break;
14557 	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14558 	case LPFC_ELS:
14559 		cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14560 		if (!cq_event) {
14561 			workposted = false;
14562 			break;
14563 		}
14564 		cq_event->hdwq = cq->hdwq;
14565 		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14566 				  iflags);
14567 		list_add_tail(&cq_event->list,
14568 			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14569 		/* Set the els xri abort event flag */
14570 		set_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
14571 		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14572 				       iflags);
14573 		workposted = true;
14574 		break;
14575 	default:
14576 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14577 				"0603 Invalid CQ subtype %d: "
14578 				"%08x %08x %08x %08x\n",
14579 				cq->subtype, wcqe->word0, wcqe->parameter,
14580 				wcqe->word2, wcqe->word3);
14581 		workposted = false;
14582 		break;
14583 	}
14584 	return workposted;
14585 }
14586 
14587 #define FC_RCTL_MDS_DIAGS	0xF4
14588 
14589 /**
14590  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14591  * @phba: Pointer to HBA context object.
14592  * @rcqe: Pointer to receive-queue completion queue entry.
14593  *
14594  * This routine processes a receive-queue completion queue entry.
14595  *
14596  * Return: true if work posted to worker thread, otherwise false.
14597  **/
14598 static bool
14599 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14600 {
14601 	bool workposted = false;
14602 	struct fc_frame_header *fc_hdr;
14603 	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14604 	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14605 	struct lpfc_nvmet_tgtport *tgtp;
14606 	struct hbq_dmabuf *dma_buf;
14607 	uint32_t status, rq_id;
14608 	unsigned long iflags;
14609 
14610 	/* sanity check on queue memory */
14611 	if (unlikely(!hrq) || unlikely(!drq))
14612 		return workposted;
14613 
14614 	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14615 		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14616 	else
14617 		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14618 	if (rq_id != hrq->queue_id)
14619 		goto out;
14620 
14621 	status = bf_get(lpfc_rcqe_status, rcqe);
14622 	switch (status) {
14623 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14624 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14625 				"2537 Receive Frame Truncated!!\n");
14626 		fallthrough;
14627 	case FC_STATUS_RQ_SUCCESS:
14628 		spin_lock_irqsave(&phba->hbalock, iflags);
14629 		lpfc_sli4_rq_release(hrq, drq);
14630 		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14631 		if (!dma_buf) {
14632 			hrq->RQ_no_buf_found++;
14633 			spin_unlock_irqrestore(&phba->hbalock, iflags);
14634 			goto out;
14635 		}
14636 		hrq->RQ_rcv_buf++;
14637 		hrq->RQ_buf_posted--;
14638 		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14639 
14640 		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14641 
14642 		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14643 		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14644 			spin_unlock_irqrestore(&phba->hbalock, iflags);
14645 			/* Handle MDS Loopback frames */
14646 			if  (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
14647 				lpfc_sli4_handle_mds_loopback(phba->pport,
14648 							      dma_buf);
14649 			else
14650 				lpfc_in_buf_free(phba, &dma_buf->dbuf);
14651 			break;
14652 		}
14653 
14654 		/* save off the frame for the work thread to process */
14655 		list_add_tail(&dma_buf->cq_event.list,
14656 			      &phba->sli4_hba.sp_queue_event);
14657 		spin_unlock_irqrestore(&phba->hbalock, iflags);
14658 		/* Frame received */
14659 		set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
14660 		workposted = true;
14661 		break;
14662 	case FC_STATUS_INSUFF_BUF_FRM_DISC:
14663 		if (phba->nvmet_support) {
14664 			tgtp = phba->targetport->private;
14665 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14666 					"6402 RQE Error x%x, posted %d err_cnt "
14667 					"%d: %x %x %x\n",
14668 					status, hrq->RQ_buf_posted,
14669 					hrq->RQ_no_posted_buf,
14670 					atomic_read(&tgtp->rcv_fcp_cmd_in),
14671 					atomic_read(&tgtp->rcv_fcp_cmd_out),
14672 					atomic_read(&tgtp->xmt_fcp_release));
14673 		}
14674 		fallthrough;
14675 
14676 	case FC_STATUS_INSUFF_BUF_NEED_BUF:
14677 		hrq->RQ_no_posted_buf++;
14678 		/* Post more buffers if possible */
14679 		set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag);
14680 		workposted = true;
14681 		break;
14682 	case FC_STATUS_RQ_DMA_FAILURE:
14683 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14684 				"2564 RQE DMA Error x%x, x%08x x%08x x%08x "
14685 				"x%08x\n",
14686 				status, rcqe->word0, rcqe->word1,
14687 				rcqe->word2, rcqe->word3);
14688 
14689 		/* If IV set, no further recovery */
14690 		if (bf_get(lpfc_rcqe_iv, rcqe))
14691 			break;
14692 
14693 		/* recycle consumed resource */
14694 		spin_lock_irqsave(&phba->hbalock, iflags);
14695 		lpfc_sli4_rq_release(hrq, drq);
14696 		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14697 		if (!dma_buf) {
14698 			hrq->RQ_no_buf_found++;
14699 			spin_unlock_irqrestore(&phba->hbalock, iflags);
14700 			break;
14701 		}
14702 		hrq->RQ_rcv_buf++;
14703 		hrq->RQ_buf_posted--;
14704 		spin_unlock_irqrestore(&phba->hbalock, iflags);
14705 		lpfc_in_buf_free(phba, &dma_buf->dbuf);
14706 		break;
14707 	default:
14708 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14709 				"2565 Unexpected RQE Status x%x, w0-3 x%08x "
14710 				"x%08x x%08x x%08x\n",
14711 				status, rcqe->word0, rcqe->word1,
14712 				rcqe->word2, rcqe->word3);
14713 		break;
14714 	}
14715 out:
14716 	return workposted;
14717 }
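/*
 * Editor's sketch (illustrative, not driver code): the RQ consume
 * discipline used in the success and DMA-error arms above.  A hardware
 * release is always paired with a host-side buffer get under hbalock,
 * so the RQ_buf_posted/RQ_rcv_buf accounting stays consistent with
 * what the port sees:
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	lpfc_sli4_rq_release(hrq, drq);
 *	dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
 *	if (dma_buf) {
 *		hrq->RQ_rcv_buf++;
 *		hrq->RQ_buf_posted--;
 *	} else {
 *		hrq->RQ_no_buf_found++;
 *	}
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 */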
14718 
14719 /**
14720  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14721  * @phba: Pointer to HBA context object.
14722  * @cq: Pointer to the completion queue.
14723  * @cqe: Pointer to a completion queue entry.
14724  *
14725  * This routine processes a slow-path work-queue or receive-queue
14726  * completion queue entry.
14727  *
14728  * Return: true if work posted to worker thread, otherwise false.
14729  **/
14730 static bool
14731 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14732 			 struct lpfc_cqe *cqe)
14733 {
14734 	struct lpfc_cqe cqevt;
14735 	bool workposted = false;
14736 
14737 	/* Copy the work queue CQE and convert endian order if needed */
14738 	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14739 
14740 	/* Check the type of WCQE and dispatch to the proper handler */
14741 	switch (bf_get(lpfc_cqe_code, &cqevt)) {
14742 	case CQE_CODE_COMPL_WQE:
14743 		/* Process the WQ/RQ complete event */
14744 		phba->last_completion_time = jiffies;
14745 		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14746 				(struct lpfc_wcqe_complete *)&cqevt);
14747 		break;
14748 	case CQE_CODE_RELEASE_WQE:
14749 		/* Process the WQ release event */
14750 		lpfc_sli4_sp_handle_rel_wcqe(phba,
14751 				(struct lpfc_wcqe_release *)&cqevt);
14752 		break;
14753 	case CQE_CODE_XRI_ABORTED:
14754 		/* Process the WQ XRI abort event */
14755 		phba->last_completion_time = jiffies;
14756 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14757 				(struct sli4_wcqe_xri_aborted *)&cqevt);
14758 		break;
14759 	case CQE_CODE_RECEIVE:
14760 	case CQE_CODE_RECEIVE_V1:
14761 		/* Process the RQ event */
14762 		phba->last_completion_time = jiffies;
14763 		workposted = lpfc_sli4_sp_handle_rcqe(phba,
14764 				(struct lpfc_rcqe *)&cqevt);
14765 		break;
14766 	default:
14767 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14768 				"0388 Not a valid WCQE code: x%x\n",
14769 				bf_get(lpfc_cqe_code, &cqevt));
14770 		break;
14771 	}
14772 	return workposted;
14773 }
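/*
 * Editor's note: why the handlers above work on a local copy.  The CQE
 * lives in DMA ring memory in the port's layout;
 * lpfc_sli4_pcimem_bcopy() produces a CPU-order snapshot, so bf_get()
 * decodes correctly even after the hardware reuses the ring slot:
 *
 *	struct lpfc_cqe snap;
 *
 *	lpfc_sli4_pcimem_bcopy(cqe, &snap, sizeof(snap));
 *	switch (bf_get(lpfc_cqe_code, &snap)) {
 *	...
 *	}
 */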
14774 
14775 /**
14776  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14777  * @phba: Pointer to HBA context object.
14778  * @eqe: Pointer to the slow-path event queue entry.
14779  * @speq: Pointer to slow-path event queue.
14780  *
14781  * This routine processes an event queue entry from the slow-path event
14782  * queue. It checks the MajorCode and MinorCode to determine whether this
14783  * is a completion event on a completion queue; if not, an error is
14784  * logged and the routine returns. Otherwise, it finds the corresponding
14785  * completion queue, processes all the entries on that completion queue,
14786  * rearms the completion queue, and then returns.
14787  *
14788  **/
14789 static void
14790 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14791 	struct lpfc_queue *speq)
14792 {
14793 	struct lpfc_queue *cq = NULL, *childq;
14794 	uint16_t cqid;
14795 	int ret = 0;
14796 
14797 	/* Get the reference to the corresponding CQ */
14798 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14799 
14800 	list_for_each_entry(childq, &speq->child_list, list) {
14801 		if (childq->queue_id == cqid) {
14802 			cq = childq;
14803 			break;
14804 		}
14805 	}
14806 	if (unlikely(!cq)) {
14807 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14808 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14809 					"0365 Slow-path CQ identifier "
14810 					"(%d) does not exist\n", cqid);
14811 		return;
14812 	}
14813 
14814 	/* Save EQ associated with this CQ */
14815 	cq->assoc_qp = speq;
14816 
14817 	if (is_kdump_kernel())
14818 		ret = queue_work(phba->wq, &cq->spwork);
14819 	else
14820 		ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14821 
14822 	if (!ret)
14823 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14824 				"0390 Cannot schedule queue work "
14825 				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14826 				cqid, cq->queue_id, raw_smp_processor_id());
14827 }
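/*
 * Editor's note on the scheduling split above: a kdump kernel
 * typically boots with a single CPU, so plain queue_work() is used
 * there; otherwise the work is pinned with queue_work_on() to the
 * CQ's assigned CPU (cq->chann) so completions are handled on the
 * core that owns the queue.  In outline:
 *
 *	if (is_kdump_kernel())
 *		ret = queue_work(phba->wq, &cq->spwork);
 *	else
 *		ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
 */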
14828 
14829 /**
14830  * __lpfc_sli4_process_cq - Process elements of a CQ
14831  * @phba: Pointer to HBA context object.
14832  * @cq: Pointer to CQ to be processed
14833  * @handler: Routine to process each cqe
14834  * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14835  *
14836  * This routine processes completion queue entries in a CQ. While a valid
14837  * queue element is found, the handler is called. During processing checks
14838  * are made for periodic doorbell writes to let the hardware know of
14839  * element consumption.
14840  *
14841  * The loop stops when the max limit on CQEs to process is hit or there
14842  * are no more valid entries. If a sufficient number of elements was
14843  * processed, meaning there is sufficient load, a CQ rescheduling delay
14844  * is set rather than rearming and generating another interrupt. A delay
14845  * of 0 indicates no rescheduling.
14846  *
14847  * Returns true if work was posted to the worker thread, false otherwise.
14848  **/
14849 static bool
14850 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14851 	bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14852 			struct lpfc_cqe *), unsigned long *delay)
14853 {
14854 	struct lpfc_cqe *cqe;
14855 	bool workposted = false;
14856 	int count = 0, consumed = 0;
14857 	bool arm = true;
14858 
14859 	/* default - no reschedule */
14860 	*delay = 0;
14861 
14862 	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14863 		goto rearm_and_exit;
14864 
14865 	/* Process all the entries to the CQ */
14866 	cq->q_flag = 0;
14867 	cqe = lpfc_sli4_cq_get(cq);
14868 	while (cqe) {
14869 		workposted |= handler(phba, cq, cqe);
14870 		__lpfc_sli4_consume_cqe(phba, cq, cqe);
14871 
14872 		consumed++;
14873 		if (!(++count % cq->max_proc_limit))
14874 			break;
14875 
14876 		if (!(count % cq->notify_interval)) {
14877 			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14878 						LPFC_QUEUE_NOARM);
14879 			consumed = 0;
14880 			cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14881 		}
14882 
14883 		if (count == LPFC_NVMET_CQ_NOTIFY)
14884 			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14885 
14886 		cqe = lpfc_sli4_cq_get(cq);
14887 	}
14888 	if (count >= phba->cfg_cq_poll_threshold) {
14889 		*delay = 1;
14890 		arm = false;
14891 	}
14892 
14893 	/* Track the max number of CQEs processed in 1 EQ */
14894 	if (count > cq->CQ_max_cqe)
14895 		cq->CQ_max_cqe = count;
14896 
14897 	cq->assoc_qp->EQ_cqe_cnt += count;
14898 
14899 	/* Catch the no cq entry condition */
14900 	if (unlikely(count == 0))
14901 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14902 				"0369 No entry from completion queue "
14903 				"qid=%d\n", cq->queue_id);
14904 
14905 	xchg(&cq->queue_claimed, 0);
14906 
14907 rearm_and_exit:
14908 	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14909 			arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14910 
14911 	return workposted;
14912 }
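/*
 * Editor's sketch of the consume/notify cadence implemented above
 * (write_cq_db stands in for phba->sli4_hba.sli4_write_cq_db; the
 * max_proc_limit break and statistics handling are omitted).
 * Consumed entries are acknowledged in batches of cq->notify_interval
 * with NOARM doorbells; only the final doorbell may REARM, and only
 * if the poll threshold was not reached (arm stays true):
 *
 *	while ((cqe = lpfc_sli4_cq_get(cq))) {
 *		workposted |= handler(phba, cq, cqe);
 *		__lpfc_sli4_consume_cqe(phba, cq, cqe);
 *		consumed++;
 *		if (!(++count % cq->notify_interval)) {
 *			write_cq_db(phba, cq, consumed, LPFC_QUEUE_NOARM);
 *			consumed = 0;
 *		}
 *	}
 *	write_cq_db(phba, cq, consumed,
 *		    arm ? LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
 */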
14913 
14914 /**
14915  * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14916  * @cq: pointer to CQ to process
14917  *
14918  * This routine calls the cq processing routine with a handler specific
14919  * to the type of queue bound to it.
14920  *
14921  * The CQ routine returns two values: the first is the calling status,
14922  * which indicates whether work was queued to the background discovery
14923  * thread. If true, the routine should wake up the discovery thread;
14924  * the second is the delay parameter. If non-zero, rather than rearming
14925  * the CQ and yet another interrupt, the CQ handler should be queued so
14926  * that it is processed in a subsequent polling action. The value of
14927  * the delay indicates when to reschedule it.
14928  **/
14929 static void
14930 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14931 {
14932 	struct lpfc_hba *phba = cq->phba;
14933 	unsigned long delay;
14934 	bool workposted = false;
14935 	int ret = 0;
14936 
14937 	/* Process and rearm the CQ */
14938 	switch (cq->type) {
14939 	case LPFC_MCQ:
14940 		workposted |= __lpfc_sli4_process_cq(phba, cq,
14941 						lpfc_sli4_sp_handle_mcqe,
14942 						&delay);
14943 		break;
14944 	case LPFC_WCQ:
14945 		if (cq->subtype == LPFC_IO)
14946 			workposted |= __lpfc_sli4_process_cq(phba, cq,
14947 						lpfc_sli4_fp_handle_cqe,
14948 						&delay);
14949 		else
14950 			workposted |= __lpfc_sli4_process_cq(phba, cq,
14951 						lpfc_sli4_sp_handle_cqe,
14952 						&delay);
14953 		break;
14954 	default:
14955 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14956 				"0370 Invalid completion queue type (%d)\n",
14957 				cq->type);
14958 		return;
14959 	}
14960 
14961 	if (delay) {
14962 		if (is_kdump_kernel())
14963 			ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
14964 						delay);
14965 		else
14966 			ret = queue_delayed_work_on(cq->chann, phba->wq,
14967 						&cq->sched_spwork, delay);
14968 		if (!ret)
14969 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14970 				"0394 Cannot schedule queue work "
14971 				"for cqid=%d on CPU %d\n",
14972 				cq->queue_id, cq->chann);
14973 	}
14974 
14975 	/* Wake up the worker thread if there is work to be done */
14976 	if (workposted)
14977 		lpfc_worker_wake_up(phba);
14978 }
14979 
14980 /**
14981  * lpfc_sli4_sp_process_cq - slow-path work handler when started by
14982  *   interrupt
14983  * @work: pointer to work element
14984  *
14985  * Translates from the work handler context and calls the slow-path handler.
14986  **/
14987 static void
14988 lpfc_sli4_sp_process_cq(struct work_struct *work)
14989 {
14990 	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
14991 
14992 	__lpfc_sli4_sp_process_cq(cq);
14993 }
14994 
14995 /**
14996  * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
14997  * @work: pointer to work element
14998  *
14999  * Translates from the work handler context and calls the slow-path handler.
15000  **/
15001 static void
15002 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
15003 {
15004 	struct lpfc_queue *cq = container_of(to_delayed_work(work),
15005 					struct lpfc_queue, sched_spwork);
15006 
15007 	__lpfc_sli4_sp_process_cq(cq);
15008 }
15009 
15010 /**
15011  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
15012  * @phba: Pointer to HBA context object.
15013  * @cq: Pointer to associated CQ
15014  * @wcqe: Pointer to work-queue completion queue entry.
15015  *
15016  * This routine processes a fast-path work queue completion entry from the
15017  * fast-path event queue for FCP command response completion.
15018  **/
15019 static void
15020 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15021 			     struct lpfc_wcqe_complete *wcqe)
15022 {
15023 	struct lpfc_sli_ring *pring = cq->pring;
15024 	struct lpfc_iocbq *cmdiocbq;
15025 	unsigned long iflags;
15026 
15027 	/* Check for response status */
15028 	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
15029 		/* If resource errors reported from HBA, reduce queue
15030 		 * depth of the SCSI device.
15031 		 */
15032 		if (bf_get(lpfc_wcqe_c_status, wcqe) ==
15033 		    IOSTAT_LOCAL_REJECT &&
15034 		    (wcqe->parameter & IOERR_PARAM_MASK) ==
15035 		    IOERR_NO_RESOURCES)
15036 			phba->lpfc_rampdown_queue_depth(phba);
15037 
15038 		/* Log the cmpl status */
15039 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15040 				"0373 FCP CQE cmpl: status=x%x: "
15041 				"CQE: %08x %08x %08x %08x\n",
15042 				bf_get(lpfc_wcqe_c_status, wcqe),
15043 				wcqe->word0, wcqe->total_data_placed,
15044 				wcqe->parameter, wcqe->word3);
15045 	}
15046 
15047 	/* Look up the FCP command IOCB and create pseudo response IOCB */
15048 	spin_lock_irqsave(&pring->ring_lock, iflags);
15049 	pring->stats.iocb_event++;
15050 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
15051 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15052 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
15053 	if (unlikely(!cmdiocbq)) {
15054 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15055 				"0374 FCP complete with no corresponding "
15056 				"cmdiocb: iotag (%d)\n",
15057 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15058 		return;
15059 	}
15060 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
15061 	cmdiocbq->isr_timestamp = cq->isr_timestamp;
15062 #endif
15063 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
15064 		spin_lock_irqsave(&phba->hbalock, iflags);
15065 		cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
15066 		spin_unlock_irqrestore(&phba->hbalock, iflags);
15067 	}
15068 
15069 	if (cmdiocbq->cmd_cmpl) {
15070 		/* For FCP the flag is cleared in cmd_cmpl */
15071 		if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
15072 		    cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
15073 			spin_lock_irqsave(&phba->hbalock, iflags);
15074 			cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
15075 			spin_unlock_irqrestore(&phba->hbalock, iflags);
15076 		}
15077 
15078 		/* Pass the cmd_iocb and the wcqe to the upper layer */
15079 		memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
15080 		       sizeof(struct lpfc_wcqe_complete));
15081 		cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
15082 	} else {
15083 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15084 				"0375 FCP cmdiocb not callback function "
15085 				"iotag: (%d)\n",
15086 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15087 	}
15088 }
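/*
 * Editor's note (an assumption drawn from the flag usage above and the
 * XRI-abort handling earlier in this file): the XB bit in a WCQE means
 * the exchange is still busy on the port, so the command is tagged and
 * its XRI is not recycled until the matching XRI_ABORTED CQE arrives:
 *
 *	if (bf_get(lpfc_wcqe_c_xb, wcqe))
 *		cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
 */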
15089 
15090 /**
15091  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
15092  * @phba: Pointer to HBA context object.
15093  * @cq: Pointer to completion queue.
15094  * @wcqe: Pointer to work-queue completion queue entry.
15095  *
15096  * This routine handles a fast-path WQ entry consumed event by invoking
15097  * the proper WQ release routine on the matching fast-path WQ.
15098  **/
15099 static void
15100 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15101 			     struct lpfc_wcqe_release *wcqe)
15102 {
15103 	struct lpfc_queue *childwq;
15104 	bool wqid_matched = false;
15105 	uint16_t hba_wqid;
15106 
15107 	/* Check for fast-path FCP work queue release */
15108 	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
15109 	list_for_each_entry(childwq, &cq->child_list, list) {
15110 		if (childwq->queue_id == hba_wqid) {
15111 			lpfc_sli4_wq_release(childwq,
15112 					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
15113 			if (childwq->q_flag & HBA_NVMET_WQFULL)
15114 				lpfc_nvmet_wqfull_process(phba, childwq);
15115 			wqid_matched = true;
15116 			break;
15117 		}
15118 	}
15119 	/* Report warning log message if no match found */
15120 	if (!wqid_matched)
15121 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15122 				"2580 Fast-path wqe consume event carries "
15123 				"mismatched qid: wcqe-qid=x%x\n", hba_wqid);
15124 }
15125 
15126 /**
15127  * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
15128  * @phba: Pointer to HBA context object.
15129  * @cq: Pointer to completion queue.
15130  * @rcqe: Pointer to receive-queue completion queue entry.
15131  *
15132  * This routine processes a receive-queue completion queue entry.
15133  *
15134  * Return: true if work posted to worker thread, otherwise false.
15135  **/
15136 static bool
15137 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15138 			    struct lpfc_rcqe *rcqe)
15139 {
15140 	bool workposted = false;
15141 	struct lpfc_queue *hrq;
15142 	struct lpfc_queue *drq;
15143 	struct rqb_dmabuf *dma_buf;
15144 	struct fc_frame_header *fc_hdr;
15145 	struct lpfc_nvmet_tgtport *tgtp;
15146 	uint32_t status, rq_id;
15147 	unsigned long iflags;
15148 	uint32_t fctl, idx;
15149 
15150 	if ((phba->nvmet_support == 0) ||
15151 	    (phba->sli4_hba.nvmet_cqset == NULL))
15152 		return workposted;
15153 
15154 	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
15155 	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
15156 	drq = phba->sli4_hba.nvmet_mrq_data[idx];
15157 
15158 	/* sanity check on queue memory */
15159 	if (unlikely(!hrq) || unlikely(!drq))
15160 		return workposted;
15161 
15162 	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
15163 		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
15164 	else
15165 		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
15166 
15167 	/* nvmet_support was already verified above */
15168 	if (rq_id != hrq->queue_id)
15169 		return workposted;
15170 
15171 	status = bf_get(lpfc_rcqe_status, rcqe);
15172 	switch (status) {
15173 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
15174 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15175 				"6126 Receive Frame Truncated!!\n");
15176 		fallthrough;
15177 	case FC_STATUS_RQ_SUCCESS:
15178 		spin_lock_irqsave(&phba->hbalock, iflags);
15179 		lpfc_sli4_rq_release(hrq, drq);
15180 		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15181 		if (!dma_buf) {
15182 			hrq->RQ_no_buf_found++;
15183 			spin_unlock_irqrestore(&phba->hbalock, iflags);
15184 			goto out;
15185 		}
15186 		spin_unlock_irqrestore(&phba->hbalock, iflags);
15187 		hrq->RQ_rcv_buf++;
15188 		hrq->RQ_buf_posted--;
15189 		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
15190 
15191 		/* Just some basic sanity checks on FCP Command frame */
15192 		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
15193 			fc_hdr->fh_f_ctl[1] << 8 |
15194 			fc_hdr->fh_f_ctl[2]);
15195 		if (((fctl &
15196 		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
15197 		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
15198 		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
15199 			goto drop;
15200 
15201 		if (fc_hdr->fh_type == FC_TYPE_FCP) {
15202 			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
15203 			lpfc_nvmet_unsol_fcp_event(
15204 				phba, idx, dma_buf, cq->isr_timestamp,
15205 				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
15206 			return false;
15207 		}
15208 drop:
15209 		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15210 		break;
15211 	case FC_STATUS_INSUFF_BUF_FRM_DISC:
15212 		if (phba->nvmet_support) {
15213 			tgtp = phba->targetport->private;
15214 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15215 					"6401 RQE Error x%x, posted %d err_cnt "
15216 					"%d: %x %x %x\n",
15217 					status, hrq->RQ_buf_posted,
15218 					hrq->RQ_no_posted_buf,
15219 					atomic_read(&tgtp->rcv_fcp_cmd_in),
15220 					atomic_read(&tgtp->rcv_fcp_cmd_out),
15221 					atomic_read(&tgtp->xmt_fcp_release));
15222 		}
15223 		fallthrough;
15224 
15225 	case FC_STATUS_INSUFF_BUF_NEED_BUF:
15226 		hrq->RQ_no_posted_buf++;
15227 		/* Post more buffers if possible */
15228 		break;
15229 	case FC_STATUS_RQ_DMA_FAILURE:
15230 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15231 				"2575 RQE DMA Error x%x, x%08x x%08x x%08x "
15232 				"x%08x\n",
15233 				status, rcqe->word0, rcqe->word1,
15234 				rcqe->word2, rcqe->word3);
15235 
15236 		/* If IV set, no further recovery */
15237 		if (bf_get(lpfc_rcqe_iv, rcqe))
15238 			break;
15239 
15240 		/* recycle consumed resource */
15241 		spin_lock_irqsave(&phba->hbalock, iflags);
15242 		lpfc_sli4_rq_release(hrq, drq);
15243 		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15244 		if (!dma_buf) {
15245 			hrq->RQ_no_buf_found++;
15246 			spin_unlock_irqrestore(&phba->hbalock, iflags);
15247 			break;
15248 		}
15249 		hrq->RQ_rcv_buf++;
15250 		hrq->RQ_buf_posted--;
15251 		spin_unlock_irqrestore(&phba->hbalock, iflags);
15252 		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15253 		break;
15254 	default:
15255 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15256 				"2576 Unexpected RQE Status x%x, w0-3 x%08x "
15257 				"x%08x x%08x x%08x\n",
15258 				status, rcqe->word0, rcqe->word1,
15259 				rcqe->word2, rcqe->word3);
15260 		break;
15261 	}
15262 out:
15263 	return workposted;
15264 }
15265 
15266 /**
15267  * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
15268  * @phba: adapter with cq
15269  * @cq: Pointer to the completion queue.
15270  * @cqe: Pointer to fast-path completion queue entry.
15271  *
15272  * This routine processes a fast-path work queue completion entry from the
15273  * fast-path event queue for FCP command response completion.
15274  *
15275  * Return: true if work posted to worker thread, otherwise false.
15276  **/
15277 static bool
15278 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15279 			 struct lpfc_cqe *cqe)
15280 {
15281 	struct lpfc_wcqe_release wcqe;
15282 	bool workposted = false;
15283 
15284 	/* Copy the work queue CQE and convert endian order if needed */
15285 	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
15286 
15287 	/* Check the type of WCQE and dispatch to the proper handler */
15288 	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
15289 	case CQE_CODE_COMPL_WQE:
15290 	case CQE_CODE_NVME_ERSP:
15291 		cq->CQ_wq++;
15292 		/* Process the WQ complete event */
15293 		phba->last_completion_time = jiffies;
15294 		if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
15295 			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
15296 				(struct lpfc_wcqe_complete *)&wcqe);
15297 		break;
15298 	case CQE_CODE_RELEASE_WQE:
15299 		cq->CQ_release_wqe++;
15300 		/* Process the WQ release event */
15301 		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
15302 				(struct lpfc_wcqe_release *)&wcqe);
15303 		break;
15304 	case CQE_CODE_XRI_ABORTED:
15305 		cq->CQ_xri_aborted++;
15306 		/* Process the WQ XRI abort event */
15307 		phba->last_completion_time = jiffies;
15308 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
15309 				(struct sli4_wcqe_xri_aborted *)&wcqe);
15310 		break;
15311 	case CQE_CODE_RECEIVE_V1:
15312 	case CQE_CODE_RECEIVE:
15313 		phba->last_completion_time = jiffies;
15314 		if (cq->subtype == LPFC_NVMET) {
15315 			workposted = lpfc_sli4_nvmet_handle_rcqe(
15316 				phba, cq, (struct lpfc_rcqe *)&wcqe);
15317 		}
15318 		break;
15319 	default:
15320 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15321 				"0144 Not a valid CQE code: x%x\n",
15322 				bf_get(lpfc_wcqe_c_code, &wcqe));
15323 		break;
15324 	}
15325 	return workposted;
15326 }
15327 
15328 /**
15329  * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
15330  * @cq: Pointer to CQ to be processed
15331  *
15332  * This routine calls the cq processing routine with the handler for
15333  * fast path CQEs.
15334  *
15335  * The CQ routine returns two values: the first is the calling status,
15336  * which indicates whether work was queued to the background discovery
15337  * thread. If true, the routine should wake up the discovery thread;
15338  * the second is the delay parameter. If non-zero, rather than rearming
15339  * the CQ and yet another interrupt, the CQ handler should be queued so
15340  * that it is processed in a subsequent polling action. The value of
15341  * the delay indicates when to reschedule it.
15342  **/
15343 static void
15344 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
15345 {
15346 	struct lpfc_hba *phba = cq->phba;
15347 	unsigned long delay;
15348 	bool workposted = false;
15349 	int ret;
15350 
15351 	/* process and rearm the CQ */
15352 	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15353 					     &delay);
15354 
15355 	if (delay) {
15356 		if (is_kdump_kernel())
15357 			ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15358 						delay);
15359 		else
15360 			ret = queue_delayed_work_on(cq->chann, phba->wq,
15361 						&cq->sched_irqwork, delay);
15362 		if (!ret)
15363 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15364 					"0367 Cannot schedule queue work "
15365 					"for cqid=%d on CPU %d\n",
15366 					cq->queue_id, cq->chann);
15367 	}
15368 
15369 	/* Wake up the worker thread if there is work to be done */
15370 	if (workposted)
15371 		lpfc_worker_wake_up(phba);
15372 }
15373 
15374 /**
15375  * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15376  *   interrupt
15377  * @work: pointer to work element
15378  *
15379  * Translates from the work handler context and calls the fast-path handler.
15380  **/
15381 static void
15382 lpfc_sli4_hba_process_cq(struct work_struct *work)
15383 {
15384 	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15385 
15386 	__lpfc_sli4_hba_process_cq(cq);
15387 }
15388 
15389 /**
15390  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15391  * @phba: Pointer to HBA context object.
15392  * @eq: Pointer to the queue structure.
15393  * @eqe: Pointer to fast-path event queue entry.
15394  * @poll_mode: the poll mode to use when processing the cq.
15395  *
15396  * This routine processes an event queue entry from the fast-path event
15397  * queue. It checks the MajorCode and MinorCode to determine whether this
15398  * is a completion event on a completion queue; if not, an error is
15399  * logged and the routine returns. Otherwise, it finds the corresponding
15400  * completion queue, processes all the entries on that completion queue,
15401  * rearms the completion queue, and then returns.
15402  **/
15403 static void
15404 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15405 			 struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode)
15406 {
15407 	struct lpfc_queue *cq = NULL;
15408 	uint32_t qidx = eq->hdwq;
15409 	uint16_t cqid, id;
15410 	int ret;
15411 
15412 	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15413 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15414 				"0366 Not a valid completion "
15415 				"event: majorcode=x%x, minorcode=x%x\n",
15416 				bf_get_le32(lpfc_eqe_major_code, eqe),
15417 				bf_get_le32(lpfc_eqe_minor_code, eqe));
15418 		return;
15419 	}
15420 
15421 	/* Get the reference to the corresponding CQ */
15422 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15423 
15424 	/* Use the fast lookup method first */
15425 	if (cqid <= phba->sli4_hba.cq_max) {
15426 		cq = phba->sli4_hba.cq_lookup[cqid];
15427 		if (cq)
15428 			goto  work_cq;
15429 	}
15430 
15431 	/* Next check for NVMET completion */
15432 	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15433 		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15434 		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15435 			/* Process NVMET unsol rcv */
15436 			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15437 			goto  process_cq;
15438 		}
15439 	}
15440 
15441 	if (phba->sli4_hba.nvmels_cq &&
15442 	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15443 		/* Process NVME unsol rcv */
15444 		cq = phba->sli4_hba.nvmels_cq;
15445 	}
15446 
15447 	/* Otherwise this is a slow-path event */
15448 	if (cq == NULL) {
15449 		lpfc_sli4_sp_handle_eqe(phba, eqe,
15450 					phba->sli4_hba.hdwq[qidx].hba_eq);
15451 		return;
15452 	}
15453 
15454 process_cq:
15455 	if (unlikely(cqid != cq->queue_id)) {
15456 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15457 				"0368 Miss-matched fast-path completion "
15458 				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
15459 				cqid, cq->queue_id);
15460 		return;
15461 	}
15462 
15463 work_cq:
15464 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15465 	if (phba->ktime_on)
15466 		cq->isr_timestamp = ktime_get_ns();
15467 	else
15468 		cq->isr_timestamp = 0;
15469 #endif
15470 
15471 	switch (poll_mode) {
15472 	case LPFC_THREADED_IRQ:
15473 		__lpfc_sli4_hba_process_cq(cq);
15474 		break;
15475 	case LPFC_QUEUE_WORK:
15476 	default:
15477 		if (is_kdump_kernel())
15478 			ret = queue_work(phba->wq, &cq->irqwork);
15479 		else
15480 			ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15481 		if (!ret)
15482 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15483 					"0383 Cannot schedule queue work "
15484 					"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15485 					cqid, cq->queue_id,
15486 					raw_smp_processor_id());
15487 		break;
15488 	}
15489 }
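/*
 * Editor's sketch of the CQ lookup order used above: a direct-index
 * table resolves the common case in O(1), NVMET CQ sets are matched by
 * queue-id range, and anything else falls through to the slow-path
 * child-list walk in lpfc_sli4_sp_handle_eqe():
 *
 *	if (cqid <= phba->sli4_hba.cq_max &&
 *	    (cq = phba->sli4_hba.cq_lookup[cqid]))
 *		goto work_cq;
 */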
15490 
15491 /**
15492  * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15493  * @work: pointer to work element
15494  *
15495  * translates from the work handler and calls the fast-path handler.
15496  **/
15497 static void
15498 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15499 {
15500 	struct lpfc_queue *cq = container_of(to_delayed_work(work),
15501 					struct lpfc_queue, sched_irqwork);
15502 
15503 	__lpfc_sli4_hba_process_cq(cq);
15504 }
15505 
15506 /**
15507  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15508  * @irq: Interrupt number.
15509  * @dev_id: The device context pointer.
15510  *
15511  * This function is called directly from the PCI layer as an interrupt
15512  * service routine when a device with the SLI-4 interface spec is enabled
15513  * with MSI-X multi-message interrupt mode and there is a fast-path FCP
15514  * IOCB ring event in the HBA. However, when the device is enabled with
15515  * either MSI or Pin-IRQ interrupt mode, this function is called as part
15516  * of the device-level interrupt handler. When the PCI slot is in error
15517  * recovery or the HBA is undergoing initialization, the interrupt
15518  * handler will not process the interrupt. SCSI FCP fast-path ring
15519  * events are handled in interrupt context. This function is called
15520  * without any lock held. It gets the hbalock to access and update SLI
15521  * data structures. Note that the FCP EQs and FCP CQs are mapped
15522  * one-to-one, so the FCP EQ index is equal to the FCP CQ index.
15523  *
15524  * The link attention and ELS ring attention events are handled
15525  * by the worker thread. The interrupt handler signals the worker thread
15526  * and returns for these events. This function is called without any lock
15527  * held. It gets the hbalock to access and update SLI data structures.
15528  *
15529  * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD
15530  * when interrupt is scheduled to be handled from a threaded irq context, or
15531  * else returns IRQ_NONE.
15532  **/
15533 irqreturn_t
15534 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15535 {
15536 	struct lpfc_hba *phba;
15537 	struct lpfc_hba_eq_hdl *hba_eq_hdl;
15538 	struct lpfc_queue *fpeq;
15539 	unsigned long iflag;
15540 	int hba_eqidx;
15541 	int ecount = 0;
15542 	struct lpfc_eq_intr_info *eqi;
15543 
15544 	/* Get the driver's phba structure from the dev_id */
15545 	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15546 	phba = hba_eq_hdl->phba;
15547 	hba_eqidx = hba_eq_hdl->idx;
15548 
15549 	if (unlikely(!phba))
15550 		return IRQ_NONE;
15551 	if (unlikely(!phba->sli4_hba.hdwq))
15552 		return IRQ_NONE;
15553 
15554 	/* Get to the EQ struct associated with this vector */
15555 	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15556 	if (unlikely(!fpeq))
15557 		return IRQ_NONE;
15558 
15559 	/* Check device state for handling interrupt */
15560 	if (unlikely(lpfc_intr_state_check(phba))) {
15561 		/* Check again for link_state with lock held */
15562 		spin_lock_irqsave(&phba->hbalock, iflag);
15563 		if (phba->link_state < LPFC_LINK_DOWN)
15564 			/* Flush, clear interrupt, and rearm the EQ */
15565 			lpfc_sli4_eqcq_flush(phba, fpeq);
15566 		spin_unlock_irqrestore(&phba->hbalock, iflag);
15567 		return IRQ_NONE;
15568 	}
15569 
15570 	switch (fpeq->poll_mode) {
15571 	case LPFC_THREADED_IRQ:
15572 		/* CGN mgmt is mutually exclusive with irq processing */
15573 		if (phba->cmf_active_mode == LPFC_CFG_OFF)
15574 			return IRQ_WAKE_THREAD;
15575 		fallthrough;
15576 	case LPFC_QUEUE_WORK:
15577 	default:
15578 		eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15579 		eqi->icnt++;
15580 
15581 		fpeq->last_cpu = raw_smp_processor_id();
15582 
15583 		if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15584 		    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15585 		    phba->cfg_auto_imax &&
15586 		    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15587 		    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15588 			lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
15589 						   LPFC_MAX_AUTO_EQ_DELAY);
15590 
15591 		/* process and rearm the EQ */
15592 		ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
15593 					      LPFC_QUEUE_WORK);
15594 
15595 		if (unlikely(ecount == 0)) {
15596 			fpeq->EQ_no_entry++;
15597 			if (phba->intr_type == MSIX)
15598 				/* MSI-X vector is not shared; log the empty EQ */
15599 				lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15600 						"0358 MSI-X interrupt with no EQE\n");
15601 			else
15602 				/* Shared interrupt with no EQE: not ours */
15603 				return IRQ_NONE;
15604 		}
15605 	}
15606 
15607 	return IRQ_HANDLED;
15608 } /* lpfc_sli4_hba_intr_handler */
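/*
 * Editor's note on the return contract above: IRQ_WAKE_THREAD defers
 * the EQ to the threaded handler (only taken when congestion
 * management is off), IRQ_HANDLED means the EQ was processed here, and
 * IRQ_NONE lets the kernel treat an EQE-less shared interrupt as not
 * ours.  A caller sketch, mirroring lpfc_sli4_intr_handler() below:
 *
 *	rc = lpfc_sli4_hba_intr_handler(irq, &hba_eq_hdl[qidx]);
 *	if (rc == IRQ_HANDLED)
 *		handled = true;
 */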
15609 
15610 /**
15611  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15612  * @irq: Interrupt number.
15613  * @dev_id: The device context pointer.
15614  *
15615  * This function is the device-level interrupt handler to device with SLI-4
15616  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
15617  * interrupt mode is enabled and there is an event in the HBA which requires
15618  * driver attention. This function invokes the slow-path interrupt attention
15619  * handling function and fast-path interrupt attention handling function in
15620  * turn to process the relevant HBA attention events. This function is called
15621  * without any lock held. It gets the hbalock to access and update SLI data
15622  * structures.
15623  *
15624  * This function returns IRQ_HANDLED when interrupt is handled, else it
15625  * returns IRQ_NONE.
15626  **/
15627 irqreturn_t
15628 lpfc_sli4_intr_handler(int irq, void *dev_id)
15629 {
15630 	struct lpfc_hba  *phba;
15631 	irqreturn_t hba_irq_rc;
15632 	bool hba_handled = false;
15633 	int qidx;
15634 
15635 	/* Get the driver's phba structure from the dev_id */
15636 	phba = (struct lpfc_hba *)dev_id;
15637 
15638 	if (unlikely(!phba))
15639 		return IRQ_NONE;
15640 
15641 	/*
15642 	 * Invoke fast-path host attention interrupt handling as appropriate.
15643 	 */
15644 	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15645 		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15646 					&phba->sli4_hba.hba_eq_hdl[qidx]);
15647 		if (hba_irq_rc == IRQ_HANDLED)
15648 			hba_handled = true;
15649 	}
15650 
15651 	return hba_handled ? IRQ_HANDLED : IRQ_NONE;
15652 } /* lpfc_sli4_intr_handler */
15653 
15654 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15655 {
15656 	struct lpfc_hba *phba = from_timer(phba, t, cpuhp_poll_timer);
15657 	struct lpfc_queue *eq;
15658 
15659 	rcu_read_lock();
15660 
15661 	list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15662 		lpfc_sli4_poll_eq(eq);
15663 	if (!list_empty(&phba->poll_list))
15664 		mod_timer(&phba->cpuhp_poll_timer,
15665 			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15666 
15667 	rcu_read_unlock();
15668 }
15669 
15670 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15671 {
15672 	struct lpfc_hba *phba = eq->phba;
15673 
15674 	/* kickstart slowpath processing if needed */
15675 	if (list_empty(&phba->poll_list))
15676 		mod_timer(&phba->cpuhp_poll_timer,
15677 			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15678 
15679 	list_add_rcu(&eq->_poll_list, &phba->poll_list);
15680 	synchronize_rcu();
15681 }
15682 
15683 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15684 {
15685 	struct lpfc_hba *phba = eq->phba;
15686 
15687 	/* Disable slow-path (polled) processing for this EQ.  Kick-start
15688 	 * the EQ by re-arming it as soon as possible.
15689 	 */
15690 	list_del_rcu(&eq->_poll_list);
15691 	synchronize_rcu();
15692 
15693 	if (list_empty(&phba->poll_list))
15694 		del_timer_sync(&phba->cpuhp_poll_timer);
15695 }
15696 
15697 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15698 {
15699 	struct lpfc_queue *eq, *next;
15700 
15701 	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15702 		list_del(&eq->_poll_list);
15703 
15704 	INIT_LIST_HEAD(&phba->poll_list);
15705 	synchronize_rcu();
15706 }
15707 
15708 static inline void
15709 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15710 {
15711 	if (mode == eq->mode)
15712 		return;
15713 	/*
15714 	 * Currently this function is only called during a hotplug
15715 	 * event while the cpu on which it is executing is going
15716 	 * offline.  By now the hotplug code has instructed the
15717 	 * scheduler to remove this cpu from the cpu active mask,
15718 	 * so we don't need to worry about being set aside by the
15719 	 * scheduler for a high-priority process.  Interrupts could
15720 	 * still arrive, but they are known to retire ASAP.
15721 	 */
15722 
15723 	/* Publish the new mode to the fast path */
15724 	WRITE_ONCE(eq->mode, mode);
15725 	/* flush out the store buffer */
15726 	smp_wmb();
15727 
15728 	/*
15729 	 * Add this eq to the polling list and start polling. For
15730 	 * a grace period both the interrupt handler and the poller
15731 	 * will try to process the eq, but that's fine.  We have a
15732 	 * synchronization mechanism in place (queue_claimed) to
15733 	 * deal with it.  This is just a draining phase for the
15734 	 * interrupt handler (not the eq's) as we have guaranteed,
15735 	 * through the barrier, that all the CPUs have seen the new
15736 	 * CQ_POLLED state, which effectively disables the REARMING
15737 	 * of the EQ.  The whole idea is that the eq's die off
15738 	 * eventually as we are no longer rearming them.
15739 	 */
15740 	mode ? lpfc_sli4_add_to_poll_list(eq) :
15741 	       lpfc_sli4_remove_from_poll_list(eq);
15742 }
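/*
 * Editor's sketch of the ordering contract in the function above:
 * publish the new mode first, fence the store, then change poll-list
 * membership, so no CPU can observe the list update before the mode
 * update:
 *
 *	WRITE_ONCE(eq->mode, mode);
 *	smp_wmb();
 *	if (mode == LPFC_EQ_POLL)
 *		lpfc_sli4_add_to_poll_list(eq);
 *	else
 *		lpfc_sli4_remove_from_poll_list(eq);
 */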
15743 
15744 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15745 {
15746 	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15747 }
15748 
15749 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15750 {
15751 	struct lpfc_hba *phba = eq->phba;
15752 
15753 	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15754 
15755 	/* Kick-start the pending I/Os in hardware.
15756 	 * Once we switch back to interrupt processing on an eq,
15757 	 * the I/O completion path only arms the eq when it
15758 	 * receives a completion.  But since the eq is in the
15759 	 * disarmed state, it never receives one, which would
15760 	 * create a deadlock scenario.
15761 	 */
15762 	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15763 }
15764 
15765 /**
15766  * lpfc_sli4_queue_free - free a queue structure and associated memory
15767  * @queue: The queue structure to free.
15768  *
15769  * This function frees a queue structure and the DMAable memory used for
15770  * the host resident queue. This function must be called after destroying the
15771  * queue on the HBA.
15772  **/
15773 void
15774 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15775 {
15776 	struct lpfc_dmabuf *dmabuf;
15777 
15778 	if (!queue)
15779 		return;
15780 
15781 	if (!list_empty(&queue->wq_list))
15782 		list_del(&queue->wq_list);
15783 
15784 	while (!list_empty(&queue->page_list)) {
15785 		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15786 				 list);
15787 		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15788 				  dmabuf->virt, dmabuf->phys);
15789 		kfree(dmabuf);
15790 	}
15791 	if (queue->rqbp) {
15792 		lpfc_free_rq_buffer(queue->phba, queue);
15793 		kfree(queue->rqbp);
15794 	}
15795 
15796 	if (!list_empty(&queue->cpu_list))
15797 		list_del(&queue->cpu_list);
15798 
15799 	kfree(queue);
15800 	return;
15801 }
15802 
15803 /**
15804  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15805  * @phba: The HBA that this queue is being created on.
15806  * @page_size: The size of a queue page
15807  * @entry_size: The size of each queue entry for this queue.
15808  * @entry_count: The number of entries that this queue will handle.
15809  * @cpu: The cpu that will primarily utilize this queue.
15810  *
15811  * This function allocates a queue structure and the DMAable memory used for
15812  * the host resident queue. This function must be called before creating the
15813  * queue on the HBA.
15814  **/
15815 struct lpfc_queue *
15816 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15817 		      uint32_t entry_size, uint32_t entry_count, int cpu)
15818 {
15819 	struct lpfc_queue *queue;
15820 	struct lpfc_dmabuf *dmabuf;
15821 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15822 	uint16_t x, pgcnt;
15823 
15824 	if (!phba->sli4_hba.pc_sli4_params.supported)
15825 		hw_page_size = page_size;
15826 
15827 	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15828 
15829 	/* If needed, adjust page count to match the max the adapter supports */
15830 	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15831 		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15832 
15833 	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15834 			     GFP_KERNEL, cpu_to_node(cpu));
15835 	if (!queue)
15836 		return NULL;
15837 
15838 	INIT_LIST_HEAD(&queue->list);
15839 	INIT_LIST_HEAD(&queue->_poll_list);
15840 	INIT_LIST_HEAD(&queue->wq_list);
15841 	INIT_LIST_HEAD(&queue->wqfull_list);
15842 	INIT_LIST_HEAD(&queue->page_list);
15843 	INIT_LIST_HEAD(&queue->child_list);
15844 	INIT_LIST_HEAD(&queue->cpu_list);
15845 
15846 	/* Set queue parameters now.  If the system cannot provide memory
15847 	 * resources, the free routine needs to know what was allocated.
15848 	 */
15849 	queue->page_count = pgcnt;
15850 	queue->q_pgs = (void **)&queue[1];
15851 	queue->entry_cnt_per_pg = hw_page_size / entry_size;
15852 	queue->entry_size = entry_size;
15853 	queue->entry_count = entry_count;
15854 	queue->page_size = hw_page_size;
15855 	queue->phba = phba;
15856 
15857 	for (x = 0; x < queue->page_count; x++) {
15858 		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15859 				      dev_to_node(&phba->pcidev->dev));
15860 		if (!dmabuf)
15861 			goto out_fail;
15862 		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15863 						  hw_page_size, &dmabuf->phys,
15864 						  GFP_KERNEL);
15865 		if (!dmabuf->virt) {
15866 			kfree(dmabuf);
15867 			goto out_fail;
15868 		}
15869 		dmabuf->buffer_tag = x;
15870 		list_add_tail(&dmabuf->list, &queue->page_list);
15871 		/* use lpfc_sli4_qe to index a particular entry in this page */
15872 		queue->q_pgs[x] = dmabuf->virt;
15873 	}
15874 	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15875 	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15876 	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15877 	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15878 
15879 	/* notify_interval will be set during q creation */
15880 
15881 	return queue;
15882 out_fail:
15883 	lpfc_sli4_queue_free(queue);
15884 	return NULL;
15885 }
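/*
 * Editor's sketch of a typical call sequence (names as used elsewhere
 * in the driver; error handling abbreviated): allocate host memory
 * first, then create the queue on the port, and free only after the
 * queue has been destroyed on the HBA, as the kerneldoc above
 * requires.  On a failed create, no destroy is needed before freeing:
 *
 *	eq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				   phba->sli4_hba.eq_esize,
 *				   phba->sli4_hba.eq_ecount, cpu);
 *	if (eq && lpfc_eq_create(phba, eq, imax))
 *		lpfc_sli4_queue_free(eq);
 */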
15886 
15887 /**
15888  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15889  * @phba: HBA structure that indicates port to create a queue on.
15890  * @pci_barset: PCI BAR set flag.
15891  *
15892  * This function returns the host memory address for the specified PCI BAR
15893  * set, as previously iomapped by the driver. The returned host memory
15894  * address can be NULL.
15895  */
15896 static void __iomem *
15897 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15898 {
15899 	if (!phba->pcidev)
15900 		return NULL;
15901 
15902 	switch (pci_barset) {
15903 	case WQ_PCI_BAR_0_AND_1:
15904 		return phba->pci_bar0_memmap_p;
15905 	case WQ_PCI_BAR_2_AND_3:
15906 		return phba->pci_bar2_memmap_p;
15907 	case WQ_PCI_BAR_4_AND_5:
15908 		return phba->pci_bar4_memmap_p;
15909 	default:
15910 		break;
15911 	}
15912 	return NULL;
15913 }
15914 
15915 /**
15916  * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15917  * @phba: HBA structure that EQs are on.
15918  * @startq: The starting EQ index to modify
15919  * @numq: The number of EQs (consecutive indexes) to modify
15920  * @usdelay: amount of delay
15921  *
15922  * This function revises the EQ delay on 1 or more EQs. The EQ delay
15923  * is set either by writing to a register (if supported by the SLI Port)
15924  * or by mailbox command. The mailbox command allows several EQs to be
15925  * updated at once.
15926  *
15927  * The @phba struct is used to send a mailbox command to the HBA. The
15928  * @startq value is the starting EQ index to change. The @numq value
15929  * specifies how many consecutive EQ indexes, starting at @startq, are
15930  * to be changed. This function is synchronous and polls for the
15931  * mailbox command to finish before returning.
15932  *
15933  * This function returns nothing; allocation failures and mailbox
15934  * command failures are logged. Note: on a mailbox failure, some EQs
15935  * may already have had their delay multiplier changed before the
15936  * failure occurred.
15937  **/
15938 void
15939 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
15940 			 uint32_t numq, uint32_t usdelay)
15941 {
15942 	struct lpfc_mbx_modify_eq_delay *eq_delay;
15943 	LPFC_MBOXQ_t *mbox;
15944 	struct lpfc_queue *eq;
15945 	int cnt = 0, rc, length;
15946 	uint32_t shdr_status, shdr_add_status;
15947 	uint32_t dmult;
15948 	int qidx;
15949 	union lpfc_sli4_cfg_shdr *shdr;
15950 
15951 	if (startq >= phba->cfg_irq_chann)
15952 		return;
15953 
15954 	if (usdelay > 0xFFFF) {
15955 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
15956 				"6429 usdelay %d too large. Scaled down to "
15957 				"0xFFFF.\n", usdelay);
15958 		usdelay = 0xFFFF;
15959 	}
15960 
15961 	/* set values by EQ_DELAY register if supported */
15962 	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
15963 		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
15964 			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
15965 			if (!eq)
15966 				continue;
15967 
15968 			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
15969 
15970 			if (++cnt >= numq)
15971 				break;
15972 		}
15973 		return;
15974 	}
15975 
15976 	/* Otherwise, set values by mailbox cmd */
15977 
15978 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
15979 	if (!mbox) {
15980 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15981 				"6428 Failed allocating mailbox cmd buffer."
15982 				" EQ delay was not set.\n");
15983 		return;
15984 	}
15985 	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
15986 		  sizeof(struct lpfc_sli4_cfg_mhdr));
15987 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
15988 			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
15989 			 length, LPFC_SLI4_MBX_EMBED);
15990 	eq_delay = &mbox->u.mqe.un.eq_delay;
15991 
15992 	/* Calculate the delay multiplier from the maximum interrupts per second */
15993 	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
15994 	if (dmult)
15995 		dmult--;
15996 	if (dmult > LPFC_DMULT_MAX)
15997 		dmult = LPFC_DMULT_MAX;
15998 
15999 	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
16000 		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
16001 		if (!eq)
16002 			continue;
16003 		eq->q_mode = usdelay;
16004 		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
16005 		eq_delay->u.request.eq[cnt].phase = 0;
16006 		eq_delay->u.request.eq[cnt].delay_multi = dmult;
16007 
16008 		if (++cnt >= numq)
16009 			break;
16010 	}
16011 	eq_delay->u.request.num_eq = cnt;
16012 
16013 	mbox->vport = phba->pport;
16014 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16015 	mbox->ctx_ndlp = NULL;
16016 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16017 	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
16018 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16019 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16020 	if (shdr_status || shdr_add_status || rc) {
16021 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16022 				"2512 MODIFY_EQ_DELAY mailbox failed with "
16023 				"status x%x add_status x%x, mbx status x%x\n",
16024 				shdr_status, shdr_add_status, rc);
16025 	}
16026 	mempool_free(mbox, phba->mbox_mem_pool);
16027 	return;
16028 }
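
/*
 * Illustrative sketch only (hypothetical helper, not called by the
 * driver): apply a uniform 16 usec coalescing delay across every EQ
 * using lpfc_modify_hba_eq_delay() above.
 */
static inline void
lpfc_example_uniform_eq_delay(struct lpfc_hba *phba)
{
	/* startq 0, numq = all IRQ channels, usdelay = 16 usec */
	lpfc_modify_hba_eq_delay(phba, 0, phba->cfg_irq_chann, 16);
}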
16029 
16030 /**
16031  * lpfc_eq_create - Create an Event Queue on the HBA
16032  * @phba: HBA structure that indicates port to create a queue on.
16033  * @eq: The queue structure to use to create the event queue.
16034  * @imax: The maximum interrupt per second limit (unused here; the EQ
16035  *        delay is programmed later via MODIFY_EQ_DELAY).
16035  *
16036  * This function creates an event queue, as detailed in @eq, on a port,
16037  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
16038  *
16039  * The @phba struct is used to send mailbox command to HBA. The @eq struct
16040  * is used to get the entry count and entry size that are necessary to
16041  * determine the number of pages to allocate and use for this queue. This
16042  * function will send the EQ_CREATE mailbox command to the HBA to setup the
16043  * event queue. This function is synchronous and will wait for the mailbox
16044  * command to finish before continuing.
16045  *
16046  * On success this function will return a zero. If unable to allocate enough
16047  * memory this function will return -ENOMEM. If the queue create mailbox command
16048  * fails this function will return -ENXIO.
16049  **/
16050 int
16051 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
16052 {
16053 	struct lpfc_mbx_eq_create *eq_create;
16054 	LPFC_MBOXQ_t *mbox;
16055 	int rc, length, status = 0;
16056 	struct lpfc_dmabuf *dmabuf;
16057 	uint32_t shdr_status, shdr_add_status;
16058 	union lpfc_sli4_cfg_shdr *shdr;
16059 	uint16_t dmult;
16060 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16061 
16062 	/* sanity check on queue memory */
16063 	if (!eq)
16064 		return -ENODEV;
16065 	if (!phba->sli4_hba.pc_sli4_params.supported)
16066 		hw_page_size = SLI4_PAGE_SIZE;
16067 
16068 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16069 	if (!mbox)
16070 		return -ENOMEM;
16071 	length = (sizeof(struct lpfc_mbx_eq_create) -
16072 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16073 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16074 			 LPFC_MBOX_OPCODE_EQ_CREATE,
16075 			 length, LPFC_SLI4_MBX_EMBED);
16076 	eq_create = &mbox->u.mqe.un.eq_create;
16077 	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
16078 	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
16079 	       eq->page_count);
16080 	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
16081 	       LPFC_EQE_SIZE);
16082 	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
16083 
16084 	/* Use version 2 of CREATE_EQ if eqav is set */
16085 	if (phba->sli4_hba.pc_sli4_params.eqav) {
16086 		bf_set(lpfc_mbox_hdr_version, &shdr->request,
16087 		       LPFC_Q_CREATE_VERSION_2);
16088 		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
16089 		       phba->sli4_hba.pc_sli4_params.eqav);
16090 	}
16091 
16092 	/* don't set up the delay multiplier using EQ_CREATE */
16093 	dmult = 0;
16094 	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
16095 	       dmult);
16096 	switch (eq->entry_count) {
16097 	default:
16098 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16099 				"0360 Unsupported EQ count. (%d)\n",
16100 				eq->entry_count);
16101 		if (eq->entry_count < 256) {
16102 			status = -EINVAL;
16103 			goto out;
16104 		}
16105 		fallthrough;	/* otherwise default to smallest count */
16106 	case 256:
16107 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16108 		       LPFC_EQ_CNT_256);
16109 		break;
16110 	case 512:
16111 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16112 		       LPFC_EQ_CNT_512);
16113 		break;
16114 	case 1024:
16115 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16116 		       LPFC_EQ_CNT_1024);
16117 		break;
16118 	case 2048:
16119 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16120 		       LPFC_EQ_CNT_2048);
16121 		break;
16122 	case 4096:
16123 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16124 		       LPFC_EQ_CNT_4096);
16125 		break;
16126 	}
16127 	list_for_each_entry(dmabuf, &eq->page_list, list) {
16128 		memset(dmabuf->virt, 0, hw_page_size);
16129 		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16130 					putPaddrLow(dmabuf->phys);
16131 		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16132 					putPaddrHigh(dmabuf->phys);
16133 	}
16134 	mbox->vport = phba->pport;
16135 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16136 	mbox->ctx_buf = NULL;
16137 	mbox->ctx_ndlp = NULL;
16138 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16139 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16140 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16141 	if (shdr_status || shdr_add_status || rc) {
16142 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16143 				"2500 EQ_CREATE mailbox failed with "
16144 				"status x%x add_status x%x, mbx status x%x\n",
16145 				shdr_status, shdr_add_status, rc);
16146 		status = -ENXIO;
16147 	}
16148 	eq->type = LPFC_EQ;
16149 	eq->subtype = LPFC_NONE;
16150 	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
16151 	if (eq->queue_id == 0xFFFF)
16152 		status = -ENXIO;
16153 	eq->host_index = 0;
16154 	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
16155 	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
16156 out:
16157 	mempool_free(mbox, phba->mbox_mem_pool);
16158 	return status;
16159 }
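
/*
 * Illustrative sketch (hypothetical helper; assumes @eq was allocated
 * beforehand, e.g. via lpfc_sli4_queue_alloc()): bring up a single EQ.
 * The imax argument is advisory here since this version of EQ_CREATE
 * does not program a delay multiplier; see lpfc_modify_hba_eq_delay().
 */
static inline int
lpfc_example_bring_up_eq(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	return lpfc_eq_create(phba, eq, 0);
}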
16160 
16161 /**
16162  * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
16163  * @irq: Interrupt number.
16164  * @dev_id: The device context pointer.
16165  *
16166  * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
16167  * threaded irq context.
16168  *
16169  * Returns
16170  * IRQ_HANDLED - interrupt is handled
16171  * IRQ_NONE - otherwise
16172  **/
16173 irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
16174 {
16175 	struct lpfc_hba *phba;
16176 	struct lpfc_hba_eq_hdl *hba_eq_hdl;
16177 	struct lpfc_queue *fpeq;
16178 	int ecount = 0;
16179 	int hba_eqidx;
16180 	struct lpfc_eq_intr_info *eqi;
16181 
16182 	/* Get the driver's phba structure from the dev_id */
16183 	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
16184 	phba = hba_eq_hdl->phba;
16185 	hba_eqidx = hba_eq_hdl->idx;
16186 
16187 	if (unlikely(!phba))
16188 		return IRQ_NONE;
16189 	if (unlikely(!phba->sli4_hba.hdwq))
16190 		return IRQ_NONE;
16191 
16192 	/* Get to the EQ struct associated with this vector */
16193 	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
16194 	if (unlikely(!fpeq))
16195 		return IRQ_NONE;
16196 
16197 	eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
16198 	eqi->icnt++;
16199 
16200 	fpeq->last_cpu = raw_smp_processor_id();
16201 
16202 	if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
16203 	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
16204 	    phba->cfg_auto_imax &&
16205 	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
16206 	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
16207 		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
16208 
16209 	/* process and rearm the EQ */
16210 	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
16211 				      LPFC_THREADED_IRQ);
16212 
16213 	if (unlikely(ecount == 0)) {
16214 		fpeq->EQ_no_entry++;
16215 		if (phba->intr_type == MSIX)
16216 			/* MSI-X vector is dedicated; no EQE means a spurious wakeup worth logging */
16217 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16218 					"3358 MSI-X interrupt with no EQE\n");
16219 		else
16220 			/* Non MSI-X: the line may be shared; let another handler claim it */
16221 			return IRQ_NONE;
16222 	}
16223 	return IRQ_HANDLED;
16224 }
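
/*
 * Illustrative sketch (hypothetical helper; the real registration lives
 * in the init path): how the hard handler and the threaded handler above
 * would typically be paired with request_threaded_irq(). Assumes
 * lpfc_sli4_hba_intr_handler() is the hard IRQ handler defined elsewhere
 * in this file.
 */
static inline int
lpfc_example_request_eq_irq(struct lpfc_hba *phba, int vec)
{
	return request_threaded_irq(pci_irq_vector(phba->pcidev, vec),
				    lpfc_sli4_hba_intr_handler,
				    lpfc_sli4_hba_intr_handler_th,
				    0, "lpfc-eq-example",
				    &phba->sli4_hba.hba_eq_hdl[vec]);
}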
16225 
16226 /**
16227  * lpfc_cq_create - Create a Completion Queue on the HBA
16228  * @phba: HBA structure that indicates port to create a queue on.
16229  * @cq: The queue structure to use to create the completion queue.
16230  * @eq: The event queue to bind this completion queue to.
16231  * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16232  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16233  *
16234  * This function creates a completion queue, as detailed in @cq, on a port,
16235  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
16236  *
16237  * The @phba struct is used to send mailbox command to HBA. The @cq struct
16238  * is used to get the entry count and entry size that are necessary to
16239  * determine the number of pages to allocate and use for this queue. The @eq
16240  * is used to indicate which event queue to bind this completion queue to. This
16241  * function will send the CQ_CREATE mailbox command to the HBA to setup the
16242  * completion queue. This function is synchronous and will wait for the mailbox
16243  * command to finish before continuing.
16244  *
16245  * On success this function will return a zero. If unable to allocate enough
16246  * memory this function will return -ENOMEM. If the queue create mailbox command
16247  * fails this function will return -ENXIO.
16248  **/
16249 int
16250 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
16251 	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
16252 {
16253 	struct lpfc_mbx_cq_create *cq_create;
16254 	struct lpfc_dmabuf *dmabuf;
16255 	LPFC_MBOXQ_t *mbox;
16256 	int rc, length, status = 0;
16257 	uint32_t shdr_status, shdr_add_status;
16258 	union lpfc_sli4_cfg_shdr *shdr;
16259 
16260 	/* sanity check on queue memory */
16261 	if (!cq || !eq)
16262 		return -ENODEV;
16263 
16264 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16265 	if (!mbox)
16266 		return -ENOMEM;
16267 	length = (sizeof(struct lpfc_mbx_cq_create) -
16268 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16269 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16270 			 LPFC_MBOX_OPCODE_CQ_CREATE,
16271 			 length, LPFC_SLI4_MBX_EMBED);
16272 	cq_create = &mbox->u.mqe.un.cq_create;
16273 	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
16274 	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
16275 		    cq->page_count);
16276 	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
16277 	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
16278 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
16279 	       phba->sli4_hba.pc_sli4_params.cqv);
16280 	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
16281 		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
16282 		       (cq->page_size / SLI4_PAGE_SIZE));
16283 		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
16284 		       eq->queue_id);
16285 		bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
16286 		       phba->sli4_hba.pc_sli4_params.cqav);
16287 	} else {
16288 		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
16289 		       eq->queue_id);
16290 	}
16291 	switch (cq->entry_count) {
16292 	case 2048:
16293 	case 4096:
16294 		if (phba->sli4_hba.pc_sli4_params.cqv ==
16295 		    LPFC_Q_CREATE_VERSION_2) {
16296 			cq_create->u.request.context.lpfc_cq_context_count =
16297 				cq->entry_count;
16298 			bf_set(lpfc_cq_context_count,
16299 			       &cq_create->u.request.context,
16300 			       LPFC_CQ_CNT_WORD7);
16301 			break;
16302 		}
16303 		fallthrough;
16304 	default:
16305 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16306 				"0361 Unsupported CQ count: "
16307 				"entry cnt %d sz %d pg cnt %d\n",
16308 				cq->entry_count, cq->entry_size,
16309 				cq->page_count);
16310 		if (cq->entry_count < 256) {
16311 			status = -EINVAL;
16312 			goto out;
16313 		}
16314 		fallthrough;	/* otherwise default to smallest count */
16315 	case 256:
16316 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16317 		       LPFC_CQ_CNT_256);
16318 		break;
16319 	case 512:
16320 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16321 		       LPFC_CQ_CNT_512);
16322 		break;
16323 	case 1024:
16324 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16325 		       LPFC_CQ_CNT_1024);
16326 		break;
16327 	}
16328 	list_for_each_entry(dmabuf, &cq->page_list, list) {
16329 		memset(dmabuf->virt, 0, cq->page_size);
16330 		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16331 					putPaddrLow(dmabuf->phys);
16332 		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16333 					putPaddrHigh(dmabuf->phys);
16334 	}
16335 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16336 
16337 	/* The IOCTL status is embedded in the mailbox subheader. */
16338 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16339 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16340 	if (shdr_status || shdr_add_status || rc) {
16341 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16342 				"2501 CQ_CREATE mailbox failed with "
16343 				"status x%x add_status x%x, mbx status x%x\n",
16344 				shdr_status, shdr_add_status, rc);
16345 		status = -ENXIO;
16346 		goto out;
16347 	}
16348 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16349 	if (cq->queue_id == 0xFFFF) {
16350 		status = -ENXIO;
16351 		goto out;
16352 	}
16353 	/* link the cq onto the parent eq child list */
16354 	list_add_tail(&cq->list, &eq->child_list);
16355 	/* Set up completion queue's type and subtype */
16356 	cq->type = type;
16357 	cq->subtype = subtype;
16359 	cq->assoc_qid = eq->queue_id;
16360 	cq->assoc_qp = eq;
16361 	cq->host_index = 0;
16362 	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16363 	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16364 
16365 	if (cq->queue_id > phba->sli4_hba.cq_max)
16366 		phba->sli4_hba.cq_max = cq->queue_id;
16367 out:
16368 	mempool_free(mbox, phba->mbox_mem_pool);
16369 	return status;
16370 }
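
/*
 * Illustrative sketch (hypothetical helper): the usual creation order is
 * an EQ first, then a CQ bound to it, as the setup path does per
 * hardware queue. LPFC_WCQ/LPFC_IO mirror the type/subtype the driver
 * uses for its I/O completion queues.
 */
static inline int
lpfc_example_create_eq_cq_pair(struct lpfc_hba *phba, struct lpfc_queue *eq,
			       struct lpfc_queue *cq)
{
	int rc;

	rc = lpfc_eq_create(phba, eq, 0);
	if (rc)
		return rc;
	return lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_IO);
}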
16371 
16372 /**
16373  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16374  * @phba: HBA structure that indicates port to create a queue on.
16375  * @cqp: The queue structure array to use to create the completion queues.
16376  * @hdwq: The hardware queue array with the EQs to bind completion queues to.
16377  * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16378  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16379  *
16380  * This function creates a set of completion queues to support MRQ,
16381  * as detailed in @cqp, on a port,
16382  * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
16383  *
16384  * The @phba struct is used to send a mailbox command to the HBA. The @cqp
16385  * array is used to get the entry count and entry size that are necessary to
16386  * determine the number of pages to allocate and use for these queues. The
16387  * EQs in @hdwq indicate which event queues to bind these completion queues
16388  * to. This function will send the CREATE_CQ_SET mailbox command to the HBA
16389  * to set up the completion queue set. This function is synchronous and will
16390  * wait for the mailbox command to finish before continuing.
16391  *
16392  * On success this function will return a zero. If unable to allocate enough
16393  * memory this function will return -ENOMEM. If the queue create mailbox command
16394  * fails this function will return -ENXIO.
16395  **/
16396 int
16397 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16398 		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16399 		   uint32_t subtype)
16400 {
16401 	struct lpfc_queue *cq;
16402 	struct lpfc_queue *eq;
16403 	struct lpfc_mbx_cq_create_set *cq_set;
16404 	struct lpfc_dmabuf *dmabuf;
16405 	LPFC_MBOXQ_t *mbox;
16406 	int rc, length, alloclen, status = 0;
16407 	int cnt, idx, numcq, page_idx = 0;
16408 	uint32_t shdr_status, shdr_add_status;
16409 	union lpfc_sli4_cfg_shdr *shdr;
16410 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16411 
16412 	/* sanity check on queue memory */
16413 	numcq = phba->cfg_nvmet_mrq;
16414 	if (!cqp || !hdwq || !numcq)
16415 		return -ENODEV;
16416 
16417 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16418 	if (!mbox)
16419 		return -ENOMEM;
16420 
16421 	length = sizeof(struct lpfc_mbx_cq_create_set);
16422 	length += ((numcq * cqp[0]->page_count) *
16423 		   sizeof(struct dma_address));
16424 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16425 			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16426 			LPFC_SLI4_MBX_NEMBED);
16427 	if (alloclen < length) {
16428 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16429 				"3098 Allocated DMA memory size (%d) is "
16430 				"less than the requested DMA memory size "
16431 				"(%d)\n", alloclen, length);
16432 		status = -ENOMEM;
16433 		goto out;
16434 	}
16435 	cq_set = mbox->sge_array->addr[0];
16436 	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16437 	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16438 
16439 	for (idx = 0; idx < numcq; idx++) {
16440 		cq = cqp[idx];
16441 		eq = hdwq[idx].hba_eq;
16442 		if (!cq || !eq) {
16443 			status = -ENOMEM;
16444 			goto out;
16445 		}
16446 		if (!phba->sli4_hba.pc_sli4_params.supported)
16447 			hw_page_size = cq->page_size;
16448 
16449 		switch (idx) {
16450 		case 0:
16451 			bf_set(lpfc_mbx_cq_create_set_page_size,
16452 			       &cq_set->u.request,
16453 			       (hw_page_size / SLI4_PAGE_SIZE));
16454 			bf_set(lpfc_mbx_cq_create_set_num_pages,
16455 			       &cq_set->u.request, cq->page_count);
16456 			bf_set(lpfc_mbx_cq_create_set_evt,
16457 			       &cq_set->u.request, 1);
16458 			bf_set(lpfc_mbx_cq_create_set_valid,
16459 			       &cq_set->u.request, 1);
16460 			bf_set(lpfc_mbx_cq_create_set_cqe_size,
16461 			       &cq_set->u.request, 0);
16462 			bf_set(lpfc_mbx_cq_create_set_num_cq,
16463 			       &cq_set->u.request, numcq);
16464 			bf_set(lpfc_mbx_cq_create_set_autovalid,
16465 			       &cq_set->u.request,
16466 			       phba->sli4_hba.pc_sli4_params.cqav);
16467 			switch (cq->entry_count) {
16468 			case 2048:
16469 			case 4096:
16470 				if (phba->sli4_hba.pc_sli4_params.cqv ==
16471 				    LPFC_Q_CREATE_VERSION_2) {
16472 					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16473 					       &cq_set->u.request,
16474 						cq->entry_count);
16475 					bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16476 					       &cq_set->u.request,
16477 					       LPFC_CQ_CNT_WORD7);
16478 					break;
16479 				}
16480 				fallthrough;
16481 			default:
16482 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16483 						"3118 Bad CQ count. (%d)\n",
16484 						cq->entry_count);
16485 				if (cq->entry_count < 256) {
16486 					status = -EINVAL;
16487 					goto out;
16488 				}
16489 				fallthrough;	/* otherwise default to smallest */
16490 			case 256:
16491 				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16492 				       &cq_set->u.request, LPFC_CQ_CNT_256);
16493 				break;
16494 			case 512:
16495 				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16496 				       &cq_set->u.request, LPFC_CQ_CNT_512);
16497 				break;
16498 			case 1024:
16499 				bf_set(lpfc_mbx_cq_create_set_cqe_cnt,
16500 				       &cq_set->u.request, LPFC_CQ_CNT_1024);
16501 				break;
16502 			}
16503 			bf_set(lpfc_mbx_cq_create_set_eq_id0,
16504 			       &cq_set->u.request, eq->queue_id);
16505 			break;
16506 		case 1:
16507 			bf_set(lpfc_mbx_cq_create_set_eq_id1,
16508 			       &cq_set->u.request, eq->queue_id);
16509 			break;
16510 		case 2:
16511 			bf_set(lpfc_mbx_cq_create_set_eq_id2,
16512 			       &cq_set->u.request, eq->queue_id);
16513 			break;
16514 		case 3:
16515 			bf_set(lpfc_mbx_cq_create_set_eq_id3,
16516 			       &cq_set->u.request, eq->queue_id);
16517 			break;
16518 		case 4:
16519 			bf_set(lpfc_mbx_cq_create_set_eq_id4,
16520 			       &cq_set->u.request, eq->queue_id);
16521 			break;
16522 		case 5:
16523 			bf_set(lpfc_mbx_cq_create_set_eq_id5,
16524 			       &cq_set->u.request, eq->queue_id);
16525 			break;
16526 		case 6:
16527 			bf_set(lpfc_mbx_cq_create_set_eq_id6,
16528 			       &cq_set->u.request, eq->queue_id);
16529 			break;
16530 		case 7:
16531 			bf_set(lpfc_mbx_cq_create_set_eq_id7,
16532 			       &cq_set->u.request, eq->queue_id);
16533 			break;
16534 		case 8:
16535 			bf_set(lpfc_mbx_cq_create_set_eq_id8,
16536 			       &cq_set->u.request, eq->queue_id);
16537 			break;
16538 		case 9:
16539 			bf_set(lpfc_mbx_cq_create_set_eq_id9,
16540 			       &cq_set->u.request, eq->queue_id);
16541 			break;
16542 		case 10:
16543 			bf_set(lpfc_mbx_cq_create_set_eq_id10,
16544 			       &cq_set->u.request, eq->queue_id);
16545 			break;
16546 		case 11:
16547 			bf_set(lpfc_mbx_cq_create_set_eq_id11,
16548 			       &cq_set->u.request, eq->queue_id);
16549 			break;
16550 		case 12:
16551 			bf_set(lpfc_mbx_cq_create_set_eq_id12,
16552 			       &cq_set->u.request, eq->queue_id);
16553 			break;
16554 		case 13:
16555 			bf_set(lpfc_mbx_cq_create_set_eq_id13,
16556 			       &cq_set->u.request, eq->queue_id);
16557 			break;
16558 		case 14:
16559 			bf_set(lpfc_mbx_cq_create_set_eq_id14,
16560 			       &cq_set->u.request, eq->queue_id);
16561 			break;
16562 		case 15:
16563 			bf_set(lpfc_mbx_cq_create_set_eq_id15,
16564 			       &cq_set->u.request, eq->queue_id);
16565 			break;
16566 		}
16567 
16568 		/* link the cq onto the parent eq child list */
16569 		list_add_tail(&cq->list, &eq->child_list);
16570 		/* Set up completion queue's type and subtype */
16571 		cq->type = type;
16572 		cq->subtype = subtype;
16573 		cq->assoc_qid = eq->queue_id;
16574 		cq->assoc_qp = eq;
16575 		cq->host_index = 0;
16576 		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16577 		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16578 					 cq->entry_count);
16579 		cq->chann = idx;
16580 
16581 		rc = 0;
16582 		list_for_each_entry(dmabuf, &cq->page_list, list) {
16583 			memset(dmabuf->virt, 0, hw_page_size);
16584 			cnt = page_idx + dmabuf->buffer_tag;
16585 			cq_set->u.request.page[cnt].addr_lo =
16586 					putPaddrLow(dmabuf->phys);
16587 			cq_set->u.request.page[cnt].addr_hi =
16588 					putPaddrHigh(dmabuf->phys);
16589 			rc++;
16590 		}
16591 		page_idx += rc;
16592 	}
16593 
16594 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16595 
16596 	/* The IOCTL status is embedded in the mailbox subheader. */
16597 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16598 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16599 	if (shdr_status || shdr_add_status || rc) {
16600 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16601 				"3119 CQ_CREATE_SET mailbox failed with "
16602 				"status x%x add_status x%x, mbx status x%x\n",
16603 				shdr_status, shdr_add_status, rc);
16604 		status = -ENXIO;
16605 		goto out;
16606 	}
16607 	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16608 	if (rc == 0xFFFF) {
16609 		status = -ENXIO;
16610 		goto out;
16611 	}
16612 
16613 	for (idx = 0; idx < numcq; idx++) {
16614 		cq = cqp[idx];
16615 		cq->queue_id = rc + idx;
16616 		if (cq->queue_id > phba->sli4_hba.cq_max)
16617 			phba->sli4_hba.cq_max = cq->queue_id;
16618 	}
16619 
16620 out:
16621 	lpfc_sli4_mbox_cmd_free(phba, mbox);
16622 	return status;
16623 }
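
/*
 * Illustrative sketch (hypothetical helper): an MRQ completion-queue set
 * is created in one call against the per-hardware-queue EQs, mirroring
 * how the NVMET setup path invokes lpfc_cq_create_set().
 */
static inline int
lpfc_example_create_nvmet_cq_set(struct lpfc_hba *phba,
				 struct lpfc_queue **cqset)
{
	return lpfc_cq_create_set(phba, cqset, phba->sli4_hba.hdwq,
				  LPFC_WCQ, LPFC_NVMET);
}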
16624 
16625 /**
16626  * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
16627  * @phba: HBA structure that indicates port to create a queue on.
16628  * @mq: The queue structure to use to create the mailbox queue.
16629  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16630  * @cq: The completion queue to associate with this mailbox queue.
16631  *
16632  * This function provides fallback (fb) functionality when
16633  * mq_create_ext fails on older FW generations. Its purpose is identical
16634  * to mq_create_ext otherwise.
16635  *
16636  * This routine cannot fail as all attributes were previously accessed and
16637  * initialized in mq_create_ext.
16638  **/
16639 static void
16640 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16641 		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16642 {
16643 	struct lpfc_mbx_mq_create *mq_create;
16644 	struct lpfc_dmabuf *dmabuf;
16645 	int length;
16646 
16647 	length = (sizeof(struct lpfc_mbx_mq_create) -
16648 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16649 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16650 			 LPFC_MBOX_OPCODE_MQ_CREATE,
16651 			 length, LPFC_SLI4_MBX_EMBED);
16652 	mq_create = &mbox->u.mqe.un.mq_create;
16653 	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16654 	       mq->page_count);
16655 	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16656 	       cq->queue_id);
16657 	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16658 	switch (mq->entry_count) {
16659 	case 16:
16660 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16661 		       LPFC_MQ_RING_SIZE_16);
16662 		break;
16663 	case 32:
16664 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16665 		       LPFC_MQ_RING_SIZE_32);
16666 		break;
16667 	case 64:
16668 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16669 		       LPFC_MQ_RING_SIZE_64);
16670 		break;
16671 	case 128:
16672 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16673 		       LPFC_MQ_RING_SIZE_128);
16674 		break;
16675 	}
16676 	list_for_each_entry(dmabuf, &mq->page_list, list) {
16677 		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16678 			putPaddrLow(dmabuf->phys);
16679 		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16680 			putPaddrHigh(dmabuf->phys);
16681 	}
16682 }
16683 
16684 /**
16685  * lpfc_mq_create - Create a mailbox Queue on the HBA
16686  * @phba: HBA structure that indicates port to create a queue on.
16687  * @mq: The queue structure to use to create the mailbox queue.
16688  * @cq: The completion queue to associate with this mailbox queue.
16689  * @subtype: The queue's subtype.
16690  *
16691  * This function creates a mailbox queue, as detailed in @mq, on a port,
16692  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16693  *
16694  * The @phba struct is used to send mailbox command to HBA. The @mq struct
16695  * is used to get the entry count and entry size that are necessary to
16696  * determine the number of pages to allocate and use for this queue. This
16697  * function will send the MQ_CREATE mailbox command to the HBA to setup the
16698  * mailbox queue. This function is synchronous and will wait for the mailbox
16699  * command to finish before continuing.
16700  *
16701  * On success this function will return a zero. If unable to allocate enough
16702  * memory this function will return -ENOMEM. If the queue create mailbox command
16703  * fails this function will return -ENXIO.
16704  **/
16705 int32_t
16706 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16707 	       struct lpfc_queue *cq, uint32_t subtype)
16708 {
16709 	struct lpfc_mbx_mq_create *mq_create;
16710 	struct lpfc_mbx_mq_create_ext *mq_create_ext;
16711 	struct lpfc_dmabuf *dmabuf;
16712 	LPFC_MBOXQ_t *mbox;
16713 	int rc, length, status = 0;
16714 	uint32_t shdr_status, shdr_add_status;
16715 	union lpfc_sli4_cfg_shdr *shdr;
16716 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16717 
16718 	/* sanity check on queue memory */
16719 	if (!mq || !cq)
16720 		return -ENODEV;
16721 	if (!phba->sli4_hba.pc_sli4_params.supported)
16722 		hw_page_size = SLI4_PAGE_SIZE;
16723 
16724 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16725 	if (!mbox)
16726 		return -ENOMEM;
16727 	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16728 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16729 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16730 			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16731 			 length, LPFC_SLI4_MBX_EMBED);
16732 
16733 	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16734 	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16735 	bf_set(lpfc_mbx_mq_create_ext_num_pages,
16736 	       &mq_create_ext->u.request, mq->page_count);
16737 	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16738 	       &mq_create_ext->u.request, 1);
16739 	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16740 	       &mq_create_ext->u.request, 1);
16741 	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16742 	       &mq_create_ext->u.request, 1);
16743 	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16744 	       &mq_create_ext->u.request, 1);
16745 	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16746 	       &mq_create_ext->u.request, 1);
16747 	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16748 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
16749 	       phba->sli4_hba.pc_sli4_params.mqv);
16750 	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16751 		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16752 		       cq->queue_id);
16753 	else
16754 		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16755 		       cq->queue_id);
16756 	switch (mq->entry_count) {
16757 	default:
16758 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16759 				"0362 Unsupported MQ count. (%d)\n",
16760 				mq->entry_count);
16761 		if (mq->entry_count < 16) {
16762 			status = -EINVAL;
16763 			goto out;
16764 		}
16765 		fallthrough;	/* otherwise default to smallest count */
16766 	case 16:
16767 		bf_set(lpfc_mq_context_ring_size,
16768 		       &mq_create_ext->u.request.context,
16769 		       LPFC_MQ_RING_SIZE_16);
16770 		break;
16771 	case 32:
16772 		bf_set(lpfc_mq_context_ring_size,
16773 		       &mq_create_ext->u.request.context,
16774 		       LPFC_MQ_RING_SIZE_32);
16775 		break;
16776 	case 64:
16777 		bf_set(lpfc_mq_context_ring_size,
16778 		       &mq_create_ext->u.request.context,
16779 		       LPFC_MQ_RING_SIZE_64);
16780 		break;
16781 	case 128:
16782 		bf_set(lpfc_mq_context_ring_size,
16783 		       &mq_create_ext->u.request.context,
16784 		       LPFC_MQ_RING_SIZE_128);
16785 		break;
16786 	}
16787 	list_for_each_entry(dmabuf, &mq->page_list, list) {
16788 		memset(dmabuf->virt, 0, hw_page_size);
16789 		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16790 					putPaddrLow(dmabuf->phys);
16791 		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16792 					putPaddrHigh(dmabuf->phys);
16793 	}
16794 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16795 	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16796 			      &mq_create_ext->u.response);
16797 	if (rc != MBX_SUCCESS) {
16798 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16799 				"2795 MQ_CREATE_EXT failed with "
16800 				"status x%x. Failback to MQ_CREATE.\n",
16801 				rc);
16802 		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16803 		mq_create = &mbox->u.mqe.un.mq_create;
16804 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16805 		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16806 		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16807 				      &mq_create->u.response);
16808 	}
16809 
16810 	/* The IOCTL status is embedded in the mailbox subheader. */
16811 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16812 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16813 	if (shdr_status || shdr_add_status || rc) {
16814 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16815 				"2502 MQ_CREATE mailbox failed with "
16816 				"status x%x add_status x%x, mbx status x%x\n",
16817 				shdr_status, shdr_add_status, rc);
16818 		status = -ENXIO;
16819 		goto out;
16820 	}
16821 	if (mq->queue_id == 0xFFFF) {
16822 		status = -ENXIO;
16823 		goto out;
16824 	}
16825 	mq->type = LPFC_MQ;
16826 	mq->assoc_qid = cq->queue_id;
16827 	mq->subtype = subtype;
16828 	mq->host_index = 0;
16829 	mq->hba_index = 0;
16830 
16831 	/* link the mq onto the parent cq child list */
16832 	list_add_tail(&mq->list, &cq->child_list);
16833 out:
16834 	mempool_free(mbox, phba->mbox_mem_pool);
16835 	return status;
16836 }
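
/*
 * Illustrative sketch (hypothetical helper): the single mailbox queue is
 * bound to its completion queue with the LPFC_MBOX subtype.
 */
static inline int
lpfc_example_create_mbox_queue(struct lpfc_hba *phba, struct lpfc_queue *mq,
			       struct lpfc_queue *cq)
{
	return lpfc_mq_create(phba, mq, cq, LPFC_MBOX);
}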
16837 
16838 /**
16839  * lpfc_wq_create - Create a Work Queue on the HBA
16840  * @phba: HBA structure that indicates port to create a queue on.
16841  * @wq: The queue structure to use to create the work queue.
16842  * @cq: The completion queue to bind this work queue to.
16843  * @subtype: The subtype of the work queue indicating its functionality.
16844  *
16845  * This function creates a work queue, as detailed in @wq, on a port, described
16846  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16847  *
16848  * The @phba struct is used to send mailbox command to HBA. The @wq struct
16849  * is used to get the entry count and entry size that are necessary to
16850  * determine the number of pages to allocate and use for this queue. The @cq
16851  * is used to indicate which completion queue to bind this work queue to. This
16852  * function will send the WQ_CREATE mailbox command to the HBA to setup the
16853  * work queue. This function is synchronous and will wait for the mailbox
16854  * command to finish before continuing.
16855  *
16856  * On success this function will return a zero. If unable to allocate enough
16857  * memory this function will return -ENOMEM. If the queue create mailbox command
16858  * fails this function will return -ENXIO.
16859  **/
16860 int
16861 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16862 	       struct lpfc_queue *cq, uint32_t subtype)
16863 {
16864 	struct lpfc_mbx_wq_create *wq_create;
16865 	struct lpfc_dmabuf *dmabuf;
16866 	LPFC_MBOXQ_t *mbox;
16867 	int rc, length, status = 0;
16868 	uint32_t shdr_status, shdr_add_status;
16869 	union lpfc_sli4_cfg_shdr *shdr;
16870 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16871 	struct dma_address *page;
16872 	void __iomem *bar_memmap_p;
16873 	uint32_t db_offset;
16874 	uint16_t pci_barset;
16875 	uint8_t dpp_barset;
16876 	uint32_t dpp_offset;
16877 	uint8_t wq_create_version;
16878 #ifdef CONFIG_X86
16879 	unsigned long pg_addr;
16880 #endif
16881 
16882 	/* sanity check on queue memory */
16883 	if (!wq || !cq)
16884 		return -ENODEV;
16885 	if (!phba->sli4_hba.pc_sli4_params.supported)
16886 		hw_page_size = wq->page_size;
16887 
16888 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16889 	if (!mbox)
16890 		return -ENOMEM;
16891 	length = (sizeof(struct lpfc_mbx_wq_create) -
16892 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16893 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16894 			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16895 			 length, LPFC_SLI4_MBX_EMBED);
16896 	wq_create = &mbox->u.mqe.un.wq_create;
16897 	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16898 	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16899 		    wq->page_count);
16900 	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16901 		    cq->queue_id);
16902 
16903 	/* wqv is the earliest version supported, NOT the latest */
16904 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
16905 	       phba->sli4_hba.pc_sli4_params.wqv);
16906 
16907 	if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16908 	    (wq->page_size > SLI4_PAGE_SIZE))
16909 		wq_create_version = LPFC_Q_CREATE_VERSION_1;
16910 	else
16911 		wq_create_version = LPFC_Q_CREATE_VERSION_0;
16912 
16913 	switch (wq_create_version) {
16914 	case LPFC_Q_CREATE_VERSION_1:
16915 		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16916 		       wq->entry_count);
16917 		bf_set(lpfc_mbox_hdr_version, &shdr->request,
16918 		       LPFC_Q_CREATE_VERSION_1);
16919 
16920 		switch (wq->entry_size) {
16921 		default:
16922 		case 64:
16923 			bf_set(lpfc_mbx_wq_create_wqe_size,
16924 			       &wq_create->u.request_1,
16925 			       LPFC_WQ_WQE_SIZE_64);
16926 			break;
16927 		case 128:
16928 			bf_set(lpfc_mbx_wq_create_wqe_size,
16929 			       &wq_create->u.request_1,
16930 			       LPFC_WQ_WQE_SIZE_128);
16931 			break;
16932 		}
16933 		/* Request DPP (Direct Packet Push) by default */
16934 		bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
16935 		bf_set(lpfc_mbx_wq_create_page_size,
16936 		       &wq_create->u.request_1,
16937 		       (wq->page_size / SLI4_PAGE_SIZE));
16938 		page = wq_create->u.request_1.page;
16939 		break;
16940 	default:
16941 		page = wq_create->u.request.page;
16942 		break;
16943 	}
16944 
16945 	list_for_each_entry(dmabuf, &wq->page_list, list) {
16946 		memset(dmabuf->virt, 0, hw_page_size);
16947 		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
16948 		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
16949 	}
16950 
16951 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
16952 		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
16953 
16954 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16955 	/* The IOCTL status is embedded in the mailbox subheader. */
16956 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16957 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16958 	if (shdr_status || shdr_add_status || rc) {
16959 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16960 				"2503 WQ_CREATE mailbox failed with "
16961 				"status x%x add_status x%x, mbx status x%x\n",
16962 				shdr_status, shdr_add_status, rc);
16963 		status = -ENXIO;
16964 		goto out;
16965 	}
16966 
16967 	if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
16968 		wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
16969 					&wq_create->u.response);
16970 	else
16971 		wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
16972 					&wq_create->u.response_1);
16973 
16974 	if (wq->queue_id == 0xFFFF) {
16975 		status = -ENXIO;
16976 		goto out;
16977 	}
16978 
16979 	wq->db_format = LPFC_DB_LIST_FORMAT;
16980 	if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
16981 		if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
16982 			wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
16983 					       &wq_create->u.response);
16984 			if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
16985 			    (wq->db_format != LPFC_DB_RING_FORMAT)) {
16986 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16987 						"3265 WQ[%d] doorbell format "
16988 						"not supported: x%x\n",
16989 						wq->queue_id, wq->db_format);
16990 				status = -EINVAL;
16991 				goto out;
16992 			}
16993 			pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
16994 					    &wq_create->u.response);
16995 			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
16996 								   pci_barset);
16997 			if (!bar_memmap_p) {
16998 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16999 						"3263 WQ[%d] failed to memmap "
17000 						"pci barset:x%x\n",
17001 						wq->queue_id, pci_barset);
17002 				status = -ENOMEM;
17003 				goto out;
17004 			}
17005 			db_offset = wq_create->u.response.doorbell_offset;
17006 			if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
17007 			    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
17008 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17009 						"3252 WQ[%d] doorbell offset "
17010 						"not supported: x%x\n",
17011 						wq->queue_id, db_offset);
17012 				status = -EINVAL;
17013 				goto out;
17014 			}
17015 			wq->db_regaddr = bar_memmap_p + db_offset;
17016 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17017 					"3264 WQ[%d]: barset:x%x, offset:x%x, "
17018 					"format:x%x\n", wq->queue_id,
17019 					pci_barset, db_offset, wq->db_format);
17020 		} else
17021 			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17022 	} else {
17023 		/* Check if DPP was honored by the firmware */
17024 		wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
17025 				    &wq_create->u.response_1);
17026 		if (wq->dpp_enable) {
17027 			pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
17028 					    &wq_create->u.response_1);
17029 			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17030 								   pci_barset);
17031 			if (!bar_memmap_p) {
17032 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17033 						"3267 WQ[%d] failed to memmap "
17034 						"pci barset:x%x\n",
17035 						wq->queue_id, pci_barset);
17036 				status = -ENOMEM;
17037 				goto out;
17038 			}
17039 			db_offset = wq_create->u.response_1.doorbell_offset;
17040 			wq->db_regaddr = bar_memmap_p + db_offset;
17041 			wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
17042 					    &wq_create->u.response_1);
17043 			dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
17044 					    &wq_create->u.response_1);
17045 			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17046 								   dpp_barset);
17047 			if (!bar_memmap_p) {
17048 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17049 						"3268 WQ[%d] failed to memmap "
17050 						"pci barset:x%x\n",
17051 						wq->queue_id, dpp_barset);
17052 				status = -ENOMEM;
17053 				goto out;
17054 			}
17055 			dpp_offset = wq_create->u.response_1.dpp_offset;
17056 			wq->dpp_regaddr = bar_memmap_p + dpp_offset;
17057 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17058 					"3271 WQ[%d]: barset:x%x, offset:x%x, "
17059 					"dpp_id:x%x dpp_barset:x%x "
17060 					"dpp_offset:x%x\n",
17061 					wq->queue_id, pci_barset, db_offset,
17062 					wq->dpp_id, dpp_barset, dpp_offset);
17063 
17064 #ifdef CONFIG_X86
17065 			/* Enable combined writes for DPP aperture */
17066 			pg_addr = (unsigned long)(wq->dpp_regaddr) & PAGE_MASK;
17067 			rc = set_memory_wc(pg_addr, 1);
17068 			if (rc) {
17069 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17070 					"3272 Cannot setup Combined "
17071 					"Write on WQ[%d] - disable DPP\n",
17072 					wq->queue_id);
17073 				phba->cfg_enable_dpp = 0;
17074 			}
17075 #else
17076 			phba->cfg_enable_dpp = 0;
17077 #endif
17078 		} else
17079 			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17080 	}
17081 	wq->pring = kzalloc(sizeof(struct lpfc_sli_ring), GFP_KERNEL);
17082 	if (wq->pring == NULL) {
17083 		status = -ENOMEM;
17084 		goto out;
17085 	}
17086 	wq->type = LPFC_WQ;
17087 	wq->assoc_qid = cq->queue_id;
17088 	wq->subtype = subtype;
17089 	wq->host_index = 0;
17090 	wq->hba_index = 0;
17091 	wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
17092 
17093 	/* link the wq onto the parent cq child list */
17094 	list_add_tail(&wq->list, &cq->child_list);
17095 out:
17096 	mempool_free(mbox, phba->mbox_mem_pool);
17097 	return status;
17098 }
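
/*
 * Illustrative sketch (hypothetical helper): a work queue is bound to
 * the CQ that will receive its completions; LPFC_IO mirrors the subtype
 * the driver uses for its I/O work queues.
 */
static inline int
lpfc_example_create_io_wq(struct lpfc_hba *phba, struct lpfc_queue *wq,
			  struct lpfc_queue *cq)
{
	return lpfc_wq_create(phba, wq, cq, LPFC_IO);
}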
17099 
17100 /**
17101  * lpfc_rq_create - Create a Receive Queue on the HBA
17102  * @phba: HBA structure that indicates port to create a queue on.
17103  * @hrq: The queue structure to use to create the header receive queue.
17104  * @drq: The queue structure to use to create the data receive queue.
17105  * @cq: The completion queue to bind this work queue to.
17106  * @subtype: The subtype of the work queue indicating its functionality.
17107  *
17108  * This function creates a receive buffer queue pair, as detailed in @hrq and
17109  * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
17110  * to the HBA.
17111  *
17112  * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
17113  * structs are used to get the entry count that is necessary to determine the
17114  * number of pages to use for this queue. The @cq is used to indicate which
17115  * completion queue to bind received buffers that are posted to these queues to.
17116  * This function will send the RQ_CREATE mailbox command to the HBA to setup the
17117  * receive queue pair. This function is synchronous and will wait for the
17118  * mailbox command to finish before continuing.
17119  *
17120  * On success this function will return a zero. If unable to allocate enough
17121  * memory this function will return -ENOMEM. If the queue create mailbox command
17122  * fails this function will return -ENXIO.
17123  **/
17124 int
17125 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17126 	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
17127 {
17128 	struct lpfc_mbx_rq_create *rq_create;
17129 	struct lpfc_dmabuf *dmabuf;
17130 	LPFC_MBOXQ_t *mbox;
17131 	int rc, length, status = 0;
17132 	uint32_t shdr_status, shdr_add_status;
17133 	union lpfc_sli4_cfg_shdr *shdr;
17134 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17135 	void __iomem *bar_memmap_p;
17136 	uint32_t db_offset;
17137 	uint16_t pci_barset;
17138 
17139 	/* sanity check on queue memory */
17140 	if (!hrq || !drq || !cq)
17141 		return -ENODEV;
17142 	if (!phba->sli4_hba.pc_sli4_params.supported)
17143 		hw_page_size = SLI4_PAGE_SIZE;
17144 
17145 	if (hrq->entry_count != drq->entry_count)
17146 		return -EINVAL;
17147 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17148 	if (!mbox)
17149 		return -ENOMEM;
17150 	length = (sizeof(struct lpfc_mbx_rq_create) -
17151 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17152 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17153 			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17154 			 length, LPFC_SLI4_MBX_EMBED);
17155 	rq_create = &mbox->u.mqe.un.rq_create;
17156 	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17157 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
17158 	       phba->sli4_hba.pc_sli4_params.rqv);
17159 	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17160 		bf_set(lpfc_rq_context_rqe_count_1,
17161 		       &rq_create->u.request.context,
17162 		       hrq->entry_count);
17163 		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
17164 		bf_set(lpfc_rq_context_rqe_size,
17165 		       &rq_create->u.request.context,
17166 		       LPFC_RQE_SIZE_8);
17167 		bf_set(lpfc_rq_context_page_size,
17168 		       &rq_create->u.request.context,
17169 		       LPFC_RQ_PAGE_SIZE_4096);
17170 	} else {
17171 		switch (hrq->entry_count) {
17172 		default:
17173 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17174 					"2535 Unsupported RQ count. (%d)\n",
17175 					hrq->entry_count);
17176 			if (hrq->entry_count < 512) {
17177 				status = -EINVAL;
17178 				goto out;
17179 			}
17180 			fallthrough;	/* otherwise default to smallest count */
17181 		case 512:
17182 			bf_set(lpfc_rq_context_rqe_count,
17183 			       &rq_create->u.request.context,
17184 			       LPFC_RQ_RING_SIZE_512);
17185 			break;
17186 		case 1024:
17187 			bf_set(lpfc_rq_context_rqe_count,
17188 			       &rq_create->u.request.context,
17189 			       LPFC_RQ_RING_SIZE_1024);
17190 			break;
17191 		case 2048:
17192 			bf_set(lpfc_rq_context_rqe_count,
17193 			       &rq_create->u.request.context,
17194 			       LPFC_RQ_RING_SIZE_2048);
17195 			break;
17196 		case 4096:
17197 			bf_set(lpfc_rq_context_rqe_count,
17198 			       &rq_create->u.request.context,
17199 			       LPFC_RQ_RING_SIZE_4096);
17200 			break;
17201 		}
17202 		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
17203 		       LPFC_HDR_BUF_SIZE);
17204 	}
17205 	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17206 	       cq->queue_id);
17207 	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17208 	       hrq->page_count);
17209 	list_for_each_entry(dmabuf, &hrq->page_list, list) {
17210 		memset(dmabuf->virt, 0, hw_page_size);
17211 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17212 					putPaddrLow(dmabuf->phys);
17213 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17214 					putPaddrHigh(dmabuf->phys);
17215 	}
17216 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17217 		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17218 
17219 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17220 	/* The IOCTL status is embedded in the mailbox subheader. */
17221 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17222 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17223 	if (shdr_status || shdr_add_status || rc) {
17224 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17225 				"2504 RQ_CREATE mailbox failed with "
17226 				"status x%x add_status x%x, mbx status x%x\n",
17227 				shdr_status, shdr_add_status, rc);
17228 		status = -ENXIO;
17229 		goto out;
17230 	}
17231 	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17232 	if (hrq->queue_id == 0xFFFF) {
17233 		status = -ENXIO;
17234 		goto out;
17235 	}
17236 
17237 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
17238 		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
17239 					&rq_create->u.response);
17240 		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
17241 		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
17242 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17243 					"3262 RQ [%d] doorbell format not "
17244 					"supported: x%x\n", hrq->queue_id,
17245 					hrq->db_format);
17246 			status = -EINVAL;
17247 			goto out;
17248 		}
17249 
17250 		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
17251 				    &rq_create->u.response);
17252 		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
17253 		if (!bar_memmap_p) {
17254 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17255 					"3269 RQ[%d] failed to memmap pci "
17256 					"barset:x%x\n", hrq->queue_id,
17257 					pci_barset);
17258 			status = -ENOMEM;
17259 			goto out;
17260 		}
17261 
17262 		db_offset = rq_create->u.response.doorbell_offset;
17263 		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
17264 		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
17265 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17266 					"3270 RQ[%d] doorbell offset not "
17267 					"supported: x%x\n", hrq->queue_id,
17268 					db_offset);
17269 			status = -EINVAL;
17270 			goto out;
17271 		}
17272 		hrq->db_regaddr = bar_memmap_p + db_offset;
17273 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17274 				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
17275 				"format:x%x\n", hrq->queue_id, pci_barset,
17276 				db_offset, hrq->db_format);
17277 	} else {
17278 		hrq->db_format = LPFC_DB_RING_FORMAT;
17279 		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17280 	}
17281 	hrq->type = LPFC_HRQ;
17282 	hrq->assoc_qid = cq->queue_id;
17283 	hrq->subtype = subtype;
17284 	hrq->host_index = 0;
17285 	hrq->hba_index = 0;
17286 	hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17287 
17288 	/* now create the data queue */
17289 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17290 			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17291 			 length, LPFC_SLI4_MBX_EMBED);
17292 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
17293 	       phba->sli4_hba.pc_sli4_params.rqv);
17294 	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17295 		bf_set(lpfc_rq_context_rqe_count_1,
17296 		       &rq_create->u.request.context, hrq->entry_count);
17297 		if (subtype == LPFC_NVMET)
17298 			rq_create->u.request.context.buffer_size =
17299 				LPFC_NVMET_DATA_BUF_SIZE;
17300 		else
17301 			rq_create->u.request.context.buffer_size =
17302 				LPFC_DATA_BUF_SIZE;
17303 		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
17304 		       LPFC_RQE_SIZE_8);
17305 		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
17306 		       (PAGE_SIZE/SLI4_PAGE_SIZE));
17307 	} else {
17308 		switch (drq->entry_count) {
17309 		default:
17310 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17311 					"2536 Unsupported RQ count. (%d)\n",
17312 					drq->entry_count);
17313 			if (drq->entry_count < 512) {
17314 				status = -EINVAL;
17315 				goto out;
17316 			}
17317 			fallthrough;	/* otherwise default to smallest count */
17318 		case 512:
17319 			bf_set(lpfc_rq_context_rqe_count,
17320 			       &rq_create->u.request.context,
17321 			       LPFC_RQ_RING_SIZE_512);
17322 			break;
17323 		case 1024:
17324 			bf_set(lpfc_rq_context_rqe_count,
17325 			       &rq_create->u.request.context,
17326 			       LPFC_RQ_RING_SIZE_1024);
17327 			break;
17328 		case 2048:
17329 			bf_set(lpfc_rq_context_rqe_count,
17330 			       &rq_create->u.request.context,
17331 			       LPFC_RQ_RING_SIZE_2048);
17332 			break;
17333 		case 4096:
17334 			bf_set(lpfc_rq_context_rqe_count,
17335 			       &rq_create->u.request.context,
17336 			       LPFC_RQ_RING_SIZE_4096);
17337 			break;
17338 		}
17339 		if (subtype == LPFC_NVMET)
17340 			bf_set(lpfc_rq_context_buf_size,
17341 			       &rq_create->u.request.context,
17342 			       LPFC_NVMET_DATA_BUF_SIZE);
17343 		else
17344 			bf_set(lpfc_rq_context_buf_size,
17345 			       &rq_create->u.request.context,
17346 			       LPFC_DATA_BUF_SIZE);
17347 	}
17348 	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17349 	       cq->queue_id);
17350 	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17351 	       drq->page_count);
17352 	list_for_each_entry(dmabuf, &drq->page_list, list) {
17353 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17354 					putPaddrLow(dmabuf->phys);
17355 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17356 					putPaddrHigh(dmabuf->phys);
17357 	}
17358 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17359 		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17360 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17361 	/* The IOCTL status is embedded in the mailbox subheader. */
17362 	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17363 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17364 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17365 	if (shdr_status || shdr_add_status || rc) {
17366 		status = -ENXIO;
17367 		goto out;
17368 	}
17369 	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17370 	if (drq->queue_id == 0xFFFF) {
17371 		status = -ENXIO;
17372 		goto out;
17373 	}
17374 	drq->type = LPFC_DRQ;
17375 	drq->assoc_qid = cq->queue_id;
17376 	drq->subtype = subtype;
17377 	drq->host_index = 0;
17378 	drq->hba_index = 0;
17379 	drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17380 
17381 	/* link the header and data RQs onto the parent cq child list */
17382 	list_add_tail(&hrq->list, &cq->child_list);
17383 	list_add_tail(&drq->list, &cq->child_list);
17384 
17385 out:
17386 	mempool_free(mbox, phba->mbox_mem_pool);
17387 	return status;
17388 }
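
/*
 * Illustrative sketch (hypothetical helper): header and data RQs are
 * always created as a pair against one CQ, and their entry counts must
 * already match; LPFC_USOL mirrors the subtype used for the unsolicited
 * ELS receive queues.
 */
static inline int
lpfc_example_create_rq_pair(struct lpfc_hba *phba, struct lpfc_queue *hrq,
			    struct lpfc_queue *drq, struct lpfc_queue *cq)
{
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}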
17389 
17390 /**
17391  * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17392  * @phba: HBA structure that indicates port to create a queue on.
17393  * @hrqp: The queue structure array to use to create the header receive queues.
17394  * @drqp: The queue structure array to use to create the data receive queues.
17395  * @cqp: The completion queue array to bind these receive queues to.
17396  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
17397  *
17398  * This function creates receive buffer queue pairs, as detailed in @hrqp and
17399  * @drqp, on a port, described by @phba by sending a RQ_CREATE mailbox command
17400  * to the HBA.
17401  *
17402  * The @phba struct is used to send a mailbox command to the HBA. The @hrqp
17403  * and @drqp arrays are used to get the entry counts needed to determine the
17404  * number of pages to use for these queues. The @cqp array indicates which
17405  * completion queues to bind received buffers that are posted to these queues to.
17406  * This function will send the RQ_CREATE mailbox command to the HBA to setup the
17407  * receive queue pairs. This function is synchronous and will wait for the
17408  * mailbox command to finish before continuing.
17409  *
17410  * On success this function will return a zero. If unable to allocate enough
17411  * memory this function will return -ENOMEM. If the queue create mailbox command
17412  * fails this function will return -ENXIO.
17413  **/
17414 int
17415 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17416 		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17417 		uint32_t subtype)
17418 {
17419 	struct lpfc_queue *hrq, *drq, *cq;
17420 	struct lpfc_mbx_rq_create_v2 *rq_create;
17421 	struct lpfc_dmabuf *dmabuf;
17422 	LPFC_MBOXQ_t *mbox;
17423 	int rc, length, alloclen, status = 0;
17424 	int cnt, idx, numrq, page_idx = 0;
17425 	uint32_t shdr_status, shdr_add_status;
17426 	union lpfc_sli4_cfg_shdr *shdr;
17427 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17428 
17429 	numrq = phba->cfg_nvmet_mrq;
17430 	/* sanity check on array memory */
17431 	if (!hrqp || !drqp || !cqp || !numrq)
17432 		return -ENODEV;
17433 	if (!phba->sli4_hba.pc_sli4_params.supported)
17434 		hw_page_size = SLI4_PAGE_SIZE;
17435 
17436 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17437 	if (!mbox)
17438 		return -ENOMEM;
17439 
17440 	length = sizeof(struct lpfc_mbx_rq_create_v2);
17441 	length += ((2 * numrq * hrqp[0]->page_count) *
17442 		   sizeof(struct dma_address));
17443 
17444 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17445 				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17446 				    LPFC_SLI4_MBX_NEMBED);
17447 	if (alloclen < length) {
17448 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17449 				"3099 Allocated DMA memory size (%d) is "
17450 				"less than the requested DMA memory size "
17451 				"(%d)\n", alloclen, length);
17452 		status = -ENOMEM;
17453 		goto out;
17454 	}
17455 
17458 	rq_create = mbox->sge_array->addr[0];
17459 	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17460 
17461 	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17462 	cnt = 0;
17463 
17464 	for (idx = 0; idx < numrq; idx++) {
17465 		hrq = hrqp[idx];
17466 		drq = drqp[idx];
17467 		cq  = cqp[idx];
17468 
17469 		/* sanity check on queue memory */
17470 		if (!hrq || !drq || !cq) {
17471 			status = -ENODEV;
17472 			goto out;
17473 		}
17474 
17475 		if (hrq->entry_count != drq->entry_count) {
17476 			status = -EINVAL;
17477 			goto out;
17478 		}
17479 
17480 		if (idx == 0) {
17481 			bf_set(lpfc_mbx_rq_create_num_pages,
17482 			       &rq_create->u.request,
17483 			       hrq->page_count);
17484 			bf_set(lpfc_mbx_rq_create_rq_cnt,
17485 			       &rq_create->u.request, (numrq * 2));
17486 			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17487 			       1);
17488 			bf_set(lpfc_rq_context_base_cq,
17489 			       &rq_create->u.request.context,
17490 			       cq->queue_id);
17491 			bf_set(lpfc_rq_context_data_size,
17492 			       &rq_create->u.request.context,
17493 			       LPFC_NVMET_DATA_BUF_SIZE);
17494 			bf_set(lpfc_rq_context_hdr_size,
17495 			       &rq_create->u.request.context,
17496 			       LPFC_HDR_BUF_SIZE);
17497 			bf_set(lpfc_rq_context_rqe_count_1,
17498 			       &rq_create->u.request.context,
17499 			       hrq->entry_count);
17500 			bf_set(lpfc_rq_context_rqe_size,
17501 			       &rq_create->u.request.context,
17502 			       LPFC_RQE_SIZE_8);
17503 			bf_set(lpfc_rq_context_page_size,
17504 			       &rq_create->u.request.context,
17505 			       (PAGE_SIZE/SLI4_PAGE_SIZE));
17506 		}
17507 		rc = 0;
17508 		list_for_each_entry(dmabuf, &hrq->page_list, list) {
17509 			memset(dmabuf->virt, 0, hw_page_size);
17510 			cnt = page_idx + dmabuf->buffer_tag;
17511 			rq_create->u.request.page[cnt].addr_lo =
17512 					putPaddrLow(dmabuf->phys);
17513 			rq_create->u.request.page[cnt].addr_hi =
17514 					putPaddrHigh(dmabuf->phys);
17515 			rc++;
17516 		}
17517 		page_idx += rc;
17518 
17519 		rc = 0;
17520 		list_for_each_entry(dmabuf, &drq->page_list, list) {
17521 			memset(dmabuf->virt, 0, hw_page_size);
17522 			cnt = page_idx + dmabuf->buffer_tag;
17523 			rq_create->u.request.page[cnt].addr_lo =
17524 					putPaddrLow(dmabuf->phys);
17525 			rq_create->u.request.page[cnt].addr_hi =
17526 					putPaddrHigh(dmabuf->phys);
17527 			rc++;
17528 		}
17529 		page_idx += rc;
17530 
17531 		hrq->db_format = LPFC_DB_RING_FORMAT;
17532 		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17533 		hrq->type = LPFC_HRQ;
17534 		hrq->assoc_qid = cq->queue_id;
17535 		hrq->subtype = subtype;
17536 		hrq->host_index = 0;
17537 		hrq->hba_index = 0;
17538 		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17539 
17540 		drq->db_format = LPFC_DB_RING_FORMAT;
17541 		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17542 		drq->type = LPFC_DRQ;
17543 		drq->assoc_qid = cq->queue_id;
17544 		drq->subtype = subtype;
17545 		drq->host_index = 0;
17546 		drq->hba_index = 0;
17547 		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17548 
17549 		list_add_tail(&hrq->list, &cq->child_list);
17550 		list_add_tail(&drq->list, &cq->child_list);
17551 	}
17552 
17553 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17554 	/* The IOCTL status is embedded in the mailbox subheader. */
17555 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17556 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17557 	if (shdr_status || shdr_add_status || rc) {
17558 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17559 				"3120 RQ_CREATE mailbox failed with "
17560 				"status x%x add_status x%x, mbx status x%x\n",
17561 				shdr_status, shdr_add_status, rc);
17562 		status = -ENXIO;
17563 		goto out;
17564 	}
17565 	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17566 	if (rc == 0xFFFF) {
17567 		status = -ENXIO;
17568 		goto out;
17569 	}
17570 
17571 	/* Initialize all RQs with associated queue id */
17572 	for (idx = 0; idx < numrq; idx++) {
17573 		hrq = hrqp[idx];
17574 		hrq->queue_id = rc + (2 * idx);
17575 		drq = drqp[idx];
17576 		drq->queue_id = rc + (2 * idx) + 1;
17577 	}
17578 
17579 out:
17580 	lpfc_sli4_mbox_cmd_free(phba, mbox);
17581 	return status;
17582 }
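/*
 * Worked example (illustrative, not driver code): if RQ_CREATE returns a
 * base queue id of 100 for numrq = 2, the loop above assigns
 *
 *	hrqp[0]->queue_id = 100;	drqp[0]->queue_id = 101;
 *	hrqp[1]->queue_id = 102;	drqp[1]->queue_id = 103;
 *
 * so each header/data RQ pair always occupies adjacent queue ids.
 */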
17583 
17584 /**
17585  * lpfc_eq_destroy - Destroy an Event Queue on the HBA
17586  * @phba: HBA structure that indicates port to destroy a queue on.
17587  * @eq: The queue structure associated with the queue to destroy.
17588  *
17589  * This function destroys the queue described by @eq by sending a mailbox
17590  * command, specific to the type of queue, to the HBA.
17591  *
17592  * The @eq struct is used to get the queue ID of the queue to destroy.
17593  *
17594  * On success this function will return a zero. If the queue destroy mailbox
17595  * command fails this function will return -ENXIO.
17596  **/
17597 int
17598 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17599 {
17600 	LPFC_MBOXQ_t *mbox;
17601 	int rc, length, status = 0;
17602 	uint32_t shdr_status, shdr_add_status;
17603 	union lpfc_sli4_cfg_shdr *shdr;
17604 
17605 	/* sanity check on queue memory */
17606 	if (!eq)
17607 		return -ENODEV;
17608 
17609 	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17610 	if (!mbox)
17611 		return -ENOMEM;
17612 	length = (sizeof(struct lpfc_mbx_eq_destroy) -
17613 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17614 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17615 			 LPFC_MBOX_OPCODE_EQ_DESTROY,
17616 			 length, LPFC_SLI4_MBX_EMBED);
17617 	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17618 	       eq->queue_id);
17619 	mbox->vport = eq->phba->pport;
17620 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17621 
17622 	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17623 	/* The IOCTL status is embedded in the mailbox subheader. */
17624 	shdr = (union lpfc_sli4_cfg_shdr *)
17625 		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17626 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17627 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17628 	if (shdr_status || shdr_add_status || rc) {
17629 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17630 				"2505 EQ_DESTROY mailbox failed with "
17631 				"status x%x add_status x%x, mbx status x%x\n",
17632 				shdr_status, shdr_add_status, rc);
17633 		status = -ENXIO;
17634 	}
17635 
17636 	/* Remove eq from any list */
17637 	list_del_init(&eq->list);
17638 	mempool_free(mbox, eq->phba->mbox_mem_pool);
17639 	return status;
17640 }
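/*
 * Usage sketch (illustrative only, not driver code): a caller tearing down
 * an event queue typically frees the host-side queue memory only after the
 * destroy mailbox command succeeds, e.g.
 *
 *	if (!lpfc_eq_destroy(phba, eq))
 *		lpfc_sli4_queue_free(eq);
 *
 * The same pattern applies to the cq/mq/wq/rq destroy routines below.
 */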
17641 
17642 /**
17643  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17644  * @phba: HBA structure that indicates port to destroy a queue on.
17645  * @cq: The queue structure associated with the queue to destroy.
17646  *
17647  * This function destroys the queue described by @cq by sending a mailbox
17648  * command, specific to the type of queue, to the HBA.
17649  *
17650  * The @cq struct is used to get the queue ID of the queue to destroy.
17651  *
17652  * On success this function will return a zero. If the queue destroy mailbox
17653  * command fails this function will return -ENXIO.
17654  **/
17655 int
17656 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17657 {
17658 	LPFC_MBOXQ_t *mbox;
17659 	int rc, length, status = 0;
17660 	uint32_t shdr_status, shdr_add_status;
17661 	union lpfc_sli4_cfg_shdr *shdr;
17662 
17663 	/* sanity check on queue memory */
17664 	if (!cq)
17665 		return -ENODEV;
17666 	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17667 	if (!mbox)
17668 		return -ENOMEM;
17669 	length = (sizeof(struct lpfc_mbx_cq_destroy) -
17670 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17671 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17672 			 LPFC_MBOX_OPCODE_CQ_DESTROY,
17673 			 length, LPFC_SLI4_MBX_EMBED);
17674 	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17675 	       cq->queue_id);
17676 	mbox->vport = cq->phba->pport;
17677 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17678 	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17679 	/* The IOCTL status is embedded in the mailbox subheader. */
17680 	shdr = (union lpfc_sli4_cfg_shdr *)
17681 		&mbox->u.mqe.un.cq_destroy.header.cfg_shdr;
17682 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17683 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17684 	if (shdr_status || shdr_add_status || rc) {
17685 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17686 				"2506 CQ_DESTROY mailbox failed with "
17687 				"status x%x add_status x%x, mbx status x%x\n",
17688 				shdr_status, shdr_add_status, rc);
17689 		status = -ENXIO;
17690 	}
17691 	/* Remove cq from any list */
17692 	list_del_init(&cq->list);
17693 	mempool_free(mbox, cq->phba->mbox_mem_pool);
17694 	return status;
17695 }
17696 
17697 /**
17698  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17699  * @phba: HBA structure that indicates port to destroy a queue on.
17700  * @mq: The queue structure associated with the queue to destroy.
17701  *
17702  * This function destroys the queue described by @mq by sending a mailbox
17703  * command, specific to the type of queue, to the HBA.
17704  *
17705  * The @mq struct is used to get the queue ID of the queue to destroy.
17706  *
17707  * On success this function will return a zero. If the queue destroy mailbox
17708  * command fails this function will return -ENXIO.
17709  **/
17710 int
17711 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17712 {
17713 	LPFC_MBOXQ_t *mbox;
17714 	int rc, length, status = 0;
17715 	uint32_t shdr_status, shdr_add_status;
17716 	union lpfc_sli4_cfg_shdr *shdr;
17717 
17718 	/* sanity check on queue memory */
17719 	if (!mq)
17720 		return -ENODEV;
17721 	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17722 	if (!mbox)
17723 		return -ENOMEM;
17724 	length = (sizeof(struct lpfc_mbx_mq_destroy) -
17725 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17726 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17727 			 LPFC_MBOX_OPCODE_MQ_DESTROY,
17728 			 length, LPFC_SLI4_MBX_EMBED);
17729 	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17730 	       mq->queue_id);
17731 	mbox->vport = mq->phba->pport;
17732 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17733 	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17734 	/* The IOCTL status is embedded in the mailbox subheader. */
17735 	shdr = (union lpfc_sli4_cfg_shdr *)
17736 		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17737 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17738 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17739 	if (shdr_status || shdr_add_status || rc) {
17740 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17741 				"2507 MQ_DESTROY mailbox failed with "
17742 				"status x%x add_status x%x, mbx status x%x\n",
17743 				shdr_status, shdr_add_status, rc);
17744 		status = -ENXIO;
17745 	}
17746 	/* Remove mq from any list */
17747 	list_del_init(&mq->list);
17748 	mempool_free(mbox, mq->phba->mbox_mem_pool);
17749 	return status;
17750 }
17751 
17752 /**
17753  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17754  * @phba: HBA structure that indicates port to destroy a queue on.
17755  * @wq: The queue structure associated with the queue to destroy.
17756  *
17757  * This function destroys the queue described by @wq by sending a mailbox
17758  * command, specific to the type of queue, to the HBA.
17759  *
17760  * The @wq struct is used to get the queue ID of the queue to destroy.
17761  *
17762  * On success this function will return a zero. If the queue destroy mailbox
17763  * command fails this function will return -ENXIO.
17764  **/
17765 int
17766 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17767 {
17768 	LPFC_MBOXQ_t *mbox;
17769 	int rc, length, status = 0;
17770 	uint32_t shdr_status, shdr_add_status;
17771 	union lpfc_sli4_cfg_shdr *shdr;
17772 
17773 	/* sanity check on queue memory */
17774 	if (!wq)
17775 		return -ENODEV;
17776 	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17777 	if (!mbox)
17778 		return -ENOMEM;
17779 	length = (sizeof(struct lpfc_mbx_wq_destroy) -
17780 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17781 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17782 			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17783 			 length, LPFC_SLI4_MBX_EMBED);
17784 	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17785 	       wq->queue_id);
17786 	mbox->vport = wq->phba->pport;
17787 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17788 	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17789 	shdr = (union lpfc_sli4_cfg_shdr *)
17790 		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17791 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17792 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17793 	if (shdr_status || shdr_add_status || rc) {
17794 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17795 				"2508 WQ_DESTROY mailbox failed with "
17796 				"status x%x add_status x%x, mbx status x%x\n",
17797 				shdr_status, shdr_add_status, rc);
17798 		status = -ENXIO;
17799 	}
17800 	/* Remove wq from any list */
17801 	list_del_init(&wq->list);
17802 	kfree(wq->pring);
17803 	wq->pring = NULL;
17804 	mempool_free(mbox, wq->phba->mbox_mem_pool);
17805 	return status;
17806 }
17807 
17808 /**
17809  * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17810  * @phba: HBA structure that indicates port to destroy a queue on.
17811  * @hrq: The queue structure associated with the header receive queue.
17812  * @drq: The queue structure associated with the data receive queue.
17813  *
17814  * This function destroys the receive queue pair described by @hrq and @drq
17815  * by sending mailbox commands, specific to the type of queue, to the HBA.
17816  *
17817  * The @hrq and @drq structs are used to get the IDs of the queues to destroy.
17818  *
17819  * On success this function will return a zero. If the queue destroy mailbox
17820  * command fails this function will return -ENXIO.
17821  **/
17822 int
17823 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17824 		struct lpfc_queue *drq)
17825 {
17826 	LPFC_MBOXQ_t *mbox;
17827 	int rc, length, status = 0;
17828 	uint32_t shdr_status, shdr_add_status;
17829 	union lpfc_sli4_cfg_shdr *shdr;
17830 
17831 	/* sanity check on queue memory */
17832 	if (!hrq || !drq)
17833 		return -ENODEV;
17834 	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17835 	if (!mbox)
17836 		return -ENOMEM;
17837 	length = (sizeof(struct lpfc_mbx_rq_destroy) -
17838 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17839 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17840 			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17841 			 length, LPFC_SLI4_MBX_EMBED);
17842 	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17843 	       hrq->queue_id);
17844 	mbox->vport = hrq->phba->pport;
17845 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17846 	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17847 	/* The IOCTL status is embedded in the mailbox subheader. */
17848 	shdr = (union lpfc_sli4_cfg_shdr *)
17849 		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17850 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17851 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17852 	if (shdr_status || shdr_add_status || rc) {
17853 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17854 				"2509 RQ_DESTROY mailbox failed with "
17855 				"status x%x add_status x%x, mbx status x%x\n",
17856 				shdr_status, shdr_add_status, rc);
17857 		mempool_free(mbox, hrq->phba->mbox_mem_pool);
17858 		return -ENXIO;
17859 	}
17860 	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17861 	       drq->queue_id);
17862 	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17863 	shdr = (union lpfc_sli4_cfg_shdr *)
17864 		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17865 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17866 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17867 	if (shdr_status || shdr_add_status || rc) {
17868 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17869 				"2510 RQ_DESTROY mailbox failed with "
17870 				"status x%x add_status x%x, mbx status x%x\n",
17871 				shdr_status, shdr_add_status, rc);
17872 		status = -ENXIO;
17873 	}
17874 	list_del_init(&hrq->list);
17875 	list_del_init(&drq->list);
17876 	mempool_free(mbox, hrq->phba->mbox_mem_pool);
17877 	return status;
17878 }
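/*
 * Note (editorial): the single mailbox above is deliberately reused for the
 * data RQ; only the queue id field is rewritten before the second MBX_POLL
 * issue, so one allocation covers both RQ_DESTROY commands.
 */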
17879 
17880 /**
17881  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17882  * @phba: pointer to the lpfc hba data structure.
17883  * @pdma_phys_addr0: Physical address of the 1st SGL page.
17884  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17885  * @xritag: the xritag that ties this io to the SGL pages.
17886  *
17887  * This routine posts the sgl pages for the IO that has the xritag
17888  * that is in the iocbq structure. The xritag is assigned during iocbq
17889  * creation and persists for as long as the driver is loaded.
17890  * If the caller has fewer than 256 scatter gather segments to map, then
17891  * pdma_phys_addr1 should be 0.
17892  * If the caller needs to map more than 256 scatter gather segments, then
17893  * pdma_phys_addr1 should be a valid physical address.
17894  * Physical addresses for SGLs must be 64 byte aligned.
17895  * If two SGL pages are mapped, the first one must have 256 entries and
17896  * the second can have between 1 and 256 entries.
17897  *
17898  * Return codes:
17899  * 	0 - Success
17900  * 	-ENXIO, -ENOMEM - Failure
17901  **/
17902 int
17903 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
17904 		dma_addr_t pdma_phys_addr0,
17905 		dma_addr_t pdma_phys_addr1,
17906 		uint16_t xritag)
17907 {
17908 	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
17909 	LPFC_MBOXQ_t *mbox;
17910 	int rc;
17911 	uint32_t shdr_status, shdr_add_status;
17912 	uint32_t mbox_tmo;
17913 	union lpfc_sli4_cfg_shdr *shdr;
17914 
17915 	if (xritag == NO_XRI) {
17916 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17917 				"0364 Invalid param:\n");
17918 		return -EINVAL;
17919 	}
17920 
17921 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17922 	if (!mbox)
17923 		return -ENOMEM;
17924 
17925 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17926 			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
17927 			sizeof(struct lpfc_mbx_post_sgl_pages) -
17928 			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
17929 
17930 	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
17931 				&mbox->u.mqe.un.post_sgl_pages;
17932 	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
17933 	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
17934 
17935 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
17936 				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
17937 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
17938 				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
17939 
17940 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
17941 				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
17942 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
17943 				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
17944 	if (!phba->sli4_hba.intr_enable)
17945 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17946 	else {
17947 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
17948 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
17949 	}
17950 	/* The IOCTL status is embedded in the mailbox subheader. */
17951 	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
17952 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17953 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17954 	if (!phba->sli4_hba.intr_enable)
17955 		mempool_free(mbox, phba->mbox_mem_pool);
17956 	else if (rc != MBX_TIMEOUT)
17957 		mempool_free(mbox, phba->mbox_mem_pool);
17958 	if (shdr_status || shdr_add_status || rc) {
17959 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17960 				"2511 POST_SGL mailbox failed with "
17961 				"status x%x add_status x%x, mbx status x%x\n",
17962 				shdr_status, shdr_add_status, rc);
		return -ENXIO;
17963 	}
17964 	return 0;
17965 }
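/*
 * Usage sketch (illustrative, not driver code): posting the sgl page of a
 * driver sglq entry that maps fewer than 256 scatter gather segments, so
 * the second page address is 0 per the rules in the header comment:
 *
 *	rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
 */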
17966 
17967 /**
17968  * lpfc_sli4_alloc_xri - Get an available xri in the device's range
17969  * @phba: pointer to lpfc hba data structure.
17970  *
17971  * This routine is invoked to allocate the next available xri from the
17972  * driver's xri bitmask, consistent with the SLI-4 interface spec. The
17973  * index is logical, so the search starts at 0 on every call.
17974  *
17975  * Returns
17976  *	An xri in the range 0 <= xri < max_xri if successful,
17977  *	NO_XRI if no xris are available.
17978  **/
17980 static uint16_t
17981 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
17982 {
17983 	unsigned long xri;
17984 
17985 	/*
17986 	 * Fetch the next logical xri.  Because this index is logical,
17987 	 * the driver starts at 0 each time.
17988 	 */
17989 	spin_lock_irq(&phba->hbalock);
17990 	xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
17991 				 phba->sli4_hba.max_cfg_param.max_xri);
17992 	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
17993 		spin_unlock_irq(&phba->hbalock);
17994 		return NO_XRI;
17995 	} else {
17996 		set_bit(xri, phba->sli4_hba.xri_bmask);
17997 		phba->sli4_hba.max_cfg_param.xri_used++;
17998 	}
17999 	spin_unlock_irq(&phba->hbalock);
18000 	return xri;
18001 }
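/*
 * The allocator above is a standard first-fit bitmap scheme; in miniature
 * (illustrative sketch, with locking and usage accounting elided):
 *
 *	bit = find_first_zero_bit(bmask, max);
 *	if (bit >= max)
 *		return NO_XRI;
 *	set_bit(bit, bmask);
 *	return bit;
 */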
18002 
18003 /**
18004  * __lpfc_sli4_free_xri - Release an xri for reuse.
18005  * @phba: pointer to lpfc hba data structure.
18006  * @xri: xri to release.
18007  *
18008  * This routine is invoked to release an xri to the pool of
18009  * available xris maintained by the driver.
18010  **/
18011 static void
18012 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
18013 {
18014 	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask))
18015 		phba->sli4_hba.max_cfg_param.xri_used--;
18017 }
18018 
18019 /**
18020  * lpfc_sli4_free_xri - Release an xri for reuse.
18021  * @phba: pointer to lpfc hba data structure.
18022  * @xri: xri to release.
18023  *
18024  * This routine is invoked to release an xri to the pool of
18025  * available xris maintained by the driver.
18026  **/
18027 void
18028 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
18029 {
18030 	spin_lock_irq(&phba->hbalock);
18031 	__lpfc_sli4_free_xri(phba, xri);
18032 	spin_unlock_irq(&phba->hbalock);
18033 }
18034 
18035 /**
18036  * lpfc_sli4_next_xritag - Get an xritag for the io
18037  * @phba: Pointer to HBA context object.
18038  *
18039  * This function gets an xritag for the iocb. If there is no unused xritag
18040  * it will return NO_XRI (0xffff).
18041  * The function returns the allocated xritag if successful, else returns
18042  * NO_XRI; NO_XRI is not a valid xritag.
18043  * The caller is not required to hold any lock.
18044  **/
18045 uint16_t
18046 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
18047 {
18048 	uint16_t xri_index;
18049 
18050 	xri_index = lpfc_sli4_alloc_xri(phba);
18051 	if (xri_index == NO_XRI)
18052 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18053 				"2004 Failed to allocate XRI. Last XRITAG is %d"
18054 				" Max XRI is %d, Used XRI is %d\n",
18055 				xri_index,
18056 				phba->sli4_hba.max_cfg_param.max_xri,
18057 				phba->sli4_hba.max_cfg_param.xri_used);
18058 	return xri_index;
18059 }
18060 
18061 /**
18062  * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
18063  * @phba: pointer to lpfc hba data structure.
18064  * @post_sgl_list: pointer to els sgl entry list.
18065  * @post_cnt: number of els sgl entries on the list.
18066  *
18067  * This routine is invoked to post a block of driver's sgl pages to the
18068  * HBA using non-embedded mailbox command. No Lock is held. This routine
18069  * is only called when the driver is loading and after all IO has been
18070  * stopped.
18071  **/
18072 static int
18073 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
18074 			    struct list_head *post_sgl_list,
18075 			    int post_cnt)
18076 {
18077 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
18078 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18079 	struct sgl_page_pairs *sgl_pg_pairs;
18080 	void *viraddr;
18081 	LPFC_MBOXQ_t *mbox;
18082 	uint32_t reqlen, alloclen, pg_pairs;
18083 	uint32_t mbox_tmo;
18084 	uint16_t xritag_start = 0;
18085 	int rc = 0;
18086 	uint32_t shdr_status, shdr_add_status;
18087 	union lpfc_sli4_cfg_shdr *shdr;
18088 
18089 	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
18090 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18091 	if (reqlen > SLI4_PAGE_SIZE) {
18092 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18093 				"2559 Block sgl registration required DMA "
18094 				"size (%d) greater than a page\n", reqlen);
18095 		return -ENOMEM;
18096 	}
18097 
18098 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18099 	if (!mbox)
18100 		return -ENOMEM;
18101 
18102 	/* Allocate DMA memory and set up the non-embedded mailbox command */
18103 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18104 			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
18105 			 LPFC_SLI4_MBX_NEMBED);
18106 
18107 	if (alloclen < reqlen) {
18108 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18109 				"0285 Allocated DMA memory size (%d) is "
18110 				"less than the requested DMA memory "
18111 				"size (%d)\n", alloclen, reqlen);
18112 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18113 		return -ENOMEM;
18114 	}
18115 	/* Set up the SGL pages in the non-embedded DMA pages */
18116 	viraddr = mbox->sge_array->addr[0];
18117 	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18118 	sgl_pg_pairs = &sgl->sgl_pg_pairs;
18119 
18120 	pg_pairs = 0;
18121 	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
18122 		/* Set up the sge entry */
18123 		sgl_pg_pairs->sgl_pg0_addr_lo =
18124 				cpu_to_le32(putPaddrLow(sglq_entry->phys));
18125 		sgl_pg_pairs->sgl_pg0_addr_hi =
18126 				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
18127 		sgl_pg_pairs->sgl_pg1_addr_lo =
18128 				cpu_to_le32(putPaddrLow(0));
18129 		sgl_pg_pairs->sgl_pg1_addr_hi =
18130 				cpu_to_le32(putPaddrHigh(0));
18131 
18132 		/* Keep the first xritag on the list */
18133 		if (pg_pairs == 0)
18134 			xritag_start = sglq_entry->sli4_xritag;
18135 		sgl_pg_pairs++;
18136 		pg_pairs++;
18137 	}
18138 
18139 	/* Complete initialization and perform endian conversion. */
18140 	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18141 	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
18142 	sgl->word0 = cpu_to_le32(sgl->word0);
18143 
18144 	if (!phba->sli4_hba.intr_enable)
18145 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18146 	else {
18147 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18148 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18149 	}
18150 	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
18151 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18152 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18153 	if (!phba->sli4_hba.intr_enable)
18154 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18155 	else if (rc != MBX_TIMEOUT)
18156 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18157 	if (shdr_status || shdr_add_status || rc) {
18158 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18159 				"2513 POST_SGL_BLOCK mailbox command failed "
18160 				"status x%x add_status x%x mbx status x%x\n",
18161 				shdr_status, shdr_add_status, rc);
18162 		rc = -ENXIO;
18163 	}
18164 	return rc;
18165 }
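/*
 * Sizing note (editorial, illustrative figures): each struct sgl_page_pairs
 * entry carries two page addresses split into 32-bit halves (16 bytes), so
 * with a 4KB SLI4_PAGE_SIZE the reqlen check above caps one non-embedded
 * POST_SGL_PAGES command at roughly (4096 - header) / 16 xri/page pairs.
 */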
18166 
18167 /**
18168  * lpfc_sli4_post_io_sgl_block - post a block of IO buffer sgls to firmware
18169  * @phba: pointer to lpfc hba data structure.
18170  * @nblist: pointer to the IO buffer list.
18171  * @count: number of IO buffers on the list.
18172  *
18173  * This routine is invoked to post a block of @count IO buffer sgl pages from
18174  * the buffer list @nblist to the HBA using a non-embedded mailbox command.
18175  * No lock is held.
18178 static int
18179 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
18180 			    int count)
18181 {
18182 	struct lpfc_io_buf *lpfc_ncmd;
18183 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18184 	struct sgl_page_pairs *sgl_pg_pairs;
18185 	void *viraddr;
18186 	LPFC_MBOXQ_t *mbox;
18187 	uint32_t reqlen, alloclen, pg_pairs;
18188 	uint32_t mbox_tmo;
18189 	uint16_t xritag_start = 0;
18190 	int rc = 0;
18191 	uint32_t shdr_status, shdr_add_status;
18192 	dma_addr_t pdma_phys_bpl1;
18193 	union lpfc_sli4_cfg_shdr *shdr;
18194 
18195 	/* Calculate the requested length of the dma memory */
18196 	reqlen = count * sizeof(struct sgl_page_pairs) +
18197 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18198 	if (reqlen > SLI4_PAGE_SIZE) {
18199 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
18200 				"6118 Block sgl registration required DMA "
18201 				"size (%d) greater than a page\n", reqlen);
18202 		return -ENOMEM;
18203 	}
18204 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18205 	if (!mbox) {
18206 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18207 				"6119 Failed to allocate mbox cmd memory\n");
18208 		return -ENOMEM;
18209 	}
18210 
18211 	/* Allocate DMA memory and set up the non-embedded mailbox command */
18212 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18213 				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
18214 				    reqlen, LPFC_SLI4_MBX_NEMBED);
18215 
18216 	if (alloclen < reqlen) {
18217 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18218 				"6120 Allocated DMA memory size (%d) is "
18219 				"less than the requested DMA memory "
18220 				"size (%d)\n", alloclen, reqlen);
18221 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18222 		return -ENOMEM;
18223 	}
18224 
18225 	/* Get the first SGE entry from the non-embedded DMA memory */
18226 	viraddr = mbox->sge_array->addr[0];
18227 
18228 	/* Set up the SGL pages in the non-embedded DMA pages */
18229 	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18230 	sgl_pg_pairs = &sgl->sgl_pg_pairs;
18231 
18232 	pg_pairs = 0;
18233 	list_for_each_entry(lpfc_ncmd, nblist, list) {
18234 		/* Set up the sge entry */
18235 		sgl_pg_pairs->sgl_pg0_addr_lo =
18236 			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
18237 		sgl_pg_pairs->sgl_pg0_addr_hi =
18238 			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
18239 		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
18240 			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
18241 						SGL_PAGE_SIZE;
18242 		else
18243 			pdma_phys_bpl1 = 0;
18244 		sgl_pg_pairs->sgl_pg1_addr_lo =
18245 			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
18246 		sgl_pg_pairs->sgl_pg1_addr_hi =
18247 			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
18248 		/* Keep the first xritag on the list */
18249 		if (pg_pairs == 0)
18250 			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
18251 		sgl_pg_pairs++;
18252 		pg_pairs++;
18253 	}
18254 	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18255 	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
18256 	/* Perform endian conversion if necessary */
18257 	sgl->word0 = cpu_to_le32(sgl->word0);
18258 
18259 	if (!phba->sli4_hba.intr_enable) {
18260 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18261 	} else {
18262 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18263 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18264 	}
18265 	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
18266 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18267 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18268 	if (!phba->sli4_hba.intr_enable)
18269 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18270 	else if (rc != MBX_TIMEOUT)
18271 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18272 	if (shdr_status || shdr_add_status || rc) {
18273 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18274 				"6125 POST_SGL_BLOCK mailbox command failed "
18275 				"status x%x add_status x%x mbx status x%x\n",
18276 				shdr_status, shdr_add_status, rc);
18277 		rc = -ENXIO;
18278 	}
18279 	return rc;
18280 }
18281 
18282 /**
18283  * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
18284  * @phba: pointer to lpfc hba data structure.
18285  * @post_nblist: pointer to the nvme buffer list.
18286  * @sb_count: number of nvme buffers.
18287  *
18288  * This routine walks the list of nvme buffers passed in. It attempts to
18289  * construct blocks of nvme buffer sgls that contain contiguous xris and
18290  * uses the non-embedded SGL block post mailbox command to post them to the
18291  * port. Any single nvme buffer sgl with a non-contiguous xri is posted with
18292  * the embedded SGL post mailbox command instead. The @post_nblist passed in
18293  * must be a local list, so no lock is needed while manipulating the list.
18294  *
18295  * Returns: number of successfully posted buffers; 0 or -EINVAL on failure.
18296  **/
18297 int
18298 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
18299 			   struct list_head *post_nblist, int sb_count)
18300 {
18301 	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
18302 	int status, sgl_size;
18303 	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
18304 	dma_addr_t pdma_phys_sgl1;
18305 	int last_xritag = NO_XRI;
18306 	int cur_xritag;
18307 	LIST_HEAD(prep_nblist);
18308 	LIST_HEAD(blck_nblist);
18309 	LIST_HEAD(nvme_nblist);
18310 
18311 	/* sanity check */
18312 	if (sb_count <= 0)
18313 		return -EINVAL;
18314 
18315 	sgl_size = phba->cfg_sg_dma_buf_size;
18316 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
18317 		list_del_init(&lpfc_ncmd->list);
18318 		block_cnt++;
18319 		if ((last_xritag != NO_XRI) &&
18320 		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
18321 			/* a hole in xri block, form a sgl posting block */
18322 			list_splice_init(&prep_nblist, &blck_nblist);
18323 			post_cnt = block_cnt - 1;
18324 			/* prepare list for next posting block */
18325 			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18326 			block_cnt = 1;
18327 		} else {
18328 			/* prepare list for next posting block */
18329 			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18330 			/* enough sgls for non-embed sgl mbox command */
18331 			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
18332 				list_splice_init(&prep_nblist, &blck_nblist);
18333 				post_cnt = block_cnt;
18334 				block_cnt = 0;
18335 			}
18336 		}
18337 		num_posting++;
18338 		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18339 
18340 		/* end of repost sgl list condition for NVME buffers */
18341 		if (num_posting == sb_count) {
18342 			if (post_cnt == 0) {
18343 				/* last sgl posting block */
18344 				list_splice_init(&prep_nblist, &blck_nblist);
18345 				post_cnt = block_cnt;
18346 			} else if (block_cnt == 1) {
18347 				/* last single sgl with non-contiguous xri */
18348 				if (sgl_size > SGL_PAGE_SIZE)
18349 					pdma_phys_sgl1 =
18350 						lpfc_ncmd->dma_phys_sgl +
18351 						SGL_PAGE_SIZE;
18352 				else
18353 					pdma_phys_sgl1 = 0;
18354 				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18355 				status = lpfc_sli4_post_sgl(
18356 						phba, lpfc_ncmd->dma_phys_sgl,
18357 						pdma_phys_sgl1, cur_xritag);
18358 				if (status) {
18359 					/* Post error.  Buffer unavailable. */
18360 					lpfc_ncmd->flags |=
18361 						LPFC_SBUF_NOT_POSTED;
18362 				} else {
18363 					/* Post success. Buffer available. */
18364 					lpfc_ncmd->flags &=
18365 						~LPFC_SBUF_NOT_POSTED;
18366 					lpfc_ncmd->status = IOSTAT_SUCCESS;
18367 					num_posted++;
18368 				}
18369 				/* posted or not, put on NVME buffer sgl list */
18370 				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18371 			}
18372 		}
18373 
18374 		/* continue until a nembed page worth of sgls */
18375 		if (post_cnt == 0)
18376 			continue;
18377 
18378 		/* post block of NVME buffer list sgls */
18379 		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18380 						     post_cnt);
18381 
18382 		/* don't reset xritag due to hole in xri block */
18383 		if (block_cnt == 0)
18384 			last_xritag = NO_XRI;
18385 
18386 		/* reset NVME buffer post count for next round of posting */
18387 		post_cnt = 0;
18388 
18389 		/* put posted NVME buffer-sgl posted on NVME buffer sgl list */
18390 		while (!list_empty(&blck_nblist)) {
18391 			list_remove_head(&blck_nblist, lpfc_ncmd,
18392 					 struct lpfc_io_buf, list);
18393 			if (status) {
18394 				/* Post error.  Mark buffer unavailable. */
18395 				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18396 			} else {
18397 				/* Post success, Mark buffer available. */
18398 				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18399 				lpfc_ncmd->status = IOSTAT_SUCCESS;
18400 				num_posted++;
18401 			}
18402 			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18403 		}
18404 	}
18405 	/* Push NVME buffers with sgl posted to the available list */
18406 	lpfc_io_buf_replenish(phba, &nvme_nblist);
18407 
18408 	return num_posted;
18409 }
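/*
 * Grouping example (illustrative): for buffers holding xris 10, 11, 12,
 * 50, 51, the walk above posts {10,11,12} as one non-embedded block when
 * the hole before 50 is detected, then {50,51} at end of list. A lone
 * buffer with a non-contiguous xri instead takes the embedded
 * lpfc_sli4_post_sgl() path.
 */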
18410 
18411 /**
18412  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
18413  * @phba: pointer to lpfc_hba struct that the frame was received on
18414  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18415  *
18416  * This function checks the fields in the @fc_hdr to see if the FC frame is a
18417  * valid type of frame that the LPFC driver will handle. This function will
18418  * return zero if the frame is valid or a non-zero value when the
18419  * frame does not pass the check.
18420  **/
18421 static int
18422 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18423 {
18425 	struct fc_vft_header *fc_vft_hdr;
18426 	uint32_t *header = (uint32_t *) fc_hdr;
18427 
18428 #define FC_RCTL_MDS_DIAGS	0xF4
18429 
18430 	switch (fc_hdr->fh_r_ctl) {
18431 	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
18432 	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
18433 	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
18434 	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
18435 	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
18436 	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
18437 	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
18438 	case FC_RCTL_DD_CMD_STATUS:	/* command status */
18439 	case FC_RCTL_ELS_REQ:	/* extended link services request */
18440 	case FC_RCTL_ELS_REP:	/* extended link services reply */
18441 	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
18442 	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
18443 	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
18444 	case FC_RCTL_BA_RMC: 	/* remove connection */
18445 	case FC_RCTL_BA_ACC:	/* basic accept */
18446 	case FC_RCTL_BA_RJT:	/* basic reject */
18447 	case FC_RCTL_BA_PRMT:
18448 	case FC_RCTL_ACK_1:	/* acknowledge_1 */
18449 	case FC_RCTL_ACK_0:	/* acknowledge_0 */
18450 	case FC_RCTL_P_RJT:	/* port reject */
18451 	case FC_RCTL_F_RJT:	/* fabric reject */
18452 	case FC_RCTL_P_BSY:	/* port busy */
18453 	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
18454 	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
18455 	case FC_RCTL_LCR:	/* link credit reset */
18456 	case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
18457 	case FC_RCTL_END:	/* end */
18458 		break;
18459 	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
18460 		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18461 		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18462 		return lpfc_fc_frame_check(phba, fc_hdr);
18463 	case FC_RCTL_BA_NOP:	/* basic link service NOP */
18464 	default:
18465 		goto drop;
18466 	}
18467 
18468 	switch (fc_hdr->fh_type) {
18469 	case FC_TYPE_BLS:
18470 	case FC_TYPE_ELS:
18471 	case FC_TYPE_FCP:
18472 	case FC_TYPE_CT:
18473 	case FC_TYPE_NVME:
18474 		break;
18475 	case FC_TYPE_IP:
18476 	case FC_TYPE_ILS:
18477 	default:
18478 		goto drop;
18479 	}
18480 
18481 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18482 			"2538 Received frame rctl:x%x, type:x%x, "
18483 			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18484 			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18485 			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18486 			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18487 			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18488 			be32_to_cpu(header[6]));
18489 	return 0;
18490 drop:
18491 	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18492 			"2539 Dropped frame rctl:x%x type:x%x\n",
18493 			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18494 	return 1;
18495 }
18496 
18497 /**
18498  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
18499  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18500  *
18501  * This function processes the FC header to retrieve the VFI from the VF
18502  * header, if one exists. This function will return the VFI if one exists
18503  * or 0 if no VFT header exists.
18504  **/
18505 static uint32_t
18506 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18507 {
18508 	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18509 
18510 	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18511 		return 0;
18512 	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
18513 }
18514 
18515 /**
18516  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
18517  * @phba: Pointer to the HBA structure to search for the vport on
18518  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18519  * @fcfi: The FC Fabric ID that the frame came from
18520  * @did: Destination ID to match against
18521  *
18522  * This function searches the @phba for a vport that matches the content of the
18523  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
18524  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
18525  * returns the matching vport pointer or NULL if unable to match frame to a
18526  * vport.
18527  **/
18528 static struct lpfc_vport *
18529 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18530 		       uint16_t fcfi, uint32_t did)
18531 {
18532 	struct lpfc_vport **vports;
18533 	struct lpfc_vport *vport = NULL;
18534 	int i;
18535 
18536 	if (did == Fabric_DID)
18537 		return phba->pport;
18538 	if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
18539 	    phba->link_state != LPFC_HBA_READY)
18540 		return phba->pport;
18541 
18542 	vports = lpfc_create_vport_work_array(phba);
18543 	if (vports != NULL) {
18544 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18545 			if (phba->fcf.fcfi == fcfi &&
18546 			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18547 			    vports[i]->fc_myDID == did) {
18548 				vport = vports[i];
18549 				break;
18550 			}
18551 		}
18552 	}
18553 	lpfc_destroy_vport_work_array(phba, vports);
18554 	return vport;
18555 }
18556 
18557 /**
18558  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
18559  * @vport: The vport to work on.
18560  *
18561  * This function updates the receive sequence time stamp for this vport. The
18562  * receive sequence time stamp indicates the time that the last frame of
18563  * the sequence that has been idle for the longest amount of time was
18564  * received. The driver uses this time stamp to determine if any received
18565  * sequences have timed out.
18566  **/
18567 static void
18568 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18569 {
18570 	struct lpfc_dmabuf *h_buf;
18571 	struct hbq_dmabuf *dmabuf = NULL;
18572 
18573 	/* get the oldest sequence on the rcv list */
18574 	h_buf = list_get_first(&vport->rcv_buffer_list,
18575 			       struct lpfc_dmabuf, list);
18576 	if (!h_buf)
18577 		return;
18578 	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18579 	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18580 }
18581 
18582 /**
18583  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
18584  * @vport: The vport that the received sequences were sent to.
18585  *
18586  * This function cleans up all outstanding received sequences. This is called
18587  * by the driver when a link event or user action invalidates all the received
18588  * sequences.
18589  **/
18590 void
18591 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18592 {
18593 	struct lpfc_dmabuf *h_buf, *hnext;
18594 	struct lpfc_dmabuf *d_buf, *dnext;
18595 	struct hbq_dmabuf *dmabuf = NULL;
18596 
18597 	/* start with the oldest sequence on the rcv list */
18598 	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18599 		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18600 		list_del_init(&dmabuf->hbuf.list);
18601 		list_for_each_entry_safe(d_buf, dnext,
18602 					 &dmabuf->dbuf.list, list) {
18603 			list_del_init(&d_buf->list);
18604 			lpfc_in_buf_free(vport->phba, d_buf);
18605 		}
18606 		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18607 	}
18608 }
18609 
18610 /**
18611  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
18612  * @vport: The vport that the received sequences were sent to.
18613  *
18614  * This function determines whether any received sequences have timed out by
18615  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
18616  * indicates that there is at least one timed-out sequence, this routine will
18617  * go through the received sequences one at a time from most inactive to most
18618  * active to determine which ones need to be cleaned up. Once it has determined
18619  * that a sequence needs to be cleaned up it will simply free up the resources
18620  * without sending an abort.
18621  **/
18622 void
18623 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18624 {
18625 	struct lpfc_dmabuf *h_buf, *hnext;
18626 	struct lpfc_dmabuf *d_buf, *dnext;
18627 	struct hbq_dmabuf *dmabuf = NULL;
18628 	unsigned long timeout;
18629 	int abort_count = 0;
18630 
18631 	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18632 		   vport->rcv_buffer_time_stamp);
18633 	if (list_empty(&vport->rcv_buffer_list) ||
18634 	    time_before(jiffies, timeout))
18635 		return;
18636 	/* start with the oldest sequence on the rcv list */
18637 	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18638 		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18639 		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18640 			   dmabuf->time_stamp);
18641 		if (time_before(jiffies, timeout))
18642 			break;
18643 		abort_count++;
18644 		list_del_init(&dmabuf->hbuf.list);
18645 		list_for_each_entry_safe(d_buf, dnext,
18646 					 &dmabuf->dbuf.list, list) {
18647 			list_del_init(&d_buf->list);
18648 			lpfc_in_buf_free(vport->phba, d_buf);
18649 		}
18650 		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18651 	}
18652 	if (abort_count)
18653 		lpfc_update_rcv_time_stamp(vport);
18654 }
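/*
 * Timing sketch (illustrative figures): with fc_edtov = 2000ms and a
 * sequence last stamped at time T, that sequence is reaped once jiffies
 * pass T + msecs_to_jiffies(2000). Because rcv_buffer_list is kept oldest
 * first, the walk above can stop at the first sequence still inside its
 * timeout window.
 */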
18655 
18656 /**
18657  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
18658  * @vport: pointer to a virtual port
18659  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
18660  *
18661  * This function searches through the existing incomplete sequences that have
18662  * been sent to this @vport. If the frame matches one of the incomplete
18663  * sequences then the dbuf in the @dmabuf is added to the list of frames that
18664  * make up that sequence. If no sequence is found that matches this frame then
18665  * the function adds the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
18666  * This function returns a pointer to the first dmabuf in the sequence list that
18667  * the frame was linked to.
18668  **/
18669 static struct hbq_dmabuf *
18670 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18671 {
18672 	struct fc_frame_header *new_hdr;
18673 	struct fc_frame_header *temp_hdr;
18674 	struct lpfc_dmabuf *d_buf;
18675 	struct lpfc_dmabuf *h_buf;
18676 	struct hbq_dmabuf *seq_dmabuf = NULL;
18677 	struct hbq_dmabuf *temp_dmabuf = NULL;
18678 	uint8_t	found = 0;
18679 
18680 	INIT_LIST_HEAD(&dmabuf->dbuf.list);
18681 	dmabuf->time_stamp = jiffies;
18682 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18683 
18684 	/* Use the hdr_buf to find the sequence that this frame belongs to */
18685 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18686 		temp_hdr = (struct fc_frame_header *)h_buf->virt;
18687 		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18688 		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18689 		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18690 			continue;
18691 		/* found a pending sequence that matches this frame */
18692 		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18693 		break;
18694 	}
18695 	if (!seq_dmabuf) {
18696 		/*
18697 		 * This indicates first frame received for this sequence.
18698 		 * Queue the buffer on the vport's rcv_buffer_list.
18699 		 */
18700 		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18701 		lpfc_update_rcv_time_stamp(vport);
18702 		return dmabuf;
18703 	}
18704 	temp_hdr = seq_dmabuf->hbuf.virt;
18705 	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18706 		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18707 		list_del_init(&seq_dmabuf->hbuf.list);
18708 		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18709 		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18710 		lpfc_update_rcv_time_stamp(vport);
18711 		return dmabuf;
18712 	}
18713 	/* move this sequence to the tail to indicate a young sequence */
18714 	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18715 	seq_dmabuf->time_stamp = jiffies;
18716 	lpfc_update_rcv_time_stamp(vport);
18717 	if (list_empty(&seq_dmabuf->dbuf.list)) {
18718 		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18719 		return seq_dmabuf;
18720 	}
18721 	/* find the correct place in the sequence to insert this frame */
18722 	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18723 	while (!found) {
18724 		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18725 		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
18726 		/*
18727 		 * If the frame's sequence count is greater than the frame on
18728 		 * the list then insert the frame right after this frame
18729 		 */
18730 		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18731 			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18732 			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18733 			found = 1;
18734 			break;
18735 		}
18736 
18737 		if (&d_buf->list == &seq_dmabuf->dbuf.list)
18738 			break;
18739 		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18740 	}
18741 
18742 	if (found)
18743 		return seq_dmabuf;
18744 	return NULL;
18745 }
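/*
 * Ordering example (illustrative): if a pending sequence already holds
 * frames with fh_seq_cnt 0, 1 and 3, an arriving frame with fh_seq_cnt 2
 * is walked backward from 3 and inserted after 1, keeping the dbuf list
 * sorted by sequence count.
 */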
18746 
18747 /**
18748  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18749  * @vport: pointer to a virtual port
18750  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18751  *
18752  * This function tries to abort the partially assembled sequence described
18753  * by the information from the basic abort @dmabuf. It checks whether such a
18754  * partially assembled sequence is held by the driver. If so, it frees up all
18755  * the frames from the partially assembled sequence.
18756  *
18757  * Return
18758  * true  -- if a matching partially assembled sequence is present and all
18759  *          its frames have been freed;
18760  * false -- if there is no matching partially assembled sequence, so
18761  *          nothing was aborted in the lower layer driver
18762  **/
18763 static bool
18764 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18765 			    struct hbq_dmabuf *dmabuf)
18766 {
18767 	struct fc_frame_header *new_hdr;
18768 	struct fc_frame_header *temp_hdr;
18769 	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18770 	struct hbq_dmabuf *seq_dmabuf = NULL;
18771 
18772 	/* Use the hdr_buf to find the sequence that matches this frame */
18773 	INIT_LIST_HEAD(&dmabuf->dbuf.list);
18774 	INIT_LIST_HEAD(&dmabuf->hbuf.list);
18775 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18776 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18777 		temp_hdr = (struct fc_frame_header *)h_buf->virt;
18778 		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18779 		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18780 		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18781 			continue;
18782 		/* found a pending sequence that matches this frame */
18783 		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18784 		break;
18785 	}
18786 
18787 	/* Free up all the frames from the partially assembled sequence */
18788 	if (seq_dmabuf) {
18789 		list_for_each_entry_safe(d_buf, n_buf,
18790 					 &seq_dmabuf->dbuf.list, list) {
18791 			list_del_init(&d_buf->list);
18792 			lpfc_in_buf_free(vport->phba, d_buf);
18793 		}
18794 		return true;
18795 	}
18796 	return false;
18797 }
18798 
18799 /**
18800  * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18801  * @vport: pointer to a virtual port
18802  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18803  *
18804  * This function tries to abort the sequence assembled at the upper level
18805  * protocol, described by the information from the basic abort @dmabuf. It
18806  * checks whether such a pending context exists at the upper level protocol.
18807  * If so, it cleans up the pending context.
18808  *
18809  * Return
18810  * true  -- if a matching pending context of the sequence was cleaned up
18811  *          at the ulp;
18812  * false -- if no matching pending context of the sequence is present
18813  *          at the ulp.
18814  **/
18815 static bool
18816 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18817 {
18818 	struct lpfc_hba *phba = vport->phba;
18819 	int handled;
18820 
18821 	/* Accepting abort at ulp with SLI4 only */
18822 	if (phba->sli_rev < LPFC_SLI_REV4)
18823 		return false;
18824 
18825 	/* Register all caring upper level protocols to attend abort */
18826 	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18827 	if (handled)
18828 		return true;
18829 
18830 	return false;
18831 }
18832 
18833 /**
18834  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18835  * @phba: Pointer to HBA context object.
18836  * @cmd_iocbq: pointer to the command iocbq structure.
18837  * @rsp_iocbq: pointer to the response iocbq structure.
18838  *
18839  * This function handles the sequence abort response iocb command complete
18840  * event. It properly releases the memory allocated to the sequence abort
18841  * accept iocb.
18842  **/
18843 static void
18844 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18845 			     struct lpfc_iocbq *cmd_iocbq,
18846 			     struct lpfc_iocbq *rsp_iocbq)
18847 {
18848 	if (cmd_iocbq) {
18849 		lpfc_nlp_put(cmd_iocbq->ndlp);
18850 		lpfc_sli_release_iocbq(phba, cmd_iocbq);
18851 	}
18852 
18853 	/* Failure means BLS ABORT RSP did not get delivered to remote node */
18854 	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18855 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18856 			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
18857 			get_job_ulpstatus(phba, rsp_iocbq),
18858 			get_job_word4(phba, rsp_iocbq));
18859 }
18860 
18861 /**
18862  * lpfc_sli4_xri_inrange - check whether an xri is owned by the driver
18863  * @phba: Pointer to HBA context object.
18864  * @xri: xri id in transaction.
18865  *
18866  * This function validates that the xri maps to the known range of XRIs
18867  * allocated and used by the driver, returning its index or NO_XRI.
18868  **/
18869 uint16_t
18870 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18871 		      uint16_t xri)
18872 {
18873 	uint16_t i;
18874 
18875 	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
18876 		if (xri == phba->sli4_hba.xri_ids[i])
18877 			return i;
18878 	}
18879 	return NO_XRI;
18880 }
18881 
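/*
 * Example usage (as in lpfc_sli4_seq_abort_rsp() below): translate a wire
 * XRI into the driver's logical index before acting on it:
 *
 *	lxri = lpfc_sli4_xri_inrange(phba, xri);
 *	if (lxri != NO_XRI)
 *		lpfc_set_rrq_active(phba, ndlp, lxri,
 *				    (xri == oxid) ? rxid : oxid, 0);
 */
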
18882 /**
18883  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
18884  * @vport: pointer to a virtual port.
18885  * @fc_hdr: pointer to a FC frame header.
18886  * @aborted: was the partially assembled receive sequence successfully aborted
18887  *
18888  * This function sends a basic response to a previous unsol sequence abort
18889  * event after aborting the sequence handling.
18890  **/
18891 void
18892 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
18893 			struct fc_frame_header *fc_hdr, bool aborted)
18894 {
18895 	struct lpfc_hba *phba = vport->phba;
18896 	struct lpfc_iocbq *ctiocb = NULL;
18897 	struct lpfc_nodelist *ndlp;
18898 	uint16_t oxid, rxid, xri, lxri;
18899 	uint32_t sid, fctl;
18900 	union lpfc_wqe128 *icmd;
18901 	int rc;
18902 
18903 	if (!lpfc_is_link_up(phba))
18904 		return;
18905 
18906 	sid = sli4_sid_from_fc_hdr(fc_hdr);
18907 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
18908 	rxid = be16_to_cpu(fc_hdr->fh_rx_id);
18909 
18910 	ndlp = lpfc_findnode_did(vport, sid);
18911 	if (!ndlp) {
18912 		ndlp = lpfc_nlp_init(vport, sid);
18913 		if (!ndlp) {
18914 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
18915 					 "1268 Failed to allocate ndlp for "
18916 					 "oxid:x%x SID:x%x\n", oxid, sid);
18917 			return;
18918 		}
18919 		/* Put ndlp onto vport node list */
18920 		lpfc_enqueue_node(vport, ndlp);
18921 	}
18922 
18923 	/* Allocate buffer for rsp iocb */
18924 	ctiocb = lpfc_sli_get_iocbq(phba);
18925 	if (!ctiocb)
18926 		return;
18927 
18928 	icmd = &ctiocb->wqe;
18929 
18930 	/* Extract the F_CTL field from FC_HDR */
18931 	fctl = sli4_fctl_from_fc_hdr(fc_hdr);
18932 
18933 	ctiocb->ndlp = lpfc_nlp_get(ndlp);
18934 	if (!ctiocb->ndlp) {
18935 		lpfc_sli_release_iocbq(phba, ctiocb);
18936 		return;
18937 	}
18938 
18939 	ctiocb->vport = vport;
18940 	ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
18941 	ctiocb->sli4_lxritag = NO_XRI;
18942 	ctiocb->sli4_xritag = NO_XRI;
18943 	ctiocb->abort_rctl = FC_RCTL_BA_ACC;
18944 
18945 	if (fctl & FC_FC_EX_CTX)
18946 		/* Exchange responder sent the abort so we
18947 		 * own the oxid.
18948 		 */
18949 		xri = oxid;
18950 	else
18951 		xri = rxid;
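	/* If the XRI maps into the driver's range, flag it RRQ-active so the
	 * exchange is not reused for this node until the resource recovery
	 * qualifier completes.
	 */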
18952 	lxri = lpfc_sli4_xri_inrange(phba, xri);
18953 	if (lxri != NO_XRI)
18954 		lpfc_set_rrq_active(phba, ndlp, lxri,
18955 			(xri == oxid) ? rxid : oxid, 0);
18956 	/* For BA_ABTS from exchange responder, if the logical xri with
18957 	 * the oxid maps to the FCP XRI range, the port no longer has
18958 	 * that exchange context, so send a BLS_RJT. Override the IOCB for
18959 	 * a BA_RJT.
18960 	 */
18961 	if ((fctl & FC_FC_EX_CTX) &&
18962 	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
18963 		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18964 		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18965 		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18966 		       FC_BA_RJT_INV_XID);
18967 		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18968 		       FC_BA_RJT_UNABLE);
18969 	}
18970 
18971 	/* If BA_ABTS failed to abort a partially assembled receive sequence,
18972 	 * the driver no longer has that exchange, send a BLS_RJT. Override
18973 	 * the IOCB for a BA_RJT.
18974 	 */
18975 	if (aborted == false) {
18976 		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
18977 		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
18978 		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
18979 		       FC_BA_RJT_INV_XID);
18980 		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
18981 		       FC_BA_RJT_UNABLE);
18982 	}
18983 
18984 	if (fctl & FC_FC_EX_CTX) {
18985 		/* ABTS sent by responder to CT exchange, construction
18986 		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
18987 		 * field and RX_ID from ABTS for RX_ID field.
18988 		 */
18989 		ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
18990 		bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
18991 	} else {
18992 		/* ABTS sent by initiator to CT exchange, construction
18993 		 * of BA_ACC will need to allocate a new XRI as for the
18994 		 * XRI_TAG field.
18995 		 */
18996 		ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
18997 	}
18998 
18999 	/* OX_ID is invariant regardless of who sent the ABTS to the exchange */
19000 	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
19001 	bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
19002 
19003 	/* Use CT=VPI */
19004 	bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
19005 	       ndlp->nlp_DID);
19006 	bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
19007 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
19008 	bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
19009 
19010 	/* Xmit CT abts response on exchange <xid> */
19011 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
19012 			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
19013 			 ctiocb->abort_rctl, oxid, phba->link_state);
19014 
19015 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
19016 	if (rc == IOCB_ERROR) {
19017 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19018 				 "2925 Failed to issue CT ABTS RSP x%x on "
19019 				 "xri x%x, Data x%x\n",
19020 				 ctiocb->abort_rctl, oxid,
19021 				 phba->link_state);
19022 		lpfc_nlp_put(ndlp);
19023 		ctiocb->ndlp = NULL;
19024 		lpfc_sli_release_iocbq(phba, ctiocb);
19025 	}
19026 
19027 	/* if the only usage of this nodelist is the BLS response, release the
19028 	 * initial ref so the ndlp is freed when the transmit completes
19029 	 */
19030 	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE &&
19031 	    !(ndlp->nlp_flag & NLP_DROPPED) &&
19032 	    !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) {
19033 		ndlp->nlp_flag |= NLP_DROPPED;
19034 		lpfc_nlp_put(ndlp);
19035 	}
19036 }
19037 
19038 /**
19039  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
19040  * @vport: Pointer to the vport on which this sequence was received
19041  * @dmabuf: pointer to a dmabuf that describes the FC sequence
19042  *
19043  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
19044  * receive sequence is only partially assembled by the driver, it shall abort
19045  * the partially assembled frames for the sequence. Otherwise, if the
19046  * unsolicited receive sequence has been completely assembled and passed to
19047  * the Upper Layer Protocol (ULP), it then marks the per-oxid status to
19048  * indicate that the unsolicited sequence has been aborted. After that, it
19049  * issues a BA_ACC or BA_RJT in response to the abort.
19050  **/
19051 static void
19052 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
19053 			     struct hbq_dmabuf *dmabuf)
19054 {
19055 	struct lpfc_hba *phba = vport->phba;
19056 	struct fc_frame_header fc_hdr;
19057 	uint32_t fctl;
19058 	bool aborted;
19059 
19060 	/* Make a copy of fc_hdr before the dmabuf is released */
19061 	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
19062 	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
19063 
19064 	if (fctl & FC_FC_EX_CTX) {
19065 		/* ABTS by responder to exchange, no cleanup needed */
19066 		aborted = true;
19067 	} else {
19068 		/* ABTS by initiator to exchange, need to do cleanup */
19069 		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
19070 		if (aborted == false)
19071 			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
19072 	}
19073 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
19074 
19075 	if (phba->nvmet_support) {
19076 		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
19077 		return;
19078 	}
19079 
19080 	/* Respond with BA_ACC or BA_RJT accordingly */
19081 	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
19082 }
19083 
19084 /**
19085  * lpfc_seq_complete - Indicates if a sequence is complete
19086  * @dmabuf: pointer to a dmabuf that describes the FC sequence
19087  *
19088  * This function checks the sequence, starting with the frame described by
19089  * @dmabuf, to see if all the frames associated with this sequence are present.
19090  * The frames associated with this sequence are linked to the @dmabuf using the
19091  * dbuf list. This function looks for three things: 1) the first frame has a
19092  * sequence count of zero; 2) some frame has the last-frame-of-sequence bit set
19093  * in F_CTL; and 3) there are no holes in the sequence count. The function
19094  * returns 1 when the sequence is complete, otherwise it returns 0.
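 *
 * For example, a three-frame sequence is complete when the frames carry
 * SEQ_CNT 0, 1 and 2 and the last frame has FC_FC_END_SEQ set in F_CTL.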
19095  **/
19096 static int
19097 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
19098 {
19099 	struct fc_frame_header *hdr;
19100 	struct lpfc_dmabuf *d_buf;
19101 	struct hbq_dmabuf *seq_dmabuf;
19102 	uint32_t fctl;
19103 	int seq_count = 0;
19104 
19105 	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19106 	/* make sure the first frame of the sequence has a sequence count of zero */
19107 	if (hdr->fh_seq_cnt != seq_count)
19108 		return 0;
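	/* Reassemble the 24-bit F_CTL field from its three header bytes */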
19109 	fctl = (hdr->fh_f_ctl[0] << 16 |
19110 		hdr->fh_f_ctl[1] << 8 |
19111 		hdr->fh_f_ctl[2]);
19112 	/* If last frame of sequence we can return success. */
19113 	if (fctl & FC_FC_END_SEQ)
19114 		return 1;
19115 	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
19116 		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19117 		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19118 		/* If there is a hole in the sequence count then fail. */
19119 		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
19120 			return 0;
19121 		fctl = (hdr->fh_f_ctl[0] << 16 |
19122 			hdr->fh_f_ctl[1] << 8 |
19123 			hdr->fh_f_ctl[2]);
19124 		/* If last frame of sequence we can return success. */
19125 		if (fctl & FC_FC_END_SEQ)
19126 			return 1;
19127 	}
19128 	return 0;
19129 }
19130 
19131 /**
19132  * lpfc_prep_seq - Prep sequence for ULP processing
19133  * @vport: Pointer to the vport on which this sequence was received
19134  * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
19135  *
19136  * This function takes a sequence, described by a list of frames, and creates
19137  * a list of iocbq structures to describe the sequence. This iocbq list will be
19138  * used to issue to the generic unsolicited sequence handler. This routine
19139  * returns a pointer to the first iocbq in the list. If the function is unable
19140  * to allocate an iocbq then it throws out the received frames that could
19141  * not be described and returns a pointer to the first iocbq. If unable to
19142  * allocate any iocbq (including the first) this function will return NULL.
19143  **/
19144 static struct lpfc_iocbq *
19145 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
19146 {
19147 	struct hbq_dmabuf *hbq_buf;
19148 	struct lpfc_dmabuf *d_buf, *n_buf;
19149 	struct lpfc_iocbq *first_iocbq, *iocbq;
19150 	struct fc_frame_header *fc_hdr;
19151 	uint32_t sid;
19152 	uint32_t len, tot_len;
19153 
19154 	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19155 	/* remove from receive buffer list */
19156 	list_del_init(&seq_dmabuf->hbuf.list);
19157 	lpfc_update_rcv_time_stamp(vport);
19158 	/* get the Remote Port's SID */
19159 	sid = sli4_sid_from_fc_hdr(fc_hdr);
19160 	tot_len = 0;
19161 	/* Get an iocbq struct to fill in. */
19162 	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
19163 	if (first_iocbq) {
19164 		/* Initialize the first IOCB. */
19165 		first_iocbq->wcqe_cmpl.total_data_placed = 0;
19166 		bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
19167 		       IOSTAT_SUCCESS);
19168 		first_iocbq->vport = vport;
19169 
19170 		/* Check FC Header to see what TYPE of frame we are rcv'ing */
19171 		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
19172 			bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
19173 			       sli4_did_from_fc_hdr(fc_hdr));
19174 		}
19175 
19176 		bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
19177 		       NO_XRI);
19178 		bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
19179 		       be16_to_cpu(fc_hdr->fh_ox_id));
19180 
19181 		/* put the first buffer into the first iocb */
19182 		tot_len = bf_get(lpfc_rcqe_length,
19183 				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
19184 
19185 		first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
19186 		first_iocbq->bpl_dmabuf = NULL;
19187 		/* Keep track of the BDE count */
19188 		first_iocbq->wcqe_cmpl.word3 = 1;
19189 
19190 		if (tot_len > LPFC_DATA_BUF_SIZE)
19191 			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
19192 				LPFC_DATA_BUF_SIZE;
19193 		else
19194 			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
19195 
19196 		first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
19197 		bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
19198 		       sid);
19199 	}
19200 	iocbq = first_iocbq;
19201 	/*
19202 	 * Each IOCBq can have two Buffers assigned, so go through the list
19203 	 * of buffers for this sequence and save two buffers in each IOCBq
19204 	 */
19205 	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
19206 		if (!iocbq) {
19207 			lpfc_in_buf_free(vport->phba, d_buf);
19208 			continue;
19209 		}
19210 		if (!iocbq->bpl_dmabuf) {
19211 			iocbq->bpl_dmabuf = d_buf;
19212 			iocbq->wcqe_cmpl.word3++;
19213 			/* We need to get the size out of the right CQE */
19214 			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19215 			len = bf_get(lpfc_rcqe_length,
19216 				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
19217 			iocbq->unsol_rcv_len = len;
19218 			iocbq->wcqe_cmpl.total_data_placed += len;
19219 			tot_len += len;
19220 		} else {
19221 			iocbq = lpfc_sli_get_iocbq(vport->phba);
19222 			if (!iocbq) {
19223 				if (first_iocbq) {
19224 					bf_set(lpfc_wcqe_c_status,
19225 					       &first_iocbq->wcqe_cmpl,
19226 					       IOSTAT_SUCCESS);
19227 					first_iocbq->wcqe_cmpl.parameter =
19228 						IOERR_NO_RESOURCES;
19229 				}
19230 				lpfc_in_buf_free(vport->phba, d_buf);
19231 				continue;
19232 			}
19233 			/* We need to get the size out of the right CQE */
19234 			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19235 			len = bf_get(lpfc_rcqe_length,
19236 				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
19237 			iocbq->cmd_dmabuf = d_buf;
19238 			iocbq->bpl_dmabuf = NULL;
19239 			iocbq->wcqe_cmpl.word3 = 1;
19240 
19241 			if (len > LPFC_DATA_BUF_SIZE)
19242 				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
19243 					LPFC_DATA_BUF_SIZE;
19244 			else
19245 				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
19246 					len;
19247 
19248 			tot_len += len;
19249 			iocbq->wcqe_cmpl.total_data_placed = tot_len;
19250 			bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
19251 			       sid);
19252 			list_add_tail(&iocbq->list, &first_iocbq->list);
19253 		}
19254 	}
19255 	/* Free the sequence's header buffer */
19256 	if (!first_iocbq)
19257 		lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
19258 
19259 	return first_iocbq;
19260 }
19261 
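/**
 * lpfc_sli4_send_seq_to_ulp - Deliver a completed sequence to the ULP
 * @vport: Pointer to the vport on which this sequence was received
 * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function builds an iocbq list for the sequence with lpfc_prep_seq()
 * and hands it to the unsolicited iocb handler for the ELS ring. The iocbqs
 * created for the sequence are released once the handler returns.
 **/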
19262 static void
19263 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
19264 			  struct hbq_dmabuf *seq_dmabuf)
19265 {
19266 	struct fc_frame_header *fc_hdr;
19267 	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
19268 	struct lpfc_hba *phba = vport->phba;
19269 
19270 	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19271 	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
19272 	if (!iocbq) {
19273 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19274 				"2707 Ring %d handler: Failed to allocate "
19275 				"iocb Rctl x%x Type x%x received\n",
19276 				LPFC_ELS_RING,
19277 				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19278 		return;
19279 	}
19280 	if (!lpfc_complete_unsol_iocb(phba,
19281 				      phba->sli4_hba.els_wq->pring,
19282 				      iocbq, fc_hdr->fh_r_ctl,
19283 				      fc_hdr->fh_type)) {
19284 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19285 				"2540 Ring %d handler: unexpected Rctl "
19286 				"x%x Type x%x received\n",
19287 				LPFC_ELS_RING,
19288 				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19289 		lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
19290 	}
19291 
19292 	/* Free iocb created in lpfc_prep_seq */
19293 	list_for_each_entry_safe(curr_iocb, next_iocb,
19294 				 &iocbq->list, list) {
19295 		list_del_init(&curr_iocb->list);
19296 		lpfc_sli_release_iocbq(phba, curr_iocb);
19297 	}
19298 	lpfc_sli_release_iocbq(phba, iocbq);
19299 }
19300 
19301 static void
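/**
 * lpfc_sli4_mds_loopback_cmpl - MDS loopback SEND_FRAME completion handler
 * @phba: Pointer to HBA context object.
 * @cmdiocb: pointer to the command iocbq that carried the loopback frame.
 * @rspiocb: pointer to the response iocbq (unused).
 *
 * This function frees the frame payload and the command iocbq, then
 * restarts processing of the driver's transmit queue.
 **/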
19302 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
19303 			    struct lpfc_iocbq *rspiocb)
19304 {
19305 	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
19306 
19307 	if (pcmd && pcmd->virt)
19308 		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19309 	kfree(pcmd);
19310 	lpfc_sli_release_iocbq(phba, cmdiocb);
19311 	lpfc_drain_txq(phba);
19312 }
19313 
19314 static void
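/**
 * lpfc_sli4_handle_mds_loopback - Echo an MDS diagnostic frame back out
 * @vport: Pointer to the vport on which the frame was received.
 * @dmabuf: pointer to a dmabuf that describes the received frame.
 *
 * This function copies the received payload into a newly allocated buffer
 * and issues a SEND_FRAME WQE that retransmits the frame with its original
 * FC header. If no iocbq is available, the event is queued for the worker
 * thread to process later.
 **/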
19315 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
19316 			      struct hbq_dmabuf *dmabuf)
19317 {
19318 	struct fc_frame_header *fc_hdr;
19319 	struct lpfc_hba *phba = vport->phba;
19320 	struct lpfc_iocbq *iocbq = NULL;
19321 	union  lpfc_wqe128 *pwqe;
19322 	struct lpfc_dmabuf *pcmd = NULL;
19323 	uint32_t frame_len;
19324 	int rc;
19325 	unsigned long iflags;
19326 
19327 	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19328 	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
19329 
19330 	/* Send the received frame back */
19331 	iocbq = lpfc_sli_get_iocbq(phba);
19332 	if (!iocbq) {
19333 		/* Queue cq event and wakeup worker thread to process it */
19334 		spin_lock_irqsave(&phba->hbalock, iflags);
19335 		list_add_tail(&dmabuf->cq_event.list,
19336 			      &phba->sli4_hba.sp_queue_event);
19337 		spin_unlock_irqrestore(&phba->hbalock, iflags);
19338 		set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
19339 		lpfc_worker_wake_up(phba);
19340 		return;
19341 	}
19342 
19343 	/* Allocate buffer for command payload */
19344 	pcmd = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
19345 	if (pcmd)
19346 		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19347 					    &pcmd->phys);
19348 	if (!pcmd || !pcmd->virt)
19349 		goto exit;
19350 
19351 	INIT_LIST_HEAD(&pcmd->list);
19352 
19353 	/* copyin the payload */
19354 	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19355 
19356 	iocbq->cmd_dmabuf = pcmd;
19357 	iocbq->vport = vport;
19358 	iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19359 	iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19360 	iocbq->num_bdes = 0;
19361 
19362 	pwqe = &iocbq->wqe;
19363 	/* fill in BDE's for command */
19364 	pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
19365 	pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
19366 	pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
19367 	pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
19368 
19369 	pwqe->send_frame.frame_len = frame_len;
19370 	pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
19371 	pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
19372 	pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
19373 	pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
19374 	pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
19375 	pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
19376 
19377 	pwqe->generic.wqe_com.word7 = 0;
19378 	pwqe->generic.wqe_com.word10 = 0;
19379 
19380 	bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
19381 	bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
19382 	bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
19383 	bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
19384 	bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
19385 	bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
19386 	bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
19387 	bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
19388 	bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
19389 	bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
19390 	bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
19391 	bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
19392 	pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
19393 
19394 	iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19395 
19396 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19397 	if (rc == IOCB_ERROR)
19398 		goto exit;
19399 
19400 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
19401 	return;
19402 
19403 exit:
19404 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19405 			"2023 Unable to process MDS loopback frame\n");
19406 	if (pcmd && pcmd->virt)
19407 		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19408 	kfree(pcmd);
19409 	if (iocbq)
19410 		lpfc_sli_release_iocbq(phba, iocbq);
19411 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
19412 }
19413 
19414 /**
19415  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19416  * @phba: Pointer to HBA context object.
19417  * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19418  *
19419  * This function is called with no lock held. This function processes all
19420  * the received buffers and hands them to the upper layers when a received
19421  * buffer indicates that it is the final frame in the sequence. The interrupt
19422  * service routine queues received buffers from interrupt context; the
19423  * worker thread then calls lpfc_sli4_handle_received_buffer, which calls the
19424  * appropriate receive function when the final frame in a sequence is received.
19425  **/
19426 void
19427 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19428 				 struct hbq_dmabuf *dmabuf)
19429 {
19430 	struct hbq_dmabuf *seq_dmabuf;
19431 	struct fc_frame_header *fc_hdr;
19432 	struct lpfc_vport *vport;
19433 	uint32_t fcfi;
19434 	uint32_t did;
19435 
19436 	/* Process each received buffer */
19437 	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19438 
19439 	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19440 	    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19441 		vport = phba->pport;
19442 		/* Handle MDS Loopback frames */
19443 		if (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
19444 			lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19445 		else
19446 			lpfc_in_buf_free(phba, &dmabuf->dbuf);
19447 		return;
19448 	}
19449 
19450 	/* check to see if this a valid type of frame */
19451 	if (lpfc_fc_frame_check(phba, fc_hdr)) {
19452 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
19453 		return;
19454 	}
19455 
19456 	if ((bf_get(lpfc_cqe_code,
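	/* The FCF index is carried in different CQE words depending on
	 * whether the port reports V1 receive completions.
	 */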
19457 		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
19458 		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19459 			      &dmabuf->cq_event.cqe.rcqe_cmpl);
19460 	else
19461 		fcfi = bf_get(lpfc_rcqe_fcf_id,
19462 			      &dmabuf->cq_event.cqe.rcqe_cmpl);
19463 
19464 	if (fc_hdr->fh_r_ctl == 0xF4 && fc_hdr->fh_type == 0xFF) {
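	/* An R_CTL of 0xF4 (MDS diags) with a type of 0xFF is also treated
	 * as an MDS loopback frame and echoed back to the wire.
	 */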
19465 		vport = phba->pport;
19466 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19467 				"2023 MDS Loopback %d bytes\n",
19468 				bf_get(lpfc_rcqe_length,
19469 				       &dmabuf->cq_event.cqe.rcqe_cmpl));
19470 		/* Handle MDS Loopback frames */
19471 		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19472 		return;
19473 	}
19474 
19475 	/* d_id this frame is directed to */
19476 	did = sli4_did_from_fc_hdr(fc_hdr);
19477 
19478 	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19479 	if (!vport) {
19480 		/* throw out the frame */
19481 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
19482 		return;
19483 	}
19484 
19485 	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19486 	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19487 		(did != Fabric_DID)) {
19488 		/*
19489 		 * Throw out the frame if we are not pt2pt.
19490 		 * The pt2pt protocol allows for discovery frames
19491 		 * to be received without a registered VPI.
19492 		 */
19493 		if (!test_bit(FC_PT2PT, &vport->fc_flag) ||
19494 		    phba->link_state == LPFC_HBA_READY) {
19495 			lpfc_in_buf_free(phba, &dmabuf->dbuf);
19496 			return;
19497 		}
19498 	}
19499 
19500 	/* Handle the basic abort sequence (BA_ABTS) event */
19501 	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19502 		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19503 		return;
19504 	}
19505 
19506 	/* Link this frame */
19507 	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19508 	if (!seq_dmabuf) {
19509 		/* unable to add frame to vport - throw it out */
19510 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
19511 		return;
19512 	}
19513 	/* If not last frame in sequence continue processing frames. */
19514 	if (!lpfc_seq_complete(seq_dmabuf))
19515 		return;
19516 
19517 	/* Send the complete sequence to the upper layer protocol */
19518 	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19519 }
19520 
19521 /**
19522  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19523  * @phba: pointer to lpfc hba data structure.
19524  *
19525  * This routine is invoked to post rpi header templates to the
19526  * HBA consistent with the SLI-4 interface spec.  This routine
19527  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19528  * SLI4_PAGE_SIZE modulo 64 rpi context headers.
19529  *
19530  * This routine does not require any locks.  It's usage is expected
19531  * to be driver load or reset recovery when the driver is
19532  * sequential.
19533  *
19534  * Return codes
19535  * 	0 - successful
19536  *      -EIO - The mailbox failed to complete successfully.
19537  * 	When this error occurs, the driver is not guaranteed
19538  *	to have any rpi regions posted to the device and
19539  *	must either attempt to repost the regions or take a
19540  *	fatal error.
19541  **/
19542 int
19543 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19544 {
19545 	struct lpfc_rpi_hdr *rpi_page;
19546 	uint32_t rc = 0;
19547 	uint16_t lrpi = 0;
19548 
19549 	/* SLI4 ports that support extents do not require RPI headers. */
19550 	if (!phba->sli4_hba.rpi_hdrs_in_use)
19551 		goto exit;
19552 	if (phba->sli4_hba.extents_in_use)
19553 		return -EIO;
19554 
19555 	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19556 		/*
19557 		 * Assign the rpi headers a physical rpi only if the driver
19558 		 * has not initialized those resources.  A port reset only
19559 		 * needs the headers posted.
19560 		 */
19561 		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19562 		    LPFC_RPI_RSRC_RDY)
19563 			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19564 
19565 		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19566 		if (rc != MBX_SUCCESS) {
19567 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19568 					"2008 Error %d posting all rpi "
19569 					"headers\n", rc);
19570 			rc = -EIO;
19571 			break;
19572 		}
19573 	}
19574 
19575  exit:
19576 	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19577 	       LPFC_RPI_RSRC_RDY);
19578 	return rc;
19579 }
19580 
19581 /**
19582  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19583  * @phba: pointer to lpfc hba data structure.
19584  * @rpi_page:  pointer to the rpi memory region.
19585  *
19586  * This routine is invoked to post a single rpi header to the
19587  * HBA consistent with the SLI-4 interface spec.  This memory region
19588  * maps up to 64 rpi context regions.
19589  *
19590  * Return codes
19591  * 	0 - successful
19592  * 	-ENOMEM - No available memory
19593  *      -EIO - The mailbox failed to complete successfully.
19594  **/
19595 int
19596 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19597 {
19598 	LPFC_MBOXQ_t *mboxq;
19599 	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19600 	uint32_t rc = 0;
19601 	uint32_t shdr_status, shdr_add_status;
19602 	union lpfc_sli4_cfg_shdr *shdr;
19603 
19604 	/* SLI4 ports that support extents do not require RPI headers. */
19605 	if (!phba->sli4_hba.rpi_hdrs_in_use)
19606 		return rc;
19607 	if (phba->sli4_hba.extents_in_use)
19608 		return -EIO;
19609 
19610 	/* The port is notified of the header region via a mailbox command. */
19611 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19612 	if (!mboxq) {
19613 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19614 				"2001 Unable to allocate memory for issuing "
19615 				"SLI_CONFIG_SPECIAL mailbox command\n");
19616 		return -ENOMEM;
19617 	}
19618 
19619 	/* Post this rpi memory region to the port. */
19620 	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19621 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19622 			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19623 			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19624 			 sizeof(struct lpfc_sli4_cfg_mhdr),
19625 			 LPFC_SLI4_MBX_EMBED);
19626 
19627 
19629 	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19630 	       rpi_page->start_rpi);
19631 	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19632 	       hdr_tmpl, rpi_page->page_count);
19633 
19634 	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19635 	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19636 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19637 	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19638 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19639 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19640 	mempool_free(mboxq, phba->mbox_mem_pool);
19641 	if (shdr_status || shdr_add_status || rc) {
19642 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19643 				"2514 POST_RPI_HDR mailbox failed with "
19644 				"status x%x add_status x%x, mbx status x%x\n",
19645 				shdr_status, shdr_add_status, rc);
19646 		rc = -ENXIO;
19647 	} else {
19648 		/*
19649 		 * The next_rpi stores the next logical module-64 rpi value used
19650 		 * to post physical rpis in subsequent rpi postings.
19651 		 */
19652 		spin_lock_irq(&phba->hbalock);
19653 		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19654 		spin_unlock_irq(&phba->hbalock);
19655 	}
19656 	return rc;
19657 }
19658 
19659 /**
19660  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19661  * @phba: pointer to lpfc hba data structure.
19662  *
19663  * This routine is invoked to post rpi header templates to the
19664  * HBA consistent with the SLI-4 interface spec.  This routine
19665  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19666  * SLI4_PAGE_SIZE modulo 64 rpi context headers.
19667  *
19668  * Returns
19669  * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19670  * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
19671  **/
19672 int
19673 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19674 {
19675 	unsigned long rpi;
19676 	uint16_t max_rpi, rpi_limit;
19677 	uint16_t rpi_remaining, lrpi = 0;
19678 	struct lpfc_rpi_hdr *rpi_hdr;
19679 	unsigned long iflag;
19680 
19681 	/*
19682 	 * Fetch the next logical rpi.  Because this index is logical,
19683 	 * the driver starts at 0 each time.
19684 	 */
19685 	spin_lock_irqsave(&phba->hbalock, iflag);
19686 	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19687 	rpi_limit = phba->sli4_hba.next_rpi;
19688 
19689 	rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
19690 	if (rpi >= rpi_limit)
19691 		rpi = LPFC_RPI_ALLOC_ERROR;
19692 	else {
19693 		set_bit(rpi, phba->sli4_hba.rpi_bmask);
19694 		phba->sli4_hba.max_cfg_param.rpi_used++;
19695 		phba->sli4_hba.rpi_count++;
19696 	}
19697 	lpfc_printf_log(phba, KERN_INFO,
19698 			LOG_NODE | LOG_DISCOVERY,
19699 			"0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19700 			(int) rpi, max_rpi, rpi_limit);
19701 
19702 	/*
19703 	 * Don't try to allocate more rpi header regions if the device limit
19704 	 * has been exhausted.
19705 	 */
19706 	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19707 	    (phba->sli4_hba.rpi_count >= max_rpi)) {
19708 		spin_unlock_irqrestore(&phba->hbalock, iflag);
19709 		return rpi;
19710 	}
19711 
19712 	/*
19713 	 * RPI header postings are not required for SLI4 ports capable of
19714 	 * extents.
19715 	 */
19716 	if (!phba->sli4_hba.rpi_hdrs_in_use) {
19717 		spin_unlock_irqrestore(&phba->hbalock, iflag);
19718 		return rpi;
19719 	}
19720 
19721 	/*
19722 	 * If the driver is running low on rpi resources, allocate another
19723 	 * page now.  Note that the next_rpi value is used because
19724 	 * it represents how many are actually in use whereas max_rpi notes
19725 	 * the maximum number supported by the device.
19726 	 */
19727 	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19728 	spin_unlock_irqrestore(&phba->hbalock, iflag);
19729 	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19730 		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19731 		if (!rpi_hdr) {
19732 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19733 					"2002 Error Could not grow rpi "
19734 					"count\n");
19735 		} else {
19736 			lrpi = rpi_hdr->start_rpi;
19737 			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19738 			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
19739 		}
19740 	}
19741 
19742 	return rpi;
19743 }
19744 
19745 /**
19746  * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19747  * @phba: pointer to lpfc hba data structure.
19748  * @rpi: rpi to free
19749  *
19750  * This routine is invoked to release an rpi to the pool of
19751  * available rpis maintained by the driver.
19752  **/
19753 static void
19754 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19755 {
19756 	/*
19757 	 * if the rpi value indicates a prior unreg has already
19758 	 * been done, skip the unreg.
19759 	 */
19760 	if (rpi == LPFC_RPI_ALLOC_ERROR)
19761 		return;
19762 
19763 	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19764 		phba->sli4_hba.rpi_count--;
19765 		phba->sli4_hba.max_cfg_param.rpi_used--;
19766 	} else {
19767 		lpfc_printf_log(phba, KERN_INFO,
19768 				LOG_NODE | LOG_DISCOVERY,
19769 				"2016 rpi %x not inuse\n",
19770 				rpi);
19771 	}
19772 }
19773 
19774 /**
19775  * lpfc_sli4_free_rpi - Release an rpi for reuse.
19776  * @phba: pointer to lpfc hba data structure.
19777  * @rpi: rpi to free
19778  *
19779  * This routine is invoked to release an rpi to the pool of
19780  * available rpis maintained by the driver.
19781  **/
19782 void
19783 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19784 {
19785 	spin_lock_irq(&phba->hbalock);
19786 	__lpfc_sli4_free_rpi(phba, rpi);
19787 	spin_unlock_irq(&phba->hbalock);
19788 }
19789 
19790 /**
19791  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19792  * @phba: pointer to lpfc hba data structure.
19793  *
19794  * This routine is invoked to free the rpi bitmask and rpi id table
19795  * that track rpi usage for the port.
19796  **/
19797 void
19798 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19799 {
19800 	kfree(phba->sli4_hba.rpi_bmask);
19801 	kfree(phba->sli4_hba.rpi_ids);
19802 	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19803 }
19804 
19805 /**
19806  * lpfc_sli4_resume_rpi - Resume an rpi with the port
19807  * @ndlp: pointer to lpfc nodelist data structure.
19808  * @cmpl: completion call-back.
19809  * @iocbq: data to load as mbox ctx_u information
19810  *
19811  * This routine is invoked to issue a RESUME_RPI mailbox command to
19812  * resume traffic on the rpi associated with @ndlp.
19813  **/
19814 int
19815 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19816 		     void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
19817 		     struct lpfc_iocbq *iocbq)
19818 {
19819 	LPFC_MBOXQ_t *mboxq;
19820 	struct lpfc_hba *phba = ndlp->phba;
19821 	int rc;
19822 
19823 	/* The port is notified of the header region via a mailbox command. */
19824 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19825 	if (!mboxq)
19826 		return -ENOMEM;
19827 
19828 	/* If cmpl assigned, then this nlp_get pairs with
19829 	 * lpfc_mbx_cmpl_resume_rpi.
19830 	 *
19831 	 * Else cmpl is NULL, then this nlp_get pairs with
19832 	 * lpfc_sli_def_mbox_cmpl.
19833 	 */
19834 	if (!lpfc_nlp_get(ndlp)) {
19835 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19836 				"2122 %s: Failed to get nlp ref\n",
19837 				__func__);
19838 		mempool_free(mboxq, phba->mbox_mem_pool);
19839 		return -EIO;
19840 	}
19841 
19842 	/* Set up the RESUME_RPI mailbox command for this node. */
19843 	lpfc_resume_rpi(mboxq, ndlp);
19844 	if (cmpl) {
19845 		mboxq->mbox_cmpl = cmpl;
19846 		mboxq->ctx_u.save_iocb = iocbq;
19847 	} else
19848 		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19849 	mboxq->ctx_ndlp = ndlp;
19850 	mboxq->vport = ndlp->vport;
19851 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19852 	if (rc == MBX_NOT_FINISHED) {
19853 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19854 				"2010 Resume RPI Mailbox failed "
19855 				"status %d, mbxStatus x%x\n", rc,
19856 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19857 		lpfc_nlp_put(ndlp);
19858 		mempool_free(mboxq, phba->mbox_mem_pool);
19859 		return -EIO;
19860 	}
19861 	return 0;
19862 }
19863 
19864 /**
19865  * lpfc_sli4_init_vpi - Initialize a vpi with the port
19866  * @vport: Pointer to the vport for which the vpi is being initialized
19867  *
19868  * This routine is invoked to activate a vpi with the port.
19869  *
19870  * Returns:
19871  *    0 success
19872  *    -Evalue otherwise
19873  **/
19874 int
19875 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
19876 {
19877 	LPFC_MBOXQ_t *mboxq;
19878 	int rc = 0;
19879 	int retval = MBX_SUCCESS;
19880 	uint32_t mbox_tmo;
19881 	struct lpfc_hba *phba = vport->phba;

19882 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19883 	if (!mboxq)
19884 		return -ENOMEM;
19885 	lpfc_init_vpi(phba, mboxq, vport->vpi);
19886 	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
19887 	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
19888 	if (rc != MBX_SUCCESS) {
19889 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19890 				"2022 INIT VPI Mailbox failed "
19891 				"status %d, mbxStatus x%x\n", rc,
19892 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19893 		retval = -EIO;
19894 	}
19895 	if (rc != MBX_TIMEOUT)
19896 		mempool_free(mboxq, vport->phba->mbox_mem_pool);
19897 
19898 	return retval;
19899 }
19900 
19901 /**
19902  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
19903  * @phba: pointer to lpfc hba data structure.
19904  * @mboxq: Pointer to mailbox object.
19905  *
19906  * This routine is invoked to manually add a single FCF record. The caller
19907  * must pass a completely initialized FCF_Record.  This routine takes
19908  * care of the nonembedded mailbox operations.
19909  **/
19910 static void
19911 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
19912 {
19913 	void *virt_addr;
19914 	union lpfc_sli4_cfg_shdr *shdr;
19915 	uint32_t shdr_status, shdr_add_status;
19916 
19917 	virt_addr = mboxq->sge_array->addr[0];
19918 	/* The IOCTL status is embedded in the mailbox subheader. */
19919 	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
19920 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19921 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19922 
19923 	if ((shdr_status || shdr_add_status) &&
19924 		(shdr_status != STATUS_FCF_IN_USE))
19925 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19926 			"2558 ADD_FCF_RECORD mailbox failed with "
19927 			"status x%x add_status x%x\n",
19928 			shdr_status, shdr_add_status);
19929 
19930 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
19931 }
19932 
19933 /**
19934  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
19935  * @phba: pointer to lpfc hba data structure.
19936  * @fcf_record:  pointer to the initialized fcf record to add.
19937  *
19938  * This routine is invoked to manually add a single FCF record. The caller
19939  * must pass a completely initialized FCF_Record.  This routine takes
19940  * care of the nonembedded mailbox operations.
19941  **/
19942 int
19943 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
19944 {
19945 	int rc = 0;
19946 	LPFC_MBOXQ_t *mboxq;
19947 	uint8_t *bytep;
19948 	void *virt_addr;
19949 	struct lpfc_mbx_sge sge;
19950 	uint32_t alloc_len, req_len;
19951 	uint32_t fcfindex;
19952 
19953 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19954 	if (!mboxq) {
19955 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19956 			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
19957 		return -ENOMEM;
19958 	}
19959 
19960 	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
19961 		  sizeof(uint32_t);
19962 
19963 	/* Allocate DMA memory and set up the non-embedded mailbox command */
19964 	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19965 				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
19966 				     req_len, LPFC_SLI4_MBX_NEMBED);
19967 	if (alloc_len < req_len) {
19968 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19969 			"2523 Allocated DMA memory size (x%x) is "
19970 			"less than the requested DMA memory "
19971 			"size (x%x)\n", alloc_len, req_len);
19972 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
19973 		return -ENOMEM;
19974 	}
19975 
19976 	/*
19977 	 * Get the first SGE entry from the non-embedded DMA memory.  This
19978 	 * routine only uses a single SGE.
19979 	 */
19980 	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
19981 	virt_addr = mboxq->sge_array->addr[0];
19982 	/*
19983 	 * Configure the FCF record for FCFI 0.  This is the driver's
19984 	 * hardcoded default and gets used in nonFIP mode.
19985 	 */
19986 	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
19987 	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
19988 	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
19989 
19990 	/*
19991 	 * Copy the fcf_index and the FCF Record Data. The data starts after
19992 	 * the FCoE header plus word10. The data copy needs to be endian
19993 	 * correct.
19994 	 */
19995 	bytep += sizeof(uint32_t);
19996 	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
19997 	mboxq->vport = phba->pport;
19998 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
19999 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20000 	if (rc == MBX_NOT_FINISHED) {
20001 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20002 			"2515 ADD_FCF_RECORD mailbox failed with "
20003 			"status 0x%x\n", rc);
20004 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
20005 		rc = -EIO;
20006 	} else
20007 		rc = 0;
20008 
20009 	return rc;
20010 }
20011 
20012 /**
20013  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
20014  * @phba: pointer to lpfc hba data structure.
20015  * @fcf_record:  pointer to the fcf record to write the default data.
20016  * @fcf_index: FCF table entry index.
20017  *
20018  * This routine is invoked to build the driver's default FCF record.  The
20019  * values used are hardcoded.  This routine handles memory initialization.
20020  *
20021  **/
20022 void
20023 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
20024 				struct fcf_record *fcf_record,
20025 				uint16_t fcf_index)
20026 {
20027 	memset(fcf_record, 0, sizeof(struct fcf_record));
20028 	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
20029 	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
20030 	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
20031 	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
20032 	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
20033 	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
20034 	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
20035 	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
20036 	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
20037 	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
20038 	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
20039 	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
20040 	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
20041 	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
20042 	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
20043 	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
20044 		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
20045 	/* Set the VLAN bit map */
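	/* e.g. vlan_id 100 sets bit 4 (100 % 8) of vlan_bitmap[12] (100 / 8) */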
20046 	if (phba->valid_vlan) {
20047 		fcf_record->vlan_bitmap[phba->vlan_id / 8]
20048 			= 1 << (phba->vlan_id % 8);
20049 	}
20050 }
20051 
20052 /**
20053  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
20054  * @phba: pointer to lpfc hba data structure.
20055  * @fcf_index: FCF table entry offset.
20056  *
20057  * This routine is invoked to scan the entire FCF table by reading FCF
20058  * records and processing them one at a time, starting from the @fcf_index,
20059  * for initial FCF discovery or fast FCF failover rediscovery.
20060  *
20061  * Return 0 if the mailbox command is submitted successfully, non-zero
20062  * otherwise.
20063  **/
20064 int
20065 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20066 {
20067 	int rc = 0, error;
20068 	LPFC_MBOXQ_t *mboxq;
20069 
20070 	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
20071 	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
20072 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20073 	if (!mboxq) {
20074 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20075 				"2000 Failed to allocate mbox for "
20076 				"READ_FCF cmd\n");
20077 		error = -ENOMEM;
20078 		goto fail_fcf_scan;
20079 	}
20080 	/* Construct the read FCF record mailbox command */
20081 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20082 	if (rc) {
20083 		error = -EINVAL;
20084 		goto fail_fcf_scan;
20085 	}
20086 	/* Issue the mailbox command asynchronously */
20087 	mboxq->vport = phba->pport;
20088 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
20089 
20090 	set_bit(FCF_TS_INPROG, &phba->hba_flag);
20091 
20092 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20093 	if (rc == MBX_NOT_FINISHED)
20094 		error = -EIO;
20095 	else {
20096 		/* Reset eligible FCF count for new scan */
20097 		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
20098 			phba->fcf.eligible_fcf_cnt = 0;
20099 		error = 0;
20100 	}
20101 fail_fcf_scan:
20102 	if (error) {
20103 		if (mboxq)
20104 			lpfc_sli4_mbox_cmd_free(phba, mboxq);
20105 		/* FCF scan failed, clear FCF_TS_INPROG flag */
20106 		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
20107 	}
20108 	return error;
20109 }
20110 
20111 /**
20112  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
20113  * @phba: pointer to lpfc hba data structure.
20114  * @fcf_index: FCF table entry offset.
20115  *
20116  * This routine is invoked to read an FCF record indicated by @fcf_index
20117  * and to use it for FLOGI roundrobin FCF failover.
20118  *
20119  * Return 0 if the mailbox command is submitted successfully, non-zero
20120  * otherwise.
20121  **/
20122 int
20123 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20124 {
20125 	int rc = 0, error;
20126 	LPFC_MBOXQ_t *mboxq;
20127 
20128 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20129 	if (!mboxq) {
20130 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20131 				"2763 Failed to allocate mbox for "
20132 				"READ_FCF cmd\n");
20133 		error = -ENOMEM;
20134 		goto fail_fcf_read;
20135 	}
20136 	/* Construct the read FCF record mailbox command */
20137 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20138 	if (rc) {
20139 		error = -EINVAL;
20140 		goto fail_fcf_read;
20141 	}
20142 	/* Issue the mailbox command asynchronously */
20143 	mboxq->vport = phba->pport;
20144 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
20145 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20146 	if (rc == MBX_NOT_FINISHED)
20147 		error = -EIO;
20148 	else
20149 		error = 0;
20150 
20151 fail_fcf_read:
20152 	if (error && mboxq)
20153 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
20154 	return error;
20155 }
20156 
20157 /**
20158  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
20159  * @phba: pointer to lpfc hba data structure.
20160  * @fcf_index: FCF table entry offset.
20161  *
20162  * This routine is invoked to read an FCF record indicated by @fcf_index to
20163  * determine whether it's eligible for the FLOGI roundrobin failover list.
20164  *
20165  * Return 0 if the mailbox command is submitted successfully, non-zero
20166  * otherwise.
20167  **/
20168 int
20169 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20170 {
20171 	int rc = 0, error;
20172 	LPFC_MBOXQ_t *mboxq;
20173 
20174 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20175 	if (!mboxq) {
20176 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20177 				"2758 Failed to allocate mbox for "
20178 				"READ_FCF cmd\n");
20179 		error = -ENOMEM;
20180 		goto fail_fcf_read;
20181 	}
20182 	/* Construct the read FCF record mailbox command */
20183 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20184 	if (rc) {
20185 		error = -EINVAL;
20186 		goto fail_fcf_read;
20187 	}
20188 	/* Issue the mailbox command asynchronously */
20189 	mboxq->vport = phba->pport;
20190 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
20191 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20192 	if (rc == MBX_NOT_FINISHED)
20193 		error = -EIO;
20194 	else
20195 		error = 0;
20196 
20197 fail_fcf_read:
20198 	if (error && mboxq)
20199 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
20200 	return error;
20201 }
20202 
20203 /**
20204  * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask at the next priority
20205  * @phba: pointer to the lpfc_hba struct for this port.
20206  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
20207  * routine when the rr_bmask is empty. FCF indices are put into the
20208  * rr_bmask based on their priority level, from the highest priority
20209  * to the lowest; the most likely FCF candidate will be in the highest
20210  * priority group. When this routine is called, it searches the fcf_pri
20211  * list for the next lowest priority group and repopulates the rr_bmask
20212  * with only those fcf indexes.
20213  * returns:
20214  * 1=success 0=failure
20215  **/
20216 static int
20217 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
20218 {
20219 	uint16_t next_fcf_pri;
20220 	uint16_t last_index;
20221 	struct lpfc_fcf_pri *fcf_pri;
20222 	int rc;
20223 	int ret = 0;
20224 
20225 	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20226 			LPFC_SLI4_FCF_TBL_INDX_MAX);
20227 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20228 			"3060 Last IDX %d\n", last_index);
20229 
20230 	/* Verify the priority list has 2 or more entries */
20231 	spin_lock_irq(&phba->hbalock);
20232 	if (list_empty(&phba->fcf.fcf_pri_list) ||
20233 	    list_is_singular(&phba->fcf.fcf_pri_list)) {
20234 		spin_unlock_irq(&phba->hbalock);
20235 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20236 			"3061 Last IDX %d\n", last_index);
20237 		return 0; /* Empty rr list */
20238 	}
20239 	spin_unlock_irq(&phba->hbalock);
20240 
20241 	next_fcf_pri = 0;
20242 	/*
20243 	 * Clear the rr_bmask and set all of the bits that are at this
20244 	 * priority.
20245 	 */
20246 	memset(phba->fcf.fcf_rr_bmask, 0,
20247 			sizeof(*phba->fcf.fcf_rr_bmask));
20248 	spin_lock_irq(&phba->hbalock);
20249 	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20250 		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
20251 			continue;
20252 		/*
20253 		 * the first priority that has not failed FLOGI
20254 		 * will be the highest.
20255 		 */
20256 		if (!next_fcf_pri)
20257 			next_fcf_pri = fcf_pri->fcf_rec.priority;
20258 		spin_unlock_irq(&phba->hbalock);
20259 		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20260 			rc = lpfc_sli4_fcf_rr_index_set(phba,
20261 						fcf_pri->fcf_rec.fcf_index);
20262 			if (rc)
20263 				return 0;
20264 		}
20265 		spin_lock_irq(&phba->hbalock);
20266 	}
20267 	/*
20268 	 * if next_fcf_pri was not set above and the list is not empty then
20269 	 * we have failed flogis on all of them. So reset flogi failed
20270 	 * and start at the beginning.
20271 	 */
20272 	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
20273 		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20274 			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
20275 			/*
20276 			 * the first priority that has not failed FLOGI
20277 			 * will be the highest.
20278 			 */
20279 			if (!next_fcf_pri)
20280 				next_fcf_pri = fcf_pri->fcf_rec.priority;
20281 			spin_unlock_irq(&phba->hbalock);
20282 			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20283 				rc = lpfc_sli4_fcf_rr_index_set(phba,
20284 						fcf_pri->fcf_rec.fcf_index);
20285 				if (rc)
20286 					return 0;
20287 			}
20288 			spin_lock_irq(&phba->hbalock);
20289 		}
20290 	} else
20291 		ret = 1;
20292 	spin_unlock_irq(&phba->hbalock);
20293 
20294 	return ret;
20295 }

20296 /**
20297  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
20298  * @phba: pointer to lpfc hba data structure.
20299  *
20300  * This routine is to get the next eligible FCF record index in a round
20301  * robin fashion. If the next eligible FCF record index equals the
20302  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
20303  * shall be returned, otherwise, the next eligible FCF record's index
20304  * shall be returned.
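 *
 * For example, with eligible indexes {2, 5, 9} and a current index of 5,
 * the next index returned is 9, then (wrapping around) 2; when no other
 * eligible index remains, LPFC_FCOE_FCF_NEXT_NONE is returned unless a
 * lower priority group can repopulate the bitmask.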
20305  **/
20306 uint16_t
20307 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
20308 {
20309 	uint16_t next_fcf_index;
20310 
20311 initial_priority:
20312 	/* Search start from next bit of currently registered FCF index */
20313 	next_fcf_index = phba->fcf.current_rec.fcf_indx;
20314 
20315 next_priority:
20316 	/* Determine the next fcf index to check */
20317 	next_fcf_index = (next_fcf_index + 1) % LPFC_SLI4_FCF_TBL_INDX_MAX;
20318 	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
20319 				       LPFC_SLI4_FCF_TBL_INDX_MAX,
20320 				       next_fcf_index);
20321 
20322 	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
20323 	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20324 		/*
20325 		 * If we have wrapped then we need to clear the bits that
20326 		 * have been tested so that we can detect when we should
20327 		 * change the priority level.
20328 		 */
20329 		next_fcf_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20330 					       LPFC_SLI4_FCF_TBL_INDX_MAX);
20331 	}
20332 
20333 
20335 	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX ||
20336 		next_fcf_index == phba->fcf.current_rec.fcf_indx) {
20337 		/*
20338 		 * If the next fcf index is not found, check if there are lower
20339 		 * priority level fcfs in the fcf_priority list.
20340 		 * Set up the rr_bmask with all of the available fcf bits
20341 		 * at that level and continue the selection process.
20342 		 */
20343 		if (lpfc_check_next_fcf_pri_level(phba))
20344 			goto initial_priority;
20345 		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20346 				"2844 No roundrobin failover FCF available\n");
20347 
20348 		return LPFC_FCOE_FCF_NEXT_NONE;
20349 	}
20350 
20351 	if (next_fcf_index < LPFC_SLI4_FCF_TBL_INDX_MAX &&
20352 		phba->fcf.fcf_pri[next_fcf_index].fcf_rec.flag &
20353 		LPFC_FCF_FLOGI_FAILED) {
20354 		if (list_is_singular(&phba->fcf.fcf_pri_list))
20355 			return LPFC_FCOE_FCF_NEXT_NONE;
20356 
20357 		goto next_priority;
20358 	}
20359 
20360 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20361 			"2845 Get next roundrobin failover FCF (x%x)\n",
20362 			next_fcf_index);
20363 
20364 	return next_fcf_index;
20365 }
20366 
20367 /**
20368  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20369  * @phba: pointer to lpfc hba data structure.
20370  * @fcf_index: index into the FCF table to 'set'
20371  *
20372  * This routine sets the FCF record index in to the eligible bmask for
20373  * roundrobin failover search. It checks to make sure that the index
20374  * does not go beyond the range of the driver allocated bmask dimension
20375  * before setting the bit.
20376  *
20377  * Returns 0 if the index bit is successfully set, otherwise it returns
20378  * -EINVAL.
20379  **/
20380 int
20381 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20382 {
20383 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20384 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20385 				"2610 FCF (x%x) reached driver's book "
20386 				"keeping dimension:x%x\n",
20387 				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20388 		return -EINVAL;
20389 	}
20390 	/* Set the eligible FCF record index bmask */
20391 	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20392 
20393 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20394 			"2790 Set FCF (x%x) to roundrobin FCF failover "
20395 			"bmask\n", fcf_index);
20396 
20397 	return 0;
20398 }
20399 
20400 /**
20401  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20402  * @phba: pointer to lpfc hba data structure.
20403  * @fcf_index: index into the FCF table to 'clear'
20404  *
20405  * This routine clears the FCF record index from the eligible bmask for
20406  * roundrobin failover search. It checks to make sure that the index
20407  * does not go beyond the range of the driver allocated bmask dimension
20408  * before clearing the bit.
20409  **/
20410 void
20411 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20412 {
20413 	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20414 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20415 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20416 				"2762 FCF (x%x) reached driver's book "
20417 				"keeping dimension:x%x\n",
20418 				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20419 		return;
20420 	}
20421 	/* Clear the eligible FCF record index bmask */
20422 	spin_lock_irq(&phba->hbalock);
20423 	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20424 				 list) {
20425 		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20426 			list_del_init(&fcf_pri->list);
20427 			break;
20428 		}
20429 	}
20430 	spin_unlock_irq(&phba->hbalock);
20431 	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20432 
20433 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20434 			"2791 Clear FCF (x%x) from roundrobin failover "
20435 			"bmask\n", fcf_index);
20436 }
20437 
20438 /**
20439  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20440  * @phba: pointer to lpfc hba data structure.
20441  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20442  *
20443  * This routine is the completion routine for the rediscover FCF table mailbox
20444  * command. On failure it retries the current FCF or treats the event as a
20445  * link down; on success it starts the FCF rediscovery wait timer.
20446  **/
20447 static void
20448 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20449 {
20450 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20451 	uint32_t shdr_status, shdr_add_status;
20452 
20453 	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20454 
20455 	shdr_status = bf_get(lpfc_mbox_hdr_status,
20456 			     &redisc_fcf->header.cfg_shdr.response);
20457 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20458 			     &redisc_fcf->header.cfg_shdr.response);
20459 	if (shdr_status || shdr_add_status) {
20460 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20461 				"2746 Requesting for FCF rediscovery failed "
20462 				"status x%x add_status x%x\n",
20463 				shdr_status, shdr_add_status);
20464 		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20465 			spin_lock_irq(&phba->hbalock);
20466 			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20467 			spin_unlock_irq(&phba->hbalock);
20468 			/*
20469 			 * CVL event triggered FCF rediscover request failed,
20470 			 * last resort to re-try current registered FCF entry.
20471 			 */
20472 			lpfc_retry_pport_discovery(phba);
20473 		} else {
20474 			spin_lock_irq(&phba->hbalock);
20475 			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20476 			spin_unlock_irq(&phba->hbalock);
20477 			/*
20478 			 * DEAD FCF event triggered FCF rediscover request
20479 			 * failed, last resort to fail over as a link down
20480 			 * to FCF registration.
20481 			 */
20482 			lpfc_sli4_fcf_dead_failthrough(phba);
20483 		}
20484 	} else {
20485 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20486 				"2775 Start FCF rediscover quiescent timer\n");
20487 		/*
20488 		 * Start FCF rediscovery wait timer for pending FCF
20489 		 * before rescanning the FCF record table.
20490 		 */
20491 		lpfc_fcf_redisc_wait_start_timer(phba);
20492 	}
20493 
20494 	mempool_free(mbox, phba->mbox_mem_pool);
20495 }
20496 
20497 /**
20498  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20499  * @phba: pointer to lpfc hba data structure.
20500  *
20501  * This routine is invoked to request rediscovery of the entire FCF table
20502  * by the port.
20503  **/
20504 int
20505 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20506 {
20507 	LPFC_MBOXQ_t *mbox;
20508 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20509 	int rc, length;
20510 
20511 	/* Cancel the retry delay timers on all vports before FCF rediscovery */
20512 	lpfc_cancel_all_vport_retry_delay_timer(phba);
20513 
20514 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20515 	if (!mbox) {
20516 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20517 				"2745 Failed to allocate mbox for "
20518 				"requesting FCF rediscover.\n");
20519 		return -ENOMEM;
20520 	}
20521 
20522 	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20523 		  sizeof(struct lpfc_sli4_cfg_mhdr));
20524 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20525 			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20526 			 length, LPFC_SLI4_MBX_EMBED);
20527 
20528 	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20529 	/* Set count to 0 for invalidating the entire FCF database */
20530 	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20531 
20532 	/* Issue the mailbox command asynchronously */
20533 	mbox->vport = phba->pport;
20534 	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20535 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20536 
20537 	if (rc == MBX_NOT_FINISHED) {
20538 		mempool_free(mbox, phba->mbox_mem_pool);
20539 		return -EIO;
20540 	}
20541 	return 0;
20542 }
20543 
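/*
 * Illustrative sketch (not driver code): the issue/free pattern above hands
 * ownership of the mailbox to the completion handler once the command is
 * queued; the caller frees it only when queuing fails.  A minimal analogue
 * with a hypothetical 'struct cmd' and an inline "completion":
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	struct cmd {
 *		void (*cmpl)(struct cmd *c, int status);
 *	};
 *
 *	// Stand-in for the async issue path; a real driver would complete
 *	// the command later, from interrupt context.
 *	static int issue_nowait(struct cmd *c)
 *	{
 *		c->cmpl(c, 0);
 *		return 0;		// nonzero would mean "never queued"
 *	}
 *
 *	static void redisc_cmpl(struct cmd *c, int status)
 *	{
 *		if (status)
 *			fprintf(stderr, "rediscover failed: %d\n", status);
 *		free(c);		// completion path owns the command
 *	}
 *
 *	int main(void)
 *	{
 *		struct cmd *c = malloc(sizeof(*c));
 *
 *		if (!c)
 *			return 1;
 *		c->cmpl = redisc_cmpl;
 *		if (issue_nowait(c)) {
 *			free(c);	// queuing failed: caller still owns it
 *			return 1;
 *		}
 *		return 0;		// queued: the callback frees it
 *	}
 */
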
20544 /**
20545  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20546  * @phba: pointer to lpfc hba data structure.
20547  *
20548  * This function is the failover routine as a last resort to the FCF DEAD
20549  * event when the driver has failed to perform fast FCF failover.
20550  **/
20551 void
20552 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20553 {
20554 	uint32_t link_state;
20555 
20556 	/*
20557 	 * Last resort as FCF DEAD event failover will treat this as
20558 	 * a link down, but save the link state because we don't want
20559 	 * it to be changed to Link Down unless it is already down.
20560 	 */
20561 	link_state = phba->link_state;
20562 	lpfc_linkdown(phba);
20563 	phba->link_state = link_state;
20564 
20565 	/* Unregister FCF if no devices connected to it */
20566 	lpfc_unregister_unused_fcf(phba);
20567 }
20568 
20569 /**
20570  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20571  * @phba: pointer to lpfc hba data structure.
20572  * @rgn23_data: pointer to configure region 23 data.
20573  *
20574  * This function gets SLI3 port config region 23 data through the memory dump
20575  * mailbox command. On success, the size of the retrieved data is returned;
20576  * otherwise, 0 is returned.
20577  **/
20578 static uint32_t
20579 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20580 {
20581 	LPFC_MBOXQ_t *pmb = NULL;
20582 	MAILBOX_t *mb;
20583 	uint32_t offset = 0;
20584 	int rc;
20585 
20586 	if (!rgn23_data)
20587 		return 0;
20588 
20589 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20590 	if (!pmb) {
20591 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20592 				"2600 failed to allocate mailbox memory\n");
20593 		return 0;
20594 	}
20595 	mb = &pmb->u.mb;
20596 
20597 	do {
20598 		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20599 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20600 
20601 		if (rc != MBX_SUCCESS) {
20602 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20603 					"2601 failed to read config "
20604 					"region 23, rc 0x%x Status 0x%x\n",
20605 					rc, mb->mbxStatus);
20606 			mb->un.varDmp.word_cnt = 0;
20607 		}
20608 		/*
20609 		 * The dump may return zero when finished or on a mailbox
20610 		 * error; either way we are done.
20611 		 */
20612 		if (mb->un.varDmp.word_cnt == 0)
20613 			break;
20614 
20615 		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20616 			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20617 
20618 		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20619 				       rgn23_data + offset,
20620 				       mb->un.varDmp.word_cnt);
20621 		offset += mb->un.varDmp.word_cnt;
20622 	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20623 
20624 	mempool_free(pmb, phba->mbox_mem_pool);
20625 	return offset;
20626 }
20627 
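/*
 * Illustrative sketch (not driver code): the do/while above accumulates a
 * fixed-size region chunk by chunk, stopping on a zero-length chunk and
 * clamping the final copy so it never overruns the region.  A self-contained
 * analogue (it counts bytes; the mailbox above counts in word units) with a
 * simulated 1000-byte device region:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	#define RGN_SIZE 4096
 *
 *	// Simulated dump: produce up to 'buflen' bytes from a 1000-byte
 *	// source, returning 0 once it is exhausted.
 *	static uint32_t dump_chunk(uint32_t offset, uint8_t *buf,
 *				   uint32_t buflen)
 *	{
 *		uint32_t avail = offset < 1000 ? 1000 - offset : 0;
 *		uint32_t n = avail < buflen ? avail : buflen;
 *
 *		memset(buf, 0xab, n);
 *		return n;
 *	}
 *
 *	static uint32_t read_region(uint8_t *rgn)
 *	{
 *		uint8_t chunk[256];
 *		uint32_t offset = 0, n;
 *
 *		do {
 *			n = dump_chunk(offset, chunk, sizeof(chunk));
 *			if (n == 0)
 *				break;			// device says done
 *			if (n > RGN_SIZE - offset)
 *				n = RGN_SIZE - offset;	// clamp the tail
 *			memcpy(rgn + offset, chunk, n);
 *			offset += n;
 *		} while (offset < RGN_SIZE);
 *
 *		return offset;		// bytes actually copied
 *	}
 *
 *	int main(void)
 *	{
 *		uint8_t rgn[RGN_SIZE];
 *
 *		printf("%u\n", read_region(rgn));	// prints 1000
 *		return 0;
 *	}
 */
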
20628 /**
20629  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20630  * @phba: pointer to lpfc hba data structure.
20631  * @rgn23_data: pointer to configure region 23 data.
20632  *
20633  * This function gets SLI4 port config region 23 data through the memory dump
20634  * mailbox command. On success, the size of the retrieved data is returned;
20635  * otherwise, 0 is returned.
20636  **/
20637 static uint32_t
20638 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20639 {
20640 	LPFC_MBOXQ_t *mboxq = NULL;
20641 	struct lpfc_dmabuf *mp = NULL;
20642 	struct lpfc_mqe *mqe;
20643 	uint32_t data_length = 0;
20644 	int rc;
20645 
20646 	if (!rgn23_data)
20647 		return 0;
20648 
20649 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20650 	if (!mboxq) {
20651 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20652 				"3105 failed to allocate mailbox memory\n");
20653 		return 0;
20654 	}
20655 
20656 	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20657 		goto out;
20658 	mqe = &mboxq->u.mqe;
20659 	mp = mboxq->ctx_buf;
20660 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20661 	if (rc)
20662 		goto out;
20663 	data_length = mqe->un.mb_words[5];
20664 	if (data_length == 0)
20665 		goto out;
20666 	if (data_length > DMP_RGN23_SIZE) {
20667 		data_length = 0;
20668 		goto out;
20669 	}
20670 	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20671 out:
20672 	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
20673 	return data_length;
20674 }
20675 
20676 /**
20677  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20678  * @phba: pointer to lpfc hba data structure.
20679  *
20680  * This function reads region 23 and parses the TLV for port status to
20681  * decide if the user disabled the port. If the TLV indicates the
20682  * port is disabled, the hba_flag is set accordingly.
20683  **/
20684 void
20685 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20686 {
20687 	uint8_t *rgn23_data = NULL;
20688 	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20689 	uint32_t offset = 0;
20690 
20691 	/* Get adapter Region 23 data */
20692 	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20693 	if (!rgn23_data)
20694 		goto out;
20695 
20696 	if (phba->sli_rev < LPFC_SLI_REV4)
20697 		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20698 	else {
20699 		if_type = bf_get(lpfc_sli_intf_if_type,
20700 				 &phba->sli4_hba.sli_intf);
20701 		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20702 			goto out;
20703 		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20704 	}
20705 
20706 	if (!data_size)
20707 		goto out;
20708 
20709 	/* Check the region signature first */
20710 	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20711 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20712 			"2619 Config region 23 has bad signature\n");
20713 		goto out;
20714 	}
20715 	offset += 4;
20716 
20717 	/* Check the data structure version */
20718 	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20719 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20720 			"2620 Config region 23 has bad version\n");
20721 		goto out;
20722 	}
20723 	offset += 4;
20724 
20725 	/* Parse TLV entries in the region */
20726 	while (offset < data_size) {
20727 		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20728 			break;
20729 		/*
20730 		 * If the TLV is not driver specific TLV or driver id is
20731 		 * not linux driver id, skip the record.
20732 		 */
20733 		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20734 		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20735 		    (rgn23_data[offset + 3] != 0)) {
20736 			offset += rgn23_data[offset + 1] * 4 + 4;
20737 			continue;
20738 		}
20739 
20740 		/* Driver found a driver specific TLV in the config region */
20741 		sub_tlv_len = rgn23_data[offset + 1] * 4;
20742 		offset += 4;
20743 		tlv_offset = 0;
20744 
20745 		/*
20746 		 * Search for configured port state sub-TLV.
20747 		 */
20748 		while ((offset < data_size) &&
20749 			(tlv_offset < sub_tlv_len)) {
20750 			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20751 				offset += 4;
20752 				tlv_offset += 4;
20753 				break;
20754 			}
20755 			if (rgn23_data[offset] != PORT_STE_TYPE) {
20756 				offset += rgn23_data[offset + 1] * 4 + 4;
20757 				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20758 				continue;
20759 			}
20760 
20761 			/* This HBA contains PORT_STE configured */
20762 			if (!rgn23_data[offset + 2])
20763 				set_bit(LINK_DISABLED, &phba->hba_flag);
20764 
20765 			goto out;
20766 		}
20767 	}
20768 
20769 out:
20770 	kfree(rgn23_data);
20771 	return;
20772 }
20773 
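/*
 * Illustrative sketch (not driver code): the region parse above walks
 * [type, length-in-words, data...] records, skipping a record by advancing
 * length * 4 payload bytes plus the 4-byte header.  A simplified analogue
 * (hypothetical type values; the driver-id sub-check is omitted):
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define TLV_LAST	0xff
 *	#define TLV_WANTED	0x42	// hypothetical record type
 *
 *	// Return the payload offset of the wanted record, or -1.
 *	static int tlv_find(const uint8_t *b, uint32_t size)
 *	{
 *		uint32_t off = 0;
 *
 *		while (off + 4 <= size) {
 *			if (b[off] == TLV_LAST)
 *				break;
 *			if (b[off] != TLV_WANTED) {
 *				off += b[off + 1] * 4 + 4;	// skip record
 *				continue;
 *			}
 *			return (int)(off + 4);		// payload start
 *		}
 *		return -1;
 *	}
 *
 *	int main(void)
 *	{
 *		uint8_t rgn[] = {
 *			0x10, 1, 0, 0,	0xaa, 0xbb, 0xcc, 0xdd,	// skipped
 *			0x42, 1, 0, 0,	1, 2, 3, 4,		// wanted
 *			0xff,					// last record
 *		};
 *
 *		printf("%d\n", tlv_find(rgn, sizeof(rgn)));	// prints 12
 *		return 0;
 *	}
 */
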
20774 /**
20775  * lpfc_log_fw_write_cmpl - logs firmware write completion status
20776  * @phba: pointer to lpfc hba data structure
20777  * @shdr_status: wr_object rsp's status field
20778  * @shdr_add_status: wr_object rsp's add_status field
20779  * @shdr_add_status_2: wr_object rsp's add_status_2 field
20780  * @shdr_change_status: wr_object rsp's change_status field
20781  * @shdr_csf: wr_object rsp's csf bit
20782  *
20783  * This routine is intended to be called after a firmware write completes.
20784  * It will log next action items to be performed by the user to instantiate
20785  * It logs the next action items to be performed by the user to instantiate
20786  * the newly downloaded firmware, or the reason for incompatibility.
20787 static void
20788 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20789 		       u32 shdr_add_status, u32 shdr_add_status_2,
20790 		       u32 shdr_change_status, u32 shdr_csf)
20791 {
20792 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20793 			"4198 %s: flash_id x%02x, asic_rev x%02x, "
20794 			"status x%02x, add_status x%02x, add_status_2 x%02x, "
20795 			"change_status x%02x, csf %01x\n", __func__,
20796 			phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20797 			shdr_status, shdr_add_status, shdr_add_status_2,
20798 			shdr_change_status, shdr_csf);
20799 
20800 	if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20801 		switch (shdr_add_status_2) {
20802 		case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20803 			lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20804 				     "4199 Firmware write failed: "
20805 				     "image incompatible with flash x%02x\n",
20806 				     phba->sli4_hba.flash_id);
20807 			break;
20808 		case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20809 			lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20810 				     "4200 Firmware write failed: "
20811 				     "image incompatible with ASIC "
20812 				     "architecture x%02x\n",
20813 				     phba->sli4_hba.asic_rev);
20814 			break;
20815 		default:
20816 			lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20817 				     "4210 Firmware write failed: "
20818 				     "add_status_2 x%02x\n",
20819 				     shdr_add_status_2);
20820 			break;
20821 		}
20822 	} else if (!shdr_status && !shdr_add_status) {
20823 		if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20824 		    shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20825 			if (shdr_csf)
20826 				shdr_change_status =
20827 						   LPFC_CHANGE_STATUS_PCI_RESET;
20828 		}
20829 
20830 		switch (shdr_change_status) {
20831 		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20832 			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20833 				     "3198 Firmware write complete: System "
20834 				     "reboot required to instantiate\n");
20835 			break;
20836 		case (LPFC_CHANGE_STATUS_FW_RESET):
20837 			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20838 				     "3199 Firmware write complete: "
20839 				     "Firmware reset required to "
20840 				     "instantiate\n");
20841 			break;
20842 		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20843 			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20844 				     "3200 Firmware write complete: Port "
20845 				     "Migration or PCI Reset required to "
20846 				     "instantiate\n");
20847 			break;
20848 		case (LPFC_CHANGE_STATUS_PCI_RESET):
20849 			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20850 				     "3201 Firmware write complete: PCI "
20851 				     "Reset required to instantiate\n");
20852 			break;
20853 		default:
20854 			break;
20855 		}
20856 	}
20857 }
20858 
20859 /**
20860  * lpfc_wr_object - write an object to the firmware
20861  * @phba: HBA structure that indicates port to create a queue on.
20862  * @dmabuf_list: list of dmabufs to write to the port.
20863  * @size: the total byte value of the objects to write to the port.
20864  * @offset: the current offset to be used to start the transfer.
20865  *
20866  * This routine will create a wr_object mailbox command to send to the port.
20867  * The mailbox command will be constructed using the dma buffers described in
20868  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20869  * BDEs as the embedded mailbox can support. The @offset variable will be
20870  * used to indicate the starting offset of the transfer and will also return
20871  * the offset after the write object mailbox has completed. @size is used to
20872  * determine the end of the object and whether the eof bit should be set.
20873  *
20874  * Return 0 if successful; @offset will contain the new offset to use
20875  * for the next write.
20876  * Return negative value for error cases.
20877  **/
20878 int
20879 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20880 	       uint32_t size, uint32_t *offset)
20881 {
20882 	struct lpfc_mbx_wr_object *wr_object;
20883 	LPFC_MBOXQ_t *mbox;
20884 	int rc = 0, i = 0;
20885 	int mbox_status = 0;
20886 	uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20887 	uint32_t shdr_change_status = 0, shdr_csf = 0;
20888 	uint32_t mbox_tmo;
20889 	struct lpfc_dmabuf *dmabuf;
20890 	uint32_t written = 0;
20891 	bool check_change_status = false;
20892 
20893 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20894 	if (!mbox)
20895 		return -ENOMEM;
20896 
20897 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20898 			LPFC_MBOX_OPCODE_WRITE_OBJECT,
20899 			sizeof(struct lpfc_mbx_wr_object) -
20900 			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
20901 
20902 	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
20903 	wr_object->u.request.write_offset = *offset;
20904 	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
20905 	wr_object->u.request.object_name[0] =
20906 		cpu_to_le32(wr_object->u.request.object_name[0]);
20907 	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
20908 	list_for_each_entry(dmabuf, dmabuf_list, list) {
20909 		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
20910 			break;
20911 		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
20912 		wr_object->u.request.bde[i].addrHigh =
20913 			putPaddrHigh(dmabuf->phys);
20914 		if (written + SLI4_PAGE_SIZE >= size) {
20915 			wr_object->u.request.bde[i].tus.f.bdeSize =
20916 				(size - written);
20917 			written += (size - written);
20918 			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
20919 			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
20920 			check_change_status = true;
20921 		} else {
20922 			wr_object->u.request.bde[i].tus.f.bdeSize =
20923 				SLI4_PAGE_SIZE;
20924 			written += SLI4_PAGE_SIZE;
20925 		}
20926 		i++;
20927 	}
20928 	wr_object->u.request.bde_count = i;
20929 	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
20930 	if (!phba->sli4_hba.intr_enable)
20931 		mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
20932 	else {
20933 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
20934 		mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
20935 	}
20936 
20937 	/* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
20938 	rc = mbox_status;
20939 
20940 	/* The IOCTL status is embedded in the mailbox subheader. */
20941 	shdr_status = bf_get(lpfc_mbox_hdr_status,
20942 			     &wr_object->header.cfg_shdr.response);
20943 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20944 				 &wr_object->header.cfg_shdr.response);
20945 	shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
20946 				   &wr_object->header.cfg_shdr.response);
20947 	if (check_change_status) {
20948 		shdr_change_status = bf_get(lpfc_wr_object_change_status,
20949 					    &wr_object->u.response);
20950 		shdr_csf = bf_get(lpfc_wr_object_csf,
20951 				  &wr_object->u.response);
20952 	}
20953 
20954 	if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
20955 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20956 				"3025 Write Object mailbox failed with "
20957 				"status x%x add_status x%x, add_status_2 x%x, "
20958 				"mbx status x%x\n",
20959 				shdr_status, shdr_add_status, shdr_add_status_2,
20960 				rc);
20961 		rc = -ENXIO;
20962 		*offset = shdr_add_status;
20963 	} else {
20964 		*offset += wr_object->u.response.actual_write_length;
20965 	}
20966 
20967 	if (rc || check_change_status)
20968 		lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
20969 				       shdr_add_status_2, shdr_change_status,
20970 				       shdr_csf);
20971 
20972 	if (!phba->sli4_hba.intr_enable)
20973 		mempool_free(mbox, phba->mbox_mem_pool);
20974 	else if (mbox_status != MBX_TIMEOUT)
20975 		mempool_free(mbox, phba->mbox_mem_pool);
20976 
20977 	return rc;
20978 }
20979 
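/*
 * Illustrative sketch (not driver code): the BDE fill loop above carves a
 * transfer into SLI4_PAGE_SIZE pieces and marks the piece that reaches the
 * end of the object with the eof flag.  A minimal analogue using a
 * hypothetical descriptor type:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	#define PAGE 4096
 *
 *	struct bde {		// hypothetical: just length + eof mark
 *		uint32_t len;
 *		int eof;
 *	};
 *
 *	static int fill_bdes(struct bde *bde, int max, uint32_t size)
 *	{
 *		uint32_t written = 0;
 *		int i = 0;
 *
 *		while (i < max && written < size) {
 *			if (written + PAGE >= size) {
 *				bde[i].len = size - written;	// tail piece
 *				bde[i].eof = 1;
 *				written = size;
 *			} else {
 *				bde[i].len = PAGE;
 *				bde[i].eof = 0;
 *				written += PAGE;
 *			}
 *			i++;
 *		}
 *		return i;	// descriptors used
 *	}
 *
 *	int main(void)
 *	{
 *		struct bde b[8];
 *		int n = fill_bdes(b, 8, 9000);	// 4096 + 4096 + 808(eof)
 *
 *		for (int i = 0; i < n; i++)
 *			printf("bde %d: %u bytes%s\n", i, b[i].len,
 *			       b[i].eof ? " (eof)" : "");
 *		return 0;
 *	}
 */
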
20980 /**
20981  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
20982  * @vport: pointer to vport data structure.
20983  *
20984  * This function iterates through the mailboxq and cleans up all REG_LOGIN
20985  * and REG_VPI mailbox commands associated with the vport. This function
20986  * is called when the driver wants to restart discovery of the vport due to
20987  * a Clear Virtual Link event.
20988  **/
20989 void
20990 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
20991 {
20992 	struct lpfc_hba *phba = vport->phba;
20993 	LPFC_MBOXQ_t *mb, *nextmb;
20994 	struct lpfc_nodelist *ndlp;
20995 	struct lpfc_nodelist *act_mbx_ndlp = NULL;
20996 	LIST_HEAD(mbox_cmd_list);
20997 	uint8_t restart_loop;
20998 
20999 	/* Clean up internally queued mailbox commands with the vport */
21000 	spin_lock_irq(&phba->hbalock);
21001 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
21002 		if (mb->vport != vport)
21003 			continue;
21004 
21005 		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21006 			(mb->u.mb.mbxCommand != MBX_REG_VPI))
21007 			continue;
21008 
21009 		list_move_tail(&mb->list, &mbox_cmd_list);
21010 	}
21011 	/* Clean up active mailbox command with the vport */
21012 	mb = phba->sli.mbox_active;
21013 	if (mb && (mb->vport == vport)) {
21014 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
21015 			(mb->u.mb.mbxCommand == MBX_REG_VPI))
21016 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21017 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21018 			act_mbx_ndlp = mb->ctx_ndlp;
21019 
21020 			/* This reference is local to this routine.  The
21021 			 * reference is removed at routine exit.
21022 			 */
21023 			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
21024 
21025 			/* Unregister the RPI when mailbox complete */
21026 			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21027 		}
21028 	}
21029 	/* Cleanup any mailbox completions which are not yet processed */
21030 	do {
21031 		restart_loop = 0;
21032 		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
21033 			/*
21034 			 * If this mailbox is already processed or it is
21035 			 * for another vport, ignore it.
21036 			 */
21037 			if ((mb->vport != vport) ||
21038 				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
21039 				continue;
21040 
21041 			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21042 				(mb->u.mb.mbxCommand != MBX_REG_VPI))
21043 				continue;
21044 
21045 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21046 			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21047 				ndlp = mb->ctx_ndlp;
21048 				/* Unregister the RPI when mailbox complete */
21049 				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21050 				restart_loop = 1;
21051 				spin_unlock_irq(&phba->hbalock);
21052 				spin_lock(&ndlp->lock);
21053 				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21054 				spin_unlock(&ndlp->lock);
21055 				spin_lock_irq(&phba->hbalock);
21056 				break;
21057 			}
21058 		}
21059 	} while (restart_loop);
21060 
21061 	spin_unlock_irq(&phba->hbalock);
21062 
21063 	/* Release the cleaned-up mailbox commands */
21064 	while (!list_empty(&mbox_cmd_list)) {
21065 		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
21066 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21067 			ndlp = mb->ctx_ndlp;
21068 			mb->ctx_ndlp = NULL;
21069 			if (ndlp) {
21070 				spin_lock(&ndlp->lock);
21071 				ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21072 				spin_unlock(&ndlp->lock);
21073 				lpfc_nlp_put(ndlp);
21074 			}
21075 		}
21076 		lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
21077 	}
21078 
21079 	/* Release the ndlp with the cleaned-up active mailbox command */
21080 	if (act_mbx_ndlp) {
21081 		spin_lock(&act_mbx_ndlp->lock);
21082 		act_mbx_ndlp->nlp_flag &= ~NLP_IGNR_REG_CMPL;
21083 		spin_unlock(&act_mbx_ndlp->lock);
21084 		lpfc_nlp_put(act_mbx_ndlp);
21085 	}
21086 }
21087 
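/*
 * Illustrative sketch (not driver code): the restart_loop above is the
 * rescan-from-head idiom.  When handling an entry requires dropping the
 * list lock, the walk breaks and restarts because the list may have
 * changed; entries already handled are flagged so progress is guaranteed.
 * A lock-free analogue:
 *
 *	#include <stdio.h>
 *
 *	#define DONE 1
 *
 *	struct node { int flag; struct node *next; };
 *
 *	static void process_all(struct node *head)
 *	{
 *		int restart;
 *
 *		do {
 *			restart = 0;
 *			for (struct node *n = head; n; n = n->next) {
 *				if (n->flag & DONE)
 *					continue;	// already handled
 *				n->flag |= DONE;
 *				// ...drop lock, act on n, retake lock...
 *				restart = 1;
 *				break;		// list may have changed
 *			}
 *		} while (restart);
 *	}
 *
 *	int main(void)
 *	{
 *		struct node c = { 0, NULL }, b = { 0, &c }, a = { 0, &b };
 *
 *		process_all(&a);
 *		printf("%d %d %d\n", a.flag, b.flag, c.flag);	// 1 1 1
 *		return 0;
 *	}
 */
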
21088 /**
21089  * lpfc_drain_txq - Drain the txq
21090  * @phba: Pointer to HBA context object.
21091  *
21092  * This function attempts to submit IOCBs on the txq
21093  * to the adapter.  For SLI4 adapters, the txq contains
21094  * ELS IOCBs that have been deferred because there
21095  * are no SGLs.  This congestion can occur with large
21096  * vport counts during node discovery.
21097  **/
21098 
21099 uint32_t
21100 lpfc_drain_txq(struct lpfc_hba *phba)
21101 {
21102 	LIST_HEAD(completions);
21103 	struct lpfc_sli_ring *pring;
21104 	struct lpfc_iocbq *piocbq = NULL;
21105 	unsigned long iflags = 0;
21106 	char *fail_msg = NULL;
21107 	uint32_t txq_cnt = 0;
21108 	struct lpfc_queue *wq;
21109 	int ret = 0;
21110 
21111 	if (phba->link_flag & LS_MDS_LOOPBACK) {
21112 		/* MDS WQEs are posted only to the first WQ */
21113 		wq = phba->sli4_hba.hdwq[0].io_wq;
21114 		if (unlikely(!wq))
21115 			return 0;
21116 		pring = wq->pring;
21117 	} else {
21118 		wq = phba->sli4_hba.els_wq;
21119 		if (unlikely(!wq))
21120 			return 0;
21121 		pring = lpfc_phba_elsring(phba);
21122 	}
21123 
21124 	if (unlikely(!pring) || list_empty(&pring->txq))
21125 		return 0;
21126 
21127 	spin_lock_irqsave(&pring->ring_lock, iflags);
21128 	list_for_each_entry(piocbq, &pring->txq, list) {
21129 		txq_cnt++;
21130 	}
21131 
21132 	if (txq_cnt > pring->txq_max)
21133 		pring->txq_max = txq_cnt;
21134 
21135 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
21136 
21137 	while (!list_empty(&pring->txq)) {
21138 		spin_lock_irqsave(&pring->ring_lock, iflags);
21139 
21140 		piocbq = lpfc_sli_ringtx_get(phba, pring);
21141 		if (!piocbq) {
21142 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21143 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21144 				"2823 txq empty and txq_cnt is %d\n",
21145 				txq_cnt);
21146 			break;
21147 		}
21148 		txq_cnt--;
21149 
21150 		ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
21151 
21152 		if (ret && ret != IOCB_BUSY) {
21153 			fail_msg = " - Cannot send IO ";
21154 			piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21155 		}
21156 		if (fail_msg) {
21157 			piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
21158 			/* Failed means we can't issue and need to cancel */
21159 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21160 					"2822 IOCB failed %s iotag 0x%x "
21161 					"xri 0x%x %d flg x%x\n",
21162 					fail_msg, piocbq->iotag,
21163 					piocbq->sli4_xritag, ret,
21164 					piocbq->cmd_flag);
21165 			list_add_tail(&piocbq->list, &completions);
21166 			fail_msg = NULL;
21167 		}
21168 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21169 		if (txq_cnt == 0 || ret == IOCB_BUSY)
21170 			break;
21171 	}
21172 	/* Cancel all the IOCBs that cannot be issued */
21173 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
21174 			      IOERR_SLI_ABORTED);
21175 
21176 	return txq_cnt;
21177 }
21178 
21179 /**
21180  * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
21181  * @phba: Pointer to HBA context object.
21182  * @pwqeq: Pointer to command WQE.
21183  * @sglq: Pointer to the scatter gather queue object.
21184  *
21185  * This routine converts the bpl or bde that is in the WQE
21186  * to a sgl list for the sli4 hardware. The physical address
21187  * of the bpl/bde is converted back to a virtual address.
21188  * If the WQE contains a BPL then the list of BDEs is
21189  * converted to sli4_sges. If the WQE contains a single
21190  * BDE then it is converted to a single sli4_sge.
21191  * The WQE is still in cpu endianness so the contents of
21192  * the bpl can be used without byte swapping.
21193  *
21194  * Returns valid XRI = Success, NO_XRI = Failure.
21195  */
21196 static uint16_t
21197 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
21198 		 struct lpfc_sglq *sglq)
21199 {
21200 	uint16_t xritag = NO_XRI;
21201 	struct ulp_bde64 *bpl = NULL;
21202 	struct ulp_bde64 bde;
21203 	struct sli4_sge *sgl  = NULL;
21204 	struct lpfc_dmabuf *dmabuf;
21205 	union lpfc_wqe128 *wqe;
21206 	int numBdes = 0;
21207 	int i = 0;
21208 	uint32_t offset = 0; /* accumulated offset in the sg request list */
21209 	int inbound = 0; /* number of sg reply entries inbound from firmware */
21210 	uint32_t cmd;
21211 
21212 	if (!pwqeq || !sglq)
21213 		return xritag;
21214 
21215 	sgl  = (struct sli4_sge *)sglq->sgl;
21216 	wqe = &pwqeq->wqe;
21217 	pwqeq->iocb.ulpIoTag = pwqeq->iotag;
21218 
21219 	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
21220 	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
21221 		return sglq->sli4_xritag;
21222 	numBdes = pwqeq->num_bdes;
21223 	if (numBdes) {
21224 		/* The addrHigh and addrLow fields within the WQE
21225 		 * have not been byteswapped yet so there is no
21226 		 * need to swap them back.
21227 		 */
21228 		if (pwqeq->bpl_dmabuf)
21229 			dmabuf = pwqeq->bpl_dmabuf;
21230 		else
21231 			return xritag;
21232 
21233 		bpl  = (struct ulp_bde64 *)dmabuf->virt;
21234 		if (!bpl)
21235 			return xritag;
21236 
21237 		for (i = 0; i < numBdes; i++) {
21238 			/* Should already be byte swapped. */
21239 			sgl->addr_hi = bpl->addrHigh;
21240 			sgl->addr_lo = bpl->addrLow;
21241 
21242 			sgl->word2 = le32_to_cpu(sgl->word2);
21243 			if ((i+1) == numBdes)
21244 				bf_set(lpfc_sli4_sge_last, sgl, 1);
21245 			else
21246 				bf_set(lpfc_sli4_sge_last, sgl, 0);
21247 			/* swap the size field back to the cpu so we
21248 			 * can assign it to the sgl.
21249 			 */
21250 			bde.tus.w = le32_to_cpu(bpl->tus.w);
21251 			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
21252 			/* The offsets in the sgl need to be accumulated
21253 			 * separately for the request and reply lists.
21254 			 * The request is always first, the reply follows.
21255 			 */
21256 			switch (cmd) {
21257 			case CMD_GEN_REQUEST64_WQE:
21258 				/* add up the reply sg entries */
21259 				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
21260 					inbound++;
21261 				/* first inbound? reset the offset */
21262 				if (inbound == 1)
21263 					offset = 0;
21264 				bf_set(lpfc_sli4_sge_offset, sgl, offset);
21265 				bf_set(lpfc_sli4_sge_type, sgl,
21266 					LPFC_SGE_TYPE_DATA);
21267 				offset += bde.tus.f.bdeSize;
21268 				break;
21269 			case CMD_FCP_TRSP64_WQE:
21270 				bf_set(lpfc_sli4_sge_offset, sgl, 0);
21271 				bf_set(lpfc_sli4_sge_type, sgl,
21272 					LPFC_SGE_TYPE_DATA);
21273 				break;
21274 			case CMD_FCP_TSEND64_WQE:
21275 			case CMD_FCP_TRECEIVE64_WQE:
21276 				bf_set(lpfc_sli4_sge_type, sgl,
21277 					bpl->tus.f.bdeFlags);
21278 				if (i < 3)
21279 					offset = 0;
21280 				else
21281 					offset += bde.tus.f.bdeSize;
21282 				bf_set(lpfc_sli4_sge_offset, sgl, offset);
21283 				break;
21284 			}
21285 			sgl->word2 = cpu_to_le32(sgl->word2);
21286 			bpl++;
21287 			sgl++;
21288 		}
21289 	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
21290 		/* The addrHigh and addrLow fields of the BDE have not
21291 		 * been byteswapped yet so they need to be swapped
21292 		 * before putting them in the sgl.
21293 		 */
21294 		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
21295 		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
21296 		sgl->word2 = le32_to_cpu(sgl->word2);
21297 		bf_set(lpfc_sli4_sge_last, sgl, 1);
21298 		sgl->word2 = cpu_to_le32(sgl->word2);
21299 		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
21300 	}
21301 	return sglq->sli4_xritag;
21302 }
21303 
21304 /**
21305  * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
21306  * @phba: Pointer to HBA context object.
21307  * @qp: Pointer to HDW queue.
21308  * @pwqe: Pointer to command WQE.
21309  **/
21310 int
21311 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21312 		    struct lpfc_iocbq *pwqe)
21313 {
21314 	union lpfc_wqe128 *wqe = &pwqe->wqe;
21315 	struct lpfc_async_xchg_ctx *ctxp;
21316 	struct lpfc_queue *wq;
21317 	struct lpfc_sglq *sglq;
21318 	struct lpfc_sli_ring *pring;
21319 	unsigned long iflags;
21320 	uint32_t ret = 0;
21321 
21322 	/* NVME_LS and NVME_LS ABTS requests. */
21323 	if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
21324 		pring =  phba->sli4_hba.nvmels_wq->pring;
21325 		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21326 					  qp, wq_access);
21327 		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
21328 		if (!sglq) {
21329 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21330 			return WQE_BUSY;
21331 		}
21332 		pwqe->sli4_lxritag = sglq->sli4_lxritag;
21333 		pwqe->sli4_xritag = sglq->sli4_xritag;
21334 		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
21335 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21336 			return WQE_ERROR;
21337 		}
21338 		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21339 		       pwqe->sli4_xritag);
21340 		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21341 		if (ret) {
21342 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21343 			return ret;
21344 		}
21345 
21346 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21347 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21348 
21349 		lpfc_sli4_poll_eq(qp->hba_eq);
21350 		return 0;
21351 	}
21352 
21353 	/* NVME_FCREQ and NVME_ABTS requests */
21354 	if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21355 		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
21356 		wq = qp->io_wq;
21357 		pring = wq->pring;
21358 
21359 		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21360 
21361 		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21362 					  qp, wq_access);
21363 		ret = lpfc_sli4_wq_put(wq, wqe);
21364 		if (ret) {
21365 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21366 			return ret;
21367 		}
21368 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21369 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21370 
21371 		lpfc_sli4_poll_eq(qp->hba_eq);
21372 		return 0;
21373 	}
21374 
21375 	/* NVMET requests */
21376 	if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21377 		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
21378 		wq = qp->io_wq;
21379 		pring = wq->pring;
21380 
21381 		ctxp = pwqe->context_un.axchg;
21382 		sglq = ctxp->ctxbuf->sglq;
21383 		if (pwqe->sli4_xritag ==  NO_XRI) {
21384 			pwqe->sli4_lxritag = sglq->sli4_lxritag;
21385 			pwqe->sli4_xritag = sglq->sli4_xritag;
21386 		}
21387 		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21388 		       pwqe->sli4_xritag);
21389 		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21390 
21391 		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21392 					  qp, wq_access);
21393 		ret = lpfc_sli4_wq_put(wq, wqe);
21394 		if (ret) {
21395 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21396 			return ret;
21397 		}
21398 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21399 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21400 
21401 		lpfc_sli4_poll_eq(qp->hba_eq);
21402 		return 0;
21403 	}
21404 	return WQE_ERROR;
21405 }
21406 
21407 /**
21408  * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21409  * @phba: Pointer to HBA context object.
21410  * @cmdiocb: Pointer to driver command iocb object.
21411  * @cmpl: completion function.
21412  *
21413  * Fill the appropriate fields for the abort WQE and call the
21414  * internal routine lpfc_sli4_issue_wqe to send the WQE.
21415  * This function is called with hbalock held and no ring_lock held.
21416  *
21417  * RETURNS 0 - SUCCESS
21418  **/
21419 
21420 int
21421 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21422 			    void *cmpl)
21423 {
21424 	struct lpfc_vport *vport = cmdiocb->vport;
21425 	struct lpfc_iocbq *abtsiocb = NULL;
21426 	union lpfc_wqe128 *abtswqe;
21427 	struct lpfc_io_buf *lpfc_cmd;
21428 	int retval = IOCB_ERROR;
21429 	u16 xritag = cmdiocb->sli4_xritag;
21430 
21431 	/*
21432 	 * The scsi command cannot be in the txq; it is in flight because the
21433 	 * pCmd is still pointing at the SCSI command we have to abort. There
21434 	 * is no need to search the txcmplq. Just send an abort to the FW.
21435 	 */
21436 
21437 	abtsiocb = __lpfc_sli_get_iocbq(phba);
21438 	if (!abtsiocb)
21439 		return WQE_NORESOURCE;
21440 
21441 	/* Indicate the IO is being aborted by the driver. */
21442 	cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21443 
21444 	abtswqe = &abtsiocb->wqe;
21445 	memset(abtswqe, 0, sizeof(*abtswqe));
21446 
21447 	if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
21448 		bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21449 	bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21450 	abtswqe->abort_cmd.rsrvd5 = 0;
21451 	abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21452 	bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21453 	bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21454 	bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21455 	bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21456 	bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21457 	bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21458 
21459 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
21460 	abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21461 	abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21462 	if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21463 		abtsiocb->cmd_flag |= LPFC_IO_FCP;
21464 	if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21465 		abtsiocb->cmd_flag |= LPFC_IO_NVME;
21466 	if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21467 		abtsiocb->cmd_flag |= LPFC_IO_FOF;
21468 	abtsiocb->vport = vport;
21469 	abtsiocb->cmd_cmpl = cmpl;
21470 
21471 	lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21472 	retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21473 
21474 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21475 			 "0359 Abort xri x%x, original iotag x%x, "
21476 			 "abort cmd iotag x%x retval x%x\n",
21477 			 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21478 
21479 	if (retval) {
21480 		cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21481 		__lpfc_sli_release_iocbq(phba, abtsiocb);
21482 	}
21483 
21484 	return retval;
21485 }
21486 
21487 #ifdef LPFC_MXP_STAT
21488 /**
21489  * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21490  * @phba: pointer to lpfc hba data structure.
21491  * @hwqid: belong to which HWQ.
21492  *
21493  * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
21494  * 15 seconds after a test case is running.
21495  * 15 seconds after a test case starts running.
21496  * The user should call lpfc_debugfs_multixripools_write before running a test
21497  * case to clear stat_snapshot_taken. Then the user starts a test case. During
21498  * case to clear stat_snapshot_taken. Then the user starts a test case. While
21499  * the test case is running, stat_snapshot_taken is incremented by 1 each time
21500  * this routine is called from the heartbeat timer. When stat_snapshot_taken is
21501  **/
21502 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21503 {
21504 	struct lpfc_sli4_hdw_queue *qp;
21505 	struct lpfc_multixri_pool *multixri_pool;
21506 	struct lpfc_pvt_pool *pvt_pool;
21507 	struct lpfc_pbl_pool *pbl_pool;
21508 	u32 txcmplq_cnt;
21509 
21510 	qp = &phba->sli4_hba.hdwq[hwqid];
21511 	multixri_pool = qp->p_multixri_pool;
21512 	if (!multixri_pool)
21513 		return;
21514 
21515 	if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21516 		pvt_pool = &qp->p_multixri_pool->pvt_pool;
21517 		pbl_pool = &qp->p_multixri_pool->pbl_pool;
21518 		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21519 
21520 		multixri_pool->stat_pbl_count = pbl_pool->count;
21521 		multixri_pool->stat_pvt_count = pvt_pool->count;
21522 		multixri_pool->stat_busy_count = txcmplq_cnt;
21523 	}
21524 
21525 	multixri_pool->stat_snapshot_taken++;
21526 }
21527 #endif
21528 
21529 /**
21530  * lpfc_adjust_pvt_pool_count - Adjust private pool count
21531  * @phba: pointer to lpfc hba data structure.
21532  * @hwqid: belong to which HWQ.
21533  *
21534  * This routine moves some XRIs from private to public pool when private pool
21535  * is not busy.
21536  **/
21537 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21538 {
21539 	struct lpfc_multixri_pool *multixri_pool;
21540 	u32 io_req_count;
21541 	u32 prev_io_req_count;
21542 
21543 	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21544 	if (!multixri_pool)
21545 		return;
21546 	io_req_count = multixri_pool->io_req_count;
21547 	prev_io_req_count = multixri_pool->prev_io_req_count;
21548 
21549 	if (prev_io_req_count != io_req_count) {
21550 		/* Private pool is busy */
21551 		multixri_pool->prev_io_req_count = io_req_count;
21552 	} else {
21553 		/* Private pool is not busy.
21554 		 * Move XRIs from private to public pool.
21555 		 */
21556 		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21557 	}
21558 }
21559 
21560 /**
21561  * lpfc_adjust_high_watermark - Adjust high watermark
21562  * @phba: pointer to lpfc hba data structure.
21563  * @hwqid: belong to which HWQ.
21564  *
21565  * This routine sets the high watermark to the number of outstanding XRIs,
21566  * but makes sure the new value is between xri_limit/2 and xri_limit.
21567  **/
21568 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21569 {
21570 	u32 new_watermark;
21571 	u32 watermark_max;
21572 	u32 watermark_min;
21573 	u32 xri_limit;
21574 	u32 txcmplq_cnt;
21575 	u32 abts_io_bufs;
21576 	struct lpfc_multixri_pool *multixri_pool;
21577 	struct lpfc_sli4_hdw_queue *qp;
21578 
21579 	qp = &phba->sli4_hba.hdwq[hwqid];
21580 	multixri_pool = qp->p_multixri_pool;
21581 	if (!multixri_pool)
21582 		return;
21583 	xri_limit = multixri_pool->xri_limit;
21584 
21585 	watermark_max = xri_limit;
21586 	watermark_min = xri_limit / 2;
21587 
21588 	txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21589 	abts_io_bufs = qp->abts_scsi_io_bufs;
21590 	abts_io_bufs += qp->abts_nvme_io_bufs;
21591 
21592 	new_watermark = txcmplq_cnt + abts_io_bufs;
21593 	new_watermark = min(watermark_max, new_watermark);
21594 	new_watermark = max(watermark_min, new_watermark);
21595 	multixri_pool->pvt_pool.high_watermark = new_watermark;
21596 
21597 #ifdef LPFC_MXP_STAT
21598 	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21599 					  new_watermark);
21600 #endif
21601 }
21602 
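/*
 * Illustrative sketch (not driver code): the min()/max() pair above clamps
 * the outstanding-I/O count into [xri_limit / 2, xri_limit].  Standalone:
 *
 *	#include <stdio.h>
 *
 *	static unsigned int adjust_hwm(unsigned int outstanding,
 *				       unsigned int limit)
 *	{
 *		unsigned int hwm = outstanding;
 *
 *		if (hwm > limit)
 *			hwm = limit;		// min(watermark_max, ...)
 *		if (hwm < limit / 2)
 *			hwm = limit / 2;	// max(watermark_min, ...)
 *		return hwm;
 *	}
 *
 *	int main(void)
 *	{
 *		printf("%u\n", adjust_hwm(10, 100));	// 50: floor applies
 *		printf("%u\n", adjust_hwm(70, 100));	// 70: in range
 *		printf("%u\n", adjust_hwm(300, 100));	// 100: ceiling
 *		return 0;
 *	}
 */
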
21603 /**
21604  * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21605  * @phba: pointer to lpfc hba data structure.
21606  * @hwqid: belong to which HWQ.
21607  *
21608  * This routine is called from the heartbeat timer when pvt_pool is idle.
21609  * All free XRIs are moved from the private to the public pool on hwqid in
21610  * 2 steps. The first step moves (all - low_watermark) XRIs.
21611  * The second step moves the rest of the XRIs.
21612  **/
21613 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21614 {
21615 	struct lpfc_pbl_pool *pbl_pool;
21616 	struct lpfc_pvt_pool *pvt_pool;
21617 	struct lpfc_sli4_hdw_queue *qp;
21618 	struct lpfc_io_buf *lpfc_ncmd;
21619 	struct lpfc_io_buf *lpfc_ncmd_next;
21620 	unsigned long iflag;
21621 	struct list_head tmp_list;
21622 	u32 tmp_count;
21623 
21624 	qp = &phba->sli4_hba.hdwq[hwqid];
21625 	pbl_pool = &qp->p_multixri_pool->pbl_pool;
21626 	pvt_pool = &qp->p_multixri_pool->pvt_pool;
21627 	tmp_count = 0;
21628 
21629 	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21630 	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21631 
21632 	if (pvt_pool->count > pvt_pool->low_watermark) {
21633 		/* Step 1: move (all - low_watermark) from pvt_pool
21634 		 * to pbl_pool
21635 		 */
21636 
21637 		/* Move low watermark of bufs from pvt_pool to tmp_list */
21638 		INIT_LIST_HEAD(&tmp_list);
21639 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21640 					 &pvt_pool->list, list) {
21641 			list_move_tail(&lpfc_ncmd->list, &tmp_list);
21642 			tmp_count++;
21643 			if (tmp_count >= pvt_pool->low_watermark)
21644 				break;
21645 		}
21646 
21647 		/* Move all bufs from pvt_pool to pbl_pool */
21648 		list_splice_init(&pvt_pool->list, &pbl_pool->list);
21649 
21650 		/* Move all bufs from tmp_list to pvt_pool */
21651 		list_splice(&tmp_list, &pvt_pool->list);
21652 
21653 		pbl_pool->count += (pvt_pool->count - tmp_count);
21654 		pvt_pool->count = tmp_count;
21655 	} else {
21656 		/* Step 2: move the rest from pvt_pool to pbl_pool */
21657 		list_splice_init(&pvt_pool->list, &pbl_pool->list);
21658 		pbl_pool->count += pvt_pool->count;
21659 		pvt_pool->count = 0;
21660 	}
21661 
21662 	spin_unlock(&pvt_pool->lock);
21663 	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21664 }
21665 
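/*
 * Illustrative sketch (not driver code): a counts-only view of the two-step
 * drain above.  On an idle tick, a private pool holding more than its low
 * watermark keeps exactly low_watermark buffers and hands the surplus to the
 * public pool; a later idle tick hands over the remainder:
 *
 *	#include <stdio.h>
 *
 *	static void drain_pvt(unsigned int *pvt, unsigned int *pbl,
 *			      unsigned int low_wm)
 *	{
 *		if (*pvt > low_wm) {		// step 1: keep low_wm
 *			*pbl += *pvt - low_wm;
 *			*pvt = low_wm;
 *		} else {			// step 2: drain the rest
 *			*pbl += *pvt;
 *			*pvt = 0;
 *		}
 *	}
 *
 *	int main(void)
 *	{
 *		unsigned int pvt = 40, pbl = 0;
 *
 *		drain_pvt(&pvt, &pbl, 16);
 *		printf("pvt=%u pbl=%u\n", pvt, pbl);	// pvt=16 pbl=24
 *		drain_pvt(&pvt, &pbl, 16);		// still idle
 *		printf("pvt=%u pbl=%u\n", pvt, pbl);	// pvt=0 pbl=40
 *		return 0;
 *	}
 *
 * The driver does the same with list splices, so only the kept low_wm batch
 * is walked entry by entry; the surplus moves in one splice operation.
 */
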
21666 /**
21667  * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21668  * @phba: pointer to lpfc hba data structure
21669  * @qp: pointer to HDW queue
21670  * @pbl_pool: specified public free XRI pool
21671  * @pvt_pool: specified private free XRI pool
21672  * @count: number of XRIs to move
21673  *
21674  * This routine tries to move some free common bufs from the specified pbl_pool
21675  * to the specified pvt_pool. It might move fewer than count XRIs if there
21676  * are not enough in the public pool.
21677  *
21678  * Return:
21679  *   true - if XRIs are successfully moved from the specified pbl_pool to the
21680  *          specified pvt_pool
21681  *   false - if the specified pbl_pool is empty or locked by someone else
21682  **/
21683 static bool
21684 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21685 			  struct lpfc_pbl_pool *pbl_pool,
21686 			  struct lpfc_pvt_pool *pvt_pool, u32 count)
21687 {
21688 	struct lpfc_io_buf *lpfc_ncmd;
21689 	struct lpfc_io_buf *lpfc_ncmd_next;
21690 	unsigned long iflag;
21691 	int ret;
21692 
21693 	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21694 	if (ret) {
21695 		if (pbl_pool->count) {
21696 			/* Move a batch of XRIs from public to private pool */
21697 			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21698 			list_for_each_entry_safe(lpfc_ncmd,
21699 						 lpfc_ncmd_next,
21700 						 &pbl_pool->list,
21701 						 list) {
21702 				list_move_tail(&lpfc_ncmd->list,
21703 					       &pvt_pool->list);
21704 				pvt_pool->count++;
21705 				pbl_pool->count--;
21706 				count--;
21707 				if (count == 0)
21708 					break;
21709 			}
21710 
21711 			spin_unlock(&pvt_pool->lock);
21712 			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21713 			return true;
21714 		}
21715 		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21716 	}
21717 
21718 	return false;
21719 }
21720 
21721 /**
21722  * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21723  * @phba: pointer to lpfc hba data structure.
21724  * @hwqid: belong to which HWQ.
21725  * @count: number of XRIs to move
21726  *
21727  * This routine tries to find some free common bufs in one of the public
21728  * pools using a round-robin method. The search starts with the local hwqid,
21729  * then continues from the HWQ found last time (rrb_next_hwqid). Once a
21730  * public pool is found, a batch of free common bufs is moved to the private
21731  * pool on hwqid. It might move fewer than count XRIs if the pools run short.
21732  **/
21733 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21734 {
21735 	struct lpfc_multixri_pool *multixri_pool;
21736 	struct lpfc_multixri_pool *next_multixri_pool;
21737 	struct lpfc_pvt_pool *pvt_pool;
21738 	struct lpfc_pbl_pool *pbl_pool;
21739 	struct lpfc_sli4_hdw_queue *qp;
21740 	u32 next_hwqid;
21741 	u32 hwq_count;
21742 	int ret;
21743 
21744 	qp = &phba->sli4_hba.hdwq[hwqid];
21745 	multixri_pool = qp->p_multixri_pool;
21746 	pvt_pool = &multixri_pool->pvt_pool;
21747 	pbl_pool = &multixri_pool->pbl_pool;
21748 
21749 	/* Check if local pbl_pool is available */
21750 	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21751 	if (ret) {
21752 #ifdef LPFC_MXP_STAT
21753 		multixri_pool->local_pbl_hit_count++;
21754 #endif
21755 		return;
21756 	}
21757 
21758 	hwq_count = phba->cfg_hdw_queue;
21759 
21760 	/* Get the next hwqid which was found last time */
21761 	next_hwqid = multixri_pool->rrb_next_hwqid;
21762 
21763 	do {
21764 		/* Go to next hwq */
21765 		next_hwqid = (next_hwqid + 1) % hwq_count;
21766 
21767 		next_multixri_pool =
21768 			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21769 		pbl_pool = &next_multixri_pool->pbl_pool;
21770 
21771 		/* Check if the public free xri pool is available */
21772 		ret = _lpfc_move_xri_pbl_to_pvt(
21773 			phba, qp, pbl_pool, pvt_pool, count);
21774 
21775 		/* Exit while-loop if success or all hwqid are checked */
21776 	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21777 
21778 	/* Starting point for the next time */
21779 	multixri_pool->rrb_next_hwqid = next_hwqid;
21780 
21781 	if (!ret) {
21782 		/* stats: all public pools are empty */
21783 		multixri_pool->pbl_empty_count++;
21784 	}
21785 
21786 #ifdef LPFC_MXP_STAT
21787 	if (ret) {
21788 		if (next_hwqid == hwqid)
21789 			multixri_pool->local_pbl_hit_count++;
21790 		else
21791 			multixri_pool->other_pbl_hit_count++;
21792 	}
21793 #endif
21794 }
21795 
21796 /**
21797  * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21798  * @phba: pointer to lpfc hba data structure.
21799  * @hwqid: belong to which HWQ.
21800  *
21801  * This routine gets a batch of XRIs from pbl_pool if pvt_pool is below the
21802  * low watermark.
21803  **/
21804 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21805 {
21806 	struct lpfc_multixri_pool *multixri_pool;
21807 	struct lpfc_pvt_pool *pvt_pool;
21808 
21809 	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21810 	pvt_pool = &multixri_pool->pvt_pool;
21811 
21812 	if (pvt_pool->count < pvt_pool->low_watermark)
21813 		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21814 }
21815 
21816 /**
21817  * lpfc_release_io_buf - Return one IO buf back to free pool
21818  * @phba: pointer to lpfc hba data structure.
21819  * @lpfc_ncmd: IO buf to be returned.
21820  * @qp: belong to which HWQ.
21821  *
21822  * This routine returns one IO buf back to free pool. If this is an urgent IO,
21823  * the IO buf is returned to expedite pool. If cfg_xri_rebalancing==1,
21824  * the IO buf is returned to pbl_pool or pvt_pool based on watermark and
21825  * xri_limit.  If cfg_xri_rebalancing==0, the IO buf is returned to
21826  * lpfc_io_buf_list_put.
21827  **/
21828 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21829 			 struct lpfc_sli4_hdw_queue *qp)
21830 {
21831 	unsigned long iflag;
21832 	struct lpfc_pbl_pool *pbl_pool;
21833 	struct lpfc_pvt_pool *pvt_pool;
21834 	struct lpfc_epd_pool *epd_pool;
21835 	u32 txcmplq_cnt;
21836 	u32 xri_owned;
21837 	u32 xri_limit;
21838 	u32 abts_io_bufs;
21839 
21840 	/* MUST zero fields if buffer is reused by another protocol */
21841 	lpfc_ncmd->nvmeCmd = NULL;
21842 	lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21843 
21844 	if (phba->cfg_xpsgl && !phba->nvmet_support &&
21845 	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21846 		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21847 
21848 	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21849 		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21850 
21851 	if (phba->cfg_xri_rebalancing) {
21852 		if (lpfc_ncmd->expedite) {
21853 			/* Return to expedite pool */
21854 			epd_pool = &phba->epd_pool;
21855 			spin_lock_irqsave(&epd_pool->lock, iflag);
21856 			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21857 			epd_pool->count++;
21858 			spin_unlock_irqrestore(&epd_pool->lock, iflag);
21859 			return;
21860 		}
21861 
21862 		/* Avoid invalid access if an IO sneaks in and is being rejected
21863 		 * just _after_ xri pools are destroyed in lpfc_offline.
21864 		 * Nothing much can be done at this point.
21865 		 */
21866 		if (!qp->p_multixri_pool)
21867 			return;
21868 
21869 		pbl_pool = &qp->p_multixri_pool->pbl_pool;
21870 		pvt_pool = &qp->p_multixri_pool->pvt_pool;
21871 
21872 		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21873 		abts_io_bufs = qp->abts_scsi_io_bufs;
21874 		abts_io_bufs += qp->abts_nvme_io_bufs;
21875 
21876 		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21877 		xri_limit = qp->p_multixri_pool->xri_limit;
21878 
21879 #ifdef LPFC_MXP_STAT
21880 		if (xri_owned <= xri_limit)
21881 			qp->p_multixri_pool->below_limit_count++;
21882 		else
21883 			qp->p_multixri_pool->above_limit_count++;
21884 #endif
21885 
21886 		/* XRI goes to either public or private free xri pool
21887 		 *     based on watermark and xri_limit
21888 		 */
21889 		if ((pvt_pool->count < pvt_pool->low_watermark) ||
21890 		    (xri_owned < xri_limit &&
21891 		     pvt_pool->count < pvt_pool->high_watermark)) {
21892 			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21893 						  qp, free_pvt_pool);
21894 			list_add_tail(&lpfc_ncmd->list,
21895 				      &pvt_pool->list);
21896 			pvt_pool->count++;
21897 			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21898 		} else {
21899 			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21900 						  qp, free_pub_pool);
21901 			list_add_tail(&lpfc_ncmd->list,
21902 				      &pbl_pool->list);
21903 			pbl_pool->count++;
21904 			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21905 		}
21906 	} else {
21907 		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
21908 					  qp, free_xri);
21909 		list_add_tail(&lpfc_ncmd->list,
21910 			      &qp->lpfc_io_buf_list_put);
21911 		qp->put_io_bufs++;
21912 		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
21913 				       iflag);
21914 	}
21915 }
21916 
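/*
 * Illustrative sketch (not driver code): the routing decision above when a
 * buffer is freed.  Refill the private pool while it is below its low
 * watermark, or while the HWQ owns fewer XRIs than its limit and the
 * private pool is below its high watermark; otherwise return the buffer to
 * the shared public pool:
 *
 *	#include <stdio.h>
 *
 *	static const char *route(unsigned int pvt, unsigned int low_wm,
 *				 unsigned int high_wm, unsigned int owned,
 *				 unsigned int limit)
 *	{
 *		if (pvt < low_wm || (owned < limit && pvt < high_wm))
 *			return "private";
 *		return "public";
 *	}
 *
 *	int main(void)
 *	{
 *		printf("%s\n", route(2, 4, 60, 50, 100));   // private: low
 *		printf("%s\n", route(30, 4, 60, 50, 100));  // private: room
 *		printf("%s\n", route(30, 4, 60, 120, 100)); // public: limit
 *		return 0;
 *	}
 */
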
21917 /**
21918  * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
21919  * @phba: pointer to lpfc hba data structure.
21920  * @qp: pointer to HDW queue
21921  * @pvt_pool: pointer to private pool data structure.
21922  * @ndlp: pointer to lpfc nodelist data structure.
21923  *
21924  * This routine tries to get one free IO buf from private pool.
21925  *
21926  * Return:
21927  *   pointer to one free IO buf - if private pool is not empty
21928  *   NULL - if private pool is empty
21929  **/
21930 static struct lpfc_io_buf *
21931 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
21932 				  struct lpfc_sli4_hdw_queue *qp,
21933 				  struct lpfc_pvt_pool *pvt_pool,
21934 				  struct lpfc_nodelist *ndlp)
21935 {
21936 	struct lpfc_io_buf *lpfc_ncmd;
21937 	struct lpfc_io_buf *lpfc_ncmd_next;
21938 	unsigned long iflag;
21939 
21940 	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
21941 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21942 				 &pvt_pool->list, list) {
21943 		if (lpfc_test_rrq_active(
21944 			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
21945 			continue;
21946 		list_del(&lpfc_ncmd->list);
21947 		pvt_pool->count--;
21948 		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21949 		return lpfc_ncmd;
21950 	}
21951 	spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21952 
21953 	return NULL;
21954 }
21955 
21956 /**
21957  * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
21958  * @phba: pointer to lpfc hba data structure.
21959  *
21960  * This routine tries to get one free IO buf from expedite pool.
21961  *
21962  * Return:
21963  *   pointer to one free IO buf - if expedite pool is not empty
21964  *   NULL - if expedite pool is empty
21965  **/
21966 static struct lpfc_io_buf *
21967 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
21968 {
21969 	struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
21970 	struct lpfc_io_buf *lpfc_ncmd_next;
21971 	unsigned long iflag;
21972 	struct lpfc_epd_pool *epd_pool;
21973 
21974 	epd_pool = &phba->epd_pool;
21975 
21976 	spin_lock_irqsave(&epd_pool->lock, iflag);
21977 	if (epd_pool->count > 0) {
21978 		list_for_each_entry_safe(iter, lpfc_ncmd_next,
21979 					 &epd_pool->list, list) {
21980 			list_del(&iter->list);
21981 			epd_pool->count--;
21982 			lpfc_ncmd = iter;
21983 			break;
21984 		}
21985 	}
21986 	spin_unlock_irqrestore(&epd_pool->lock, iflag);
21987 
21988 	return lpfc_ncmd;
21989 }
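
/* Illustrative note: the expedite pool is a single HBA-wide reserve
 * (phba->epd_pool) guarded by its own lock, so an urgent request can make
 * progress even when every per-HWQ private pool is drained. A minimal
 * hypothetical caller:
 *
 *	struct lpfc_io_buf *buf;
 *
 *	buf = lpfc_get_io_buf_from_expedite_pool(phba);
 *	if (!buf)
 *		return NULL;	// even the reserve is exhausted
 */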
21990 
21991 /**
21992  * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
21993  * @phba: pointer to lpfc hba data structure.
21994  * @ndlp: pointer to lpfc nodelist data structure.
21995  * @hwqid: index of the HWQ this request belongs to
21996  * @expedite: 1 means this request is urgent.
21997  *
21998  * This routine will do the following actions and then return a pointer to
21999  * one free IO buf.
22000  *
22001  * 1. If the private free xri pool is empty, move some XRIs from the
22002  *    public to the private pool.
22003  * 2. Get one XRI from private free xri pool.
22004  * 3. If we fail to get one from pvt_pool and this is an expedite request,
22005  *    get one free xri from expedite pool.
22006  *
22007  * Note: ndlp is only used on SCSI side for RRQ testing.
22008  *       The caller should pass NULL for ndlp on NVME side.
22009  *
22010  * Return:
22011  *   pointer to one free IO buf - on success
22012  *   NULL - if private pool (and expedite pool, if applicable) is empty
22013  **/
22014 static struct lpfc_io_buf *
22015 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
22016 				    struct lpfc_nodelist *ndlp,
22017 				    int hwqid, int expedite)
22018 {
22019 	struct lpfc_sli4_hdw_queue *qp;
22020 	struct lpfc_multixri_pool *multixri_pool;
22021 	struct lpfc_pvt_pool *pvt_pool;
22022 	struct lpfc_io_buf *lpfc_ncmd;
22023 
22024 	qp = &phba->sli4_hba.hdwq[hwqid];
22025 	lpfc_ncmd = NULL;
22026 	if (!qp) {
22027 		lpfc_printf_log(phba, KERN_INFO,
22028 				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22029 				"5556 NULL qp for hwqid  x%x\n", hwqid);
22030 		return lpfc_ncmd;
22031 	}
22032 	multixri_pool = qp->p_multixri_pool;
22033 	if (!multixri_pool) {
22034 		lpfc_printf_log(phba, KERN_INFO,
22035 				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22036 				"5557 NULL multixri for hwqid  x%x\n", hwqid);
22037 		return lpfc_ncmd;
22038 	}
22039 	pvt_pool = &multixri_pool->pvt_pool;
22040 	if (!pvt_pool) {
22041 		lpfc_printf_log(phba, KERN_INFO,
22042 				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22043 				"5558 NULL pvt_pool for hwqid  x%x\n", hwqid);
22044 		return lpfc_ncmd;
22045 	}
22046 	multixri_pool->io_req_count++;
22047 
22048 	/* If pvt_pool is empty, move some XRIs from public to private pool */
22049 	if (pvt_pool->count == 0)
22050 		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
22051 
22052 	/* Get one XRI from private free xri pool */
22053 	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
22054 
22055 	if (lpfc_ncmd) {
22056 		lpfc_ncmd->hdwq = qp;
22057 		lpfc_ncmd->hdwq_no = hwqid;
22058 	} else if (expedite) {
22059 		/* If we fail to get one from pvt_pool and this is an expedite
22060 		 * request, get one free xri from expedite pool.
22061 		 */
22062 		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
22063 	}
22064 
22065 	return lpfc_ncmd;
22066 }
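
/* A sketch of the fallback order implemented above (caller-side handling
 * is hypothetical). XRI_BATCH is the number of XRIs pulled from the
 * public pbl_pool into the private pvt_pool on a refill attempt:
 *
 *	struct lpfc_io_buf *buf;
 *
 *	buf = lpfc_get_io_buf_from_multixri_pools(phba, ndlp, hwqid, 0);
 *	if (!buf)
 *		return NULL;	// pvt_pool still empty after one XRI_BATCH
 *				// refill, and this was not an expedite req
 */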
22067 
22068 static inline struct lpfc_io_buf *
22069 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
22070 {
22071 	struct lpfc_sli4_hdw_queue *qp;
22072 	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
22073 
22074 	qp = &phba->sli4_hba.hdwq[idx];
22075 	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
22076 				 &qp->lpfc_io_buf_list_get, list) {
22077 		if (lpfc_test_rrq_active(phba, ndlp,
22078 					 lpfc_cmd->cur_iocbq.sli4_lxritag))
22079 			continue;
22080 
22081 		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
22082 			continue;
22083 
22084 		list_del_init(&lpfc_cmd->list);
22085 		qp->get_io_bufs--;
22086 		lpfc_cmd->hdwq = qp;
22087 		lpfc_cmd->hdwq_no = idx;
22088 		return lpfc_cmd;
22089 	}
22090 	return NULL;
22091 }
22092 
22093 /**
22094  * lpfc_get_io_buf - Get one IO buffer from free pool
22095  * @phba: The HBA for which this call is being executed.
22096  * @ndlp: pointer to lpfc nodelist data structure.
22097  * @hwqid: index of the HWQ this request belongs to
22098  * @expedite: 1 means this request is urgent.
22099  *
22100  * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing
22101  * == 1, it removes an IO buffer from the multiXRI pools; if cfg_xri_rebalancing
22102  * == 0, it removes an IO buffer from the head of the @hwqid HWQ's io_buf_list.
22103  *
22104  * Note: ndlp is only used on SCSI side for RRQ testing.
22105  *       The caller should pass NULL for ndlp on NVME side.
22106  *
22107  * Return codes:
22108  *   NULL - Error
22109  *   Pointer to lpfc_io_buf - Success
22110  **/
22111 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
22112 				    struct lpfc_nodelist *ndlp,
22113 				    u32 hwqid, int expedite)
22114 {
22115 	struct lpfc_sli4_hdw_queue *qp;
22116 	unsigned long iflag;
22117 	struct lpfc_io_buf *lpfc_cmd;
22118 
22119 	qp = &phba->sli4_hba.hdwq[hwqid];
22120 	lpfc_cmd = NULL;
22121 	if (!qp) {
22122 		lpfc_printf_log(phba, KERN_WARNING,
22123 				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22124 				"5555 NULL qp for hwqid  x%x\n", hwqid);
22125 		return lpfc_cmd;
22126 	}
22127 
22128 	if (phba->cfg_xri_rebalancing)
22129 		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
22130 			phba, ndlp, hwqid, expedite);
22131 	else {
22132 		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
22133 					  qp, alloc_xri_get);
22134 		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
22135 			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22136 		if (!lpfc_cmd) {
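			/* The get list is empty: move the entire put list over
			 * under its own lock. Keeping separate get and put
			 * lists lets the alloc and free paths contend on
			 * different locks most of the time.
			 */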
22137 			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
22138 					  qp, alloc_xri_put);
22139 			list_splice(&qp->lpfc_io_buf_list_put,
22140 				    &qp->lpfc_io_buf_list_get);
22141 			qp->get_io_bufs += qp->put_io_bufs;
22142 			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
22143 			qp->put_io_bufs = 0;
22144 			spin_unlock(&qp->io_buf_list_put_lock);
22145 			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
22146 			    expedite)
22147 				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22148 		}
22149 		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
22150 	}
22151 
22152 	return lpfc_cmd;
22153 }
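
/* A minimal usage sketch (caller and error policy are hypothetical):
 * every buffer obtained here is eventually handed back through
 * lpfc_release_io_buf(), which routes it to the multiXRI pools or to the
 * per-HWQ put list according to cfg_xri_rebalancing:
 *
 *	struct lpfc_io_buf *buf;
 *
 *	buf = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!buf)
 *		return -EBUSY;	// no free XRI available right now
 *	// ... build and issue the IO ...
 *	lpfc_release_io_buf(phba, buf, buf->hdwq);
 */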
22154 
22155 /**
22156  * lpfc_read_object - Retrieve object data from HBA
22157  * @phba: The HBA for which this call is being executed.
22158  * @rdobject: Pathname of object data we want to read.
22159  * @datap: Pointer to where data will be copied to.
22160  * @datasz: size of data area
22161  *
22162  * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
22163  * The data will be truncated if datasz is not large enough.
22164  * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
22165  * Returns the actual bytes read from the object.
22166  *
22167  * This routine is hard coded to use a poll completion.  Unlike other
22168  * sli4_config mailboxes, it uses lpfc_mbuf memory which is not
22169  * cleaned up in lpfc_sli4_cmd_mbox_free.  If this routine is modified
22170  * to use interrupt-based completions, code is needed to fully cleanup
22171  * the memory.
22172  */
22173 int
22174 lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
22175 		 uint32_t datasz)
22176 {
22177 	struct lpfc_mbx_read_object *read_object;
22178 	LPFC_MBOXQ_t *mbox;
22179 	int rc, length, eof, j, byte_cnt = 0;
22180 	uint32_t shdr_status, shdr_add_status;
22181 	union lpfc_sli4_cfg_shdr *shdr;
22182 	struct lpfc_dmabuf *pcmd;
22183 	u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
22184 
22185 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
22186 	if (!mbox)
22187 		return -ENOMEM;
22188 	length = (sizeof(struct lpfc_mbx_read_object) -
22189 		  sizeof(struct lpfc_sli4_cfg_mhdr));
22190 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
22191 			 LPFC_MBOX_OPCODE_READ_OBJECT,
22192 			 length, LPFC_SLI4_MBX_EMBED);
22193 	read_object = &mbox->u.mqe.un.read_object;
22194 	shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
22195 
22196 	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
22197 	bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
22198 	read_object->u.request.rd_object_offset = 0;
22199 	read_object->u.request.rd_object_cnt = 1;
22200 
22201 	memset((void *)read_object->u.request.rd_object_name, 0,
22202 	       LPFC_OBJ_NAME_SZ);
22203 	scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s", rdobject);
22204 	for (j = 0; j < strlen(rdobject); j++)
22205 		read_object->u.request.rd_object_name[j] =
22206 			cpu_to_le32(rd_object_name[j]);
22207 
22208 	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
22209 	if (pcmd)
22210 		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
22211 	if (!pcmd || !pcmd->virt) {
22212 		kfree(pcmd);
22213 		mempool_free(mbox, phba->mbox_mem_pool);
22214 		return -ENOMEM;
22215 	}
22216 	memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
22217 	read_object->u.request.rd_object_hbuf[0].pa_lo =
22218 		putPaddrLow(pcmd->phys);
22219 	read_object->u.request.rd_object_hbuf[0].pa_hi =
22220 		putPaddrHigh(pcmd->phys);
22221 	read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
22222 
22223 	mbox->vport = phba->pport;
22224 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
22225 	mbox->ctx_ndlp = NULL;
22226 
22227 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
22228 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
22229 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
22230 
22231 	if (shdr_status == STATUS_FAILED &&
22232 	    shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
22233 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22234 				"4674 No port cfg file in FW.\n");
22235 		byte_cnt = -ENOENT;
22236 	} else if (shdr_status || shdr_add_status || rc) {
22237 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22238 				"2625 READ_OBJECT mailbox failed with "
22239 				"status x%x add_status x%x, mbx status x%x\n",
22240 				shdr_status, shdr_add_status, rc);
22241 		byte_cnt = -ENXIO;
22242 	} else {
22243 		/* Success */
22244 		length = read_object->u.response.rd_object_actual_rlen;
22245 		eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
22246 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
22247 				"2626 READ_OBJECT Success len %d:%d, EOF %d\n",
22248 				length, datasz, eof);
22249 
22250 		/* Detect the case where the port config file exists but is empty */
22251 		if (!length && eof) {
22252 			byte_cnt = 0;
22253 			goto exit;
22254 		}
22255 
22256 		byte_cnt = length;
22257 		lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
22258 	}
22259 
22260  exit:
22261 	/* This is an embedded SLI4 mailbox with an external buffer allocated.
22262 	 * Free the pcmd and then cleanup with the correct routine.
22263 	 */
22264 	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
22265 	kfree(pcmd);
22266 	lpfc_sli4_mbox_cmd_free(phba, mbox);
22267 	return byte_cnt;
22268 }
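
/* A minimal usage sketch, assuming a hypothetical object name; the DMA
 * buffer behind the mailbox is limited to LPFC_BPL_SIZE (1024) bytes, so
 * a datasz beyond that is not useful:
 *
 *	u32 *data;
 *	int len;
 *
 *	data = kzalloc(LPFC_BPL_SIZE, GFP_KERNEL);
 *	if (!data)
 *		return -ENOMEM;
 *	len = lpfc_read_object(phba, "/driver/example.cfg", data,
 *			       LPFC_BPL_SIZE);
 *	if (len < 0)		// -ENOENT, -ENXIO or -ENOMEM
 *		goto out_free;	// object missing or mailbox failure
 */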
22269 
22270 /**
22271  * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
22272  * @phba: The HBA for which this call is being executed.
22273  * @lpfc_buf: IO buf structure to append the SGL chunk
22274  *
22275  * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
22276  * and will allocate an SGL chunk if the pool is empty.
22277  *
22278  * Return codes:
22279  *   NULL - Error
22280  *   Pointer to sli4_hybrid_sgl - Success
22281  **/
22282 struct sli4_hybrid_sgl *
22283 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22284 {
22285 	struct sli4_hybrid_sgl *list_entry = NULL;
22286 	struct sli4_hybrid_sgl *tmp = NULL;
22287 	struct sli4_hybrid_sgl *allocated_sgl = NULL;
22288 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22289 	struct list_head *buf_list = &hdwq->sgl_list;
22290 	unsigned long iflags;
22291 
22292 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22293 
22294 	if (likely(!list_empty(buf_list))) {
22295 		/* break off 1 chunk from the sgl_list */
22296 		list_for_each_entry_safe(list_entry, tmp,
22297 					 buf_list, list_node) {
22298 			list_move_tail(&list_entry->list_node,
22299 				       &lpfc_buf->dma_sgl_xtra_list);
22300 			break;
22301 		}
22302 	} else {
22303 		/* allocate more */
22304 		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22305 		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22306 				   cpu_to_node(hdwq->io_wq->chann));
22307 		if (!tmp) {
22308 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22309 					"8353 error kmalloc memory for HDWQ "
22310 					"%d %s\n",
22311 					lpfc_buf->hdwq_no, __func__);
22312 			return NULL;
22313 		}
22314 
22315 		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
22316 					      GFP_ATOMIC, &tmp->dma_phys_sgl);
22317 		if (!tmp->dma_sgl) {
22318 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22319 					"8354 error pool_alloc memory for HDWQ "
22320 					"%d %s\n",
22321 					lpfc_buf->hdwq_no, __func__);
22322 			kfree(tmp);
22323 			return NULL;
22324 		}
22325 
22326 		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22327 		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
22328 	}
22329 
22330 	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
22331 					struct sli4_hybrid_sgl,
22332 					list_node);
22333 
22334 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22335 
22336 	return allocated_sgl;
22337 }
22338 
22339 /**
22340  * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
22341  * @phba: The HBA for which this call is being executed.
22342  * @lpfc_buf: IO buf structure with the SGL chunk
22343  *
22344  * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
22345  *
22346  * Return codes:
22347  *   0 - Success
22348  *   -EINVAL - Error
22349  **/
22350 int
22351 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22352 {
22353 	int rc = 0;
22354 	struct sli4_hybrid_sgl *list_entry = NULL;
22355 	struct sli4_hybrid_sgl *tmp = NULL;
22356 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22357 	struct list_head *buf_list = &hdwq->sgl_list;
22358 	unsigned long iflags;
22359 
22360 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22361 
22362 	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22363 		list_for_each_entry_safe(list_entry, tmp,
22364 					 &lpfc_buf->dma_sgl_xtra_list,
22365 					 list_node) {
22366 			list_move_tail(&list_entry->list_node,
22367 				       buf_list);
22368 		}
22369 	} else {
22370 		rc = -EINVAL;
22371 	}
22372 
22373 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22374 	return rc;
22375 }
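
/* A usage sketch (error handling is hypothetical): extra SGL chunks
 * obtained for a large IO are threaded on lpfc_buf->dma_sgl_xtra_list and
 * returned in one call when the IO completes:
 *
 *	struct sli4_hybrid_sgl *sgl;
 *
 *	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_cmd);
 *	if (!sgl)
 *		return -ENOMEM;	// pool empty and GFP_ATOMIC alloc failed
 *	// ... chain sgl->dma_sgl / sgl->dma_phys_sgl into the job ...
 *	lpfc_put_sgl_per_hdwq(phba, lpfc_cmd);	// returns all chunks
 */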
22376 
22377 /**
22378  * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22379  * @phba: phba object
22380  * @hdwq: hdwq to cleanup sgl buff resources on
22381  *
22382  * This routine frees all SGL chunks of hdwq SGL chunk pool.
22383  *
22384  * Return codes:
22385  *   None
22386  **/
22387 void
22388 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22389 		       struct lpfc_sli4_hdw_queue *hdwq)
22390 {
22391 	struct list_head *buf_list = &hdwq->sgl_list;
22392 	struct sli4_hybrid_sgl *list_entry = NULL;
22393 	struct sli4_hybrid_sgl *tmp = NULL;
22394 	unsigned long iflags;
22395 
22396 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22397 
22398 	/* Free sgl pool */
22399 	list_for_each_entry_safe(list_entry, tmp,
22400 				 buf_list, list_node) {
22401 		list_del(&list_entry->list_node);
22402 		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22403 			      list_entry->dma_sgl,
22404 			      list_entry->dma_phys_sgl);
22405 		kfree(list_entry);
22406 	}
22407 
22408 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22409 }
22410 
22411 /**
22412  * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22413  * @phba: The HBA for which this call is being executed.
22414  * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22415  *
22416  * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22417  * and will allocate a CMD/RSP buffer if the pool is empty.
22418  *
22419  * Return codes:
22420  *   NULL - Error
22421  *   Pointer to fcp_cmd_rsp_buf - Success
22422  **/
22423 struct fcp_cmd_rsp_buf *
22424 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22425 			      struct lpfc_io_buf *lpfc_buf)
22426 {
22427 	struct fcp_cmd_rsp_buf *list_entry = NULL;
22428 	struct fcp_cmd_rsp_buf *tmp = NULL;
22429 	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22430 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22431 	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22432 	unsigned long iflags;
22433 
22434 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22435 
22436 	if (likely(!list_empty(buf_list))) {
22437 		/* break off 1 chunk from the list */
22438 		list_for_each_entry_safe(list_entry, tmp,
22439 					 buf_list,
22440 					 list_node) {
22441 			list_move_tail(&list_entry->list_node,
22442 				       &lpfc_buf->dma_cmd_rsp_list);
22443 			break;
22444 		}
22445 	} else {
22446 		/* allocate more */
22447 		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22448 		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22449 				   cpu_to_node(hdwq->io_wq->chann));
22450 		if (!tmp) {
22451 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22452 					"8355 error kmalloc memory for HDWQ "
22453 					"%d %s\n",
22454 					lpfc_buf->hdwq_no, __func__);
22455 			return NULL;
22456 		}
22457 
22458 		tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
22459 						GFP_ATOMIC,
22460 						&tmp->fcp_cmd_rsp_dma_handle);
22461 
22462 		if (!tmp->fcp_cmnd) {
22463 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22464 					"8356 error pool_alloc memory for HDWQ "
22465 					"%d %s\n",
22466 					lpfc_buf->hdwq_no, __func__);
22467 			kfree(tmp);
22468 			return NULL;
22469 		}
22470 
22471 		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22472 				sizeof(struct fcp_cmnd32));
22473 
22474 		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22475 		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22476 	}
22477 
22478 	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22479 					struct fcp_cmd_rsp_buf,
22480 					list_node);
22481 
22482 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22483 
22484 	return allocated_buf;
22485 }
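
/* Note on the layout above: a single dma_pool allocation backs both the
 * FCP_CMND and FCP_RSP payloads, with fcp_rsp placed immediately after a
 * struct fcp_cmnd32. A caller can therefore derive the response DMA
 * address from the one handle, e.g. (sketch, names hypothetical):
 *
 *	struct fcp_cmd_rsp_buf *crb;
 *	dma_addr_t rsp_dma;
 *
 *	crb = lpfc_get_cmd_rsp_buf_per_hdwq(phba, lpfc_cmd);
 *	if (!crb)
 *		return -ENOMEM;
 *	rsp_dma = crb->fcp_cmd_rsp_dma_handle + sizeof(struct fcp_cmnd32);
 */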
22486 
22487 /**
22488  * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22489  * @phba: The HBA for which this call is being executed.
22490  * @lpfc_buf: IO buf structure with the CMD/RSP buf
22491  *
22492  * This routine puts one CMD/RSP buffer back into the hdwq's CMD/RSP pool.
22493  *
22494  * Return codes:
22495  *   0 - Success
22496  *   -EINVAL - Error
22497  **/
22498 int
22499 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22500 			      struct lpfc_io_buf *lpfc_buf)
22501 {
22502 	int rc = 0;
22503 	struct fcp_cmd_rsp_buf *list_entry = NULL;
22504 	struct fcp_cmd_rsp_buf *tmp = NULL;
22505 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22506 	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22507 	unsigned long iflags;
22508 
22509 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22510 
22511 	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
22512 		list_for_each_entry_safe(list_entry, tmp,
22513 					 &lpfc_buf->dma_cmd_rsp_list,
22514 					 list_node) {
22515 			list_move_tail(&list_entry->list_node,
22516 				       buf_list);
22517 		}
22518 	} else {
22519 		rc = -EINVAL;
22520 	}
22521 
22522 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22523 	return rc;
22524 }
22525 
22526 /**
22527  * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
22528  * @phba: phba object
22529  * @hdwq: hdwq to cleanup cmd rsp buff resources on
22530  *
22531  * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
22532  *
22533  * Return codes:
22534  *   None
22535  **/
22536 void
22537 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22538 			       struct lpfc_sli4_hdw_queue *hdwq)
22539 {
22540 	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22541 	struct fcp_cmd_rsp_buf *list_entry = NULL;
22542 	struct fcp_cmd_rsp_buf *tmp = NULL;
22543 	unsigned long iflags;
22544 
22545 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22546 
22547 	/* Free cmd_rsp buf pool */
22548 	list_for_each_entry_safe(list_entry, tmp,
22549 				 buf_list,
22550 				 list_node) {
22551 		list_del(&list_entry->list_node);
22552 		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
22553 			      list_entry->fcp_cmnd,
22554 			      list_entry->fcp_cmd_rsp_dma_handle);
22555 		kfree(list_entry);
22556 	}
22557 
22558 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22559 }
22560 
22561 /**
22562  * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
22563  * @phba: phba object
22564  * @job: job entry of the command to be posted.
22565  *
22566  * Fill in the common WQE fields for the command being posted.
22567  *
22568  * Return codes:
22569  *	None
22570  **/
22571 void
22572 lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
22573 {
22574 	u8 cmnd;
22575 	u32 *pcmd;
22576 	u32 if_type = 0;
22577 	u32 abort_tag;
22578 	bool fip;
22579 	struct lpfc_nodelist *ndlp = NULL;
22580 	union lpfc_wqe128 *wqe = &job->wqe;
22581 	u8 command_type = ELS_COMMAND_NON_FIP;
22582 
22583 	fip = test_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
22584 	/* The fcp commands will set command type */
22585 	if (job->cmd_flag & LPFC_IO_FCP)
22586 		command_type = FCP_COMMAND;
22587 	else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
22588 		command_type = ELS_COMMAND_FIP;
22589 	else
22590 		command_type = ELS_COMMAND_NON_FIP;
22591 
22592 	abort_tag = job->iotag;
22593 	cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
22594 
22595 	switch (cmnd) {
22596 	case CMD_ELS_REQUEST64_WQE:
22597 		ndlp = job->ndlp;
22598 
22599 		if_type = bf_get(lpfc_sli_intf_if_type,
22600 				 &phba->sli4_hba.sli_intf);
22601 		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22602 			pcmd = (u32 *)job->cmd_dmabuf->virt;
22603 			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
22604 				     *pcmd == ELS_CMD_SCR ||
22605 				     *pcmd == ELS_CMD_RDF ||
22606 				     *pcmd == ELS_CMD_EDC ||
22607 				     *pcmd == ELS_CMD_RSCN_XMT ||
22608 				     *pcmd == ELS_CMD_FDISC ||
22609 				     *pcmd == ELS_CMD_LOGO ||
22610 				     *pcmd == ELS_CMD_QFPA ||
22611 				     *pcmd == ELS_CMD_UVEM ||
22612 				     *pcmd == ELS_CMD_PLOGI)) {
22613 				bf_set(els_req64_sp, &wqe->els_req, 1);
22614 				bf_set(els_req64_sid, &wqe->els_req,
22615 				       job->vport->fc_myDID);
22616 
22617 				if ((*pcmd == ELS_CMD_FLOGI) &&
22618 				    !(phba->fc_topology ==
22619 				      LPFC_TOPOLOGY_LOOP))
22620 					bf_set(els_req64_sid, &wqe->els_req, 0);
22621 
22622 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
22623 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22624 				       phba->vpi_ids[job->vport->vpi]);
22625 			} else if (pcmd) {
22626 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
22627 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22628 				       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22629 			}
22630 		}
22631 
22632 		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
22633 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22634 
22635 		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
22636 		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
22637 		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
22638 		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22639 		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
22640 		break;
22641 	case CMD_XMIT_ELS_RSP64_WQE:
22642 		ndlp = job->ndlp;
22643 
22644 		/* word4 */
22645 		wqe->xmit_els_rsp.word4 = 0;
22646 
22647 		if_type = bf_get(lpfc_sli_intf_if_type,
22648 				 &phba->sli4_hba.sli_intf);
22649 		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22650 			if (test_bit(FC_PT2PT, &job->vport->fc_flag)) {
22651 				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22652 				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22653 				       job->vport->fc_myDID);
22654 				if (job->vport->fc_myDID == Fabric_DID) {
22655 					bf_set(wqe_els_did,
22656 					       &wqe->xmit_els_rsp.wqe_dest, 0);
22657 				}
22658 			}
22659 		}
22660 
22661 		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
22662 		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
22663 		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
22664 		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
22665 		       LPFC_WQE_LENLOC_WORD3);
22666 		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
22667 
22668 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
22669 			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22670 			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22671 			       job->vport->fc_myDID);
22672 			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
22673 		}
22674 
22675 		if (phba->sli_rev == LPFC_SLI_REV4) {
22676 			bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
22677 			       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22678 
22679 			if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
22680 				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
22681 				       phba->vpi_ids[job->vport->vpi]);
22682 		}
22683 		command_type = OTHER_COMMAND;
22684 		break;
22685 	case CMD_GEN_REQUEST64_WQE:
22686 		/* Word 10 */
22687 		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
22688 		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
22689 		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
22690 		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22691 		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
22692 		command_type = OTHER_COMMAND;
22693 		break;
22694 	case CMD_XMIT_SEQUENCE64_WQE:
22695 		if (phba->link_flag & LS_LOOPBACK_MODE)
22696 			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
22697 
22698 		wqe->xmit_sequence.rsvd3 = 0;
22699 		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
22700 		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
22701 		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
22702 		       LPFC_WQE_IOD_WRITE);
22703 		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
22704 		       LPFC_WQE_LENLOC_WORD12);
22705 		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
22706 		command_type = OTHER_COMMAND;
22707 		break;
22708 	case CMD_XMIT_BLS_RSP64_WQE:
22709 		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
22710 		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
22711 		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
22712 		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
22713 		       phba->vpi_ids[phba->pport->vpi]);
22714 		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
22715 		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
22716 		       LPFC_WQE_LENLOC_NONE);
22717 		/* Overwrite the pre-set command type with OTHER_COMMAND */
22718 		command_type = OTHER_COMMAND;
22719 		break;
22720 	case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
22721 	case CMD_ABORT_XRI_WQE:		/* abort iotag */
22722 	case CMD_SEND_FRAME:		/* mds loopback */
22723 		/* cases already formatted for sli4 wqe - no changes necessary */
22724 		return;
22725 	default:
22726 		dump_stack();
22727 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
22728 				"6207 Invalid command 0x%x\n",
22729 				cmnd);
22730 		break;
22731 	}
22732 
22733 	wqe->generic.wqe_com.abort_tag = abort_tag;
22734 	bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
22735 	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
22736 	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
22737 }
22738
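
/* A minimal sketch of the intended call pattern (caller details are
 * hypothetical): the issuer first fills the command-specific WQE words,
 * including the wqe_cmnd field this routine switches on, then lets
 * lpfc_sli_prep_wqe() fill the common words before the WQE is posted:
 *
 *	bf_set(wqe_cmnd, &job->wqe.gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
 *	// ... set the remaining command-specific words ...
 *	lpfc_sli_prep_wqe(phba, job);
 *	// job->wqe now has abort_tag, reqtag, cmd_type and cqid set
 */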