/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2017-2025 Broadcom. All Rights Reserved. The term *
 * "Broadcom" refers to Broadcom Inc. and/or its subsidiaries.     *
 * Copyright (C) 2004-2016 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.broadcom.com                                                *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/

#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/lockdep.h>
#include <linux/dmi.h>
#include <linux/of.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>
#include <scsi/fc/fc_fs.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif

#include "lpfc_hw4.h"
#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_sli4.h"
#include "lpfc_nl.h"
#include "lpfc_disc.h"
#include "lpfc.h"
#include "lpfc_scsi.h"
#include "lpfc_nvme.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
#include "lpfc_debugfs.h"
#include "lpfc_vport.h"
#include "lpfc_version.h"

/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;


/* Provide function prototypes local to this module. */
static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
				  uint32_t);
static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
			      uint8_t *, uint32_t *);
static struct lpfc_iocbq *
lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
				  struct lpfc_iocbq *rspiocbq);
static void lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *,
				      struct hbq_dmabuf *);
static void lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
					  struct hbq_dmabuf *dmabuf);
static bool lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq, struct lpfc_cqe *cqe);
static int lpfc_sli4_post_sgl_list(struct lpfc_hba *, struct list_head *,
				   int);
static void lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba,
				     struct lpfc_queue *eq,
				     struct lpfc_eqe *eqe,
				     enum lpfc_poll_mode poll_mode);
static bool lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba);
static bool lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba);
static struct lpfc_cqe *lpfc_sli4_cq_get(struct lpfc_queue *q);
static void __lpfc_sli4_consume_cqe(struct lpfc_hba *phba,
				    struct lpfc_queue *cq,
				    struct lpfc_cqe *cqe);
static uint16_t lpfc_wqe_bpl2sgl(struct lpfc_hba *phba,
				 struct lpfc_iocbq *pwqeq,
				 struct lpfc_sglq *sglq);

union lpfc_wqe128 lpfc_iread_cmd_template;
union lpfc_wqe128 lpfc_iwrite_cmd_template;
union lpfc_wqe128 lpfc_icmnd_cmd_template;

/* Setup WQE templates for IOs */
void lpfc_wqe_cmd_template(void)
{
	union lpfc_wqe128 *wqe;

	/* IREAD template */
	wqe = &lpfc_iread_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iread.wqe_com, CMD_FCP_IREAD64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iread.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iread.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
	bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iread.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_iread.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_iread.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* Word 12 - is zero */

	/* IWRITE template */
	wqe = &lpfc_iwrite_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - cmd_buff_len, payload_offset_len is zero */

	/* Word 4 - total_xfer_len is variable */

	/* Word 5 - initial_xfer_len is variable */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_iwrite.wqe_com, CMD_FCP_IWRITE64_WQE);
	bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, PARM_READ_CHECK);
	bf_set(wqe_class, &wqe->fcp_iwrite.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_iwrite.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
	bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_LENLOC_WORD4);
	bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_iwrite.wqe_com, COMMAND_DATA_OUT);
	bf_set(wqe_cqid, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* Word 12 - is zero */

	/* ICMND template */
	wqe = &lpfc_icmnd_cmd_template;
	memset(wqe, 0, sizeof(union lpfc_wqe128));

	/* Word 0, 1, 2 - BDE is variable */

	/* Word 3 - payload_offset_len is variable */

	/* Word 4, 5 - is zero */

	/* Word 6 - ctxt_tag, xri_tag is variable */

	/* Word 7 */
	bf_set(wqe_cmnd, &wqe->fcp_icmd.wqe_com, CMD_FCP_ICMND64_WQE);
	bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_class, &wqe->fcp_icmd.wqe_com, CLASS3);
	bf_set(wqe_ct, &wqe->fcp_icmd.wqe_com, SLI4_CT_RPI);

	/* Word 8 - abort_tag is variable */

	/* Word 9 - reqtag is variable */

	/* Word 10 - dbde, wqes is variable */
	bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
	bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_NONE);
	bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com, LPFC_WQE_LENLOC_NONE);
	bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 0);
	bf_set(wqe_wqes, &wqe->fcp_icmd.wqe_com, 1);

	/* Word 11 */
	bf_set(wqe_cmd_type, &wqe->fcp_icmd.wqe_com, COMMAND_DATA_IN);
	bf_set(wqe_cqid, &wqe->fcp_icmd.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);

	/* Word 12, 13, 14, 15 - is zero */
}

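/*
 * Illustrative sketch only (not code from this driver): an I/O build
 * path is expected to start from one of the templates above and then
 * fill in just the variable words called out in the comments, e.g.:
 *
 *	union lpfc_wqe128 *wqe = &pwqeq->wqe;
 *
 *	memcpy(wqe, &lpfc_iread_cmd_template, sizeof(union lpfc_wqe128));
 *	wqe->fcp_iread.total_xfer_len = xfer_len;		(Word 4)
 *	bf_set(wqe_reqtag, &wqe->fcp_iread.wqe_com, iotag);	(Word 9)
 *
 * The variable names (pwqeq, xfer_len, iotag) are placeholders; the
 * authoritative build logic lives in the SCSI/NVMe I/O paths.
 */
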
#if defined(CONFIG_64BIT) && defined(__LITTLE_ENDIAN)
/**
 * lpfc_sli4_pcimem_bcopy - SLI4 memory copy function
 * @srcp: Source memory pointer.
 * @destp: Destination memory pointer.
 * @cnt: Number of bytes to copy.
 *       Must be a multiple of sizeof(uint64_t)
 *
 * This function is used for copying data between driver memory
 * and the SLI WQ. This function also changes the endianness
 * of each word if native endianness is different from SLI
 * endianness. This function can be called with or without
 * lock.
 **/
static void
lpfc_sli4_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint64_t *src = srcp;
	uint64_t *dest = destp;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof(uint64_t))
		*dest++ = *src++;
}
#else
#define lpfc_sli4_pcimem_bcopy(a, b, c) lpfc_sli_pcimem_bcopy(a, b, c)
#endif

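/*
 * Worked example of the @cnt contract above: a call such as
 * lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, 64) performs eight 64-bit
 * copies (i = 0, 8, ..., 56), moving one 64-byte WQE. On this
 * little-endian build no byte swapping is required, which is why the
 * fast path is a plain word copy.
 */
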
/**
 * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
 * @q: The Work Queue to operate on.
 * @wqe: The Work Queue Entry to put on the Work Queue.
 *
 * This routine will copy the contents of @wqe to the next available entry on
 * the @q. This function will then ring the Work Queue Doorbell to signal the
 * HBA to start processing the Work Queue Entry. This function returns 0 if
 * successful. If no entries are available on @q then this function will return
 * -EBUSY.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe128 *wqe)
{
	union lpfc_wqe *temp_wqe;
	struct lpfc_register doorbell;
	uint32_t host_index;
	uint32_t idx;
	uint32_t i = 0;
	uint8_t *tmp;
	u32 if_type;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;

	temp_wqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	idx = ((q->host_index + 1) % q->entry_count);
	if (idx == q->hba_index) {
		q->WQ_overflow++;
		return -EBUSY;
	}
	q->WQ_posted++;
	/* set consumption flag every once in a while */
	if (!((q->host_index + 1) % q->notify_interval))
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
	else
		bf_set(wqe_wqec, &wqe->generic.wqe_com, 0);
	if (q->phba->sli3_options & LPFC_SLI4_PHWQ_ENABLED)
		bf_set(wqe_wqid, &wqe->generic.wqe_com, q->queue_id);
	lpfc_sli4_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
	if (q->dpp_enable && q->phba->cfg_enable_dpp) {
		/* write to DPP aperture taking advantage of Combined Writes */
		tmp = (uint8_t *)temp_wqe;
#ifdef __raw_writeq
		for (i = 0; i < q->entry_size; i += sizeof(uint64_t))
			__raw_writeq(*((uint64_t *)(tmp + i)),
					q->dpp_regaddr + i);
#else
		for (i = 0; i < q->entry_size; i += sizeof(uint32_t))
			__raw_writel(*((uint32_t *)(tmp + i)),
					q->dpp_regaddr + i);
#endif
	}
	/* ensure WQE bcopy and DPP flushed before doorbell write */
	wmb();

	/* Update the host index before invoking device */
	host_index = q->host_index;

	q->host_index = idx;

	/* Ring Doorbell */
	doorbell.word0 = 0;
	if (q->db_format == LPFC_DB_LIST_FORMAT) {
		if (q->dpp_enable && q->phba->cfg_enable_dpp) {
			bf_set(lpfc_if6_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp, &doorbell, 1);
			bf_set(lpfc_if6_wq_db_list_fm_dpp_id, &doorbell,
			    q->dpp_id);
			bf_set(lpfc_if6_wq_db_list_fm_id, &doorbell,
			    q->queue_id);
		} else {
			bf_set(lpfc_wq_db_list_fm_num_posted, &doorbell, 1);
			bf_set(lpfc_wq_db_list_fm_id, &doorbell, q->queue_id);

			/* Leave bits <23:16> clear for if_type 6 dpp */
			if_type = bf_get(lpfc_sli_intf_if_type,
					 &q->phba->sli4_hba.sli_intf);
			if (if_type != LPFC_SLI_INTF_IF_TYPE_6)
				bf_set(lpfc_wq_db_list_fm_index, &doorbell,
				       host_index);
		}
	} else if (q->db_format == LPFC_DB_RING_FORMAT) {
		bf_set(lpfc_wq_db_ring_fm_num_posted, &doorbell, 1);
		bf_set(lpfc_wq_db_ring_fm_id, &doorbell, q->queue_id);
	} else {
		return -EINVAL;
	}
	writel(doorbell.word0, q->db_regaddr);

	return 0;
}

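/*
 * Worked example of the ring-full check above: with entry_count = 256,
 * host_index = 41 and hba_index = 42, the next slot (41 + 1) % 256 == 42
 * collides with hba_index, so one entry is deliberately left unused,
 * WQ_overflow is bumped, and -EBUSY is returned until the HBA consumes
 * more entries and lpfc_sli4_wq_release() advances hba_index.
 */
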
/**
 * lpfc_sli4_wq_release - Updates internal hba index for WQ
 * @q: The Work Queue to operate on.
 * @index: The index to advance the hba index to.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers.
 **/
static void
lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return;

	q->hba_index = index;
}

/**
 * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
 * @q: The Mailbox Queue to operate on.
 * @mqe: The Mailbox Queue Entry to put on the Mailbox Queue.
 *
 * This routine will copy the contents of @mqe to the next available entry on
 * the @q. This function will then ring the Mailbox Queue Doorbell to signal
 * the HBA to start processing the Mailbox Queue Entry. This function returns
 * 0 if successful. If no entries are available on @q then this function will
 * return -ENOMEM.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
static int
lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
{
	struct lpfc_mqe *temp_mqe;
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return -ENOMEM;
	temp_mqe = lpfc_sli4_qe(q, q->host_index);

	/* If the host has not yet processed the next entry then we are done */
	if (((q->host_index + 1) % q->entry_count) == q->hba_index)
		return -ENOMEM;
	lpfc_sli4_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
	/* Save off the mailbox pointer for completion */
	q->phba->mbox = (MAILBOX_t *)temp_mqe;

	/* Update the host index before invoking device */
	q->host_index = ((q->host_index + 1) % q->entry_count);

	/* Ring Doorbell */
	doorbell.word0 = 0;
	bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
	bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
	return 0;
}

/**
 * lpfc_sli4_mq_release - Updates internal hba index for MQ
 * @q: The Mailbox Queue to operate on.
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
 * an entry the host calls this function to update the queue's internal
 * pointers. This routine returns the number of entries that were consumed by
 * the HBA.
 **/
static uint32_t
lpfc_sli4_mq_release(struct lpfc_queue *q)
{
	/* sanity check on queue memory */
	if (unlikely(!q))
		return 0;

	/* Clear the mailbox pointer for completion */
	q->phba->mbox = NULL;
	q->hba_index = ((q->hba_index + 1) % q->entry_count);
	return 1;
}

/**
 * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
 * @q: The Event Queue to get the first valid EQE from
 *
 * This routine will get the first valid Event Queue Entry from @q, update
 * the queue's internal hba index, and return the EQE. If no valid EQEs are in
 * the Queue (no more work to do), or the Queue is full of EQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_eqe *
lpfc_sli4_eq_get(struct lpfc_queue *q)
{
	struct lpfc_eqe *eqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	eqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next EQE is not valid then we are done */
	if (bf_get_le32(lpfc_eqe_valid, eqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Given what was seen in lpfc_sli4_cq_get() of speculative
	 * instructions allowing action on content before valid bit checked,
	 * add barrier here as well. May not be needed as "content" is a
	 * single 32-bit entity here (vs multi word structure for cq's).
	 */
	mb();
	return eqe;
}

/**
 * lpfc_sli4_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
		(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_eq_clr_intr - Turn off interrupts from this EQ
 * @q: The Event Queue to disable interrupts
 *
 **/
void
lpfc_sli4_if6_eq_clr_intr(struct lpfc_queue *q)
{
	struct lpfc_register doorbell;

	doorbell.word0 = 0;
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		     uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm) {
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
		bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
	}
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
	bf_set(lpfc_eqcq_doorbell_eqid_hi, &doorbell,
			(q->queue_id >> LPFC_EQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_eqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_eq_db - write EQ DB for eqe's consumed or arm state
 * @phba: adapter with EQ
 * @q: The Event Queue that the host has completed processing for.
 * @count: Number of elements that have been consumed
 * @arm: Indicates whether the host wants to arm this EQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that count
 * number of EQEs have been processed. The @arm parameter indicates whether
 * the queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_eq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			  uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_eq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_eq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_eq_doorbell_eqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.EQDBregaddr);
	/* PCI read to flush PCI pipeline on re-arming for INTx mode */
	if ((q->phba->intr_type == INTx) && (arm == LPFC_QUEUE_REARM))
		readl(q->phba->sli4_hba.EQDBregaddr);
}

static void
__lpfc_sli4_consume_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
			struct lpfc_eqe *eqe)
{
	if (!phba->sli4_hba.pc_sli4_params.eqav)
		bf_set_le32(lpfc_eqe_valid, eqe, 0);

	eq->host_index = ((eq->host_index + 1) % eq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.eqav && !eq->host_index)
		eq->qe_valid = (eq->qe_valid) ? 0 : 1;
}

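/*
 * Example of the valid-bit scheme above: without eqav, each consumed
 * EQE has its valid bit cleared in place. With eqav, entries are left
 * untouched; instead, when host_index wraps from entry_count - 1 back
 * to 0, qe_valid flips (1 -> 0 or 0 -> 1), so EQEs left over from the
 * previous pass no longer compare equal in lpfc_sli4_eq_get().
 */
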
static void
lpfc_sli4_eqcq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
{
	struct lpfc_eqe *eqe = NULL;
	u32 eq_count = 0, cq_count = 0;
	struct lpfc_cqe *cqe = NULL;
	struct lpfc_queue *cq = NULL, *childq = NULL;
	int cqid = 0;

	/* walk all the EQ entries and drop on the floor */
	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		/* Get the reference to the corresponding CQ */
		cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
		cq = NULL;

		list_for_each_entry(childq, &eq->child_list, list) {
			if (childq->queue_id == cqid) {
				cq = childq;
				break;
			}
		}
		/* If CQ is valid, iterate through it and drop all the CQEs */
		if (cq) {
			cqe = lpfc_sli4_cq_get(cq);
			while (cqe) {
				__lpfc_sli4_consume_cqe(phba, cq, cqe);
				cq_count++;
				cqe = lpfc_sli4_cq_get(cq);
			}
			/* Clear and re-arm the CQ */
			phba->sli4_hba.sli4_write_cq_db(phba, cq, cq_count,
			    LPFC_QUEUE_REARM);
			cq_count = 0;
		}
		__lpfc_sli4_consume_eqe(phba, eq, eqe);
		eq_count++;
		eqe = lpfc_sli4_eq_get(eq);
	}

	/* Clear and re-arm the EQ */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, eq_count, LPFC_QUEUE_REARM);
}

static int
lpfc_sli4_process_eq(struct lpfc_hba *phba, struct lpfc_queue *eq,
		     u8 rearm, enum lpfc_poll_mode poll_mode)
{
	struct lpfc_eqe *eqe;
	int count = 0, consumed = 0;

	if (cmpxchg(&eq->queue_claimed, 0, 1) != 0)
		goto rearm_and_exit;

	eqe = lpfc_sli4_eq_get(eq);
	while (eqe) {
		lpfc_sli4_hba_handle_eqe(phba, eq, eqe, poll_mode);
		__lpfc_sli4_consume_eqe(phba, eq, eqe);

		consumed++;
		if (!(++count % eq->max_proc_limit))
			break;

		if (!(count % eq->notify_interval)) {
			phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed,
							LPFC_QUEUE_NOARM);
			consumed = 0;
		}

		eqe = lpfc_sli4_eq_get(eq);
	}
	eq->EQ_processed += count;

	/* Track the max number of EQEs processed in 1 intr */
	if (count > eq->EQ_max_eqe)
		eq->EQ_max_eqe = count;

	xchg(&eq->queue_claimed, 0);

rearm_and_exit:
	/* Always clear the EQ. */
	phba->sli4_hba.sli4_write_eq_db(phba, eq, consumed, rearm);

	return count;
}

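/*
 * Processing cadence for lpfc_sli4_process_eq(): assuming, for
 * illustration, notify_interval = 16 and max_proc_limit = 256, the
 * loop rings a NOARM doorbell to release each batch of 16 consumed
 * EQEs and breaks out once 256 have been handled; the final doorbell
 * after rearm_and_exit releases any remainder and applies @rearm.
 */
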
/**
 * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
 * @q: The Completion Queue to get the first valid CQE from
 *
 * This routine will get the first valid Completion Queue Entry from @q, update
 * the queue's internal hba index, and return the CQE. If no valid CQEs are in
 * the Queue (no more work to do), or the Queue is full of CQEs that have been
 * processed, but not popped back to the HBA then this routine will return NULL.
 **/
static struct lpfc_cqe *
lpfc_sli4_cq_get(struct lpfc_queue *q)
{
	struct lpfc_cqe *cqe;

	/* sanity check on queue memory */
	if (unlikely(!q))
		return NULL;
	cqe = lpfc_sli4_qe(q, q->host_index);

	/* If the next CQE is not valid then we are done */
	if (bf_get_le32(lpfc_cqe_valid, cqe) != q->qe_valid)
		return NULL;

	/*
	 * insert barrier for instruction interlock : data from the hardware
	 * must have the valid bit checked before it can be copied and acted
	 * upon. Speculative instructions were allowing a bcopy at the start
	 * of lpfc_sli4_fp_handle_wcqe(), which is called immediately
	 * after our return, to copy data before the valid bit check above
	 * was done. As such, some of the copied data was stale. The barrier
	 * ensures the check is before any data is copied.
	 */
	mb();
	return cqe;
}

static void
__lpfc_sli4_consume_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
			struct lpfc_cqe *cqe)
{
	if (!phba->sli4_hba.pc_sli4_params.cqav)
		bf_set_le32(lpfc_cqe_valid, cqe, 0);

	cq->host_index = ((cq->host_index + 1) % cq->entry_count);

	/* if the index wrapped around, toggle the valid bit */
	if (phba->sli4_hba.pc_sli4_params.cqav && !cq->host_index)
		cq->qe_valid = (cq->qe_valid) ? 0 : 1;
}

/**
 * lpfc_sli4_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
		     uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
	bf_set(lpfc_eqcq_doorbell_cqid_hi, &doorbell,
			(q->queue_id >> LPFC_CQID_HI_FIELD_SHIFT));
	bf_set(lpfc_eqcq_doorbell_cqid_lo, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/**
 * lpfc_sli4_if6_write_cq_db - write cq DB for entries consumed or arm state.
 * @phba: the adapter with the CQ
 * @q: The Completion Queue that the host has completed processing for.
 * @count: the number of elements that were consumed
 * @arm: Indicates whether the host wants to arm this CQ.
 *
 * This routine will notify the HBA, by ringing the doorbell, that the
 * CQEs have been processed. The @arm parameter specifies whether the
 * queue should be rearmed when ringing the doorbell.
 **/
void
lpfc_sli4_if6_write_cq_db(struct lpfc_hba *phba, struct lpfc_queue *q,
			 uint32_t count, bool arm)
{
	struct lpfc_register doorbell;

	/* sanity check on queue memory */
	if (unlikely(!q || (count == 0 && !arm)))
		return;

	/* ring doorbell for number popped */
	doorbell.word0 = 0;
	if (arm)
		bf_set(lpfc_if6_cq_doorbell_arm, &doorbell, 1);
	bf_set(lpfc_if6_cq_doorbell_num_released, &doorbell, count);
	bf_set(lpfc_if6_cq_doorbell_cqid, &doorbell, q->queue_id);
	writel(doorbell.word0, q->phba->sli4_hba.CQDBregaddr);
}

/*
 * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
 *
 * This routine will copy the contents of @hrqe and @drqe to the next available
 * entries on the header and data receive queues. This function will then ring
 * the Receive Queue Doorbell to signal the HBA to start processing the
 * Receive Queue Entries. This function returns the index that the rqe was
 * copied to if successful. It returns -ENOMEM if either queue is missing,
 * -EINVAL on a queue type or index mismatch, and -EBUSY if the queues are
 * full.
 * The caller is expected to hold the hbalock when calling this routine.
 **/
int
lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
		 struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
{
	struct lpfc_rqe *temp_hrqe;
	struct lpfc_rqe *temp_drqe;
	struct lpfc_register doorbell;
	int hq_put_index;
	int dq_put_index;

	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return -ENOMEM;
	hq_put_index = hq->host_index;
	dq_put_index = dq->host_index;
	temp_hrqe = lpfc_sli4_qe(hq, hq_put_index);
	temp_drqe = lpfc_sli4_qe(dq, dq_put_index);

	if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
		return -EINVAL;
	if (hq_put_index != dq_put_index)
		return -EINVAL;
	/* If the host has not yet processed the next entry then we are done */
	if (((hq_put_index + 1) % hq->entry_count) == hq->hba_index)
		return -EBUSY;
	lpfc_sli4_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
	lpfc_sli4_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);

	/* Update the host index to point to the next slot */
	hq->host_index = ((hq_put_index + 1) % hq->entry_count);
	dq->host_index = ((dq_put_index + 1) % dq->entry_count);
	hq->RQ_buf_posted++;

	/* Ring The Header Receive Queue Doorbell */
	if (!(hq->host_index % hq->notify_interval)) {
		doorbell.word0 = 0;
		if (hq->db_format == LPFC_DB_RING_FORMAT) {
			bf_set(lpfc_rq_db_ring_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_ring_fm_id, &doorbell, hq->queue_id);
		} else if (hq->db_format == LPFC_DB_LIST_FORMAT) {
			bf_set(lpfc_rq_db_list_fm_num_posted, &doorbell,
			       hq->notify_interval);
			bf_set(lpfc_rq_db_list_fm_index, &doorbell,
			       hq->host_index);
			bf_set(lpfc_rq_db_list_fm_id, &doorbell, hq->queue_id);
		} else {
			return -EINVAL;
		}
		writel(doorbell.word0, hq->db_regaddr);
	}
	return hq_put_index;
}

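/*
 * The header and data RQs above advance in lockstep: each received
 * frame consumes one HRQ entry (frame header) and the matching DRQ
 * entry (payload), which is why mismatched hq/dq put indexes are
 * rejected with -EINVAL. Doorbells are batched as well; with a
 * notify_interval of 8, for example, only every 8th successful post
 * rings the doorbell, reporting 8 newly posted buffer pairs at once.
 */
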
/*
 * lpfc_sli4_rq_release - Updates internal hba index for RQ
 *
 * This routine will update the HBA index of a queue to reflect consumption of
 * one Receive Queue Entry by the HBA. When the HBA indicates that it has
 * consumed an entry the host calls this function to update the queue's
 * internal pointers. This routine returns the number of entries that were
 * consumed by the HBA.
 **/
static uint32_t
lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
{
	/* sanity check on queue memory */
	if (unlikely(!hq) || unlikely(!dq))
		return 0;

	if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
		return 0;
	hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
	dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
	return 1;
}

/**
 * lpfc_cmd_iocb - Get next command iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next command iocb entry
 * in the command ring. The caller must hold hbalock to prevent
 * other threads from consuming the next command iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_cmd_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.cmdringaddr) +
			   pring->sli.sli3.cmdidx * phba->iocb_cmd_size);
}

/**
 * lpfc_resp_iocb - Get next response iocb entry in the ring
 * @phba: Pointer to HBA context object.
 * @pring: Pointer to driver SLI ring object.
 *
 * This function returns pointer to next response iocb entry
 * in the response ring. The caller must hold hbalock to make sure
 * that no other thread consumes the next response iocb.
 * SLI-2/SLI-3 provide different sized iocbs.
 **/
static inline IOCB_t *
lpfc_resp_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	return (IOCB_t *) (((char *) pring->sli.sli3.rspringaddr) +
			   pring->sli.sli3.rspidx * phba->iocb_rsp_size);
}

/**
 * __lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
__lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *iocbq = NULL;

	lockdep_assert_held(&phba->hbalock);

	list_remove_head(lpfc_iocb_list, iocbq, struct lpfc_iocbq, list);
	if (iocbq)
		phba->iocb_cnt++;
	if (phba->iocb_cnt > phba->iocb_max)
		phba->iocb_max = phba->iocb_cnt;
	return iocbq;
}

/**
 * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function clears the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	phba->sli4_hba.lpfc_sglq_active_list[xritag] = NULL;
	return sglq;
}

/**
 * __lpfc_get_active_sglq - Get the active sglq for this XRI.
 * @phba: Pointer to HBA context object.
 * @xritag: XRI value.
 *
 * This function returns the sglq pointer from the array of active
 * sglq's. The xritag that is passed in is used to index into the
 * array. Before the xritag can be used it needs to be adjusted
 * by subtracting the xribase.
 *
 * Returns sglq pointer = success, NULL = Failure.
 **/
struct lpfc_sglq *
__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
{
	struct lpfc_sglq *sglq;

	sglq = phba->sli4_hba.lpfc_sglq_active_list[xritag];
	return sglq;
}

/**
 * lpfc_clr_rrq_active - Clears RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @xritag: xri used in this exchange.
 * @rrq: The RRQ to be cleared.
 *
 **/
void
lpfc_clr_rrq_active(struct lpfc_hba *phba,
		    uint16_t xritag,
		    struct lpfc_node_rrq *rrq)
{
	struct lpfc_nodelist *ndlp = NULL;

	/* Lookup did to verify if did is still active on this vport */
	if (rrq->vport)
		ndlp = lpfc_findnode_did(rrq->vport, rrq->nlp_DID);

	if (!ndlp)
		goto out;

	if (test_and_clear_bit(xritag, ndlp->active_rrqs_xri_bitmap)) {
		rrq->send_rrq = 0;
		rrq->xritag = 0;
		rrq->rrq_stop_time = 0;
	}
out:
	mempool_free(rrq, phba->rrq_pool);
}

/**
 * lpfc_handle_rrq_active - Checks if RRQ has waited RATOV.
 * @phba: Pointer to HBA context object.
 *
 * This function is called with hbalock held. This function
 * checks if stop_time (ratov from setting rrq active) has
 * been reached, if it has and the send_rrq flag is set then
 * it will call lpfc_send_rrq. If the send_rrq flag is not set
 * then it will just call the routine to clear the rrq and
 * free the rrq resource.
 * The timer is set to the next rrq that is going to expire before
 * leaving the routine.
 *
 **/
void
lpfc_handle_rrq_active(struct lpfc_hba *phba)
{
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long next_time;
	unsigned long iflags;
	LIST_HEAD(send_rrq);

	clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
	next_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	list_for_each_entry_safe(rrq, nextrrq,
				 &phba->active_rrq_list, list) {
		if (time_after(jiffies, rrq->rrq_stop_time))
			list_move(&rrq->list, &send_rrq);
		else if (time_before(rrq->rrq_stop_time, next_time))
			next_time = rrq->rrq_stop_time;
	}
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
	if ((!list_empty(&phba->active_rrq_list)) &&
	    (!test_bit(FC_UNLOADING, &phba->pport->load_flag)))
		mod_timer(&phba->rrq_tmr, next_time);
	list_for_each_entry_safe(rrq, nextrrq, &send_rrq, list) {
		list_del(&rrq->list);
		if (!rrq->send_rrq) {
			/* this call will free the rrq */
			lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
		} else if (lpfc_send_rrq(phba, rrq)) {
			/* if we send the rrq then the completion handler
			 * will clear the bit in the xribitmap.
			 */
			lpfc_clr_rrq_active(phba, rrq->xritag,
					    rrq);
		}
	}
}

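/*
 * Example timeline for the RRQ handling above: with fc_ratov = 10s, an
 * RRQ set active at time T gets rrq_stop_time of roughly T + 11s. When
 * the rrq timer fires, every expired RRQ moves to the local send_rrq
 * list (then either sent via lpfc_send_rrq() or just cleared, per
 * rrq->send_rrq), and the timer is re-armed for the earliest remaining
 * rrq_stop_time.
 */
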
/**
 * lpfc_get_active_rrq - Get the active RRQ for this exchange.
 * @vport: Pointer to vport context object.
 * @xri: The xri used in the exchange.
 * @did: The target's DID for this exchange.
 *
 * returns NULL = rrq not found in the phba->active_rrq_list.
 *         rrq = rrq for this xri and target.
 **/
struct lpfc_node_rrq *
lpfc_get_active_rrq(struct lpfc_vport *vport, uint16_t xri, uint32_t did)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;

	if (phba->sli_rev != LPFC_SLI_REV4)
		return NULL;
	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport == vport && rrq->xritag == xri &&
		    rrq->nlp_DID == did) {
			list_del(&rrq->list);
			spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
			return rrq;
		}
	}
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
	return NULL;
}

/**
 * lpfc_cleanup_vports_rrqs - Remove and clear the active RRQ for this vport.
 * @vport: Pointer to vport context object.
 * @ndlp: Pointer to the lpfc_nodelist structure.
 * If ndlp is NULL, remove all active RRQs for this vport from the
 * phba->active_rrq_list and clear the rrq.
 * If ndlp is not NULL then only remove rrqs for this vport & this ndlp.
 **/
void
lpfc_cleanup_vports_rrqs(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_node_rrq *rrq;
	struct lpfc_node_rrq *nextrrq;
	unsigned long iflags;
	LIST_HEAD(rrq_list);

	if (phba->sli_rev != LPFC_SLI_REV4)
		return;
	if (!ndlp) {
		lpfc_sli4_vport_delete_els_xri_aborted(vport);
		lpfc_sli4_vport_delete_fcp_xri_aborted(vport);
	}
	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	list_for_each_entry_safe(rrq, nextrrq, &phba->active_rrq_list, list) {
		if (rrq->vport != vport)
			continue;

		if (!ndlp || ndlp == lpfc_findnode_did(vport, rrq->nlp_DID))
			list_move(&rrq->list, &rrq_list);
	}
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);

	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
		list_del(&rrq->list);
		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
	}
}

/**
 * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: Target's nodelist pointer for this exchange.
 * @xritag: the xri in the bitmap to test.
 *
 * This function returns:
 * 0 = rrq not active for this xri
 * 1 = rrq is valid for this xri.
 **/
int
lpfc_test_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		     uint16_t xritag)
{
	if (!ndlp)
		return 0;
	if (!ndlp->active_rrqs_xri_bitmap)
		return 0;
	if (test_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		return 1;
	else
		return 0;
}

/**
 * lpfc_set_rrq_active - set RRQ active bit in xri_bitmap.
 * @phba: Pointer to HBA context object.
 * @ndlp: nodelist pointer for this target.
 * @xritag: xri used in this exchange.
 * @rxid: Remote Exchange ID.
 * @send_rrq: Flag used to determine if we should send rrq els cmd.
 *
 * This function takes the hbalock.
 * The active bit is always set in the active rrq xri_bitmap even
 * if there is no slot available for the other rrq information.
 *
 * returns 0 rrq activated for this xri
 *         < 0 No memory or invalid ndlp.
 **/
int
lpfc_set_rrq_active(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp,
		    uint16_t xritag, uint16_t rxid, uint16_t send_rrq)
{
	unsigned long iflags;
	struct lpfc_node_rrq *rrq;
	int empty;

	if (!ndlp)
		return -EINVAL;

	if (!phba->cfg_enable_rrq)
		return -EINVAL;

	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
		clear_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
		goto outnl;
	}

	spin_lock_irqsave(&phba->hbalock, iflags);
	if (ndlp->vport && test_bit(FC_UNLOADING, &ndlp->vport->load_flag))
		goto out;

	if (!ndlp->active_rrqs_xri_bitmap)
		goto out;

	if (test_and_set_bit(xritag, ndlp->active_rrqs_xri_bitmap))
		goto out;

	spin_unlock_irqrestore(&phba->hbalock, iflags);
	rrq = mempool_alloc(phba->rrq_pool, GFP_ATOMIC);
	if (!rrq) {
		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
				"3155 Unable to allocate RRQ xri:0x%x rxid:0x%x"
				" DID:0x%x Send:%d\n",
				xritag, rxid, ndlp->nlp_DID, send_rrq);
		return -EINVAL;
	}
	if (phba->cfg_enable_rrq == 1)
		rrq->send_rrq = send_rrq;
	else
		rrq->send_rrq = 0;
	rrq->xritag = xritag;
	rrq->rrq_stop_time = jiffies + secs_to_jiffies(phba->fc_ratov + 1);
	rrq->nlp_DID = ndlp->nlp_DID;
	rrq->vport = ndlp->vport;
	rrq->rxid = rxid;

	spin_lock_irqsave(&phba->rrq_list_lock, iflags);
	empty = list_empty(&phba->active_rrq_list);
	list_add_tail(&rrq->list, &phba->active_rrq_list);
	spin_unlock_irqrestore(&phba->rrq_list_lock, iflags);
	set_bit(HBA_RRQ_ACTIVE, &phba->hba_flag);
	if (empty)
		lpfc_worker_wake_up(phba);
	return 0;
out:
	spin_unlock_irqrestore(&phba->hbalock, iflags);
outnl:
	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
			"2921 Can't set rrq active xri:0x%x rxid:0x%x"
			" DID:0x%x Send:%d\n",
			xritag, rxid, ndlp->nlp_DID, send_rrq);
	return -EINVAL;
}

/**
 * __lpfc_sli_get_els_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * The driver calls this function with either the nvme ls ring lock
 * or the fc els ring lock held depending on the iocb usage.  This function
 * gets a new driver sglq object from the sglq list. If the list is not empty
 * then it is successful, it returns pointer to the newly allocated sglq
 * object else it returns NULL.
 **/
static struct lpfc_sglq *
__lpfc_sli_get_els_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_els_sgl_list = &phba->sli4_hba.lpfc_els_sgl_list;
	struct lpfc_sglq *sglq = NULL;
	struct lpfc_sglq *start_sglq = NULL;
	struct lpfc_io_buf *lpfc_cmd;
	struct lpfc_nodelist *ndlp;
	int found = 0;
	u8 cmnd;

	cmnd = get_job_cmnd(phba, piocbq);

	if (piocbq->cmd_flag & LPFC_IO_FCP) {
		lpfc_cmd = piocbq->io_buf;
		ndlp = lpfc_cmd->rdata->pnode;
	} else if ((cmnd == CMD_GEN_REQUEST64_CR) &&
		   !(piocbq->cmd_flag & LPFC_IO_LIBDFC)) {
		ndlp = piocbq->ndlp;
	} else if (piocbq->cmd_flag & LPFC_IO_LIBDFC) {
		if (piocbq->cmd_flag & LPFC_IO_LOOPBACK)
			ndlp = NULL;
		else
			ndlp = piocbq->ndlp;
	} else {
		ndlp = piocbq->ndlp;
	}

	spin_lock(&phba->sli4_hba.sgl_list_lock);
	list_remove_head(lpfc_els_sgl_list, sglq, struct lpfc_sglq, list);
	start_sglq = sglq;
	while (!found) {
		if (!sglq)
			break;
		if (ndlp && ndlp->active_rrqs_xri_bitmap &&
		    test_bit(sglq->sli4_lxritag,
			     ndlp->active_rrqs_xri_bitmap)) {
			/* This xri has an rrq outstanding for this DID.
			 * put it back in the list and get another xri.
			 */
			list_add_tail(&sglq->list, lpfc_els_sgl_list);
			sglq = NULL;
			list_remove_head(lpfc_els_sgl_list, sglq,
					 struct lpfc_sglq, list);
			if (sglq == start_sglq) {
				list_add_tail(&sglq->list, lpfc_els_sgl_list);
				sglq = NULL;
				break;
			} else {
				continue;
			}
		}
		sglq->ndlp = ndlp;
		found = 1;
		phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
		sglq->state = SGL_ALLOCATED;
	}
	spin_unlock(&phba->sli4_hba.sgl_list_lock);
	return sglq;
}

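/*
 * The allocation loop above is a bounded rotation of the free ELS sgl
 * list: if the head sglq's XRI is still RRQ-active for this node, it
 * is appended back to the tail and the next head is tried. Seeing
 * start_sglq again means every free XRI is currently quarantined for
 * this DID, so NULL is returned rather than reusing an XRI the remote
 * port may still consider busy.
 */
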
/**
 * __lpfc_sli_get_nvmet_sglq - Allocates an iocb object from sgl pool
 * @phba: Pointer to HBA context object.
 * @piocbq: Pointer to the iocbq.
 *
 * This function is called with the sgl_list lock held. This function
 * gets a new driver sglq object from the sglq list. If the
 * list is not empty then it is successful, it returns pointer to the newly
 * allocated sglq object else it returns NULL.
 **/
struct lpfc_sglq *
__lpfc_sli_get_nvmet_sglq(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq)
{
	struct list_head *lpfc_nvmet_sgl_list;
	struct lpfc_sglq *sglq = NULL;

	lpfc_nvmet_sgl_list = &phba->sli4_hba.lpfc_nvmet_sgl_list;

	lockdep_assert_held(&phba->sli4_hba.sgl_list_lock);

	list_remove_head(lpfc_nvmet_sgl_list, sglq, struct lpfc_sglq, list);
	if (!sglq)
		return NULL;
	phba->sli4_hba.lpfc_sglq_active_list[sglq->sli4_lxritag] = sglq;
	sglq->state = SGL_ALLOCATED;
	return sglq;
}

/**
 * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
 * @phba: Pointer to HBA context object.
 *
 * This function is called with no lock held. This function
 * allocates a new driver iocb object from the iocb pool. If the
 * allocation is successful, it returns pointer to the newly
 * allocated iocb object else it returns NULL.
 **/
struct lpfc_iocbq *
lpfc_sli_get_iocbq(struct lpfc_hba *phba)
{
	struct lpfc_iocbq *iocbq = NULL;
	unsigned long iflags;

	spin_lock_irqsave(&phba->hbalock, iflags);
	iocbq = __lpfc_sli_get_iocbq(phba);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
	return iocbq;
}

/**
 * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object
 * to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 * The sglq structure that holds the xritag and phys and virtual
 * mappings for the scatter gather list is retrieved from the
 * active array of sglq. The get of the sglq pointer also clears
 * the entry in the array. If the status of the IO indicates that
 * this IO was aborted then the sglq entry is put on the
 * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
 * IO has good status or fails for any other reason then the sglq
 * entry is added to the free list (lpfc_els_sgl_list). The hbalock is
 * asserted held in the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	struct lpfc_sglq *sglq;
	unsigned long iflag = 0;
	struct lpfc_sli_ring *pring;

	if (iocbq->sli4_xritag == NO_XRI)
		sglq = NULL;
	else
		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_lxritag);

	if (sglq) {
		if (iocbq->cmd_flag & LPFC_IO_NVMET) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_nvmet_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			goto out;
		}

		if ((iocbq->cmd_flag & LPFC_EXCHANGE_BUSY) &&
		    (!(unlikely(pci_channel_offline(phba->pcidev)))) &&
		    sglq->state != SGL_XRI_ABORTED) {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);

			/* Check if we can get a reference on ndlp */
			if (sglq->ndlp && !lpfc_nlp_get(sglq->ndlp))
				sglq->ndlp = NULL;

			list_add(&sglq->list,
				 &phba->sli4_hba.lpfc_abts_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
		} else {
			spin_lock_irqsave(&phba->sli4_hba.sgl_list_lock,
					  iflag);
			sglq->state = SGL_FREED;
			sglq->ndlp = NULL;
			list_add_tail(&sglq->list,
				      &phba->sli4_hba.lpfc_els_sgl_list);
			spin_unlock_irqrestore(
				&phba->sli4_hba.sgl_list_lock, iflag);
			pring = lpfc_phba_elsring(phba);
			/* Check if TXQ queue needs to be serviced */
			if (pring && (!list_empty(&pring->txq)))
				lpfc_worker_wake_up(phba);
		}
	}

out:
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset_startat(iocbq, 0, wqe);
	iocbq->sli4_lxritag = NO_XRI;
	iocbq->sli4_xritag = NO_XRI;
	iocbq->cmd_flag &= ~(LPFC_IO_NVME | LPFC_IO_NVMET | LPFC_IO_CMF |
			      LPFC_IO_NVME_LS);
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called to release the driver iocb object to the
 * iocb pool. The iotag in the iocb object does not change for each
 * use of the iocb object. This function clears all other fields of
 * the iocb object when it is freed. The hbalock is asserted held in
 * the code path calling this routine.
 **/
static void
__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	memset_startat(iocbq, 0, iocb);
	iocbq->sli4_xritag = NO_XRI;
	list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
}

/**
 * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with hbalock held to release driver
 * iocb object to the iocb pool. The iotag in the iocb object
 * does not change for each use of the iocb object. This function
 * clears all other fields of the iocb object when it is freed.
 **/
static void
__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	lockdep_assert_held(&phba->hbalock);

	phba->__lpfc_sli_release_iocbq(phba, iocbq);
	phba->iocb_cnt--;
}

/**
 * lpfc_sli_release_iocbq - Release iocb to the iocb pool
 * @phba: Pointer to HBA context object.
 * @iocbq: Pointer to driver iocb object.
 *
 * This function is called with no lock held to release the iocb to
 * iocb pool.
 **/
void
lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
{
	unsigned long iflags;

	/*
	 * Clean all volatile data fields, preserve iotag and node struct.
	 */
	spin_lock_irqsave(&phba->hbalock, iflags);
	__lpfc_sli_release_iocbq(phba, iocbq);
	spin_unlock_irqrestore(&phba->hbalock, iflags);
}

1500 /**
1501  * lpfc_sli_cancel_iocbs - Cancel all iocbs from a list.
1502  * @phba: Pointer to HBA context object.
1503  * @iocblist: List of IOCBs.
1504  * @ulpstatus: ULP status in IOCB command field.
1505  * @ulpWord4: ULP word-4 in IOCB command field.
1506  *
1507  * This function is called with a list of IOCBs to cancel. It cancels the IOCB
1508  * on the list by invoking the complete callback function associated with the
1509  * IOCB with the provided @ulpstatus and @ulpword4 set to the IOCB commond
1510  * fields.
1511  **/
1512 void
1513 lpfc_sli_cancel_iocbs(struct lpfc_hba *phba, struct list_head *iocblist,
1514 		      uint32_t ulpstatus, uint32_t ulpWord4)
1515 {
1516 	struct lpfc_iocbq *piocb;
1517 
1518 	while (!list_empty(iocblist)) {
1519 		list_remove_head(iocblist, piocb, struct lpfc_iocbq, list);
1520 		if (piocb->cmd_cmpl) {
1521 			if (piocb->cmd_flag & LPFC_IO_NVME) {
1522 				lpfc_nvme_cancel_iocb(phba, piocb,
1523 						      ulpstatus, ulpWord4);
1524 			} else {
1525 				if (phba->sli_rev == LPFC_SLI_REV4) {
1526 					bf_set(lpfc_wcqe_c_status,
1527 					       &piocb->wcqe_cmpl, ulpstatus);
1528 					piocb->wcqe_cmpl.parameter = ulpWord4;
1529 				} else {
1530 					piocb->iocb.ulpStatus = ulpstatus;
1531 					piocb->iocb.un.ulpWord[4] = ulpWord4;
1532 				}
1533 				(piocb->cmd_cmpl) (phba, piocb, piocb);
1534 			}
1535 		} else {
1536 			lpfc_sli_release_iocbq(phba, piocb);
1537 		}
1538 	}
1539 	return;
1540 }
1541 
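/*
 * Usage sketch (illustrative): callers typically splice the iocbs to be
 * failed onto a private list under the appropriate lock, drop the lock,
 * then cancel them in one pass; IOSTAT_LOCAL_REJECT with
 * IOERR_SLI_ABORTED is the status pairing commonly used for this in the
 * driver:
 *
 *	LIST_HEAD(completions);
 *
 *	spin_lock_irq(&phba->hbalock);
 *	list_splice_init(&pring->txq, &completions);
 *	spin_unlock_irq(&phba->hbalock);
 *	lpfc_sli_cancel_iocbs(phba, &completions,
 *			      IOSTAT_LOCAL_REJECT, IOERR_SLI_ABORTED);
 */
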
1542 /**
1543  * lpfc_sli_iocb_cmd_type - Get the iocb type
1544  * @iocb_cmnd: iocb command code.
1545  *
1546  * This function is called by ring event handler function to get the iocb type.
1547  * This function translates the iocb command to an iocb command type used to
1548  * decide the final disposition of each completed IOCB.
1549  * The function returns
1550  * LPFC_UNKNOWN_IOCB if it is an unsupported iocb
1551  * LPFC_SOL_IOCB     if it is a solicited iocb completion
1552  * LPFC_ABORT_IOCB   if it is an abort iocb
1553  * LPFC_UNSOL_IOCB   if it is an unsolicited iocb
1554  *
1555  * The caller is not required to hold any lock.
1556  **/
1557 static lpfc_iocb_type
1558 lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
1559 {
1560 	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
1561 
1562 	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
1563 		return LPFC_UNKNOWN_IOCB;
1564 
1565 	switch (iocb_cmnd) {
1566 	case CMD_XMIT_SEQUENCE_CR:
1567 	case CMD_XMIT_SEQUENCE_CX:
1568 	case CMD_XMIT_BCAST_CN:
1569 	case CMD_XMIT_BCAST_CX:
1570 	case CMD_ELS_REQUEST_CR:
1571 	case CMD_ELS_REQUEST_CX:
1572 	case CMD_CREATE_XRI_CR:
1573 	case CMD_CREATE_XRI_CX:
1574 	case CMD_GET_RPI_CN:
1575 	case CMD_XMIT_ELS_RSP_CX:
1576 	case CMD_GET_RPI_CR:
1577 	case CMD_FCP_IWRITE_CR:
1578 	case CMD_FCP_IWRITE_CX:
1579 	case CMD_FCP_IREAD_CR:
1580 	case CMD_FCP_IREAD_CX:
1581 	case CMD_FCP_ICMND_CR:
1582 	case CMD_FCP_ICMND_CX:
1583 	case CMD_FCP_TSEND_CX:
1584 	case CMD_FCP_TRSP_CX:
1585 	case CMD_FCP_TRECEIVE_CX:
1586 	case CMD_FCP_AUTO_TRSP_CX:
1587 	case CMD_ADAPTER_MSG:
1588 	case CMD_ADAPTER_DUMP:
1589 	case CMD_XMIT_SEQUENCE64_CR:
1590 	case CMD_XMIT_SEQUENCE64_CX:
1591 	case CMD_XMIT_BCAST64_CN:
1592 	case CMD_XMIT_BCAST64_CX:
1593 	case CMD_ELS_REQUEST64_CR:
1594 	case CMD_ELS_REQUEST64_CX:
1595 	case CMD_FCP_IWRITE64_CR:
1596 	case CMD_FCP_IWRITE64_CX:
1597 	case CMD_FCP_IREAD64_CR:
1598 	case CMD_FCP_IREAD64_CX:
1599 	case CMD_FCP_ICMND64_CR:
1600 	case CMD_FCP_ICMND64_CX:
1601 	case CMD_FCP_TSEND64_CX:
1602 	case CMD_FCP_TRSP64_CX:
1603 	case CMD_FCP_TRECEIVE64_CX:
1604 	case CMD_GEN_REQUEST64_CR:
1605 	case CMD_GEN_REQUEST64_CX:
1606 	case CMD_XMIT_ELS_RSP64_CX:
1607 	case DSSCMD_IWRITE64_CR:
1608 	case DSSCMD_IWRITE64_CX:
1609 	case DSSCMD_IREAD64_CR:
1610 	case DSSCMD_IREAD64_CX:
1611 	case CMD_SEND_FRAME:
1612 		type = LPFC_SOL_IOCB;
1613 		break;
1614 	case CMD_ABORT_XRI_CN:
1615 	case CMD_ABORT_XRI_CX:
1616 	case CMD_CLOSE_XRI_CN:
1617 	case CMD_CLOSE_XRI_CX:
1618 	case CMD_XRI_ABORTED_CX:
1619 	case CMD_ABORT_MXRI64_CN:
1620 	case CMD_XMIT_BLS_RSP64_CX:
1621 		type = LPFC_ABORT_IOCB;
1622 		break;
1623 	case CMD_RCV_SEQUENCE_CX:
1624 	case CMD_RCV_ELS_REQ_CX:
1625 	case CMD_RCV_SEQUENCE64_CX:
1626 	case CMD_RCV_ELS_REQ64_CX:
1627 	case CMD_ASYNC_STATUS:
1628 	case CMD_IOCB_RCV_SEQ64_CX:
1629 	case CMD_IOCB_RCV_ELS64_CX:
1630 	case CMD_IOCB_RCV_CONT64_CX:
1631 	case CMD_IOCB_RET_XRI64_CX:
1632 		type = LPFC_UNSOL_IOCB;
1633 		break;
1634 	case CMD_IOCB_XMIT_MSEQ64_CR:
1635 	case CMD_IOCB_XMIT_MSEQ64_CX:
1636 	case CMD_IOCB_RCV_SEQ_LIST64_CX:
1637 	case CMD_IOCB_RCV_ELS_LIST64_CX:
1638 	case CMD_IOCB_CLOSE_EXTENDED_CN:
1639 	case CMD_IOCB_ABORT_EXTENDED_CN:
1640 	case CMD_IOCB_RET_HBQE64_CN:
1641 	case CMD_IOCB_FCP_IBIDIR64_CR:
1642 	case CMD_IOCB_FCP_IBIDIR64_CX:
1643 	case CMD_IOCB_FCP_ITASKMGT64_CX:
1644 	case CMD_IOCB_LOGENTRY_CN:
1645 	case CMD_IOCB_LOGENTRY_ASYNC_CN:
1646 		printk("%s - Unhandled SLI-3 Command x%x\n",
1647 				__func__, iocb_cmnd);
1648 		type = LPFC_UNKNOWN_IOCB;
1649 		break;
1650 	default:
1651 		type = LPFC_UNKNOWN_IOCB;
1652 		break;
1653 	}
1654 
1655 	return type;
1656 }
1657 
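/*
 * Illustrative dispatch on the returned type, roughly what a ring event
 * handler does with it (simplified sketch):
 *
 *	switch (lpfc_sli_iocb_cmd_type(irsp->ulpCommand)) {
 *	case LPFC_SOL_IOCB:
 *		(match against the txcmplq and invoke cmd_cmpl)
 *	case LPFC_UNSOL_IOCB:
 *		(hand the sequence to the upper layer protocol)
 *	case LPFC_ABORT_IOCB:
 *		(complete the aborted exchange)
 *	default:
 *		(log and drop the entry)
 *	}
 */
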
1658 /**
1659  * lpfc_sli_ring_map - Issue config_ring mbox for all rings
1660  * @phba: Pointer to HBA context object.
1661  *
1662  * This function is called from SLI initialization code
1663  * to configure every ring of the HBA's SLI interface. The
1664  * caller is not required to hold any lock. This function issues
1665  * a config_ring mailbox command for each ring.
1666  * This function returns zero if successful else returns a negative
1667  * error code.
1668  **/
1669 static int
1670 lpfc_sli_ring_map(struct lpfc_hba *phba)
1671 {
1672 	struct lpfc_sli *psli = &phba->sli;
1673 	LPFC_MBOXQ_t *pmb;
1674 	MAILBOX_t *pmbox;
1675 	int i, rc, ret = 0;
1676 
1677 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1678 	if (!pmb)
1679 		return -ENOMEM;
1680 	pmbox = &pmb->u.mb;
1681 	phba->link_state = LPFC_INIT_MBX_CMDS;
1682 	for (i = 0; i < psli->num_rings; i++) {
1683 		lpfc_config_ring(phba, i, pmb);
1684 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
1685 		if (rc != MBX_SUCCESS) {
1686 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
1687 					"0446 Adapter failed to init (%d), "
1688 					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
1689 					"ring %d\n",
1690 					rc, pmbox->mbxCommand,
1691 					pmbox->mbxStatus, i);
1692 			phba->link_state = LPFC_HBA_ERROR;
1693 			ret = -ENXIO;
1694 			break;
1695 		}
1696 	}
1697 	mempool_free(pmb, phba->mbox_mem_pool);
1698 	return ret;
1699 }
1700 
1701 /**
1702  * lpfc_sli_ringtxcmpl_put - Adds new iocb to the txcmplq
1703  * @phba: Pointer to HBA context object.
1704  * @pring: Pointer to driver SLI ring object.
1705  * @piocb: Pointer to the driver iocb object.
1706  *
1707  * The driver calls this function with the hbalock held for SLI3 ports or
1708  * the ring lock held for SLI4 ports. The function adds the
1709  * new iocb to txcmplq of the given ring. This function always returns
1710  * 0. If this function is called for ELS ring, this function checks if
1711  * there is a vport associated with the ELS command. This function also
1712  * starts els_tmofunc timer if this is an ELS command.
1713  **/
1714 static int
1715 lpfc_sli_ringtxcmpl_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
1716 			struct lpfc_iocbq *piocb)
1717 {
1718 	u32 ulp_command = 0;
1719 
1720 	BUG_ON(!piocb);
1721 	ulp_command = get_job_cmnd(phba, piocb);
1722 
1723 	list_add_tail(&piocb->list, &pring->txcmplq);
1724 	piocb->cmd_flag |= LPFC_IO_ON_TXCMPLQ;
1725 	pring->txcmplq_cnt++;
1726 	if ((unlikely(pring->ringno == LPFC_ELS_RING)) &&
1727 	   (ulp_command != CMD_ABORT_XRI_WQE) &&
1728 	   (ulp_command != CMD_ABORT_XRI_CN) &&
1729 	   (ulp_command != CMD_CLOSE_XRI_CN)) {
1730 		BUG_ON(!piocb->vport);
1731 		if (!test_bit(FC_UNLOADING, &piocb->vport->load_flag))
1732 			mod_timer(&piocb->vport->els_tmofunc,
1733 				  jiffies + secs_to_jiffies(phba->fc_ratov << 1));
1734 	}
1735 
1736 	return 0;
1737 }
1738 
1739 /**
1740  * lpfc_sli_ringtx_get - Get first element of the txq
1741  * @phba: Pointer to HBA context object.
1742  * @pring: Pointer to driver SLI ring object.
1743  *
1744  * This function is called with hbalock held to get next
1745  * iocb in txq of the given ring. If there is any iocb in
1746  * the txq, the function returns first iocb in the list after
1747  * removing the iocb from the list, else it returns NULL.
1748  **/
1749 struct lpfc_iocbq *
1750 lpfc_sli_ringtx_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
1751 {
1752 	struct lpfc_iocbq *cmd_iocb;
1753 
1754 	lockdep_assert_held(&phba->hbalock);
1755 
1756 	list_remove_head((&pring->txq), cmd_iocb, struct lpfc_iocbq, list);
1757 	return cmd_iocb;
1758 }
1759 
1760 /**
1761  * lpfc_cmf_sync_cmpl - Process a CMF_SYNC_WQE cmpl
1762  * @phba: Pointer to HBA context object.
1763  * @cmdiocb: Pointer to driver command iocb object.
1764  * @rspiocb: Pointer to driver response iocb object.
1765  *
1766  * This routine will inform the driver of any BW adjustments we need
1767  * to make. These changes will be picked up during the next CMF
1768  * timer interrupt. In addition, any BW changes will be logged
1769  * with LOG_CGN_MGMT.
1770  **/
1771 static void
1772 lpfc_cmf_sync_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
1773 		   struct lpfc_iocbq *rspiocb)
1774 {
1775 	union lpfc_wqe128 *wqe;
1776 	uint32_t status, info;
1777 	struct lpfc_wcqe_complete *wcqe = &rspiocb->wcqe_cmpl;
1778 	uint64_t bw, bwdif, slop;
1779 	uint64_t pcent, bwpcent;
1780 	int asig, afpin, sigcnt, fpincnt;
1781 	int wsigmax, wfpinmax, cg, tdp;
1782 	char *s;
1783 
1784 	/* First check for error */
1785 	status = bf_get(lpfc_wcqe_c_status, wcqe);
1786 	if (status) {
1787 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1788 				"6211 CMF_SYNC_WQE Error "
1789 				"req_tag x%x status x%x hwstatus x%x "
1790 				"tdatap x%x parm x%x\n",
1791 				bf_get(lpfc_wcqe_c_request_tag, wcqe),
1792 				bf_get(lpfc_wcqe_c_status, wcqe),
1793 				bf_get(lpfc_wcqe_c_hw_status, wcqe),
1794 				wcqe->total_data_placed,
1795 				wcqe->parameter);
1796 		goto out;
1797 	}
1798 
1799 	/* Gather congestion information on a successful cmpl */
1800 	info = wcqe->parameter;
1801 	phba->cmf_active_info = info;
1802 
1803 	/* See if firmware info count is valid or has changed */
1804 	if (info > LPFC_MAX_CMF_INFO || phba->cmf_info_per_interval == info)
1805 		info = 0;
1806 	else
1807 		phba->cmf_info_per_interval = info;
1808 
1809 	tdp = bf_get(lpfc_wcqe_c_cmf_bw, wcqe);
1810 	cg = bf_get(lpfc_wcqe_c_cmf_cg, wcqe);
1811 
1812 	/* Get BW requirement from firmware */
1813 	bw = (uint64_t)tdp * LPFC_CMF_BLK_SIZE;
1814 	if (!bw) {
1815 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1816 				"6212 CMF_SYNC_WQE x%x: NULL bw\n",
1817 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
1818 		goto out;
1819 	}
1820 
1821 	/* Gather information needed for logging if a BW change is required */
1822 	wqe = &cmdiocb->wqe;
1823 	asig = bf_get(cmf_sync_asig, &wqe->cmf_sync);
1824 	afpin = bf_get(cmf_sync_afpin, &wqe->cmf_sync);
1825 	fpincnt = bf_get(cmf_sync_wfpincnt, &wqe->cmf_sync);
1826 	sigcnt = bf_get(cmf_sync_wsigcnt, &wqe->cmf_sync);
1827 	if (phba->cmf_max_bytes_per_interval != bw ||
1828 	    (asig || afpin || sigcnt || fpincnt)) {
1829 		/* Are we increasing or decreasing BW */
1830 		if (phba->cmf_max_bytes_per_interval <  bw) {
1831 			bwdif = bw - phba->cmf_max_bytes_per_interval;
1832 			s = "Increase";
1833 		} else {
1834 			bwdif = phba->cmf_max_bytes_per_interval - bw;
1835 			s = "Decrease";
1836 		}
1837 
1838 		/* What is the change percentage */
1839 		slop = div_u64(phba->cmf_link_byte_count, 200); /*For rounding*/
1840 		pcent = div64_u64(bwdif * 100 + slop,
1841 				  phba->cmf_link_byte_count);
1842 		bwpcent = div64_u64(bw * 100 + slop,
1843 				    phba->cmf_link_byte_count);
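		/* Note on the arithmetic (illustrative): slop equals 0.5%
		 * of cmf_link_byte_count in bytes, which raises each
		 * quotient by 0.005 of a percentage point; it only keeps
		 * values sitting a hair below a whole-percent boundary
		 * from being truncated down by the integer divide.
		 */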
1844 		/* Because of the bytes adjustment for the shorter timer in
1845 		 * lpfc_cmf_timer(), cmf_link_byte_count can come up short and
1846 		 * make the BW appear to be above 100%.
1847 		 */
1848 		if (bwpcent > 100)
1849 			bwpcent = 100;
1850 
1851 		if (phba->cmf_max_bytes_per_interval < bw &&
1852 		    bwpcent > 95)
1853 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1854 					"6208 Congestion bandwidth "
1855 					"limits removed\n");
1856 		else if ((phba->cmf_max_bytes_per_interval > bw) &&
1857 			 ((bwpcent + pcent) <= 100) && ((bwpcent + pcent) > 95))
1858 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1859 					"6209 Congestion bandwidth "
1860 					"limits in effect\n");
1861 
1862 		if (asig) {
1863 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1864 					"6237 BW Threshold %lld%% (%lld): "
1865 					"%lld%% %s: Signal Alarm: cg:%d "
1866 					"Info:%u\n",
1867 					bwpcent, bw, pcent, s, cg,
1868 					phba->cmf_active_info);
1869 		} else if (afpin) {
1870 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1871 					"6238 BW Threshold %lld%% (%lld): "
1872 					"%lld%% %s: FPIN Alarm: cg:%d "
1873 					"Info:%u\n",
1874 					bwpcent, bw, pcent, s, cg,
1875 					phba->cmf_active_info);
1876 		} else if (sigcnt) {
1877 			wsigmax = bf_get(cmf_sync_wsigmax, &wqe->cmf_sync);
1878 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1879 					"6239 BW Threshold %lld%% (%lld): "
1880 					"%lld%% %s: Signal Warning: "
1881 					"Cnt %d Max %d: cg:%d Info:%u\n",
1882 					bwpcent, bw, pcent, s, sigcnt,
1883 					wsigmax, cg, phba->cmf_active_info);
1884 		} else if (fpincnt) {
1885 			wfpinmax = bf_get(cmf_sync_wfpinmax, &wqe->cmf_sync);
1886 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1887 					"6240 BW Threshold %lld%% (%lld): "
1888 					"%lld%% %s: FPIN Warning: "
1889 					"Cnt %d Max %d: cg:%d Info:%u\n",
1890 					bwpcent, bw, pcent, s, fpincnt,
1891 					wfpinmax, cg, phba->cmf_active_info);
1892 		} else {
1893 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1894 					"6241 BW Threshold %lld%% (%lld): "
1895 					"CMF %lld%% %s: cg:%d Info:%u\n",
1896 					bwpcent, bw, pcent, s, cg,
1897 					phba->cmf_active_info);
1898 		}
1899 	} else if (info) {
1900 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1901 				"6246 Info Threshold %u\n", info);
1902 	}
1903 
1904 	/* Save BW change to be picked up during next timer interrupt */
1905 	phba->cmf_last_sync_bw = bw;
1906 out:
1907 	lpfc_sli_release_iocbq(phba, cmdiocb);
1908 }
1909 
1910 /**
1911  * lpfc_issue_cmf_sync_wqe - Issue a CMF_SYNC_WQE
1912  * @phba: Pointer to HBA context object.
1913  * @ms:   ms to set in WQE interval, 0 means use init op
1914  * @total: Total rcv bytes for this interval
1915  *
1916  * This routine is called every CMF timer interrupt. Its purpose is
1917  * to issue a CMF_SYNC_WQE to the firmware to inform it of any events
1918  * that may indicate we have congestion (FPINs or Signals). Upon
1919  * completion, the firmware will indicate any BW restrictions the
1920  * driver may need to take.
1921  **/
1922 int
1923 lpfc_issue_cmf_sync_wqe(struct lpfc_hba *phba, u32 ms, u64 total)
1924 {
1925 	union lpfc_wqe128 *wqe;
1926 	struct lpfc_iocbq *sync_buf;
1927 	unsigned long iflags;
1928 	u32 ret_val, cgn_sig_freq;
1929 	u32 atot, wtot, max;
1930 	u8 warn_sync_period = 0;
1931 
1932 	/* First address any alarm / warning activity */
1933 	atot = atomic_xchg(&phba->cgn_sync_alarm_cnt, 0);
1934 	wtot = atomic_xchg(&phba->cgn_sync_warn_cnt, 0);
1935 
1936 	spin_lock_irqsave(&phba->hbalock, iflags);
1937 
1938 	/* ONLY Managed mode will send the CMF_SYNC_WQE to the HBA */
1939 	if (phba->cmf_active_mode != LPFC_CFG_MANAGED ||
1940 	    phba->link_state < LPFC_LINK_UP) {
1941 		ret_val = 0;
1942 		goto out_unlock;
1943 	}
1944 
1945 	sync_buf = __lpfc_sli_get_iocbq(phba);
1946 	if (!sync_buf) {
1947 		lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
1948 				"6244 No available WQEs for CMF_SYNC_WQE\n");
1949 		ret_val = ENOMEM;
1950 		goto out_unlock;
1951 	}
1952 
1953 	wqe = &sync_buf->wqe;
1954 
1955 	/* WQEs are reused.  Clear stale data and set key fields to zero */
1956 	memset(wqe, 0, sizeof(*wqe));
1957 
1958 	/* If this is the very first CMF_SYNC_WQE, issue an init operation */
1959 	if (!ms) {
1960 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
1961 				"6441 CMF Init %d - CMF_SYNC_WQE\n",
1962 				phba->fc_eventTag);
1963 		bf_set(cmf_sync_op, &wqe->cmf_sync, 1); /* 1=init */
1964 		bf_set(cmf_sync_interval, &wqe->cmf_sync, LPFC_CMF_INTERVAL);
1965 		goto initpath;
1966 	}
1967 
1968 	bf_set(cmf_sync_op, &wqe->cmf_sync, 0); /* 0=recalc */
1969 	bf_set(cmf_sync_interval, &wqe->cmf_sync, ms);
1970 
1971 	/* Check for alarms / warnings */
1972 	if (atot) {
1973 		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1974 			/* We hit a Signal alarm condition */
1975 			bf_set(cmf_sync_asig, &wqe->cmf_sync, 1);
1976 		} else {
1977 			/* We hit a FPIN alarm condition */
1978 			bf_set(cmf_sync_afpin, &wqe->cmf_sync, 1);
1979 		}
1980 	} else if (wtot) {
1981 		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY ||
1982 		    phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
1983 			cgn_sig_freq = phba->cgn_sig_freq ? phba->cgn_sig_freq :
1984 					lpfc_fabric_cgn_frequency;
1985 			/* We hit a Signal warning condition */
1986 			max = LPFC_SEC_TO_MSEC / cgn_sig_freq *
1987 				lpfc_acqe_cgn_frequency;
1988 			bf_set(cmf_sync_wsigmax, &wqe->cmf_sync, max);
1989 			bf_set(cmf_sync_wsigcnt, &wqe->cmf_sync, wtot);
1990 			warn_sync_period = lpfc_acqe_cgn_frequency;
1991 		} else {
1992 			/* We hit a FPIN warning condition */
1993 			bf_set(cmf_sync_wfpinmax, &wqe->cmf_sync, 1);
1994 			bf_set(cmf_sync_wfpincnt, &wqe->cmf_sync, 1);
1995 			if (phba->cgn_fpin_frequency != LPFC_FPIN_INIT_FREQ)
1996 				warn_sync_period =
1997 				LPFC_MSECS_TO_SECS(phba->cgn_fpin_frequency);
1998 		}
1999 	}
2000 
2001 	/* Update total read blocks during previous timer interval */
2002 	wqe->cmf_sync.read_bytes = (u32)(total / LPFC_CMF_BLK_SIZE);
2003 
2004 initpath:
2005 	bf_set(cmf_sync_ver, &wqe->cmf_sync, LPFC_CMF_SYNC_VER);
2006 	wqe->cmf_sync.event_tag = phba->fc_eventTag;
2007 	bf_set(cmf_sync_cmnd, &wqe->cmf_sync, CMD_CMF_SYNC_WQE);
2008 
2009 	/* Setup reqtag to match the wqe completion. */
2010 	bf_set(cmf_sync_reqtag, &wqe->cmf_sync, sync_buf->iotag);
2011 
2012 	bf_set(cmf_sync_qosd, &wqe->cmf_sync, 1);
2013 	bf_set(cmf_sync_period, &wqe->cmf_sync, warn_sync_period);
2014 
2015 	bf_set(cmf_sync_cmd_type, &wqe->cmf_sync, CMF_SYNC_COMMAND);
2016 	bf_set(cmf_sync_wqec, &wqe->cmf_sync, 1);
2017 	bf_set(cmf_sync_cqid, &wqe->cmf_sync, LPFC_WQE_CQ_ID_DEFAULT);
2018 
2019 	sync_buf->vport = phba->pport;
2020 	sync_buf->cmd_cmpl = lpfc_cmf_sync_cmpl;
2021 	sync_buf->cmd_dmabuf = NULL;
2022 	sync_buf->rsp_dmabuf = NULL;
2023 	sync_buf->bpl_dmabuf = NULL;
2024 	sync_buf->sli4_xritag = NO_XRI;
2025 
2026 	sync_buf->cmd_flag |= LPFC_IO_CMF;
2027 	ret_val = lpfc_sli4_issue_wqe(phba, &phba->sli4_hba.hdwq[0], sync_buf);
2028 	if (ret_val) {
2029 		lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
2030 				"6214 Cannot issue CMF_SYNC_WQE: x%x\n",
2031 				ret_val);
2032 		__lpfc_sli_release_iocbq(phba, sync_buf);
2033 	}
2034 out_unlock:
2035 	spin_unlock_irqrestore(&phba->hbalock, iflags);
2036 	return ret_val;
2037 }
2038 
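/*
 * Caller sketch (illustrative): the CMF timer path tallies the bytes
 * received over the interval that just elapsed and reports them here,
 * something like:
 *
 *	u64 total = (rcv bytes accumulated during the interval);
 *
 *	lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
 *
 * Passing ms == 0 instead requests the one-time init operation.
 */
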
2039 /**
2040  * lpfc_sli_next_iocb_slot - Get next iocb slot in the ring
2041  * @phba: Pointer to HBA context object.
2042  * @pring: Pointer to driver SLI ring object.
2043  *
2044  * This function is called with hbalock held and the caller must post the
2045  * iocb without releasing the lock. If the caller releases the lock,
2046  * iocb slot returned by the function is not guaranteed to be available.
2047  * The function returns a pointer to the next available iocb slot if there
2048  * is an available slot in the ring, else it returns NULL.
2049  * If the get index of the ring is ahead of the put index, the function
2050  * will post an error attention event to the worker thread to take the
2051  * HBA to offline state.
2052  **/
2053 static IOCB_t *
2054 lpfc_sli_next_iocb_slot(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2055 {
2056 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
2057 	uint32_t  max_cmd_idx = pring->sli.sli3.numCiocb;
2058 
2059 	lockdep_assert_held(&phba->hbalock);
2060 
2061 	if ((pring->sli.sli3.next_cmdidx == pring->sli.sli3.cmdidx) &&
2062 	   (++pring->sli.sli3.next_cmdidx >= max_cmd_idx))
2063 		pring->sli.sli3.next_cmdidx = 0;
2064 
2065 	if (unlikely(pring->sli.sli3.local_getidx ==
2066 		pring->sli.sli3.next_cmdidx)) {
2067 
2068 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
2069 
2070 		if (unlikely(pring->sli.sli3.local_getidx >= max_cmd_idx)) {
2071 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2072 					"0315 Ring %d issue: portCmdGet %d "
2073 					"is bigger than cmd ring %d\n",
2074 					pring->ringno,
2075 					pring->sli.sli3.local_getidx,
2076 					max_cmd_idx);
2077 
2078 			phba->link_state = LPFC_HBA_ERROR;
2079 			/*
2080 			 * All error attention handlers are posted to
2081 			 * worker thread
2082 			 */
2083 			phba->work_ha |= HA_ERATT;
2084 			phba->work_hs = HS_FFER3;
2085 
2086 			lpfc_worker_wake_up(phba);
2087 
2088 			return NULL;
2089 		}
2090 
2091 		if (pring->sli.sli3.local_getidx == pring->sli.sli3.next_cmdidx)
2092 			return NULL;
2093 	}
2094 
2095 	return lpfc_cmd_iocb(phba, pring);
2096 }
2097 
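/*
 * Index arithmetic sketch: next_cmdidx is the producer index and wraps
 * modulo numCiocb, while cmdGetInx is the consumer index the port last
 * reported. The slot after the most recently used one is free only
 * while the producer has not caught up with the consumer; on a
 * collision the cached copy (local_getidx) is refreshed from SLIM once
 * before the ring is declared full.
 */
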
2098 /**
2099  * lpfc_sli_next_iotag - Get an iotag for the iocb
2100  * @phba: Pointer to HBA context object.
2101  * @iocbq: Pointer to driver iocb object.
2102  *
2103  * This function gets an iotag for the iocb. If there is no unused iotag and
2104  * the iocbq_lookup_len < 0xffff, this function allocates a bigger iotag_lookup
2105  * array and assigns a new iotag.
2106  * The function returns the allocated iotag if successful, else returns zero.
2107  * Zero is not a valid iotag.
2108  * The caller is not required to hold any lock.
2109  **/
2110 uint16_t
2111 lpfc_sli_next_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
2112 {
2113 	struct lpfc_iocbq **new_arr;
2114 	struct lpfc_iocbq **old_arr;
2115 	size_t new_len;
2116 	struct lpfc_sli *psli = &phba->sli;
2117 	uint16_t iotag;
2118 
2119 	spin_lock_irq(&phba->hbalock);
2120 	iotag = psli->last_iotag;
2121 	if (++iotag < psli->iocbq_lookup_len) {
2122 		psli->last_iotag = iotag;
2123 		psli->iocbq_lookup[iotag] = iocbq;
2124 		spin_unlock_irq(&phba->hbalock);
2125 		iocbq->iotag = iotag;
2126 		return iotag;
2127 	} else if (psli->iocbq_lookup_len < (0xffff
2128 					   - LPFC_IOCBQ_LOOKUP_INCREMENT)) {
2129 		new_len = psli->iocbq_lookup_len + LPFC_IOCBQ_LOOKUP_INCREMENT;
2130 		spin_unlock_irq(&phba->hbalock);
2131 		new_arr = kzalloc_objs(struct lpfc_iocbq *, new_len);
2132 		if (new_arr) {
2133 			spin_lock_irq(&phba->hbalock);
2134 			old_arr = psli->iocbq_lookup;
2135 			if (new_len <= psli->iocbq_lookup_len) {
2136 				/* highly improbable case */
2137 				kfree(new_arr);
2138 				iotag = psli->last_iotag;
2139 				if (++iotag < psli->iocbq_lookup_len) {
2140 					psli->last_iotag = iotag;
2141 					psli->iocbq_lookup[iotag] = iocbq;
2142 					spin_unlock_irq(&phba->hbalock);
2143 					iocbq->iotag = iotag;
2144 					return iotag;
2145 				}
2146 				spin_unlock_irq(&phba->hbalock);
2147 				return 0;
2148 			}
2149 			if (psli->iocbq_lookup)
2150 				memcpy(new_arr, old_arr,
2151 				       ((psli->last_iotag  + 1) *
2152 					sizeof (struct lpfc_iocbq *)));
2153 			psli->iocbq_lookup = new_arr;
2154 			psli->iocbq_lookup_len = new_len;
2155 			psli->last_iotag = iotag;
2156 			psli->iocbq_lookup[iotag] = iocbq;
2157 			spin_unlock_irq(&phba->hbalock);
2158 			iocbq->iotag = iotag;
2159 			kfree(old_arr);
2160 			return iotag;
2161 		}
2162 	} else
2163 		spin_unlock_irq(&phba->hbalock);
2164 
2165 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
2166 			"0318 Failed to allocate IOTAG.last IOTAG is %d\n",
2167 			psli->last_iotag);
2168 
2169 	return 0;
2170 }
2171 
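/*
 * Illustrative numbers (assuming LPFC_IOCBQ_LOOKUP_INCREMENT is 1024):
 * exhausting a 1024-entry iocbq_lookup array triggers allocation of a
 * 2048-entry array, a copy of the live pointers, and a kfree of the old
 * array after the lock is dropped. Iotag 0 is never handed out, so a
 * zero return always means the allocation failed.
 */
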
2172 /**
2173  * lpfc_sli_submit_iocb - Submit an iocb to the firmware
2174  * @phba: Pointer to HBA context object.
2175  * @pring: Pointer to driver SLI ring object.
2176  * @iocb: Pointer to iocb slot in the ring.
2177  * @nextiocb: Pointer to driver iocb object which need to be
2178  *            posted to firmware.
2179  *
2180  * This function is called to post a new iocb to the firmware. This
2181  * function copies the new iocb to ring iocb slot and updates the
2182  * ring pointers. It adds the new iocb to txcmplq if there is
2183  * a completion call back for this iocb else the function will free the
2184  * iocb object.  The hbalock is asserted held in the code path calling
2185  * this routine.
2186  **/
2187 static void
2188 lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
2189 		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
2190 {
2191 	/*
2192 	 * Set up an iotag
2193 	 */
2194 	nextiocb->iocb.ulpIoTag = (nextiocb->cmd_cmpl) ? nextiocb->iotag : 0;
2195 
2196 
2197 	if (pring->ringno == LPFC_ELS_RING) {
2198 		lpfc_debugfs_slow_ring_trc(phba,
2199 			"IOCB cmd ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
2200 			*(((uint32_t *) &nextiocb->iocb) + 4),
2201 			*(((uint32_t *) &nextiocb->iocb) + 6),
2202 			*(((uint32_t *) &nextiocb->iocb) + 7));
2203 	}
2204 
2205 	/*
2206 	 * Issue iocb command to adapter
2207 	 */
2208 	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, phba->iocb_cmd_size);
2209 	wmb();
2210 	pring->stats.iocb_cmd++;
2211 
2212 	/*
2213 	 * If there is no completion routine to call, we can release the
2214 	 * IOCB buffer back right now. For IOCBs, like QUE_RING_BUF,
2215 	 * that have no rsp ring completion, cmd_cmpl MUST be NULL.
2216 	 */
2217 	if (nextiocb->cmd_cmpl)
2218 		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
2219 	else
2220 		__lpfc_sli_release_iocbq(phba, nextiocb);
2221 
2222 	/*
2223 	 * Let the HBA know what IOCB slot will be the next one the
2224 	 * driver will put a command into.
2225 	 */
2226 	pring->sli.sli3.cmdidx = pring->sli.sli3.next_cmdidx;
2227 	writel(pring->sli.sli3.cmdidx, &phba->host_gp[pring->ringno].cmdPutInx);
2228 }
2229 
2230 /**
2231  * lpfc_sli_update_full_ring - Update the chip attention register
2232  * @phba: Pointer to HBA context object.
2233  * @pring: Pointer to driver SLI ring object.
2234  *
2235  * The caller is not required to hold any lock for calling this function.
2236  * This function updates the chip attention bits for the ring to inform firmware
2237  * that there is pending work to be done for this ring and requests an
2238  * interrupt when there is space available in the ring. This function is
2239  * called when the driver is unable to post more iocbs to the ring due
2240  * to unavailability of space in the ring.
2241  **/
2242 static void
2243 lpfc_sli_update_full_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2244 {
2245 	int ringno = pring->ringno;
2246 
2247 	pring->flag |= LPFC_CALL_RING_AVAILABLE;
2248 
2249 	wmb();
2250 
2251 	/*
2252 	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
2253 	 * The HBA will tell us when an IOCB entry is available.
2254 	 */
2255 	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
2256 	readl(phba->CAregaddr); /* flush */
2257 
2258 	pring->stats.iocb_cmd_full++;
2259 }
2260 
2261 /**
2262  * lpfc_sli_update_ring - Update chip attention register
2263  * @phba: Pointer to HBA context object.
2264  * @pring: Pointer to driver SLI ring object.
2265  *
2266  * This function updates the chip attention register bit for the
2267  * given ring to inform HBA that there is more work to be done
2268  * in this ring. The caller is not required to hold any lock.
2269  **/
2270 static void
2271 lpfc_sli_update_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2272 {
2273 	int ringno = pring->ringno;
2274 
2275 	/*
2276 	 * Tell the HBA that there is work to do in this ring.
2277 	 */
2278 	if (!(phba->sli3_options & LPFC_SLI3_CRP_ENABLED)) {
2279 		wmb();
2280 		writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
2281 		readl(phba->CAregaddr); /* flush */
2282 	}
2283 }
2284 
2285 /**
2286  * lpfc_sli_resume_iocb - Process iocbs in the txq
2287  * @phba: Pointer to HBA context object.
2288  * @pring: Pointer to driver SLI ring object.
2289  *
2290  * This function is called with hbalock held to post pending iocbs
2291  * in the txq to the firmware. This function is called when driver
2292  * detects space available in the ring.
2293  **/
2294 static void
2295 lpfc_sli_resume_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
2296 {
2297 	IOCB_t *iocb;
2298 	struct lpfc_iocbq *nextiocb;
2299 
2300 	lockdep_assert_held(&phba->hbalock);
2301 
2302 	/*
2303 	 * Check to see if:
2304 	 *  (a) there is anything on the txq to send
2305 	 *  (b) link is up
2306 	 *  (c) link attention events can be processed (fcp ring only)
2307 	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
2308 	 */
2309 
2310 	if (lpfc_is_link_up(phba) &&
2311 	    (!list_empty(&pring->txq)) &&
2312 	    (pring->ringno != LPFC_FCP_RING ||
2313 	     phba->sli.sli_flag & LPFC_PROCESS_LA)) {
2314 
2315 		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
2316 		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
2317 			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
2318 
2319 		if (iocb)
2320 			lpfc_sli_update_ring(phba, pring);
2321 		else
2322 			lpfc_sli_update_full_ring(phba, pring);
2323 	}
2324 
2325 	return;
2326 }
2327 
2328 /**
2329  * lpfc_sli_next_hbq_slot - Get next hbq entry for the HBQ
2330  * @phba: Pointer to HBA context object.
2331  * @hbqno: HBQ number.
2332  *
2333  * This function is called with hbalock held to get the next
2334  * available slot for the given HBQ. If there is free slot
2335  * available for the HBQ it will return pointer to the next available
2336  * HBQ entry else it will return NULL.
2337  **/
2338 static struct lpfc_hbq_entry *
2339 lpfc_sli_next_hbq_slot(struct lpfc_hba *phba, uint32_t hbqno)
2340 {
2341 	struct hbq_s *hbqp = &phba->hbqs[hbqno];
2342 
2343 	lockdep_assert_held(&phba->hbalock);
2344 
2345 	if (hbqp->next_hbqPutIdx == hbqp->hbqPutIdx &&
2346 	    ++hbqp->next_hbqPutIdx >= hbqp->entry_count)
2347 		hbqp->next_hbqPutIdx = 0;
2348 
2349 	if (unlikely(hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)) {
2350 		uint32_t raw_index = phba->hbq_get[hbqno];
2351 		uint32_t getidx = le32_to_cpu(raw_index);
2352 
2353 		hbqp->local_hbqGetIdx = getidx;
2354 
2355 		if (unlikely(hbqp->local_hbqGetIdx >= hbqp->entry_count)) {
2356 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2357 					"1802 HBQ %d: local_hbqGetIdx "
2358 					"%u is > than hbqp->entry_count %u\n",
2359 					hbqno, hbqp->local_hbqGetIdx,
2360 					hbqp->entry_count);
2361 
2362 			phba->link_state = LPFC_HBA_ERROR;
2363 			return NULL;
2364 		}
2365 
2366 		if (hbqp->local_hbqGetIdx == hbqp->next_hbqPutIdx)
2367 			return NULL;
2368 	}
2369 
2370 	return (struct lpfc_hbq_entry *) phba->hbqs[hbqno].hbq_virt +
2371 			hbqp->hbqPutIdx;
2372 }
2373 
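/*
 * Producer/consumer sketch: hbqPutIdx is where the driver writes the
 * next entry and local_hbqGetIdx caches the consumer index the HBA last
 * reported through phba->hbq_get[hbqno] (little-endian in SLIM). The
 * HBQ is full when advancing the put index would collide with the get
 * index, so the cached get index is refreshed once before NULL is
 * returned.
 */
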
2374 /**
2375  * lpfc_sli_hbqbuf_free_all - Free all the hbq buffers
2376  * @phba: Pointer to HBA context object.
2377  *
2378  * This function is called with no lock held to free all the
2379  * hbq buffers while uninitializing the SLI interface. It also
2380  * frees the HBQ buffers returned by the firmware but not yet
2381  * processed by the upper layers.
2382  **/
2383 void
2384 lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
2385 {
2386 	struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2387 	struct hbq_dmabuf *hbq_buf;
2388 	unsigned long flags;
2389 	int i, hbq_count;
2390 
2391 	hbq_count = lpfc_sli_hbq_count();
2392 	/* Return all memory used by all HBQs */
2393 	spin_lock_irqsave(&phba->hbalock, flags);
2394 	for (i = 0; i < hbq_count; ++i) {
2395 		list_for_each_entry_safe(dmabuf, next_dmabuf,
2396 				&phba->hbqs[i].hbq_buffer_list, list) {
2397 			hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
2398 			list_del(&hbq_buf->dbuf.list);
2399 			(phba->hbqs[i].hbq_free_buffer)(phba, hbq_buf);
2400 		}
2401 		phba->hbqs[i].buffer_count = 0;
2402 	}
2403 
2404 	/* Mark the HBQs not in use */
2405 	phba->hbq_in_use = 0;
2406 	spin_unlock_irqrestore(&phba->hbalock, flags);
2407 }
2408 
2409 /**
2410  * lpfc_sli_hbq_to_firmware - Post the hbq buffer to firmware
2411  * @phba: Pointer to HBA context object.
2412  * @hbqno: HBQ number.
2413  * @hbq_buf: Pointer to HBQ buffer.
2414  *
2415  * This function is called with the hbalock held to post a
2416  * hbq buffer to the firmware. If the function finds an empty
2417  * slot in the HBQ, it will post the buffer. The function returns
2418  * zero if it successfully posts the buffer, else it returns an
2419  * error code.
2420  **/
2421 static int
2422 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
2423 			 struct hbq_dmabuf *hbq_buf)
2424 {
2425 	lockdep_assert_held(&phba->hbalock);
2426 	return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
2427 }
2428 
2429 /**
2430  * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
2431  * @phba: Pointer to HBA context object.
2432  * @hbqno: HBQ number.
2433  * @hbq_buf: Pointer to HBQ buffer.
2434  *
2435  * This function is called with the hbalock held to post a hbq buffer to the
2436  * firmware. If the function finds an empty slot in the HBQ, it will post the
2437  * buffer and place it on the hbq_buffer_list. The function returns zero if
2438  * it successfully posts the buffer, else it returns an error.
2439  **/
2440 static int
2441 lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
2442 			    struct hbq_dmabuf *hbq_buf)
2443 {
2444 	struct lpfc_hbq_entry *hbqe;
2445 	dma_addr_t physaddr = hbq_buf->dbuf.phys;
2446 
2447 	lockdep_assert_held(&phba->hbalock);
2448 	/* Get next HBQ entry slot to use */
2449 	hbqe = lpfc_sli_next_hbq_slot(phba, hbqno);
2450 	if (hbqe) {
2451 		struct hbq_s *hbqp = &phba->hbqs[hbqno];
2452 
2453 		hbqe->bde.addrHigh = le32_to_cpu(putPaddrHigh(physaddr));
2454 		hbqe->bde.addrLow  = le32_to_cpu(putPaddrLow(physaddr));
2455 		hbqe->bde.tus.f.bdeSize = hbq_buf->total_size;
2456 		hbqe->bde.tus.f.bdeFlags = 0;
2457 		hbqe->bde.tus.w = le32_to_cpu(hbqe->bde.tus.w);
2458 		hbqe->buffer_tag = le32_to_cpu(hbq_buf->tag);
2459 				/* Sync SLIM */
2460 		hbqp->hbqPutIdx = hbqp->next_hbqPutIdx;
2461 		writel(hbqp->hbqPutIdx, phba->hbq_put + hbqno);
2462 				/* flush */
2463 		readl(phba->hbq_put + hbqno);
2464 		list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
2465 		return 0;
2466 	} else
2467 		return -ENOMEM;
2468 }
2469 
2470 /**
2471  * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
2472  * @phba: Pointer to HBA context object.
2473  * @hbqno: HBQ number.
2474  * @hbq_buf: Pointer to HBQ buffer.
2475  *
2476  * This function is called with the hbalock held to post an RQE to the SLI4
2477  * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
2478  * the hbq_buffer_list and return zero, otherwise it will return an error.
2479  **/
2480 static int
2481 lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
2482 			    struct hbq_dmabuf *hbq_buf)
2483 {
2484 	int rc;
2485 	struct lpfc_rqe hrqe;
2486 	struct lpfc_rqe drqe;
2487 	struct lpfc_queue *hrq;
2488 	struct lpfc_queue *drq;
2489 
2490 	if (hbqno != LPFC_ELS_HBQ)
2491 		return 1;
2492 	hrq = phba->sli4_hba.hdr_rq;
2493 	drq = phba->sli4_hba.dat_rq;
2494 
2495 	lockdep_assert_held(&phba->hbalock);
2496 	hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
2497 	hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
2498 	drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
2499 	drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
2500 	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
2501 	if (rc < 0)
2502 		return rc;
2503 	hbq_buf->tag = (rc | (hbqno << 16));
2504 	list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
2505 	return 0;
2506 }
2507 
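/*
 * Tag layout sketch: the low 16 bits hold the RQE index returned by
 * lpfc_sli4_rq_put() and the high 16 bits hold the HBQ number, so an
 * rc of 5 on LPFC_ELS_HBQ (queue 0 in this driver) yields tag
 * 0x00000005; consumers such as lpfc_sli_hbqbuf_find() recover the
 * queue number with (tag >> 16).
 */
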
2508 /* HBQ for ELS and CT traffic. */
2509 static struct lpfc_hbq_init lpfc_els_hbq = {
2510 	.rn = 1,
2511 	.entry_count = 256,
2512 	.mask_count = 0,
2513 	.profile = 0,
2514 	.ring_mask = (1 << LPFC_ELS_RING),
2515 	.buffer_count = 0,
2516 	.init_count = 40,
2517 	.add_count = 40,
2518 };
2519 
2520 /* Array of HBQs */
2521 struct lpfc_hbq_init *lpfc_hbq_defs[] = {
2522 	&lpfc_els_hbq,
2523 };
2524 
2525 /**
2526  * lpfc_sli_hbqbuf_fill_hbqs - Post more hbq buffers to HBQ
2527  * @phba: Pointer to HBA context object.
2528  * @hbqno: HBQ number.
2529  * @count: Number of HBQ buffers to be posted.
2530  *
2531  * This function is called with no lock held to post more hbq buffers to the
2532  * given HBQ. The function returns the number of HBQ buffers successfully
2533  * posted.
2534  **/
2535 static int
2536 lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
2537 {
2538 	uint32_t i, posted = 0;
2539 	unsigned long flags;
2540 	struct hbq_dmabuf *hbq_buffer;
2541 	LIST_HEAD(hbq_buf_list);
2542 	if (!phba->hbqs[hbqno].hbq_alloc_buffer)
2543 		return 0;
2544 
2545 	if ((phba->hbqs[hbqno].buffer_count + count) >
2546 	    lpfc_hbq_defs[hbqno]->entry_count)
2547 		count = lpfc_hbq_defs[hbqno]->entry_count -
2548 					phba->hbqs[hbqno].buffer_count;
2549 	if (!count)
2550 		return 0;
2551 	/* Allocate HBQ entries */
2552 	for (i = 0; i < count; i++) {
2553 		hbq_buffer = (phba->hbqs[hbqno].hbq_alloc_buffer)(phba);
2554 		if (!hbq_buffer)
2555 			break;
2556 		list_add_tail(&hbq_buffer->dbuf.list, &hbq_buf_list);
2557 	}
2558 	/* Check whether HBQ is still in use */
2559 	spin_lock_irqsave(&phba->hbalock, flags);
2560 	if (!phba->hbq_in_use)
2561 		goto err;
2562 	while (!list_empty(&hbq_buf_list)) {
2563 		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2564 				 dbuf.list);
2565 		hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
2566 				      (hbqno << 16));
2567 		if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
2568 			phba->hbqs[hbqno].buffer_count++;
2569 			posted++;
2570 		} else
2571 			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2572 	}
2573 	spin_unlock_irqrestore(&phba->hbalock, flags);
2574 	return posted;
2575 err:
2576 	spin_unlock_irqrestore(&phba->hbalock, flags);
2577 	while (!list_empty(&hbq_buf_list)) {
2578 		list_remove_head(&hbq_buf_list, hbq_buffer, struct hbq_dmabuf,
2579 				 dbuf.list);
2580 		(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2581 	}
2582 	return 0;
2583 }
2584 
2585 /**
2586  * lpfc_sli_hbqbuf_add_hbqs - Post more HBQ buffers to firmware
2587  * @phba: Pointer to HBA context object.
2588  * @qno: HBQ number.
2589  *
2590  * This function posts more buffers to the HBQ. This function
2591  * is called with no lock held. The function returns the number of HBQ entries
2592  * successfully posted.
2593  **/
2594 int
2595 lpfc_sli_hbqbuf_add_hbqs(struct lpfc_hba *phba, uint32_t qno)
2596 {
2597 	if (phba->sli_rev == LPFC_SLI_REV4)
2598 		return 0;
2599 	else
2600 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2601 					 lpfc_hbq_defs[qno]->add_count);
2602 }
2603 
2604 /**
2605  * lpfc_sli_hbqbuf_init_hbqs - Post initial buffers to the HBQ
2606  * @phba: Pointer to HBA context object.
2607  * @qno:  HBQ queue number.
2608  *
2609  * This function is called from SLI initialization code path with
2610  * no lock held to post initial HBQ buffers to firmware. The
2611  * function returns the number of HBQ entries successfully allocated.
2612  **/
2613 static int
2614 lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
2615 {
2616 	if (phba->sli_rev == LPFC_SLI_REV4)
2617 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2618 					lpfc_hbq_defs[qno]->entry_count);
2619 	else
2620 		return lpfc_sli_hbqbuf_fill_hbqs(phba, qno,
2621 					 lpfc_hbq_defs[qno]->init_count);
2622 }
2623 
2624 /**
2625  * lpfc_sli_hbqbuf_get - Remove the first hbq off of an hbq list
2626  *
2627  * This function removes the first hbq buffer on an hbq list and returns a
2628  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2629  **/
2630 static struct hbq_dmabuf *
2631 lpfc_sli_hbqbuf_get(struct list_head *rb_list)
2632 {
2633 	struct lpfc_dmabuf *d_buf;
2634 
2635 	list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
2636 	if (!d_buf)
2637 		return NULL;
2638 	return container_of(d_buf, struct hbq_dmabuf, dbuf);
2639 }
2640 
2641 /**
2642  * lpfc_sli_rqbuf_get - Remove the first dma buffer off of an RQ list
2643  * @phba: Pointer to HBA context object.
2644  * @hrq: Pointer to the header receive queue.
2645  *
2646  * This function removes the first RQ buffer on an RQ buffer list and returns a
2647  * pointer to that buffer. If it finds no buffers on the list it returns NULL.
2648  **/
2649 static struct rqb_dmabuf *
2650 lpfc_sli_rqbuf_get(struct lpfc_hba *phba, struct lpfc_queue *hrq)
2651 {
2652 	struct lpfc_dmabuf *h_buf;
2653 	struct lpfc_rqb *rqbp;
2654 
2655 	rqbp = hrq->rqbp;
2656 	list_remove_head(&rqbp->rqb_buffer_list, h_buf,
2657 			 struct lpfc_dmabuf, list);
2658 	if (!h_buf)
2659 		return NULL;
2660 	rqbp->buffer_count--;
2661 	return container_of(h_buf, struct rqb_dmabuf, hbuf);
2662 }
2663 
2664 /**
2665  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
2666  * @phba: Pointer to HBA context object.
2667  * @tag: Tag of the hbq buffer.
2668  *
2669  * This function searches for the hbq buffer associated with the given tag in
2670  * the hbq buffer list. If it finds the hbq buffer, it returns the hbq_buffer
2671  * otherwise it returns NULL.
2672  **/
2673 static struct hbq_dmabuf *
2674 lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
2675 {
2676 	struct lpfc_dmabuf *d_buf;
2677 	struct hbq_dmabuf *hbq_buf;
2678 	uint32_t hbqno;
2679 
2680 	hbqno = tag >> 16;
2681 	if (hbqno >= LPFC_MAX_HBQS)
2682 		return NULL;
2683 
2684 	spin_lock_irq(&phba->hbalock);
2685 	list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
2686 		hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
2687 		if (hbq_buf->tag == tag) {
2688 			spin_unlock_irq(&phba->hbalock);
2689 			return hbq_buf;
2690 		}
2691 	}
2692 	spin_unlock_irq(&phba->hbalock);
2693 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2694 			"1803 Bad hbq tag. Data: x%x x%x\n",
2695 			tag, phba->hbqs[tag >> 16].buffer_count);
2696 	return NULL;
2697 }
2698 
2699 /**
2700  * lpfc_sli_free_hbq - Give back the hbq buffer to firmware
2701  * @phba: Pointer to HBA context object.
2702  * @hbq_buffer: Pointer to HBQ buffer.
2703  *
2704  * This function is called with the hbalock held. This function gives back
2705  * the hbq buffer to firmware. If the HBQ does not have space to
2706  * post the buffer, it will free the buffer.
2707  **/
2708 void
2709 lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
2710 {
2711 	uint32_t hbqno;
2712 
2713 	if (hbq_buffer) {
2714 		hbqno = hbq_buffer->tag >> 16;
2715 		if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
2716 			(phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
2717 	}
2718 }
2719 
2720 /**
2721  * lpfc_sli_chk_mbx_command - Check if the mailbox is a legitimate mailbox
2722  * @mbxCommand: mailbox command code.
2723  *
2724  * This function is called by the mailbox event handler function to verify
2725  * that the completed mailbox command is a legitimate mailbox command. If the
2726  * completed mailbox is not known to the function, it will return MBX_SHUTDOWN
2727  * and the mailbox event handler will take the HBA offline.
2728  **/
2729 static int
2730 lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
2731 {
2732 	uint8_t ret;
2733 
2734 	switch (mbxCommand) {
2735 	case MBX_LOAD_SM:
2736 	case MBX_READ_NV:
2737 	case MBX_WRITE_NV:
2738 	case MBX_WRITE_VPARMS:
2739 	case MBX_RUN_BIU_DIAG:
2740 	case MBX_INIT_LINK:
2741 	case MBX_DOWN_LINK:
2742 	case MBX_CONFIG_LINK:
2743 	case MBX_CONFIG_RING:
2744 	case MBX_RESET_RING:
2745 	case MBX_READ_CONFIG:
2746 	case MBX_READ_RCONFIG:
2747 	case MBX_READ_SPARM:
2748 	case MBX_READ_STATUS:
2749 	case MBX_READ_RPI:
2750 	case MBX_READ_XRI:
2751 	case MBX_READ_REV:
2752 	case MBX_READ_LNK_STAT:
2753 	case MBX_REG_LOGIN:
2754 	case MBX_UNREG_LOGIN:
2755 	case MBX_CLEAR_LA:
2756 	case MBX_DUMP_MEMORY:
2757 	case MBX_DUMP_CONTEXT:
2758 	case MBX_RUN_DIAGS:
2759 	case MBX_RESTART:
2760 	case MBX_UPDATE_CFG:
2761 	case MBX_DOWN_LOAD:
2762 	case MBX_DEL_LD_ENTRY:
2763 	case MBX_RUN_PROGRAM:
2764 	case MBX_SET_MASK:
2765 	case MBX_SET_VARIABLE:
2766 	case MBX_UNREG_D_ID:
2767 	case MBX_KILL_BOARD:
2768 	case MBX_CONFIG_FARP:
2769 	case MBX_BEACON:
2770 	case MBX_LOAD_AREA:
2771 	case MBX_RUN_BIU_DIAG64:
2772 	case MBX_CONFIG_PORT:
2773 	case MBX_READ_SPARM64:
2774 	case MBX_READ_RPI64:
2775 	case MBX_REG_LOGIN64:
2776 	case MBX_READ_TOPOLOGY:
2777 	case MBX_WRITE_WWN:
2778 	case MBX_SET_DEBUG:
2779 	case MBX_LOAD_EXP_ROM:
2780 	case MBX_ASYNCEVT_ENABLE:
2781 	case MBX_REG_VPI:
2782 	case MBX_UNREG_VPI:
2783 	case MBX_HEARTBEAT:
2784 	case MBX_PORT_CAPABILITIES:
2785 	case MBX_PORT_IOV_CONTROL:
2786 	case MBX_SLI4_CONFIG:
2787 	case MBX_SLI4_REQ_FTRS:
2788 	case MBX_REG_FCFI:
2789 	case MBX_UNREG_FCFI:
2790 	case MBX_REG_VFI:
2791 	case MBX_UNREG_VFI:
2792 	case MBX_INIT_VPI:
2793 	case MBX_INIT_VFI:
2794 	case MBX_RESUME_RPI:
2795 	case MBX_READ_EVENT_LOG_STATUS:
2796 	case MBX_READ_EVENT_LOG:
2797 	case MBX_SECURITY_MGMT:
2798 	case MBX_AUTH_PORT:
2799 	case MBX_ACCESS_VDATA:
2800 		ret = mbxCommand;
2801 		break;
2802 	default:
2803 		ret = MBX_SHUTDOWN;
2804 		break;
2805 	}
2806 	return ret;
2807 }
2808 
2809 /**
2810  * lpfc_sli_wake_mbox_wait - lpfc_sli_issue_mbox_wait mbox completion handler
2811  * @phba: Pointer to HBA context object.
2812  * @pmboxq: Pointer to mailbox command.
2813  *
2814  * This is completion handler function for mailbox commands issued from
2815  * lpfc_sli_issue_mbox_wait function. This function is called by the
2816  * mailbox event handler function with no lock held. This function
2817  * will wake up the thread waiting on the completion pointed to by
2818  * ctx_u.mbox_wait of the mailbox.
2819  **/
2820 void
2821 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
2822 {
2823 	unsigned long drvr_flag;
2824 	struct completion *pmbox_done;
2825 
2826 	/*
2827 	 * If pmbox_done is empty, the driver thread gave up waiting and
2828 	 * continued running.
2829 	 */
2830 	pmboxq->mbox_flag |= LPFC_MBX_WAKE;
2831 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
2832 	pmbox_done = pmboxq->ctx_u.mbox_wait;
2833 	if (pmbox_done)
2834 		complete(pmbox_done);
2835 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
2836 	return;
2837 }
2838 
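/*
 * Wait-side sketch (a simplified view of what lpfc_sli_issue_mbox_wait
 * sets up before this handler can run; error handling omitted):
 *
 *	DECLARE_COMPLETION_ONSTACK(mbox_done);
 *
 *	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
 *	pmboxq->ctx_u.mbox_wait = &mbox_done;
 *	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
 *	if (retval == MBX_BUSY || retval == MBX_SUCCESS)
 *		wait_for_completion_timeout(&mbox_done,
 *					    msecs_to_jiffies(timeout * 1000));
 */
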
2839 /**
2840  * lpfc_sli_def_mbox_cmpl - Default mailbox completion handler
2841  * @phba: Pointer to HBA context object.
2842  * @pmb: Pointer to mailbox object.
2843  *
2844  * This function is the default mailbox completion handler. It
2845  * frees the memory resources associated with the completed mailbox
2846  * command. If the completed command is a REG_LOGIN mailbox command,
2847  * this function will issue a UREG_LOGIN to re-claim the RPI.
2848  **/
2849 void
2850 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2851 {
2852 	struct lpfc_vport  *vport = pmb->vport;
2853 	struct lpfc_dmabuf *mp;
2854 	struct lpfc_nodelist *ndlp;
2855 	struct Scsi_Host *shost;
2856 	uint16_t rpi, vpi;
2857 	int rc;
2858 
2859 	/*
2860 	 * If a REG_LOGIN succeeded after the node was destroyed or the node
2861 	 * is in re-discovery, the driver needs to clean up the RPI.
2862 	 */
2863 	if (!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
2864 	    pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
2865 	    !pmb->u.mb.mbxStatus) {
2866 		mp = pmb->ctx_buf;
2867 		if (mp) {
2868 			pmb->ctx_buf = NULL;
2869 			lpfc_mbuf_free(phba, mp->virt, mp->phys);
2870 			kfree(mp);
2871 		}
2872 		rpi = pmb->u.mb.un.varWords[0];
2873 		vpi = pmb->u.mb.un.varRegLogin.vpi;
2874 		if (phba->sli_rev == LPFC_SLI_REV4)
2875 			vpi -= phba->sli4_hba.max_cfg_param.vpi_base;
2876 		lpfc_unreg_login(phba, vpi, rpi, pmb);
2877 		pmb->vport = vport;
2878 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
2879 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
2880 		if (rc != MBX_NOT_FINISHED)
2881 			return;
2882 	}
2883 
2884 	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
2885 		!test_bit(FC_UNLOADING, &phba->pport->load_flag) &&
2886 		!pmb->u.mb.mbxStatus) {
2887 		shost = lpfc_shost_from_vport(vport);
2888 		spin_lock_irq(shost->host_lock);
2889 		vport->vpi_state |= LPFC_VPI_REGISTERED;
2890 		spin_unlock_irq(shost->host_lock);
2891 		clear_bit(FC_VPORT_NEEDS_REG_VPI, &vport->fc_flag);
2892 	}
2893 
2894 	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
2895 		ndlp = pmb->ctx_ndlp;
2896 		lpfc_nlp_put(ndlp);
2897 	}
2898 
2899 	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2900 		ndlp = pmb->ctx_ndlp;
2901 
2902 		/* Check to see if there are any deferred events to process */
2903 		if (ndlp) {
2904 			lpfc_printf_vlog(
2905 				vport,
2906 				KERN_INFO, LOG_MBOX | LOG_DISCOVERY,
2907 				"1438 UNREG cmpl deferred mbox x%x "
2908 				"on NPort x%x Data: x%lx x%x x%px x%lx x%x\n",
2909 				ndlp->nlp_rpi, ndlp->nlp_DID,
2910 				ndlp->nlp_flag, ndlp->nlp_defer_did,
2911 				ndlp, vport->load_flag, kref_read(&ndlp->kref));
2912 
2913 			if (test_bit(NLP_UNREG_INP, &ndlp->nlp_flag) &&
2914 			    ndlp->nlp_defer_did != NLP_EVT_NOTHING_PENDING) {
2915 				clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
2916 				ndlp->nlp_defer_did = NLP_EVT_NOTHING_PENDING;
2917 				lpfc_issue_els_plogi(vport, ndlp->nlp_DID, 0);
2918 			} else {
2919 				clear_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
2920 			}
2921 
2922 			/* The unreg_login mailbox is complete and had a
2923 			 * reference that has to be released.  The PLOGI
2924 			 * got its own ref.
2925 			 */
2926 			lpfc_nlp_put(ndlp);
2927 			pmb->ctx_ndlp = NULL;
2928 		}
2929 	}
2930 
2931 	/* This nlp_put pairs with lpfc_sli4_resume_rpi */
2932 	if (pmb->u.mb.mbxCommand == MBX_RESUME_RPI) {
2933 		ndlp = pmb->ctx_ndlp;
2934 		lpfc_nlp_put(ndlp);
2935 	}
2936 
2937 	/* Check security permission status on INIT_LINK mailbox command */
2938 	if ((pmb->u.mb.mbxCommand == MBX_INIT_LINK) &&
2939 	    (pmb->u.mb.mbxStatus == MBXERR_SEC_NO_PERMISSION))
2940 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
2941 				"2860 SLI authentication is required "
2942 				"for INIT_LINK but has not done yet\n");
2943 
2944 	if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
2945 		lpfc_sli4_mbox_cmd_free(phba, pmb);
2946 	else
2947 		lpfc_mbox_rsrc_cleanup(phba, pmb, MBOX_THD_UNLOCKED);
2948 }
2949 /**
2950  * lpfc_sli4_unreg_rpi_cmpl_clr - mailbox completion handler
2951  * @phba: Pointer to HBA context object.
2952  * @pmb: Pointer to mailbox object.
2953  *
2954  * This function is the unreg rpi mailbox completion handler. It
2955  * frees the memory resources associated with the completed mailbox
2956  * command. An additional reference is put on the ndlp to prevent
2957  * lpfc_nlp_release from freeing the rpi bit in the bitmask before
2958  * the unreg mailbox command completes; this routine puts the
2959  * reference back.
2960  *
2961  **/
2962 void
2963 lpfc_sli4_unreg_rpi_cmpl_clr(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
2964 {
2965 	struct lpfc_vport  *vport = pmb->vport;
2966 	struct lpfc_nodelist *ndlp;
2967 	bool unreg_inp;
2968 
2969 	ndlp = pmb->ctx_ndlp;
2970 	if (pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) {
2971 		if (phba->sli_rev == LPFC_SLI_REV4 &&
2972 		    (bf_get(lpfc_sli_intf_if_type,
2973 		     &phba->sli4_hba.sli_intf) >=
2974 		     LPFC_SLI_INTF_IF_TYPE_2)) {
2975 			if (ndlp) {
2976 				lpfc_printf_vlog(
2977 					 vport, KERN_INFO,
2978 					 LOG_MBOX | LOG_SLI | LOG_NODE,
2979 					 "0010 UNREG_LOGIN vpi:x%x "
2980 					 "rpi:%x DID:%x defer x%x flg x%lx "
2981 					 "x%px\n",
2982 					 vport->vpi, ndlp->nlp_rpi,
2983 					 ndlp->nlp_DID, ndlp->nlp_defer_did,
2984 					 ndlp->nlp_flag,
2985 					 ndlp);
2986 
2987 				/* Cleanup the nlp_flag now that the UNREG RPI
2988 				 * has completed.
2989 				 */
2990 				unreg_inp = test_and_clear_bit(NLP_UNREG_INP,
2991 							       &ndlp->nlp_flag);
2992 				clear_bit(NLP_LOGO_ACC, &ndlp->nlp_flag);
2993 
2994 				/* Check to see if there are any deferred
2995 				 * events to process
2996 				 */
2997 				if (unreg_inp &&
2998 				    ndlp->nlp_defer_did !=
2999 				    NLP_EVT_NOTHING_PENDING) {
3000 					lpfc_printf_vlog(
3001 						vport, KERN_INFO,
3002 						LOG_MBOX | LOG_SLI | LOG_NODE,
3003 						"4111 UNREG cmpl deferred "
3004 						"clr x%x on "
3005 						"NPort x%x Data: x%x x%px\n",
3006 						ndlp->nlp_rpi, ndlp->nlp_DID,
3007 						ndlp->nlp_defer_did, ndlp);
3008 					ndlp->nlp_defer_did =
3009 						NLP_EVT_NOTHING_PENDING;
3010 					lpfc_issue_els_plogi(
3011 						vport, ndlp->nlp_DID, 0);
3012 				}
3013 
3014 				lpfc_nlp_put(ndlp);
3015 			}
3016 		}
3017 	}
3018 
3019 	mempool_free(pmb, phba->mbox_mem_pool);
3020 }
3021 
3022 /**
3023  * lpfc_sli_handle_mb_event - Handle mailbox completions from firmware
3024  * @phba: Pointer to HBA context object.
3025  *
3026  * This function is called with no lock held. This function processes all
3027  * the completed mailbox commands and gives it to upper layers. The interrupt
3028  * service routine processes mailbox completion interrupt and adds completed
3029  * mailbox commands to the mboxq_cmpl queue and signals the worker thread.
3030  * The worker thread calls lpfc_sli_handle_mb_event, which will return the
3031  * completed mailbox commands in mboxq_cmpl queue to the upper layers. This
3032  * function returns the mailbox commands to the upper layer by calling the
3033  * completion handler function of each mailbox.
3034  **/
3035 int
3036 lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
3037 {
3038 	MAILBOX_t *pmbox;
3039 	LPFC_MBOXQ_t *pmb;
3040 	int rc;
3041 	LIST_HEAD(cmplq);
3042 
3043 	phba->sli.slistat.mbox_event++;
3044 
3045 	/* Get all completed mailbox buffers into the cmplq */
3046 	spin_lock_irq(&phba->hbalock);
3047 	list_splice_init(&phba->sli.mboxq_cmpl, &cmplq);
3048 	spin_unlock_irq(&phba->hbalock);
3049 
3050 	/* Get a Mailbox buffer to setup mailbox commands for callback */
3051 	do {
3052 		list_remove_head(&cmplq, pmb, LPFC_MBOXQ_t, list);
3053 		if (pmb == NULL)
3054 			break;
3055 
3056 		pmbox = &pmb->u.mb;
3057 
3058 		if (pmbox->mbxCommand != MBX_HEARTBEAT) {
3059 			if (pmb->vport) {
3060 				lpfc_debugfs_disc_trc(pmb->vport,
3061 					LPFC_DISC_TRC_MBOX_VPORT,
3062 					"MBOX cmpl vport: cmd:x%x mb:x%x x%x",
3063 					(uint32_t)pmbox->mbxCommand,
3064 					pmbox->un.varWords[0],
3065 					pmbox->un.varWords[1]);
3066 			}
3067 			else {
3068 				lpfc_debugfs_disc_trc(phba->pport,
3069 					LPFC_DISC_TRC_MBOX,
3070 					"MBOX cmpl:       cmd:x%x mb:x%x x%x",
3071 					(uint32_t)pmbox->mbxCommand,
3072 					pmbox->un.varWords[0],
3073 					pmbox->un.varWords[1]);
3074 			}
3075 		}
3076 
3077 		/*
3078 		 * It is a fatal error if unknown mbox command completion.
3079 		 */
3080 		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
3081 		    MBX_SHUTDOWN) {
3082 			/* Unknown mailbox command compl */
3083 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3084 					"(%d):0323 Unknown Mailbox command "
3085 					"x%x (x%x/x%x) Cmpl\n",
3086 					pmb->vport ? pmb->vport->vpi :
3087 					LPFC_VPORT_UNKNOWN,
3088 					pmbox->mbxCommand,
3089 					lpfc_sli_config_mbox_subsys_get(phba,
3090 									pmb),
3091 					lpfc_sli_config_mbox_opcode_get(phba,
3092 									pmb));
3093 			phba->link_state = LPFC_HBA_ERROR;
3094 			phba->work_hs = HS_FFER3;
3095 			lpfc_handle_eratt(phba);
3096 			continue;
3097 		}
3098 
3099 		if (pmbox->mbxStatus) {
3100 			phba->sli.slistat.mbox_stat_err++;
3101 			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
3102 				/* Mbox cmd cmpl error - RETRYing */
3103 				lpfc_printf_log(phba, KERN_INFO,
3104 					LOG_MBOX | LOG_SLI,
3105 					"(%d):0305 Mbox cmd cmpl "
3106 					"error - RETRYing Data: x%x "
3107 					"(x%x/x%x) x%x x%x x%x\n",
3108 					pmb->vport ? pmb->vport->vpi :
3109 					LPFC_VPORT_UNKNOWN,
3110 					pmbox->mbxCommand,
3111 					lpfc_sli_config_mbox_subsys_get(phba,
3112 									pmb),
3113 					lpfc_sli_config_mbox_opcode_get(phba,
3114 									pmb),
3115 					pmbox->mbxStatus,
3116 					pmbox->un.varWords[0],
3117 					pmb->vport ? pmb->vport->port_state :
3118 					LPFC_VPORT_UNKNOWN);
3119 				pmbox->mbxStatus = 0;
3120 				pmbox->mbxOwner = OWN_HOST;
3121 				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
3122 				if (rc != MBX_NOT_FINISHED)
3123 					continue;
3124 			}
3125 		}
3126 
3127 		/* Mailbox cmd <cmd> Cmpl <cmpl> */
3128 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
3129 				"(%d):0307 Mailbox cmd x%x (x%x/x%x) Cmpl %ps "
3130 				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
3131 				"x%x x%x x%x\n",
3132 				pmb->vport ? pmb->vport->vpi : 0,
3133 				pmbox->mbxCommand,
3134 				lpfc_sli_config_mbox_subsys_get(phba, pmb),
3135 				lpfc_sli_config_mbox_opcode_get(phba, pmb),
3136 				pmb->mbox_cmpl,
3137 				*((uint32_t *) pmbox),
3138 				pmbox->un.varWords[0],
3139 				pmbox->un.varWords[1],
3140 				pmbox->un.varWords[2],
3141 				pmbox->un.varWords[3],
3142 				pmbox->un.varWords[4],
3143 				pmbox->un.varWords[5],
3144 				pmbox->un.varWords[6],
3145 				pmbox->un.varWords[7],
3146 				pmbox->un.varWords[8],
3147 				pmbox->un.varWords[9],
3148 				pmbox->un.varWords[10]);
3149 
3150 		if (pmb->mbox_cmpl)
3151 			pmb->mbox_cmpl(phba, pmb);
3152 	} while (1);
3153 	return 0;
3154 }
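
/*
 * Editor's illustration, not part of the driver: the handler above uses
 * the splice-under-lock, drain-unlocked pattern so mailbox completion
 * handlers never run with hbalock held.  A minimal sketch of that
 * pattern; the my_* names are hypothetical, and only list_splice_init()
 * and the unlocked iteration mirror the real code.
 */
struct my_item {
	struct list_head list;
	void (*complete)(struct my_item *item);	/* per-item callback */
};

struct my_ctx {
	spinlock_t lock;		/* protects @pending */
	struct list_head pending;	/* filled from interrupt context */
};

static void my_drain(struct my_ctx *ctx)
{
	LIST_HEAD(local);		/* private list, needs no locking */
	struct my_item *item, *next;

	/* Move everything off the shared list in one O(1) splice */
	spin_lock_irq(&ctx->lock);
	list_splice_init(&ctx->pending, &local);
	spin_unlock_irq(&ctx->lock);

	/* Completion callbacks now run without the shared lock held */
	list_for_each_entry_safe(item, next, &local, list) {
		list_del_init(&item->list);
		item->complete(item);
	}
}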
3155 
3156 /**
3157  * lpfc_sli_get_buff - Get the buffer associated with the buffer tag
3158  * @phba: Pointer to HBA context object.
3159  * @pring: Pointer to driver SLI ring object.
3160  * @tag: buffer tag.
3161  *
3162  * This function is called with no lock held. When the QUE_BUFTAG_BIT
3163  * bit is set in the tag, the buffer was posted for a particular
3164  * exchange and the function returns it without replacing the buffer.
3165  * If the buffer is for unsolicited ELS or CT traffic, this function
3166  * returns the buffer and also posts another buffer to the firmware.
3167  **/
3168 static struct lpfc_dmabuf *
3169 lpfc_sli_get_buff(struct lpfc_hba *phba,
3170 		  struct lpfc_sli_ring *pring,
3171 		  uint32_t tag)
3172 {
3173 	struct hbq_dmabuf *hbq_entry;
3174 
3175 	if (tag & QUE_BUFTAG_BIT)
3176 		return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
3177 	hbq_entry = lpfc_sli_hbqbuf_find(phba, tag);
3178 	if (!hbq_entry)
3179 		return NULL;
3180 	return &hbq_entry->dbuf;
3181 }
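
/*
 * Editor's illustration, not part of the driver: lpfc_sli_get_buff()
 * above multiplexes two lookup schemes through one 32-bit tag by
 * reserving a discriminator bit.  A minimal sketch of the idiom; the
 * MY_TAGGED_BIT and my_* names are hypothetical.
 */
#define MY_TAGGED_BIT	(1U << 31)	/* selects the tag namespace */

struct my_pool;
struct my_buf;
struct my_buf *my_tagged_lookup(struct my_pool *pool, u32 tag);
struct my_buf *my_pool_lookup(struct my_pool *pool, u32 tag);

static struct my_buf *my_get_buff(struct my_pool *pool, u32 tag)
{
	/* Tagged buffers were posted for one particular exchange... */
	if (tag & MY_TAGGED_BIT)
		return my_tagged_lookup(pool, tag & ~MY_TAGGED_BIT);

	/* ...everything else comes from the shared buffer pool */
	return my_pool_lookup(pool, tag);
}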
3182 
3183 /**
3184  * lpfc_nvme_unsol_ls_handler - Process an unsolicited event data buffer
3185  *                              containing a NVME LS request.
3186  * @phba: pointer to lpfc hba data structure.
3187  * @piocb: pointer to the iocbq struct representing the sequence starting
3188  *        frame.
3189  *
3190  * This routine initially validates the NVME LS, validates that there is a
3191  * login with the port that sent the LS, and then calls the appropriate nvme
3192  * host or target LS request handler.
3193  **/
3194 static void
3195 lpfc_nvme_unsol_ls_handler(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
3196 {
3197 	struct lpfc_nodelist *ndlp;
3198 	struct lpfc_dmabuf *d_buf;
3199 	struct hbq_dmabuf *nvmebuf;
3200 	struct fc_frame_header *fc_hdr;
3201 	struct lpfc_async_xchg_ctx *axchg = NULL;
3202 	char *failwhy = NULL;
3203 	uint32_t oxid, sid, did, fctl, size;
3204 	int ret = 1;
3205 
3206 	d_buf = piocb->cmd_dmabuf;
3207 
3208 	nvmebuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
3209 	fc_hdr = nvmebuf->hbuf.virt;
3210 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
3211 	sid = sli4_sid_from_fc_hdr(fc_hdr);
3212 	did = sli4_did_from_fc_hdr(fc_hdr);
3213 	fctl = (fc_hdr->fh_f_ctl[0] << 16 |
3214 		fc_hdr->fh_f_ctl[1] << 8 |
3215 		fc_hdr->fh_f_ctl[2]);
3216 	size = bf_get(lpfc_rcqe_length, &nvmebuf->cq_event.cqe.rcqe_cmpl);
3217 
3218 	lpfc_nvmeio_data(phba, "NVME LS    RCV: xri x%x sz %d from %06x\n",
3219 			 oxid, size, sid);
3220 
3221 	if (test_bit(FC_UNLOADING, &phba->pport->load_flag)) {
3222 		failwhy = "Driver Unloading";
3223 	} else if (!(phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)) {
3224 		failwhy = "NVME FC4 Disabled";
3225 	} else if (!phba->nvmet_support && !phba->pport->localport) {
3226 		failwhy = "No Localport";
3227 	} else if (phba->nvmet_support && !phba->targetport) {
3228 		failwhy = "No Targetport";
3229 	} else if (unlikely(fc_hdr->fh_r_ctl != FC_RCTL_ELS4_REQ)) {
3230 		failwhy = "Bad NVME LS R_CTL";
3231 	} else if (unlikely((fctl & 0x00FF0000) !=
3232 			(FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT))) {
3233 		failwhy = "Bad NVME LS F_CTL";
3234 	} else {
3235 		axchg = kzalloc_obj(*axchg, GFP_ATOMIC);
3236 		if (!axchg)
3237 			failwhy = "No CTX memory";
3238 	}
3239 
3240 	if (unlikely(failwhy)) {
3241 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3242 				"6154 Drop NVME LS: SID %06X OXID x%X: %s\n",
3243 				sid, oxid, failwhy);
3244 		goto out_fail;
3245 	}
3246 
3247 	/* validate that the source of the LS is logged in */
3248 	ndlp = lpfc_findnode_did(phba->pport, sid);
3249 	if (!ndlp ||
3250 	    ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
3251 	     (ndlp->nlp_state != NLP_STE_MAPPED_NODE))) {
3252 		lpfc_printf_log(phba, KERN_ERR, LOG_NVME_DISC,
3253 				"6216 NVME Unsol rcv: No ndlp: "
3254 				"NPort_ID x%x oxid x%x\n",
3255 				sid, oxid);
3256 		goto out_fail;
3257 	}
3258 
3259 	axchg->phba = phba;
3260 	axchg->ndlp = ndlp;
3261 	axchg->size = size;
3262 	axchg->oxid = oxid;
3263 	axchg->sid = sid;
3264 	axchg->wqeq = NULL;
3265 	axchg->state = LPFC_NVME_STE_LS_RCV;
3266 	axchg->entry_cnt = 1;
3267 	axchg->rqb_buffer = (void *)nvmebuf;
3268 	axchg->hdwq = &phba->sli4_hba.hdwq[0];
3269 	axchg->payload = nvmebuf->dbuf.virt;
3270 	INIT_LIST_HEAD(&axchg->list);
3271 
3272 	if (phba->nvmet_support) {
3273 		ret = lpfc_nvmet_handle_lsreq(phba, axchg);
3274 		spin_lock_irq(&ndlp->lock);
3275 		if (!ret && !(ndlp->fc4_xpt_flags & NLP_XPT_HAS_HH)) {
3276 			ndlp->fc4_xpt_flags |= NLP_XPT_HAS_HH;
3277 			spin_unlock_irq(&ndlp->lock);
3278 
3279 			/* This reference is a single occurrence to hold the
3280 			 * node valid until the nvmet transport calls
3281 			 * host_release.
3282 			 */
3283 			if (!lpfc_nlp_get(ndlp))
3284 				goto out_fail;
3285 
3286 			lpfc_printf_log(phba, KERN_ERR, LOG_NODE,
3287 					"6206 NVMET unsol ls_req ndlp x%px "
3288 					"DID x%x xflags x%x refcnt %d\n",
3289 					ndlp, ndlp->nlp_DID,
3290 					ndlp->fc4_xpt_flags,
3291 					kref_read(&ndlp->kref));
3292 		} else {
3293 			spin_unlock_irq(&ndlp->lock);
3294 		}
3295 	} else {
3296 		ret = lpfc_nvme_handle_lsreq(phba, axchg);
3297 	}
3298 
3299 	/* if zero, LS was successfully handled. If non-zero, LS not handled */
3300 	if (!ret)
3301 		return;
3302 
3303 out_fail:
3304 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3305 			"6155 Drop NVME LS from DID %06X: SID %06X OXID x%X "
3306 			"NVMe%s handler failed %d\n",
3307 			did, sid, oxid,
3308 			(phba->nvmet_support) ? "T" : "I", ret);
3309 
3310 	/* recycle receive buffer */
3311 	lpfc_in_buf_free(phba, &nvmebuf->dbuf);
3312 
3313 	/* If start of new exchange, abort it */
3314 	if (axchg && (fctl & FC_FC_FIRST_SEQ && !(fctl & FC_FC_EX_CTX)))
3315 		ret = lpfc_nvme_unsol_ls_issue_abort(phba, axchg, sid, oxid);
3316 
3317 	if (ret)
3318 		kfree(axchg);
3319 }
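
/*
 * Editor's illustration, not part of the driver: the handler above
 * funnels every early validation failure into one "failwhy" string so
 * a single log statement and a single reject path cover all cases.  A
 * minimal sketch of the idiom; struct my_req and its fields are
 * hypothetical.
 */
struct my_req {
	bool unloading;
	bool fc4_enabled;
	void *ctx;
};

static int my_validate(struct my_req *req)
{
	const char *failwhy = NULL;

	if (req->unloading)
		failwhy = "Driver Unloading";
	else if (!req->fc4_enabled)
		failwhy = "FC4 Disabled";
	else if (!req->ctx)
		failwhy = "No CTX memory";

	if (failwhy) {
		pr_err("drop request: %s\n", failwhy);	/* one log site */
		return -EINVAL;				/* one reject path */
	}
	return 0;
}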
3320 
3321 /**
3322  * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
3323  * @phba: Pointer to HBA context object.
3324  * @pring: Pointer to driver SLI ring object.
3325  * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
3326  * @fch_r_ctl: the r_ctl for the first frame of the sequence.
3327  * @fch_type: the type for the first frame of the sequence.
3328  *
3329  * This function is called with no lock held. This function uses the r_ctl and
3330  * type of the received sequence to find the correct callback function to call
3331  * to process the sequence.
3332  **/
3333 static int
3334 lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3335 			 struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
3336 			 uint32_t fch_type)
3337 {
3338 	int i;
3339 
3340 	switch (fch_type) {
3341 	case FC_TYPE_NVME:
3342 		lpfc_nvme_unsol_ls_handler(phba, saveq);
3343 		return 1;
3344 	default:
3345 		break;
3346 	}
3347 
3348 	/* Unsolicited Responses */
3349 	if (pring->prt[0].profile) {
3350 		if (pring->prt[0].lpfc_sli_rcv_unsol_event)
3351 			(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
3352 									saveq);
3353 		return 1;
3354 	}
3355 	/* We must search, based on rctl / type,
3356 	 * for the right routine */
3357 	for (i = 0; i < pring->num_mask; i++) {
3358 		if ((pring->prt[i].rctl == fch_r_ctl) &&
3359 		    (pring->prt[i].type == fch_type)) {
3360 			if (pring->prt[i].lpfc_sli_rcv_unsol_event)
3361 				(pring->prt[i].lpfc_sli_rcv_unsol_event)
3362 						(phba, pring, saveq);
3363 			return 1;
3364 		}
3365 	}
3366 	return 0;
3367 }
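
/*
 * Editor's illustration, not part of the driver: after the FC_TYPE_NVME
 * fast path, the routine above resolves a handler by linearly matching
 * the frame's R_CTL/TYPE pair against a small per-ring table.  A
 * reduced sketch of that match table; the my_* names are hypothetical
 * stand-ins for pring->prt[] and pring->num_mask.
 */
struct my_prt_entry {
	u32 rctl;				/* FC routing control */
	u32 type;				/* FC type */
	void (*rcv_unsol_event)(void *ctx);	/* handler, may be NULL */
};

static bool my_dispatch_unsol(struct my_prt_entry *prt, int num_mask,
			      u32 rctl, u32 type, void *ctx)
{
	int i;

	for (i = 0; i < num_mask; i++) {
		if (prt[i].rctl == rctl && prt[i].type == type) {
			if (prt[i].rcv_unsol_event)
				prt[i].rcv_unsol_event(ctx);
			return true;	/* handled; caller may free iocb */
		}
	}
	return false;			/* unexpected R_CTL/TYPE pair */
}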
3368 
3369 static void
3370 lpfc_sli_prep_unsol_wqe(struct lpfc_hba *phba,
3371 			struct lpfc_iocbq *saveq)
3372 {
3373 	IOCB_t *irsp;
3374 	union lpfc_wqe128 *wqe;
3375 	u16 i = 0;
3376 
3377 	irsp = &saveq->iocb;
3378 	wqe = &saveq->wqe;
3379 
3380 	/* Fill wcqe with the IOCB status fields */
3381 	bf_set(lpfc_wcqe_c_status, &saveq->wcqe_cmpl, irsp->ulpStatus);
3382 	saveq->wcqe_cmpl.word3 = irsp->ulpBdeCount;
3383 	saveq->wcqe_cmpl.parameter = irsp->un.ulpWord[4];
3384 	saveq->wcqe_cmpl.total_data_placed = irsp->unsli3.rcvsli3.acc_len;
3385 
3386 	/* Source ID */
3387 	bf_set(els_rsp64_sid, &wqe->xmit_els_rsp, irsp->un.rcvels.parmRo);
3388 
3389 	/* rx-id of the response frame */
3390 	bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com, irsp->ulpContext);
3391 
3392 	/* ox-id of the frame */
3393 	bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
3394 	       irsp->unsli3.rcvsli3.ox_id);
3395 
3396 	/* DID */
3397 	bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
3398 	       irsp->un.rcvels.remoteID);
3399 
3400 	/* unsol data len */
3401 	for (i = 0; i < irsp->ulpBdeCount; i++) {
3402 		struct lpfc_hbq_entry *hbqe = NULL;
3403 
3404 		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3405 			if (i == 0) {
3406 				hbqe = (struct lpfc_hbq_entry *)
3407 					&irsp->un.ulpWord[0];
3408 				saveq->wqe.gen_req.bde.tus.f.bdeSize =
3409 					hbqe->bde.tus.f.bdeSize;
3410 			} else if (i == 1) {
3411 				hbqe = (struct lpfc_hbq_entry *)
3412 					&irsp->unsli3.sli3Words[4];
3413 				saveq->unsol_rcv_len = hbqe->bde.tus.f.bdeSize;
3414 			}
3415 		}
3416 	}
3417 }
3418 
3419 /**
3420  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
3421  * @phba: Pointer to HBA context object.
3422  * @pring: Pointer to driver SLI ring object.
3423  * @saveq: Pointer to the unsolicited iocb.
3424  *
3425  * This function is called with no lock held by the ring event handler
3426  * when there is an unsolicited iocb posted to the response ring by the
3427  * firmware. This function gets the buffer associated with the iocbs
3428  * and calls the event handler for the ring. This function handles both
3429  * qring buffers and hbq buffers.
3430  * When the function returns 1, the caller can free the iocb object;
3431  * otherwise upper layer functions will free the iocb objects.
3432  **/
3433 static int
3434 lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3435 			    struct lpfc_iocbq *saveq)
3436 {
3437 	IOCB_t *irsp;
3438 	WORD5 *w5p;
3439 	dma_addr_t paddr;
3440 	uint32_t Rctl, Type;
3441 	struct lpfc_iocbq *iocbq;
3442 	struct lpfc_dmabuf *dmzbuf;
3443 
3444 	irsp = &saveq->iocb;
3445 	saveq->vport = phba->pport;
3446 
3447 	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
3448 		if (pring->lpfc_sli_rcv_async_status)
3449 			pring->lpfc_sli_rcv_async_status(phba, pring, saveq);
3450 		else
3451 			lpfc_printf_log(phba,
3452 					KERN_WARNING,
3453 					LOG_SLI,
3454 					"0316 Ring %d handler: unexpected "
3455 					"ASYNC_STATUS iocb received evt_code "
3456 					"0x%x\n",
3457 					pring->ringno,
3458 					irsp->un.asyncstat.evt_code);
3459 		return 1;
3460 	}
3461 
3462 	if ((irsp->ulpCommand == CMD_IOCB_RET_XRI64_CX) &&
3463 	    (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)) {
3464 		if (irsp->ulpBdeCount > 0) {
3465 			dmzbuf = lpfc_sli_get_buff(phba, pring,
3466 						   irsp->un.ulpWord[3]);
3467 			lpfc_in_buf_free(phba, dmzbuf);
3468 		}
3469 
3470 		if (irsp->ulpBdeCount > 1) {
3471 			dmzbuf = lpfc_sli_get_buff(phba, pring,
3472 						   irsp->unsli3.sli3Words[3]);
3473 			lpfc_in_buf_free(phba, dmzbuf);
3474 		}
3475 
3476 		if (irsp->ulpBdeCount > 2) {
3477 			dmzbuf = lpfc_sli_get_buff(phba, pring,
3478 						   irsp->unsli3.sli3Words[7]);
3479 			lpfc_in_buf_free(phba, dmzbuf);
3480 		}
3481 
3482 		return 1;
3483 	}
3484 
3485 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
3486 		if (irsp->ulpBdeCount != 0) {
3487 			saveq->cmd_dmabuf = lpfc_sli_get_buff(phba, pring,
3488 						irsp->un.ulpWord[3]);
3489 			if (!saveq->cmd_dmabuf)
3490 				lpfc_printf_log(phba,
3491 					KERN_ERR,
3492 					LOG_SLI,
3493 					"0341 Ring %d Cannot find buffer for "
3494 					"an unsolicited iocb. tag 0x%x\n",
3495 					pring->ringno,
3496 					irsp->un.ulpWord[3]);
3497 		}
3498 		if (irsp->ulpBdeCount == 2) {
3499 			saveq->bpl_dmabuf = lpfc_sli_get_buff(phba, pring,
3500 						irsp->unsli3.sli3Words[7]);
3501 			if (!saveq->bpl_dmabuf)
3502 				lpfc_printf_log(phba,
3503 					KERN_ERR,
3504 					LOG_SLI,
3505 					"0342 Ring %d Cannot find buffer for an"
3506 					" unsolicited iocb. tag 0x%x\n",
3507 					pring->ringno,
3508 					irsp->unsli3.sli3Words[7]);
3509 		}
3510 		list_for_each_entry(iocbq, &saveq->list, list) {
3511 			irsp = &iocbq->iocb;
3512 			if (irsp->ulpBdeCount != 0) {
3513 				iocbq->cmd_dmabuf = lpfc_sli_get_buff(phba,
3514 							pring,
3515 							irsp->un.ulpWord[3]);
3516 				if (!iocbq->cmd_dmabuf)
3517 					lpfc_printf_log(phba,
3518 						KERN_ERR,
3519 						LOG_SLI,
3520 						"0343 Ring %d Cannot find "
3521 						"buffer for an unsolicited iocb"
3522 						". tag 0x%x\n", pring->ringno,
3523 						irsp->un.ulpWord[3]);
3524 			}
3525 			if (irsp->ulpBdeCount == 2) {
3526 				iocbq->bpl_dmabuf = lpfc_sli_get_buff(phba,
3527 						pring,
3528 						irsp->unsli3.sli3Words[7]);
3529 				if (!iocbq->bpl_dmabuf)
3530 					lpfc_printf_log(phba,
3531 						KERN_ERR,
3532 						LOG_SLI,
3533 						"0344 Ring %d Cannot find "
3534 						"buffer for an unsolicited "
3535 						"iocb. tag 0x%x\n",
3536 						pring->ringno,
3537 						irsp->unsli3.sli3Words[7]);
3538 			}
3539 		}
3540 	} else {
3541 		paddr = getPaddr(irsp->un.cont64[0].addrHigh,
3542 				 irsp->un.cont64[0].addrLow);
3543 		saveq->cmd_dmabuf = lpfc_sli_ringpostbuf_get(phba, pring,
3544 							     paddr);
3545 		if (irsp->ulpBdeCount == 2) {
3546 			paddr = getPaddr(irsp->un.cont64[1].addrHigh,
3547 					 irsp->un.cont64[1].addrLow);
3548 			saveq->bpl_dmabuf = lpfc_sli_ringpostbuf_get(phba,
3549 								   pring,
3550 								   paddr);
3551 		}
3552 	}
3553 
3554 	if (irsp->ulpBdeCount != 0 &&
3555 	    (irsp->ulpCommand == CMD_IOCB_RCV_CONT64_CX ||
3556 	     irsp->ulpStatus == IOSTAT_INTERMED_RSP)) {
3557 		int found = 0;
3558 
3559 		/* search continue save q for same XRI */
3560 		list_for_each_entry(iocbq, &pring->iocb_continue_saveq, clist) {
3561 			if (iocbq->iocb.unsli3.rcvsli3.ox_id ==
3562 				saveq->iocb.unsli3.rcvsli3.ox_id) {
3563 				list_add_tail(&saveq->list, &iocbq->list);
3564 				found = 1;
3565 				break;
3566 			}
3567 		}
3568 		if (!found)
3569 			list_add_tail(&saveq->clist,
3570 				      &pring->iocb_continue_saveq);
3571 
3572 		if (saveq->iocb.ulpStatus != IOSTAT_INTERMED_RSP) {
3573 			list_del_init(&iocbq->clist);
3574 			saveq = iocbq;
3575 			irsp = &saveq->iocb;
3576 		} else {
3577 			return 0;
3578 		}
3579 	}
3580 	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX) ||
3581 	    (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX) ||
3582 	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX)) {
3583 		Rctl = FC_RCTL_ELS_REQ;
3584 		Type = FC_TYPE_ELS;
3585 	} else {
3586 		w5p = (WORD5 *)&(saveq->iocb.un.ulpWord[5]);
3587 		Rctl = w5p->hcsw.Rctl;
3588 		Type = w5p->hcsw.Type;
3589 
3590 		/* Firmware Workaround */
3591 		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
3592 			(irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX ||
3593 			 irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3594 			Rctl = FC_RCTL_ELS_REQ;
3595 			Type = FC_TYPE_ELS;
3596 			w5p->hcsw.Rctl = Rctl;
3597 			w5p->hcsw.Type = Type;
3598 		}
3599 	}
3600 
3601 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
3602 	    (irsp->ulpCommand == CMD_IOCB_RCV_ELS64_CX ||
3603 	    irsp->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
3604 		if (irsp->unsli3.rcvsli3.vpi == 0xffff)
3605 			saveq->vport = phba->pport;
3606 		else
3607 			saveq->vport = lpfc_find_vport_by_vpid(phba,
3608 					       irsp->unsli3.rcvsli3.vpi);
3609 	}
3610 
3611 	/* Prepare WQE with Unsol frame */
3612 	lpfc_sli_prep_unsol_wqe(phba, saveq);
3613 
3614 	if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
3615 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3616 				"0313 Ring %d handler: unexpected Rctl x%x "
3617 				"Type x%x received\n",
3618 				pring->ringno, Rctl, Type);
3619 
3620 	return 1;
3621 }
3622 
3623 /**
3624  * lpfc_sli_iocbq_lookup - Find command iocb for the given response iocb
3625  * @phba: Pointer to HBA context object.
3626  * @pring: Pointer to driver SLI ring object.
3627  * @prspiocb: Pointer to response iocb object.
3628  *
3629  * This function looks up the iocb_lookup table to get the command iocb
3630  * corresponding to the given response iocb using the iotag of the
3631  * response iocb. The driver calls this function with the hbalock held
3632  * for SLI3 ports or the ring lock held for SLI4 ports.
3633  * This function returns the command iocb object if it finds the command
3634  * iocb else returns NULL.
3635  **/
3636 static struct lpfc_iocbq *
3637 lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
3638 		      struct lpfc_sli_ring *pring,
3639 		      struct lpfc_iocbq *prspiocb)
3640 {
3641 	struct lpfc_iocbq *cmd_iocb = NULL;
3642 	u16 iotag;
3643 
3644 	if (phba->sli_rev == LPFC_SLI_REV4)
3645 		iotag = get_wqe_reqtag(prspiocb);
3646 	else
3647 		iotag = prspiocb->iocb.ulpIoTag;
3648 
3649 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3650 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
3651 		if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3652 			/* remove from txcmpl queue list */
3653 			list_del_init(&cmd_iocb->list);
3654 			cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3655 			pring->txcmplq_cnt--;
3656 			return cmd_iocb;
3657 		}
3658 	}
3659 
3660 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3661 			"0317 iotag x%x is out of "
3662 			"range: max iotag x%x\n",
3663 			iotag, phba->sli.last_iotag);
3664 	return NULL;
3665 }
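
/*
 * Editor's illustration, not part of the driver: both lookup helpers
 * here depend on a flat array indexed by iotag, so a response locates
 * its originating command in O(1) with only a bounds check.  A minimal
 * sketch; the my_* names are hypothetical, and last_iotag parallels
 * phba->sli.last_iotag.
 */
struct my_cmd;

struct my_lookup {
	struct my_cmd **tbl;	/* tbl[iotag] -> outstanding command */
	u16 last_iotag;		/* highest iotag ever assigned */
};

static struct my_cmd *my_cmd_from_iotag(struct my_lookup *lu, u16 iotag)
{
	/* Tag 0 is reserved; anything past last_iotag was never issued */
	if (iotag == 0 || iotag > lu->last_iotag)
		return NULL;

	/* Entry may be stale, so the caller still checks its own flags */
	return lu->tbl[iotag];
}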
3666 
3667 /**
3668  * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
3669  * @phba: Pointer to HBA context object.
3670  * @pring: Pointer to driver SLI ring object.
3671  * @iotag: IOCB tag.
3672  *
3673  * This function looks up the iocb_lookup table to get the command iocb
3674  * corresponding to the given iotag. The driver calls this function with
3675  * the ring lock held because this function is an SLI4 port only helper.
3676  * This function returns the command iocb object if it finds the command
3677  * iocb else returns NULL.
3678  **/
3679 static struct lpfc_iocbq *
3680 lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
3681 			     struct lpfc_sli_ring *pring, uint16_t iotag)
3682 {
3683 	struct lpfc_iocbq *cmd_iocb = NULL;
3684 
3685 	if (iotag != 0 && iotag <= phba->sli.last_iotag) {
3686 		cmd_iocb = phba->sli.iocbq_lookup[iotag];
3687 		if (cmd_iocb->cmd_flag & LPFC_IO_ON_TXCMPLQ) {
3688 			/* remove from txcmpl queue list */
3689 			list_del_init(&cmd_iocb->list);
3690 			cmd_iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
3691 			pring->txcmplq_cnt--;
3692 			return cmd_iocb;
3693 		}
3694 	}
3695 
3696 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3697 			"0372 iotag x%x lookup error: max iotag (x%x) "
3698 			"cmd_flag x%x\n",
3699 			iotag, phba->sli.last_iotag,
3700 			cmd_iocb ? cmd_iocb->cmd_flag : 0xffff);
3701 	return NULL;
3702 }
3703 
3704 /**
3705  * lpfc_sli_process_sol_iocb - process solicited iocb completion
3706  * @phba: Pointer to HBA context object.
3707  * @pring: Pointer to driver SLI ring object.
3708  * @saveq: Pointer to the response iocb to be processed.
3709  *
3710  * This function is called by the ring event handler for non-fcp
3711  * rings when there is a new response iocb in the response ring.
3712  * The caller is not required to hold any locks. This function
3713  * gets the command iocb associated with the response iocb and
3714  * calls the completion handler for the command iocb. If there
3715  * is no completion handler, the function will free the resources
3716  * associated with command iocb. If the response iocb is for
3717  * an already aborted command iocb, the status of the completion
3718  * is changed to IOSTAT_LOCAL_REJECT/IOERR_SLI_ABORTED.
3719  * This function always returns 1.
3720  **/
3721 static int
3722 lpfc_sli_process_sol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
3723 			  struct lpfc_iocbq *saveq)
3724 {
3725 	struct lpfc_iocbq *cmdiocbp;
3726 	unsigned long iflag;
3727 	u32 ulp_command, ulp_status, ulp_word4, ulp_context, iotag;
3728 
3729 	if (phba->sli_rev == LPFC_SLI_REV4)
3730 		spin_lock_irqsave(&pring->ring_lock, iflag);
3731 	else
3732 		spin_lock_irqsave(&phba->hbalock, iflag);
3733 	cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring, saveq);
3734 	if (phba->sli_rev == LPFC_SLI_REV4)
3735 		spin_unlock_irqrestore(&pring->ring_lock, iflag);
3736 	else
3737 		spin_unlock_irqrestore(&phba->hbalock, iflag);
3738 
3739 	ulp_command = get_job_cmnd(phba, saveq);
3740 	ulp_status = get_job_ulpstatus(phba, saveq);
3741 	ulp_word4 = get_job_word4(phba, saveq);
3742 	ulp_context = get_job_ulpcontext(phba, saveq);
3743 	if (phba->sli_rev == LPFC_SLI_REV4)
3744 		iotag = get_wqe_reqtag(saveq);
3745 	else
3746 		iotag = saveq->iocb.ulpIoTag;
3747 
3748 	if (cmdiocbp) {
3749 		ulp_command = get_job_cmnd(phba, cmdiocbp);
3750 		if (cmdiocbp->cmd_cmpl) {
3751 			/*
3752 			 * If an ELS command failed send an event to mgmt
3753 			 * application.
3754 			 */
3755 			if (ulp_status &&
3756 			     (pring->ringno == LPFC_ELS_RING) &&
3757 			     (ulp_command == CMD_ELS_REQUEST64_CR))
3758 				lpfc_send_els_failure_event(phba,
3759 					cmdiocbp, saveq);
3760 
3761 			/*
3762 			 * Post all ELS completions to the worker thread.
3763 			 * All other are passed to the completion callback.
3764 			 */
3765 			if (pring->ringno == LPFC_ELS_RING) {
3766 				if ((phba->sli_rev < LPFC_SLI_REV4) &&
3767 				    (cmdiocbp->cmd_flag &
3768 							LPFC_DRIVER_ABORTED)) {
3769 					spin_lock_irqsave(&phba->hbalock,
3770 							  iflag);
3771 					cmdiocbp->cmd_flag &=
3772 						~LPFC_DRIVER_ABORTED;
3773 					spin_unlock_irqrestore(&phba->hbalock,
3774 							       iflag);
3775 					saveq->iocb.ulpStatus =
3776 						IOSTAT_LOCAL_REJECT;
3777 					saveq->iocb.un.ulpWord[4] =
3778 						IOERR_SLI_ABORTED;
3779 
3780 					/* Firmware could still be in progress
3781 					 * of DMAing payload, so don't free data
3782 					 * buffer till after a hbeat.
3783 					 */
3784 					spin_lock_irqsave(&phba->hbalock,
3785 							  iflag);
3786 					saveq->cmd_flag |= LPFC_DELAY_MEM_FREE;
3787 					spin_unlock_irqrestore(&phba->hbalock,
3788 							       iflag);
3789 				}
3790 				if (phba->sli_rev == LPFC_SLI_REV4) {
3791 					if (saveq->cmd_flag &
3792 					    LPFC_EXCHANGE_BUSY) {
3793 						/* Set cmdiocb flag for the
3794 						 * exchange busy so sgl (xri)
3795 						 * will not be released until
3796 						 * the abort xri is received
3797 						 * from hba.
3798 						 */
3799 						spin_lock_irqsave(
3800 							&phba->hbalock, iflag);
3801 						cmdiocbp->cmd_flag |=
3802 							LPFC_EXCHANGE_BUSY;
3803 						spin_unlock_irqrestore(
3804 							&phba->hbalock, iflag);
3805 					}
3806 					if (cmdiocbp->cmd_flag &
3807 					    LPFC_DRIVER_ABORTED) {
3808 						/*
3809 						 * Clear LPFC_DRIVER_ABORTED
3810 						 * bit in case it was driver
3811 						 * initiated abort.
3812 						 */
3813 						spin_lock_irqsave(
3814 							&phba->hbalock, iflag);
3815 						cmdiocbp->cmd_flag &=
3816 							~LPFC_DRIVER_ABORTED;
3817 						spin_unlock_irqrestore(
3818 							&phba->hbalock, iflag);
3819 						set_job_ulpstatus(cmdiocbp,
3820 								  IOSTAT_LOCAL_REJECT);
3821 						set_job_ulpword4(cmdiocbp,
3822 								 IOERR_ABORT_REQUESTED);
3823 						/*
3824 						 * For SLI4, irspiocb contains
3825 						 * NO_XRI in sli_xritag, it
3826 						 * shall not affect releasing
3827 						 * sgl (xri) process.
3828 						 */
3829 						set_job_ulpstatus(saveq,
3830 								  IOSTAT_LOCAL_REJECT);
3831 						set_job_ulpword4(saveq,
3832 								 IOERR_SLI_ABORTED);
3833 						spin_lock_irqsave(
3834 							&phba->hbalock, iflag);
3835 						saveq->cmd_flag |=
3836 							LPFC_DELAY_MEM_FREE;
3837 						spin_unlock_irqrestore(
3838 							&phba->hbalock, iflag);
3839 					}
3840 				}
3841 			}
3842 			cmdiocbp->cmd_cmpl(phba, cmdiocbp, saveq);
3843 		} else
3844 			lpfc_sli_release_iocbq(phba, cmdiocbp);
3845 	} else {
3846 		/*
3847 		 * Unknown initiating command based on the response iotag.
3848 		 * This could be the case on the ELS ring because of
3849 		 * lpfc_els_abort().
3850 		 */
3851 		if (pring->ringno != LPFC_ELS_RING) {
3852 			/*
3853 			 * Ring <ringno> handler: unexpected completion IoTag
3854 			 * <IoTag>
3855 			 */
3856 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
3857 					 "0322 Ring %d handler: "
3858 					 "unexpected completion IoTag x%x "
3859 					 "Data: x%x x%x x%x x%x\n",
3860 					 pring->ringno, iotag, ulp_status,
3861 					 ulp_word4, ulp_command, ulp_context);
3862 		}
3863 	}
3864 
3865 	return 1;
3866 }
3867 
3868 /**
3869  * lpfc_sli_rsp_pointers_error - Response ring pointer error handler
3870  * @phba: Pointer to HBA context object.
3871  * @pring: Pointer to driver SLI ring object.
3872  *
3873  * This function is called from the iocb ring event handlers when
3874  * the put pointer is ahead of the get pointer for a ring. This function
3875  * signals an error attention condition to the worker thread, and the
3876  * worker thread will transition the HBA to the offline state.
3877  **/
3878 static void
3879 lpfc_sli_rsp_pointers_error(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
3880 {
3881 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3882 	/*
3883 	 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
3884 	 * rsp ring <portRspMax>
3885 	 */
3886 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
3887 			"0312 Ring %d handler: portRspPut %d "
3888 			"is bigger than rsp ring %d\n",
3889 			pring->ringno, le32_to_cpu(pgp->rspPutInx),
3890 			pring->sli.sli3.numRiocb);
3891 
3892 	phba->link_state = LPFC_HBA_ERROR;
3893 
3894 	/*
3895 	 * All error attention handlers are posted to
3896 	 * worker thread
3897 	 */
3898 	phba->work_ha |= HA_ERATT;
3899 	phba->work_hs = HS_FFER3;
3900 
3901 	lpfc_worker_wake_up(phba);
3902 
3903 	return;
3904 }
3905 
3906 /**
3907  * lpfc_poll_eratt - Error attention polling timer timeout handler
3908  * @t: Context to fetch pointer to address of HBA context object from.
3909  *
3910  * This function is invoked by the Error Attention polling timer when the
3911  * timer times out. It will check the SLI Error Attention register for
3912  * possible attention events. If so, it will post an Error Attention event
3913  * and wake up worker thread to process it. Otherwise, it will set up the
3914  * Error Attention polling timer for the next poll.
3915  **/
3916 void lpfc_poll_eratt(struct timer_list *t)
3917 {
3918 	struct lpfc_hba *phba;
3919 	uint32_t eratt = 0;
3920 	uint64_t sli_intr, cnt;
3921 
3922 	phba = timer_container_of(phba, t, eratt_poll);
3923 
3924 	if (test_bit(FC_UNLOADING, &phba->pport->load_flag))
3925 		return;
3926 
3927 	if (phba->sli_rev == LPFC_SLI_REV4 &&
3928 	    !test_bit(HBA_SETUP, &phba->hba_flag)) {
3929 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
3930 				"0663 HBA still initializing 0x%lx, restart "
3931 				"timer\n",
3932 				phba->hba_flag);
3933 		goto restart_timer;
3934 	}
3935 
3936 	/* Here we also keep track of the hba's interrupts per second */
3937 	sli_intr = phba->sli.slistat.sli_intr;
3938 
3939 	if (phba->sli.slistat.sli_prev_intr > sli_intr)
3940 		cnt = (((uint64_t)(-1) - phba->sli.slistat.sli_prev_intr) +
3941 			sli_intr);
3942 	else
3943 		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
3944 
3945 	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
3946 	do_div(cnt, phba->eratt_poll_interval);
3947 	phba->sli.slistat.sli_ips = cnt;
3948 
3949 	phba->sli.slistat.sli_prev_intr = sli_intr;
3950 
3951 	/* Check chip HA register for error event */
3952 	eratt = lpfc_sli_check_eratt(phba);
3953 
3954 	if (eratt) {
3955 		/* Tell the worker thread there is work to do */
3956 		lpfc_worker_wake_up(phba);
3957 		return;
3958 	}
3959 
3960 restart_timer:
3961 	/* Restart the timer for next eratt poll */
3962 	mod_timer(&phba->eratt_poll,
3963 		  jiffies + secs_to_jiffies(phba->eratt_poll_interval));
3964 	return;
3965 }
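
/*
 * Editor's illustration, not part of the driver: the rate computation
 * above handles a wrapped 64-bit counter explicitly and divides with
 * do_div() because 32-bit builds lack native 64-bit division.  A
 * minimal sketch of the same arithmetic; my_rate() is hypothetical.
 */
static u64 my_rate(u64 prev, u64 now, u32 interval_secs)
{
	u64 delta;

	if (prev > now)				/* counter wrapped */
		delta = ((u64)(-1) - prev) + now;
	else
		delta = now - prev;

	do_div(delta, interval_secs);		/* in-place 64/32 divide */
	return delta;				/* events per second */
}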
3966 
3967 
3968 /**
3969  * lpfc_sli_handle_fast_ring_event - Handle ring events on FCP ring
3970  * @phba: Pointer to HBA context object.
3971  * @pring: Pointer to driver SLI ring object.
3972  * @mask: Host attention register mask for this ring.
3973  *
3974  * This function is called from the interrupt context when there is a ring
3975  * event for the fcp ring. The caller does not hold any lock.
3976  * The function processes each response iocb in the response ring until it
3977  * finds an iocb with LE bit set and chains all the iocbs up to the iocb with
3978  * LE bit set. The function will call the completion handler of the command iocb
3979  * if the response iocb indicates a completion for a command iocb or it is
3980  * an abort completion. The function will call lpfc_sli_process_unsol_iocb
3981  * function if this is an unsolicited iocb.
3982  * This routine presumes LPFC_FCP_RING handling and doesn't bother
3983  * to check it explicitly.
3984  */
3985 int
3986 lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
3987 				struct lpfc_sli_ring *pring, uint32_t mask)
3988 {
3989 	struct lpfc_pgp *pgp = &phba->port_gp[pring->ringno];
3990 	IOCB_t *irsp = NULL;
3991 	IOCB_t *entry = NULL;
3992 	struct lpfc_iocbq *cmdiocbq = NULL;
3993 	struct lpfc_iocbq rspiocbq;
3994 	uint32_t status;
3995 	uint32_t portRspPut, portRspMax;
3996 	int rc = 1;
3997 	lpfc_iocb_type type;
3998 	unsigned long iflag;
3999 	uint32_t rsp_cmpl = 0;
4000 
4001 	spin_lock_irqsave(&phba->hbalock, iflag);
4002 	pring->stats.iocb_event++;
4003 
4004 	/*
4005 	 * The next available response entry should never exceed the maximum
4006 	 * entries.  If it does, treat it as an adapter hardware error.
4007 	 */
4008 	portRspMax = pring->sli.sli3.numRiocb;
4009 	portRspPut = le32_to_cpu(pgp->rspPutInx);
4010 	if (unlikely(portRspPut >= portRspMax)) {
4011 		lpfc_sli_rsp_pointers_error(phba, pring);
4012 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4013 		return 1;
4014 	}
4015 	if (phba->fcp_ring_in_use) {
4016 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4017 		return 1;
4018 	} else
4019 		phba->fcp_ring_in_use = 1;
4020 
4021 	rmb();
4022 	while (pring->sli.sli3.rspidx != portRspPut) {
4023 		/*
4024 		 * Fetch an entry off the ring and copy it into a local data
4025 		 * structure.  The copy involves a byte-swap since the
4026 		 * network byte order and pci byte orders are different.
4027 		 * network byte order and PCI byte order are different.
4028 		entry = lpfc_resp_iocb(phba, pring);
4029 		phba->last_completion_time = jiffies;
4030 
4031 		if (++pring->sli.sli3.rspidx >= portRspMax)
4032 			pring->sli.sli3.rspidx = 0;
4033 
4034 		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
4035 				      (uint32_t *) &rspiocbq.iocb,
4036 				      phba->iocb_rsp_size);
4037 		INIT_LIST_HEAD(&(rspiocbq.list));
4038 		irsp = &rspiocbq.iocb;
4039 
4040 		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
4041 		pring->stats.iocb_rsp++;
4042 		rsp_cmpl++;
4043 
4044 		if (unlikely(irsp->ulpStatus)) {
4045 			/*
4046 			 * If resource errors reported from HBA, reduce
4047 			 * queuedepths of the SCSI device.
4048 			 */
4049 			if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
4050 			    ((irsp->un.ulpWord[4] & IOERR_PARAM_MASK) ==
4051 			     IOERR_NO_RESOURCES)) {
4052 				spin_unlock_irqrestore(&phba->hbalock, iflag);
4053 				phba->lpfc_rampdown_queue_depth(phba);
4054 				spin_lock_irqsave(&phba->hbalock, iflag);
4055 			}
4056 
4057 			/* Rsp ring <ringno> error: IOCB */
4058 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4059 					"0336 Rsp Ring %d error: IOCB Data: "
4060 					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
4061 					pring->ringno,
4062 					irsp->un.ulpWord[0],
4063 					irsp->un.ulpWord[1],
4064 					irsp->un.ulpWord[2],
4065 					irsp->un.ulpWord[3],
4066 					irsp->un.ulpWord[4],
4067 					irsp->un.ulpWord[5],
4068 					*(uint32_t *)&irsp->un1,
4069 					*((uint32_t *)&irsp->un1 + 1));
4070 		}
4071 
4072 		switch (type) {
4073 		case LPFC_ABORT_IOCB:
4074 		case LPFC_SOL_IOCB:
4075 			/*
4076 			 * Idle exchange closed via ABTS from port.  No iocb
4077 			 * resources need to be recovered.
4078 			 */
4079 			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
4080 				lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4081 						"0333 IOCB cmd 0x%x"
4082 						" processed. Skipping"
4083 						" completion\n",
4084 						irsp->ulpCommand);
4085 				break;
4086 			}
4087 
4088 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
4089 							 &rspiocbq);
4090 			if (unlikely(!cmdiocbq))
4091 				break;
4092 			if (cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED)
4093 				cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
4094 			if (cmdiocbq->cmd_cmpl) {
4095 				spin_unlock_irqrestore(&phba->hbalock, iflag);
4096 				cmdiocbq->cmd_cmpl(phba, cmdiocbq, &rspiocbq);
4097 				spin_lock_irqsave(&phba->hbalock, iflag);
4098 			}
4099 			break;
4100 		case LPFC_UNSOL_IOCB:
4101 			spin_unlock_irqrestore(&phba->hbalock, iflag);
4102 			lpfc_sli_process_unsol_iocb(phba, pring, &rspiocbq);
4103 			spin_lock_irqsave(&phba->hbalock, iflag);
4104 			break;
4105 		default:
4106 			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
4107 				char adaptermsg[LPFC_MAX_ADPTMSG];
4108 				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4109 				memcpy(&adaptermsg[0], (uint8_t *) irsp,
4110 				       MAX_MSG_DATA);
4111 				dev_warn(&((phba->pcidev)->dev),
4112 					 "lpfc%d: %s\n",
4113 					 phba->brd_no, adaptermsg);
4114 			} else {
4115 				/* Unknown IOCB command */
4116 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4117 						"0334 Unknown IOCB command "
4118 						"Data: x%x, x%x x%x x%x x%x\n",
4119 						type, irsp->ulpCommand,
4120 						irsp->ulpStatus,
4121 						irsp->ulpIoTag,
4122 						irsp->ulpContext);
4123 			}
4124 			break;
4125 		}
4126 
4127 		/*
4128 		 * The response IOCB has been processed.  Update the ring
4129 		 * pointer in SLIM.  If the port response put pointer has not
4130 		 * been updated, sync the pgp->rspPutInx and fetch the new port
4131 		 * response put pointer.
4132 		 */
4133 		writel(pring->sli.sli3.rspidx,
4134 			&phba->host_gp[pring->ringno].rspGetInx);
4135 
4136 		if (pring->sli.sli3.rspidx == portRspPut)
4137 			portRspPut = le32_to_cpu(pgp->rspPutInx);
4138 	}
4139 
4140 	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
4141 		pring->stats.iocb_rsp_full++;
4142 		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4143 		writel(status, phba->CAregaddr);
4144 		readl(phba->CAregaddr);
4145 	}
4146 	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4147 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4148 		pring->stats.iocb_cmd_empty++;
4149 
4150 		/* Force update of the local copy of cmdGetInx */
4151 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4152 		lpfc_sli_resume_iocb(phba, pring);
4153 
4154 		if ((pring->lpfc_sli_cmd_available))
4155 			(pring->lpfc_sli_cmd_available) (phba, pring);
4156 
4157 	}
4158 
4159 	phba->fcp_ring_in_use = 0;
4160 	spin_unlock_irqrestore(&phba->hbalock, iflag);
4161 	return rc;
4162 }
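
/*
 * Editor's illustration, not part of the driver: the loop above is a
 * single-consumer ring walk -- advance a local get index toward the
 * producer's put index, wrap at the ring size, publish progress, and
 * re-read the put index once caught up.  A reduced sketch with
 * hypothetical my_* names; entry processing is stubbed out.
 */
struct my_rsp_entry {
	u32 word[8];			/* one response entry */
};

struct my_rsp_ring {
	struct my_rsp_entry *entries;
	u32 size;			/* number of entries */
	u32 get_idx;			/* consumer-owned index */
	u32 *put_ptr;			/* producer-updated put index */
	u32 __iomem *shadow_get;	/* where progress is published */
};

static void my_handle_rsp(struct my_rsp_entry *e)
{
	/* a real consumer would decode and complete the entry here */
}

static void my_ring_drain(struct my_rsp_ring *r)
{
	u32 put = READ_ONCE(*r->put_ptr);

	while (r->get_idx != put) {
		my_handle_rsp(&r->entries[r->get_idx]);

		if (++r->get_idx >= r->size)	/* wrap around */
			r->get_idx = 0;

		/* Tell the port how far the host has consumed */
		writel(r->get_idx, r->shadow_get);

		if (r->get_idx == put)		/* caught up: re-check */
			put = READ_ONCE(*r->put_ptr);
	}
}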
4163 
4164 /**
4165  * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
4166  * @phba: Pointer to HBA context object.
4167  * @pring: Pointer to driver SLI ring object.
4168  * @rspiocbp: Pointer to driver response IOCB object.
4169  *
4170  * This function is called from the worker thread when there is a slow-path
4171  * response IOCB to process. This function chains all the response iocbs until
4172  * seeing the iocb with the LE bit set. The function will call
4173  * lpfc_sli_process_sol_iocb function if the response iocb indicates a
4174  * completion of a command iocb. The function will call the
4175  * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
4176  * The function frees the resources or calls the completion handler if this
4177  * iocb is an abort completion. The function returns NULL when the response
4178  * iocb has the LE bit set and all the chained iocbs are processed, otherwise
4179  * this function shall chain the iocb on to the iocb_continueq and return the
4180  * response iocb passed in.
4181  **/
4182 static struct lpfc_iocbq *
4183 lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
4184 			struct lpfc_iocbq *rspiocbp)
4185 {
4186 	struct lpfc_iocbq *saveq;
4187 	struct lpfc_iocbq *cmdiocb;
4188 	struct lpfc_iocbq *next_iocb;
4189 	IOCB_t *irsp;
4190 	uint32_t free_saveq;
4191 	u8 cmd_type;
4192 	lpfc_iocb_type type;
4193 	unsigned long iflag;
4194 	u32 ulp_status = get_job_ulpstatus(phba, rspiocbp);
4195 	u32 ulp_word4 = get_job_word4(phba, rspiocbp);
4196 	u32 ulp_command = get_job_cmnd(phba, rspiocbp);
4197 	int rc;
4198 
4199 	spin_lock_irqsave(&phba->hbalock, iflag);
4200 	/* First add the response iocb to the continueq list */
4201 	list_add_tail(&rspiocbp->list, &pring->iocb_continueq);
4202 	pring->iocb_continueq_cnt++;
4203 
4204 	/*
4205 	 * By default, the driver expects to free all resources
4206 	 * associated with this iocb completion.
4207 	 */
4208 	free_saveq = 1;
4209 	saveq = list_get_first(&pring->iocb_continueq,
4210 			       struct lpfc_iocbq, list);
4211 	list_del_init(&pring->iocb_continueq);
4212 	pring->iocb_continueq_cnt = 0;
4213 
4214 	pring->stats.iocb_rsp++;
4215 
4216 	/*
4217 	 * If resource errors reported from HBA, reduce
4218 	 * queuedepths of the SCSI device.
4219 	 */
4220 	if (ulp_status == IOSTAT_LOCAL_REJECT &&
4221 	    ((ulp_word4 & IOERR_PARAM_MASK) ==
4222 	     IOERR_NO_RESOURCES)) {
4223 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4224 		phba->lpfc_rampdown_queue_depth(phba);
4225 		spin_lock_irqsave(&phba->hbalock, iflag);
4226 	}
4227 
4228 	if (ulp_status) {
4229 		/* Rsp ring <ringno> error: IOCB */
4230 		if (phba->sli_rev < LPFC_SLI_REV4) {
4231 			irsp = &rspiocbp->iocb;
4232 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4233 					"0328 Rsp Ring %d error: ulp_status x%x "
4234 					"IOCB Data: "
4235 					"x%08x x%08x x%08x x%08x "
4236 					"x%08x x%08x x%08x x%08x "
4237 					"x%08x x%08x x%08x x%08x "
4238 					"x%08x x%08x x%08x x%08x\n",
4239 					pring->ringno, ulp_status,
4240 					get_job_ulpword(rspiocbp, 0),
4241 					get_job_ulpword(rspiocbp, 1),
4242 					get_job_ulpword(rspiocbp, 2),
4243 					get_job_ulpword(rspiocbp, 3),
4244 					get_job_ulpword(rspiocbp, 4),
4245 					get_job_ulpword(rspiocbp, 5),
4246 					*(((uint32_t *)irsp) + 6),
4247 					*(((uint32_t *)irsp) + 7),
4248 					*(((uint32_t *)irsp) + 8),
4249 					*(((uint32_t *)irsp) + 9),
4250 					*(((uint32_t *)irsp) + 10),
4251 					*(((uint32_t *)irsp) + 11),
4252 					*(((uint32_t *)irsp) + 12),
4253 					*(((uint32_t *)irsp) + 13),
4254 					*(((uint32_t *)irsp) + 14),
4255 					*(((uint32_t *)irsp) + 15));
4256 		} else {
4257 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
4258 					"0321 Rsp Ring %d error: "
4259 					"IOCB Data: "
4260 					"x%x x%x x%x x%x\n",
4261 					pring->ringno,
4262 					rspiocbp->wcqe_cmpl.word0,
4263 					rspiocbp->wcqe_cmpl.total_data_placed,
4264 					rspiocbp->wcqe_cmpl.parameter,
4265 					rspiocbp->wcqe_cmpl.word3);
4266 		}
4267 	}
4268 
4269 
4270 	/*
4271 	 * Fetch the iocb command type and call the correct completion
4272 	 * routine. Solicited and Unsolicited IOCBs on the ELS ring
4273 	 * get freed back to the lpfc_iocb_list by the discovery
4274 	 * kernel thread.
4275 	 */
4276 	cmd_type = ulp_command & CMD_IOCB_MASK;
4277 	type = lpfc_sli_iocb_cmd_type(cmd_type);
4278 	switch (type) {
4279 	case LPFC_SOL_IOCB:
4280 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4281 		rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
4282 		spin_lock_irqsave(&phba->hbalock, iflag);
4283 		break;
4284 	case LPFC_UNSOL_IOCB:
4285 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4286 		rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
4287 		spin_lock_irqsave(&phba->hbalock, iflag);
4288 		if (!rc)
4289 			free_saveq = 0;
4290 		break;
4291 	case LPFC_ABORT_IOCB:
4292 		cmdiocb = NULL;
4293 		if (ulp_command != CMD_XRI_ABORTED_CX)
4294 			cmdiocb = lpfc_sli_iocbq_lookup(phba, pring,
4295 							saveq);
4296 		if (cmdiocb) {
4297 			/* Call the specified completion routine */
4298 			if (cmdiocb->cmd_cmpl) {
4299 				spin_unlock_irqrestore(&phba->hbalock, iflag);
4300 				cmdiocb->cmd_cmpl(phba, cmdiocb, saveq);
4301 				spin_lock_irqsave(&phba->hbalock, iflag);
4302 			} else {
4303 				__lpfc_sli_release_iocbq(phba, cmdiocb);
4304 			}
4305 		}
4306 		break;
4307 	case LPFC_UNKNOWN_IOCB:
4308 		if (ulp_command == CMD_ADAPTER_MSG) {
4309 			char adaptermsg[LPFC_MAX_ADPTMSG];
4310 
4311 			memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
4312 			memcpy(&adaptermsg[0], (uint8_t *)&rspiocbp->wqe,
4313 			       MAX_MSG_DATA);
4314 			dev_warn(&((phba->pcidev)->dev),
4315 				 "lpfc%d: %s\n",
4316 				 phba->brd_no, adaptermsg);
4317 		} else {
4318 			/* Unknown command */
4319 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4320 					"0335 Unknown IOCB "
4321 					"command Data: x%x "
4322 					"x%x x%x x%x\n",
4323 					ulp_command,
4324 					ulp_status,
4325 					get_wqe_reqtag(rspiocbp),
4326 					get_job_ulpcontext(phba, rspiocbp));
4327 		}
4328 		break;
4329 	}
4330 
4331 	if (free_saveq) {
4332 		list_for_each_entry_safe(rspiocbp, next_iocb,
4333 					 &saveq->list, list) {
4334 			list_del_init(&rspiocbp->list);
4335 			__lpfc_sli_release_iocbq(phba, rspiocbp);
4336 		}
4337 		__lpfc_sli_release_iocbq(phba, saveq);
4338 	}
4339 	rspiocbp = NULL;
4340 	spin_unlock_irqrestore(&phba->hbalock, iflag);
4341 	return rspiocbp;
4342 }
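
/*
 * Editor's illustration, not part of the driver: the routine above
 * accumulates response fragments on a continuation list and only
 * dispatches once the final fragment arrives, then frees the whole
 * chain.  A reduced sketch of accumulate-then-dispatch; the my_*
 * names are hypothetical and "last" stands in for the LE-bit test.
 */
struct my_frag {
	struct list_head list;
};

struct my_chain {
	struct list_head frags;
};

static void my_handle_chain(struct my_chain *chain)
{
	/* a real handler would dispatch on the first fragment's type */
}

static struct my_frag *my_accumulate(struct my_chain *chain,
				     struct my_frag *frag, bool last)
{
	struct my_frag *f, *next;

	list_add_tail(&frag->list, &chain->frags);

	if (!last)
		return frag;	/* keep collecting; more fragments follow */

	/* Final fragment: hand the whole chain to the handler... */
	my_handle_chain(chain);

	/* ...then release every accumulated fragment */
	list_for_each_entry_safe(f, next, &chain->frags, list) {
		list_del_init(&f->list);
		/* a real implementation would return f to its free pool */
	}
	return NULL;		/* NULL tells the caller the chain is done */
}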
4343 
4344 /**
4345  * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
4346  * @phba: Pointer to HBA context object.
4347  * @pring: Pointer to driver SLI ring object.
4348  * @mask: Host attention register mask for this ring.
4349  *
4350  * This routine wraps the actual slow_ring event process routine from the
4351  * API jump table function pointer from the lpfc_hba struct.
4352  **/
4353 void
4354 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
4355 				struct lpfc_sli_ring *pring, uint32_t mask)
4356 {
4357 	phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
4358 }
4359 
4360 /**
4361  * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
4362  * @phba: Pointer to HBA context object.
4363  * @pring: Pointer to driver SLI ring object.
4364  * @mask: Host attention register mask for this ring.
4365  *
4366  * This function is called from the worker thread when there is a ring event
4367  * for non-fcp rings. The caller does not hold any lock. The function will
4368  * remove each response iocb in the response ring and calls the handle
4369  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4370  **/
4371 static void
4372 lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
4373 				   struct lpfc_sli_ring *pring, uint32_t mask)
4374 {
4375 	struct lpfc_pgp *pgp;
4376 	IOCB_t *entry;
4377 	IOCB_t *irsp = NULL;
4378 	struct lpfc_iocbq *rspiocbp = NULL;
4379 	uint32_t portRspPut, portRspMax;
4380 	unsigned long iflag;
4381 	uint32_t status;
4382 
4383 	pgp = &phba->port_gp[pring->ringno];
4384 	spin_lock_irqsave(&phba->hbalock, iflag);
4385 	pring->stats.iocb_event++;
4386 
4387 	/*
4388 	 * The next available response entry should never exceed the maximum
4389 	 * entries.  If it does, treat it as an adapter hardware error.
4390 	 */
4391 	portRspMax = pring->sli.sli3.numRiocb;
4392 	portRspPut = le32_to_cpu(pgp->rspPutInx);
4393 	if (portRspPut >= portRspMax) {
4394 		/*
4395 		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
4396 		 * rsp ring <portRspMax>
4397 		 */
4398 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4399 				"0303 Ring %d handler: portRspPut %d "
4400 				"is bigger than rsp ring %d\n",
4401 				pring->ringno, portRspPut, portRspMax);
4402 
4403 		phba->link_state = LPFC_HBA_ERROR;
4404 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4405 
4406 		phba->work_hs = HS_FFER3;
4407 		lpfc_handle_eratt(phba);
4408 
4409 		return;
4410 	}
4411 
4412 	rmb();
4413 	while (pring->sli.sli3.rspidx != portRspPut) {
4414 		/*
4415 		 * Build a completion list and call the appropriate handler.
4416 		 * The process is to get the next available response iocb, get
4417 		 * a free iocb from the list, copy the response data into the
4418 		 * free iocb, insert it into the continuation list, and update
4419 		 * the next response index in SLIM.  This process makes response
4420 		 * iocb's in the ring available to DMA as fast as possible but
4421 		 * pays a penalty for a copy operation.  Since the iocb is
4422 		 * only 32 bytes, this penalty is considered small relative to
4423 		 * the PCI reads for register values and a slim write.  When
4424 		 * the ulpLe field is set, the entire Command has been
4425 		 * received.
4426 		 */
4427 		entry = lpfc_resp_iocb(phba, pring);
4428 
4429 		phba->last_completion_time = jiffies;
4430 		rspiocbp = __lpfc_sli_get_iocbq(phba);
4431 		if (rspiocbp == NULL) {
4432 			printk(KERN_ERR "%s: out of buffers! Failing "
4433 			       "completion.\n", __func__);
4434 			break;
4435 		}
4436 
4437 		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb,
4438 				      phba->iocb_rsp_size);
4439 		irsp = &rspiocbp->iocb;
4440 
4441 		if (++pring->sli.sli3.rspidx >= portRspMax)
4442 			pring->sli.sli3.rspidx = 0;
4443 
4444 		if (pring->ringno == LPFC_ELS_RING) {
4445 			lpfc_debugfs_slow_ring_trc(phba,
4446 			"IOCB rsp ring:   wd4:x%08x wd6:x%08x wd7:x%08x",
4447 				*(((uint32_t *) irsp) + 4),
4448 				*(((uint32_t *) irsp) + 6),
4449 				*(((uint32_t *) irsp) + 7));
4450 		}
4451 
4452 		writel(pring->sli.sli3.rspidx,
4453 			&phba->host_gp[pring->ringno].rspGetInx);
4454 
4455 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4456 		/* Handle the response IOCB */
4457 		rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
4458 		spin_lock_irqsave(&phba->hbalock, iflag);
4459 
4460 		/*
4461 		 * If the port response put pointer has not been updated, sync
4462 		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
4463 		 * response put pointer.
4464 		 */
4465 		if (pring->sli.sli3.rspidx == portRspPut) {
4466 			portRspPut = le32_to_cpu(pgp->rspPutInx);
4467 		}
4468 	} /* while (pring->sli.sli3.rspidx != portRspPut) */
4469 
4470 	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
4471 		/* At least one response entry has been freed */
4472 		pring->stats.iocb_rsp_full++;
4473 		/* SET RxRE_RSP in Chip Att register */
4474 		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
4475 		writel(status, phba->CAregaddr);
4476 		readl(phba->CAregaddr); /* flush */
4477 	}
4478 	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
4479 		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
4480 		pring->stats.iocb_cmd_empty++;
4481 
4482 		/* Force update of the local copy of cmdGetInx */
4483 		pring->sli.sli3.local_getidx = le32_to_cpu(pgp->cmdGetInx);
4484 		lpfc_sli_resume_iocb(phba, pring);
4485 
4486 		if ((pring->lpfc_sli_cmd_available))
4487 			(pring->lpfc_sli_cmd_available) (phba, pring);
4488 
4489 	}
4490 
4491 	spin_unlock_irqrestore(&phba->hbalock, iflag);
4492 	return;
4493 }
4494 
4495 /**
4496  * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
4497  * @phba: Pointer to HBA context object.
4498  * @pring: Pointer to driver SLI ring object.
4499  * @mask: Host attention register mask for this ring.
4500  *
4501  * This function is called from the worker thread when there is a pending
4502  * ELS response iocb on the driver internal slow-path response iocb worker
4503  * queue. The caller does not hold any lock. The function removes each
4504  * response iocb from the response worker queue and calls the handle
4505  * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
4506  **/
4507 static void
4508 lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
4509 				   struct lpfc_sli_ring *pring, uint32_t mask)
4510 {
4511 	struct lpfc_iocbq *irspiocbq;
4512 	struct hbq_dmabuf *dmabuf;
4513 	struct lpfc_cq_event *cq_event;
4514 	unsigned long iflag;
4515 	int count = 0;
4516 
4517 	clear_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
4518 	while (!list_empty(&phba->sli4_hba.sp_queue_event)) {
4519 		/* Get the response iocb from the head of work queue */
4520 		spin_lock_irqsave(&phba->hbalock, iflag);
4521 		list_remove_head(&phba->sli4_hba.sp_queue_event,
4522 				 cq_event, struct lpfc_cq_event, list);
4523 		spin_unlock_irqrestore(&phba->hbalock, iflag);
4524 
4525 		switch (bf_get(lpfc_wcqe_c_code, &cq_event->cqe.wcqe_cmpl)) {
4526 		case CQE_CODE_COMPL_WQE:
4527 			irspiocbq = container_of(cq_event, struct lpfc_iocbq,
4528 						 cq_event);
4529 			/* Translate ELS WCQE to response IOCBQ */
4530 			irspiocbq = lpfc_sli4_els_preprocess_rspiocbq(phba,
4531 								      irspiocbq);
4532 			if (irspiocbq)
4533 				lpfc_sli_sp_handle_rspiocb(phba, pring,
4534 							   irspiocbq);
4535 			count++;
4536 			break;
4537 		case CQE_CODE_RECEIVE:
4538 		case CQE_CODE_RECEIVE_V1:
4539 			dmabuf = container_of(cq_event, struct hbq_dmabuf,
4540 					      cq_event);
4541 			lpfc_sli4_handle_received_buffer(phba, dmabuf);
4542 			count++;
4543 			break;
4544 		default:
4545 			break;
4546 		}
4547 
4548 		/* Limit the number of events to 64 to avoid soft lockups */
4549 		if (count == 64)
4550 			break;
4551 	}
4552 }
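
/*
 * Editor's illustration, not part of the driver: the "count == 64" cap
 * above bounds how much work one invocation can do on a busy event
 * queue, avoiding soft lockups; leftover events are handled on the next
 * worker wakeup.  A minimal sketch of the budgeted-drain idiom with
 * hypothetical my_* names.
 */
#define MY_EVENT_BUDGET	64

struct my_event {
	struct list_head list;
};

struct my_evq {
	spinlock_t lock;
	struct list_head events;
};

static void my_handle_event(struct my_event *ev)
{
	/* a real handler would dispatch on the event's type code */
}

static void my_bounded_drain(struct my_evq *q)
{
	struct my_event *ev;
	int count = 0;

	while (count < MY_EVENT_BUDGET) {
		spin_lock_irq(&q->lock);
		ev = list_first_entry_or_null(&q->events,
					      struct my_event, list);
		if (ev)
			list_del_init(&ev->list);
		spin_unlock_irq(&q->lock);

		if (!ev)
			break;		/* queue fully drained */

		my_handle_event(ev);	/* process outside the lock */
		count++;
	}
}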
4553 
4554 /**
4555  * lpfc_sli_abort_iocb_ring - Abort all iocbs in the ring
4556  * @phba: Pointer to HBA context object.
4557  * @pring: Pointer to driver SLI ring object.
4558  *
4559  * This function aborts all iocbs in the given ring and frees all the iocb
4560  * objects in txq. This function issues an abort iocb for all the iocb commands
4561  * in txcmplq. The iocbs in the txcmplq is not guaranteed to complete before
4562  * the return of this function. The caller is not required to hold any locks.
4563  **/
4564 void
4565 lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
4566 {
4567 	LIST_HEAD(tx_completions);
4568 	spinlock_t *plock;		/* for transmit queue access */
4569 	struct lpfc_iocbq *iocb, *next_iocb;
4570 	int offline;
4571 
4572 	if (phba->sli_rev >= LPFC_SLI_REV4)
4573 		plock = &pring->ring_lock;
4574 	else
4575 		plock = &phba->hbalock;
4576 
4577 	if (pring->ringno == LPFC_ELS_RING)
4578 		lpfc_fabric_abort_hba(phba);
4579 
4580 	offline = pci_channel_offline(phba->pcidev);
4581 
4582 	/* Cancel everything on txq */
4583 	spin_lock_irq(plock);
4584 	list_splice_init(&pring->txq, &tx_completions);
4585 	pring->txq_cnt = 0;
4586 
4587 	if (offline) {
4588 		/* Cancel everything on txcmplq */
4589 		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4590 			iocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4591 		list_splice_init(&pring->txcmplq, &tx_completions);
4592 		pring->txcmplq_cnt = 0;
4593 	} else {
4594 		/* Issue ABTS for everything on the txcmplq */
4595 		list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
4596 			lpfc_sli_issue_abort_iotag(phba, pring, iocb, NULL);
4597 	}
4598 	spin_unlock_irq(plock);
4599 
4600 	if (!offline)
4601 		lpfc_issue_hb_tmo(phba);
4602 
4603 	/* Cancel all the IOCBs from the completions list */
4604 	lpfc_sli_cancel_iocbs(phba, &tx_completions, IOSTAT_LOCAL_REJECT,
4605 			      IOERR_SLI_ABORTED);
4606 }
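
/*
 * Editor's illustration, not part of the driver: the routine above makes
 * one decision per list -- commands never issued (txq) are always failed
 * back locally, while in-flight commands (txcmplq) get an abort request
 * only when the PCI channel is still alive; when it is offline they are
 * failed back too, since no completion can ever arrive.  A condensed
 * sketch with hypothetical my_* names.
 */
struct my_io {
	struct list_head list;
};

struct my_swring {
	spinlock_t lock;
	struct list_head txq;		/* queued, not yet issued */
	struct list_head txcmplq;	/* issued, awaiting completion */
};

static void my_issue_abort(struct my_swring *ring, struct my_io *io)
{
	/* a real implementation would post an abort request for @io */
}

static void my_cancel_ios(struct list_head *done)
{
	struct my_io *io, *next;

	list_for_each_entry_safe(io, next, done, list) {
		list_del_init(&io->list);
		/* a real implementation completes @io with aborted status */
	}
}

static void my_abort_ring(struct my_swring *ring, bool offline)
{
	LIST_HEAD(done);
	struct my_io *io, *next;

	spin_lock_irq(&ring->lock);
	list_splice_init(&ring->txq, &done);

	if (offline)
		list_splice_init(&ring->txcmplq, &done);
	else
		list_for_each_entry_safe(io, next, &ring->txcmplq, list)
			my_issue_abort(ring, io);
	spin_unlock_irq(&ring->lock);

	my_cancel_ios(&done);		/* fail everything gathered above */
}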
4607 
4608 /**
4609  * lpfc_sli_abort_fcp_rings - Abort all iocbs in all FCP rings
4610  * @phba: Pointer to HBA context object.
4611  *
4612  * This function aborts all iocbs in FCP rings and frees all the iocb
4613  * objects in txq. This function issues an abort iocb for all the iocb commands
4614  * in txcmplq. The iocbs in the txcmplq are not guaranteed to complete before
4615  * the return of this function. The caller is not required to hold any locks.
4616  **/
4617 void
4618 lpfc_sli_abort_fcp_rings(struct lpfc_hba *phba)
4619 {
4620 	struct lpfc_sli *psli = &phba->sli;
4621 	struct lpfc_sli_ring  *pring;
4622 	uint32_t i;
4623 
4624 	/* Look on all the FCP Rings for the iotag */
4625 	if (phba->sli_rev >= LPFC_SLI_REV4) {
4626 		for (i = 0; i < phba->cfg_hdw_queue; i++) {
4627 			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4628 			lpfc_sli_abort_iocb_ring(phba, pring);
4629 		}
4630 	} else {
4631 		pring = &psli->sli3_ring[LPFC_FCP_RING];
4632 		lpfc_sli_abort_iocb_ring(phba, pring);
4633 	}
4634 }
4635 
4636 /**
4637  * lpfc_sli_flush_io_rings - flush all iocbs in the IO ring
4638  * @phba: Pointer to HBA context object.
4639  *
4640  * This function flushes all iocbs in the IO ring and frees all the iocb
4641  * objects in txq and txcmplq. This function will not issue abort iocbs
4642  * for all the iocb commands in txcmplq, they will just be returned with
4643  * IOERR_SLI_DOWN. This function is invoked with EEH when device's PCI
4644  * slot has been permanently disabled.
4645  **/
4646 void
4647 lpfc_sli_flush_io_rings(struct lpfc_hba *phba)
4648 {
4649 	LIST_HEAD(txq);
4650 	LIST_HEAD(txcmplq);
4651 	struct lpfc_sli *psli = &phba->sli;
4652 	struct lpfc_sli_ring  *pring;
4653 	uint32_t i;
4654 	struct lpfc_iocbq *piocb, *next_iocb;
4655 
4656 	/* Indicate the I/O queues are flushed */
4657 	set_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
4658 
4659 	/* Look on all the FCP Rings for the iotag */
4660 	if (phba->sli_rev >= LPFC_SLI_REV4) {
4661 		for (i = 0; i < phba->cfg_hdw_queue; i++) {
4662 			if (!phba->sli4_hba.hdwq ||
4663 			    !phba->sli4_hba.hdwq[i].io_wq) {
4664 				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
4665 						"7777 hdwq's deleted %lx "
4666 						"%lx %x %x\n",
4667 						phba->pport->load_flag,
4668 						phba->hba_flag,
4669 						phba->link_state,
4670 						phba->sli.sli_flag);
4671 				return;
4672 			}
4673 			pring = phba->sli4_hba.hdwq[i].io_wq->pring;
4674 
4675 			spin_lock_irq(&pring->ring_lock);
4676 			/* Retrieve everything on txq */
4677 			list_splice_init(&pring->txq, &txq);
4678 			list_for_each_entry_safe(piocb, next_iocb,
4679 						 &pring->txcmplq, list)
4680 				piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4681 			/* Retrieve everything on the txcmplq */
4682 			list_splice_init(&pring->txcmplq, &txcmplq);
4683 			pring->txq_cnt = 0;
4684 			pring->txcmplq_cnt = 0;
4685 			spin_unlock_irq(&pring->ring_lock);
4686 
4687 			/* Flush the txq */
4688 			lpfc_sli_cancel_iocbs(phba, &txq,
4689 					      IOSTAT_LOCAL_REJECT,
4690 					      IOERR_SLI_DOWN);
4691 			/* Flush the txcmplq */
4692 			lpfc_sli_cancel_iocbs(phba, &txcmplq,
4693 					      IOSTAT_LOCAL_REJECT,
4694 					      IOERR_SLI_DOWN);
4695 			if (unlikely(pci_channel_offline(phba->pcidev)))
4696 				lpfc_sli4_io_xri_aborted(phba, NULL, 0);
4697 		}
4698 	} else {
4699 		pring = &psli->sli3_ring[LPFC_FCP_RING];
4700 
4701 		spin_lock_irq(&phba->hbalock);
4702 		/* Retrieve everything on txq */
4703 		list_splice_init(&pring->txq, &txq);
4704 		list_for_each_entry_safe(piocb, next_iocb,
4705 					 &pring->txcmplq, list)
4706 			piocb->cmd_flag &= ~LPFC_IO_ON_TXCMPLQ;
4707 		/* Retrieve everything on the txcmplq */
4708 		list_splice_init(&pring->txcmplq, &txcmplq);
4709 		pring->txq_cnt = 0;
4710 		pring->txcmplq_cnt = 0;
4711 		spin_unlock_irq(&phba->hbalock);
4712 
4713 		/* Flush the txq */
4714 		lpfc_sli_cancel_iocbs(phba, &txq, IOSTAT_LOCAL_REJECT,
4715 				      IOERR_SLI_DOWN);
4716 		/* Flush the txcmplq */
4717 		lpfc_sli_cancel_iocbs(phba, &txcmplq, IOSTAT_LOCAL_REJECT,
4718 				      IOERR_SLI_DOWN);
4719 	}
4720 }
4721 
4722 /**
4723  * lpfc_sli_brdready_s3 - Check for sli3 host ready status
4724  * @phba: Pointer to HBA context object.
4725  * @mask: Bit mask to be checked.
4726  *
4727  * This function reads the host status register and compares
4728  * with the provided bit mask to check if HBA completed
4729  * the restart. This function will wait in a loop for the
4730  * HBA to complete restart. If the HBA does not restart within
4731  * 15 iterations, the function will reset the HBA again. The
4732  * function returns 1 when the HBA fails to restart; otherwise it
4733  * returns zero.
4734  **/
4735 static int
4736 lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
4737 {
4738 	uint32_t status;
4739 	int i = 0;
4740 	int retval = 0;
4741 
4742 	/* Read the HBA Host Status Register */
4743 	if (lpfc_readl(phba->HSregaddr, &status))
4744 		return 1;
4745 
4746 	set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
4747 
4748 	/*
4749 	 * Check the status register every 10ms for 5 retries, then every
4750 	 * 500ms for 5, then every 2.5 sec for 5, then reset the board and
4751 	 * check every 2.5 sec for the remaining retries.
4752 	 * Break out of the loop if errors occurred during init.
4753 	 */
4754 	while (((status & mask) != mask) &&
4755 	       !(status & HS_FFERM) &&
4756 	       i++ < 20) {
4757 
4758 		if (i <= 5)
4759 			msleep(10);
4760 		else if (i <= 10)
4761 			msleep(500);
4762 		else
4763 			msleep(2500);
4764 
4765 		if (i == 15) {
4766 			/* Do post */
4767 			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4768 			lpfc_sli_brdrestart(phba);
4769 		}
4770 		/* Read the HBA Host Status Register */
4771 		if (lpfc_readl(phba->HSregaddr, &status)) {
4772 			retval = 1;
4773 			break;
4774 		}
4775 	}
4776 
4777 	/* Check to see if any errors occurred during init */
4778 	if ((status & HS_FFERM) || (i >= 20)) {
4779 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
4780 				"2751 Adapter failed to restart, "
4781 				"status reg x%x, FW Data: A8 x%x AC x%x\n",
4782 				status,
4783 				readl(phba->MBslimaddr + 0xa8),
4784 				readl(phba->MBslimaddr + 0xac));
4785 		phba->link_state = LPFC_HBA_ERROR;
4786 		retval = 1;
4787 	}
4788 
4789 	return retval;
4790 }
4791 
4792 /**
4793  * lpfc_sli_brdready_s4 - Check for sli4 host ready status
4794  * @phba: Pointer to HBA context object.
4795  * @mask: Bit mask to be checked.
4796  *
4797  * This function checks the host status register to check if HBA is
4798  * ready. This function will wait in a loop for the HBA to be ready
4799  * If the HBA is not ready , the function will will reset the HBA PCI
4800  * function again. The function returns 1 when HBA fail to be ready
4801  * otherwise returns zero.
4802  **/
4803 static int
4804 lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
4805 {
4806 	uint32_t status;
4807 	int retval = 0;
4808 
4809 	/* Read the HBA Host Status Register */
4810 	status = lpfc_sli4_post_status_check(phba);
4811 
4812 	if (status) {
4813 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
4814 		lpfc_sli_brdrestart(phba);
4815 		status = lpfc_sli4_post_status_check(phba);
4816 	}
4817 
4818 	/* Check to see if any errors occurred during init */
4819 	if (status) {
4820 		phba->link_state = LPFC_HBA_ERROR;
4821 		retval = 1;
4822 	} else
4823 		phba->sli4_hba.intr_enable = 0;
4824 
4825 	clear_bit(HBA_SETUP, &phba->hba_flag);
4826 	return retval;
4827 }
4828 
4829 /**
4830  * lpfc_sli_brdready - Wrapper func for checking the hba readiness
4831  * @phba: Pointer to HBA context object.
4832  * @mask: Bit mask to be checked.
4833  *
4834  * This routine wraps the actual SLI3 or SLI4 hba readiness check routine
4835  * via the API jump table function pointer in the lpfc_hba struct.
4836  **/
4837 int
4838 lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
4839 {
4840 	return phba->lpfc_sli_brdready(phba, mask);
4841 }
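
/*
 * Illustrative sketch (hypothetical, compiled out): wrappers such as
 * lpfc_sli_brdready() dispatch through per-SLI-revision function pointers
 * installed at attach time, so callers never branch on sli_rev themselves.
 */
#if 0
static void example_init_api_table(struct lpfc_hba *phba, int sli_rev)
{
	if (sli_rev >= LPFC_SLI_REV4)
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
	else
		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
}
#endif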
4842 
4843 #define BARRIER_TEST_PATTERN (0xdeadbeef)
4844 
4845 /**
4846  * lpfc_reset_barrier - Make HBA ready for HBA reset
4847  * @phba: Pointer to HBA context object.
4848  *
4849  * This function is called before resetting an HBA. This function is called
4850  * with hbalock held and requests HBA to quiesce DMAs before a reset.
4851  **/
4852 void lpfc_reset_barrier(struct lpfc_hba *phba)
4853 {
4854 	uint32_t __iomem *resp_buf;
4855 	uint32_t __iomem *mbox_buf;
4856 	volatile struct MAILBOX_word0 mbox;
4857 	uint32_t hc_copy, ha_copy, resp_data;
4858 	int  i;
4859 	uint8_t hdrtype;
4860 
4861 	lockdep_assert_held(&phba->hbalock);
4862 
4863 	pci_read_config_byte(phba->pcidev, PCI_HEADER_TYPE, &hdrtype);
4864 	if (hdrtype != PCI_HEADER_TYPE_MFD ||
4865 	    (FC_JEDEC_ID(phba->vpd.rev.biuRev) != HELIOS_JEDEC_ID &&
4866 	     FC_JEDEC_ID(phba->vpd.rev.biuRev) != THOR_JEDEC_ID))
4867 		return;
4868 
4869 	/*
4870 	 * Tell the other part of the chip to suspend temporarily all
4871 	 * its DMA activity.
4872 	 */
4873 	resp_buf = phba->MBslimaddr;
4874 
4875 	/* Disable the error attention */
4876 	if (lpfc_readl(phba->HCregaddr, &hc_copy))
4877 		return;
4878 	writel((hc_copy & ~HC_ERINT_ENA), phba->HCregaddr);
4879 	readl(phba->HCregaddr); /* flush */
4880 	phba->link_flag |= LS_IGNORE_ERATT;
4881 
4882 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
4883 		return;
4884 	if (ha_copy & HA_ERATT) {
4885 		/* Clear Chip error bit */
4886 		writel(HA_ERATT, phba->HAregaddr);
4887 		phba->pport->stopped = 1;
4888 	}
4889 
4890 	mbox.word0 = 0;
4891 	mbox.mbxCommand = MBX_KILL_BOARD;
4892 	mbox.mbxOwner = OWN_CHIP;
4893 
4894 	writel(BARRIER_TEST_PATTERN, (resp_buf + 1));
4895 	mbox_buf = phba->MBslimaddr;
4896 	writel(mbox.word0, mbox_buf);
4897 
4898 	for (i = 0; i < 50; i++) {
4899 		if (lpfc_readl((resp_buf + 1), &resp_data))
4900 			return;
4901 		if (resp_data != ~(BARRIER_TEST_PATTERN))
4902 			mdelay(1);
4903 		else
4904 			break;
4905 	}
4906 	resp_data = 0;
4907 	if (lpfc_readl((resp_buf + 1), &resp_data))
4908 		return;
4909 	if (resp_data  != ~(BARRIER_TEST_PATTERN)) {
4910 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
4911 		    phba->pport->stopped)
4912 			goto restore_hc;
4913 		else
4914 			goto clear_errat;
4915 	}
4916 
4917 	mbox.mbxOwner = OWN_HOST;
4918 	resp_data = 0;
4919 	for (i = 0; i < 500; i++) {
4920 		if (lpfc_readl(resp_buf, &resp_data))
4921 			return;
4922 		if (resp_data != mbox.word0)
4923 			mdelay(1);
4924 		else
4925 			break;
4926 	}
4927 
4928 clear_errat:
4929 
4930 	while (++i < 500) {
4931 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
4932 			return;
4933 		if (!(ha_copy & HA_ERATT))
4934 			mdelay(1);
4935 		else
4936 			break;
4937 	}
4938 
4939 	if (readl(phba->HAregaddr) & HA_ERATT) {
4940 		writel(HA_ERATT, phba->HAregaddr);
4941 		phba->pport->stopped = 1;
4942 	}
4943 
4944 restore_hc:
4945 	phba->link_flag &= ~LS_IGNORE_ERATT;
4946 	writel(hc_copy, phba->HCregaddr);
4947 	readl(phba->HCregaddr); /* flush */
4948 }
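
/*
 * Illustrative sketch (hypothetical, compiled out): lpfc_reset_barrier()
 * is built on a write-then-poll handshake -- write a known pattern, then
 * poll until the far side acknowledges by changing it. In generic form:
 */
#if 0
static int example_poll_for_ack(void __iomem *reg, uint32_t expected,
				int max_ms)
{
	uint32_t val;
	int i;

	for (i = 0; i < max_ms; i++) {
		if (lpfc_readl(reg, &val))
			return -EIO;		/* register read failed */
		if (val == expected)
			return 0;		/* handshake complete */
		mdelay(1);
	}
	return -ETIMEDOUT;
}
#endif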
4949 
4950 /**
4951  * lpfc_sli_brdkill - Issue a kill_board mailbox command
4952  * @phba: Pointer to HBA context object.
4953  *
4954  * This function issues a kill_board mailbox command and waits for
4955  * the error attention interrupt. This function is called for stopping
4956  * the firmware processing. The caller is not required to hold any
4957  * locks. This function calls lpfc_hba_down_post function to free
4958  * any pending commands after the kill. The function will return 1 when it
4959  * fails to kill the board else will return 0.
4960  **/
4961 int
4962 lpfc_sli_brdkill(struct lpfc_hba *phba)
4963 {
4964 	struct lpfc_sli *psli;
4965 	LPFC_MBOXQ_t *pmb;
4966 	uint32_t status;
4967 	uint32_t ha_copy;
4968 	int retval;
4969 	int i = 0;
4970 
4971 	psli = &phba->sli;
4972 
4973 	/* Kill HBA */
4974 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
4975 			"0329 Kill HBA Data: x%x x%x\n",
4976 			phba->pport->port_state, psli->sli_flag);
4977 
4978 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4979 	if (!pmb)
4980 		return 1;
4981 
4982 	/* Disable the error attention */
4983 	spin_lock_irq(&phba->hbalock);
4984 	if (lpfc_readl(phba->HCregaddr, &status)) {
4985 		spin_unlock_irq(&phba->hbalock);
4986 		mempool_free(pmb, phba->mbox_mem_pool);
4987 		return 1;
4988 	}
4989 	status &= ~HC_ERINT_ENA;
4990 	writel(status, phba->HCregaddr);
4991 	readl(phba->HCregaddr); /* flush */
4992 	phba->link_flag |= LS_IGNORE_ERATT;
4993 	spin_unlock_irq(&phba->hbalock);
4994 
4995 	lpfc_kill_board(phba, pmb);
4996 	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
4997 	retval = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
4998 
4999 	if (retval != MBX_SUCCESS) {
5000 		if (retval != MBX_BUSY)
5001 			mempool_free(pmb, phba->mbox_mem_pool);
5002 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5003 				"2752 KILL_BOARD command failed retval %d\n",
5004 				retval);
5005 		spin_lock_irq(&phba->hbalock);
5006 		phba->link_flag &= ~LS_IGNORE_ERATT;
5007 		spin_unlock_irq(&phba->hbalock);
5008 		return 1;
5009 	}
5010 
5011 	spin_lock_irq(&phba->hbalock);
5012 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
5013 	spin_unlock_irq(&phba->hbalock);
5014 
5015 	mempool_free(pmb, phba->mbox_mem_pool);
5016 
5017 	/* There is no completion for a KILL_BOARD mbox cmd. Check for an error
5018 	 * attention every 100ms for 3 seconds. If we don't get ERATT after
5019 	 * 3 seconds we still set HBA_ERROR state because the status of the
5020 	 * board is now undefined.
5021 	 */
5022 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
5023 		return 1;
5024 	while ((i++ < 30) && !(ha_copy & HA_ERATT)) {
5025 		mdelay(100);
5026 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
5027 			return 1;
5028 	}
5029 
5030 	timer_delete_sync(&psli->mbox_tmo);
5031 	if (ha_copy & HA_ERATT) {
5032 		writel(HA_ERATT, phba->HAregaddr);
5033 		phba->pport->stopped = 1;
5034 	}
5035 	spin_lock_irq(&phba->hbalock);
5036 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5037 	psli->mbox_active = NULL;
5038 	phba->link_flag &= ~LS_IGNORE_ERATT;
5039 	spin_unlock_irq(&phba->hbalock);
5040 
5041 	lpfc_hba_down_post(phba);
5042 	phba->link_state = LPFC_HBA_ERROR;
5043 
5044 	return ha_copy & HA_ERATT ? 0 : 1;
5045 }
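
/*
 * Timing note on the ERATT poll above: at most 30 iterations of
 * mdelay(100), i.e. ~3 seconds of busy-waiting, before the board state
 * is treated as undefined and LPFC_HBA_ERROR is set unconditionally.
 */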
5046 
5047 /**
5048  * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
5049  * @phba: Pointer to HBA context object.
5050  *
5051  * This function resets the HBA by writing HC_INITFF to the control
5052  * register. After the HBA resets, this function resets all the iocb ring
5053  * indices. This function disables PCI layer parity checking during
5054  * the reset.
5055  * This function returns 0 always.
5056  * The caller is not required to hold any locks.
5057  **/
5058 int
5059 lpfc_sli_brdreset(struct lpfc_hba *phba)
5060 {
5061 	struct lpfc_sli *psli;
5062 	struct lpfc_sli_ring *pring;
5063 	uint16_t cfg_value;
5064 	int i;
5065 
5066 	psli = &phba->sli;
5067 
5068 	/* Reset HBA */
5069 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5070 			"0325 Reset HBA Data: x%x x%x\n",
5071 			(phba->pport) ? phba->pport->port_state : 0,
5072 			psli->sli_flag);
5073 
5074 	/* perform board reset */
5075 	phba->fc_eventTag = 0;
5076 	phba->link_events = 0;
5077 	set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
5078 	if (phba->pport) {
5079 		phba->pport->fc_myDID = 0;
5080 		phba->pport->fc_prevDID = 0;
5081 	}
5082 
5083 	/* Turn off parity checking and serr during the physical reset */
5084 	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value))
5085 		return -EIO;
5086 
5087 	pci_write_config_word(phba->pcidev, PCI_COMMAND,
5088 			      (cfg_value &
5089 			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5090 
5091 	psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
5092 
5093 	/* Now toggle INITFF bit in the Host Control Register */
5094 	writel(HC_INITFF, phba->HCregaddr);
5095 	mdelay(1);
5096 	readl(phba->HCregaddr); /* flush */
5097 	writel(0, phba->HCregaddr);
5098 	readl(phba->HCregaddr); /* flush */
5099 
5100 	/* Restore PCI cmd register */
5101 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5102 
5103 	/* Initialize relevant SLI info */
5104 	for (i = 0; i < psli->num_rings; i++) {
5105 		pring = &psli->sli3_ring[i];
5106 		pring->flag = 0;
5107 		pring->sli.sli3.rspidx = 0;
5108 		pring->sli.sli3.next_cmdidx  = 0;
5109 		pring->sli.sli3.local_getidx = 0;
5110 		pring->sli.sli3.cmdidx = 0;
5111 		pring->missbufcnt = 0;
5112 	}
5113 
5114 	phba->link_state = LPFC_WARM_START;
5115 	return 0;
5116 }
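
/*
 * Illustrative sketch (hypothetical, compiled out): the reset sequence
 * above reduces to "mask PCI error reporting, pulse INITFF, restore".
 * The INITFF pulse by itself looks like:
 */
#if 0
static void example_pulse_initff(struct lpfc_hba *phba)
{
	writel(HC_INITFF, phba->HCregaddr);	/* assert chip reset */
	mdelay(1);				/* let the reset latch */
	readl(phba->HCregaddr);			/* flush posted write */
	writel(0, phba->HCregaddr);		/* deassert */
	readl(phba->HCregaddr);			/* flush */
}
#endif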
5117 
5118 /**
5119  * lpfc_sli4_brdreset - Reset a sli-4 HBA
5120  * @phba: Pointer to HBA context object.
5121  *
5122  * This function resets a SLI4 HBA. This function disables PCI layer parity
5123  * checking while it resets the device. The caller is not required to hold
5124  * any locks.
5125  *
5126  * This function returns 0 on success else returns negative error code.
5127  **/
5128 int
5129 lpfc_sli4_brdreset(struct lpfc_hba *phba)
5130 {
5131 	struct lpfc_sli *psli = &phba->sli;
5132 	uint16_t cfg_value;
5133 	int rc = 0;
5134 
5135 	/* Reset HBA */
5136 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5137 			"0295 Reset HBA Data: x%x x%x x%lx\n",
5138 			phba->pport->port_state, psli->sli_flag,
5139 			phba->hba_flag);
5140 
5141 	/* perform board reset */
5142 	phba->fc_eventTag = 0;
5143 	phba->link_events = 0;
5144 	phba->pport->fc_myDID = 0;
5145 	phba->pport->fc_prevDID = 0;
5146 
5147 	spin_lock_irq(&phba->hbalock);
5148 	psli->sli_flag &= ~(LPFC_PROCESS_LA);
5149 	phba->fcf.fcf_flag = 0;
5150 	spin_unlock_irq(&phba->hbalock);
5151 
5152 	/* Now physically reset the device */
5153 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5154 			"0389 Performing PCI function reset!\n");
5155 
5156 	/* Turn off parity checking and serr during the physical reset */
5157 	if (pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value)) {
5158 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5159 				"3205 PCI read Config failed\n");
5160 		return -EIO;
5161 	}
5162 
5163 	pci_write_config_word(phba->pcidev, PCI_COMMAND, (cfg_value &
5164 			      ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
5165 
5166 	/* Perform FCoE PCI function reset before freeing queue memory */
5167 	rc = lpfc_pci_function_reset(phba);
5168 
5169 	/* Restore PCI cmd register */
5170 	pci_write_config_word(phba->pcidev, PCI_COMMAND, cfg_value);
5171 
5172 	return rc;
5173 }
5174 
5175 /**
5176  * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
5177  * @phba: Pointer to HBA context object.
5178  *
5179  * This function is called in the SLI initialization code path to
5180  * restart the HBA. The caller is not required to hold any lock.
5181  * This function writes MBX_RESTART mailbox command to the SLIM and
5182  * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
5183  * function to free any pending commands. The function enables
5184  * POST only during the first initialization. The function returns zero.
5185  * The function does not guarantee completion of MBX_RESTART mailbox
5186  * command before the return of this function.
5187  **/
5188 static int
5189 lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
5190 {
5191 	volatile struct MAILBOX_word0 mb;
5192 	struct lpfc_sli *psli;
5193 	void __iomem *to_slim;
5194 
5195 	spin_lock_irq(&phba->hbalock);
5196 
5197 	psli = &phba->sli;
5198 
5199 	/* Restart HBA */
5200 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5201 			"0337 Restart HBA Data: x%x x%x\n",
5202 			(phba->pport) ? phba->pport->port_state : 0,
5203 			psli->sli_flag);
5204 
5205 	mb.word0 = 0;
5206 	mb.mbxCommand = MBX_RESTART;
5207 	mb.mbxHc = 1;
5208 
5209 	lpfc_reset_barrier(phba);
5210 
5211 	to_slim = phba->MBslimaddr;
5212 	writel(mb.word0, to_slim);
5213 	readl(to_slim); /* flush */
5214 
5215 	/* Only skip post after fc_ffinit is completed */
5216 	if (phba->pport && phba->pport->port_state)
5217 		mb.word0 = 1;	/* This is really setting up word1 */
5218 	else
5219 		mb.word0 = 0;	/* This is really setting up word1 */
5220 	to_slim = phba->MBslimaddr + sizeof (uint32_t);
5221 	writel(mb.word0, to_slim);
5222 	readl(to_slim); /* flush */
5223 
5224 	lpfc_sli_brdreset(phba);
5225 	if (phba->pport)
5226 		phba->pport->stopped = 0;
5227 	phba->link_state = LPFC_INIT_START;
5228 	phba->hba_flag = 0;
5229 	spin_unlock_irq(&phba->hbalock);
5230 
5231 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5232 	psli->stats_start = ktime_get_seconds();
5233 
5234 	/* Give the INITFF and Post time to settle. */
5235 	mdelay(100);
5236 
5237 	lpfc_hba_down_post(phba);
5238 
5239 	return 0;
5240 }
5241 
5242 /**
5243  * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
5244  * @phba: Pointer to HBA context object.
5245  *
5246  * This function is called in the SLI initialization code path to restart
5247  * a SLI4 HBA. The caller is not required to hold any lock.
5248  * At the end of the function, it calls lpfc_hba_down_post function to
5249  * free any pending commands.
5250  **/
5251 static int
5252 lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
5253 {
5254 	struct lpfc_sli *psli = &phba->sli;
5255 	int rc;
5256 
5257 	/* Restart HBA */
5258 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5259 			"0296 Restart HBA Data: x%x x%x\n",
5260 			phba->pport->port_state, psli->sli_flag);
5261 
5262 	clear_bit(HBA_SETUP, &phba->hba_flag);
5263 	lpfc_sli4_queue_unset(phba);
5264 
5265 	rc = lpfc_sli4_brdreset(phba);
5266 	if (rc) {
5267 		phba->link_state = LPFC_HBA_ERROR;
5268 		goto hba_down_queue;
5269 	}
5270 
5271 	spin_lock_irq(&phba->hbalock);
5272 	phba->pport->stopped = 0;
5273 	phba->link_state = LPFC_INIT_START;
5274 	phba->hba_flag = 0;
5275 	/* Preserve FA-PWWN expectation */
5276 	phba->sli4_hba.fawwpn_flag &= LPFC_FAWWPN_FABRIC;
5277 	spin_unlock_irq(&phba->hbalock);
5278 
5279 	memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
5280 	psli->stats_start = ktime_get_seconds();
5281 
5282 hba_down_queue:
5283 	lpfc_hba_down_post(phba);
5284 	lpfc_sli4_queue_destroy(phba);
5285 
5286 	return rc;
5287 }
5288 
5289 /**
5290  * lpfc_sli_brdrestart - Wrapper func for restarting hba
5291  * @phba: Pointer to HBA context object.
5292  *
5293  * This routine wraps the actual SLI3 or SLI4 hba restart routine from the
5294  * API jump table function pointer from the lpfc_hba struct.
5295 **/
5296 int
5297 lpfc_sli_brdrestart(struct lpfc_hba *phba)
5298 {
5299 	return phba->lpfc_sli_brdrestart(phba);
5300 }
5301 
5302 /**
5303  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
5304  * @phba: Pointer to HBA context object.
5305  *
5306  * This function is called after a HBA restart to wait for successful
5307  * restart of the HBA. Successful restart of the HBA is indicated by
5308  * HS_FFRDY and HS_MBRDY bits. If the HBA fails to restart even after 15
5309  * iterations, the function will restart the HBA again. The function returns
5310  * zero if HBA successfully restarted else returns negative error code.
5311  **/
5312 int
5313 lpfc_sli_chipset_init(struct lpfc_hba *phba)
5314 {
5315 	uint32_t status, i = 0;
5316 
5317 	/* Read the HBA Host Status Register */
5318 	if (lpfc_readl(phba->HSregaddr, &status))
5319 		return -EIO;
5320 
5321 	/* Check status register to see what current state is */
5322 	i = 0;
5323 	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {
5324 
5325 		/* Check every 10ms for 10 retries, then every 100ms for 90
5326 		 * retries, then every 1 sec for 50 retries, for a total of
5327 		 * ~60 seconds before resetting the board again and checking
5328 		 * every 1 sec for 50 more retries. Up to 60 seconds is
5329 		 * required for the Falcon FIPS zeroization to complete; any
5330 		 * board reset in between restarts the zeroization and
5331 		 * further delays board readiness.
5332 		 */
5333 		if (i++ >= 200) {
5334 			/* Adapter failed to init, timeout, status reg
5335 			   <status> */
5336 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5337 					"0436 Adapter failed to init, "
5338 					"timeout, status reg x%x, "
5339 					"FW Data: A8 x%x AC x%x\n", status,
5340 					readl(phba->MBslimaddr + 0xa8),
5341 					readl(phba->MBslimaddr + 0xac));
5342 			phba->link_state = LPFC_HBA_ERROR;
5343 			return -ETIMEDOUT;
5344 		}
5345 
5346 		/* Check to see if any errors occurred during init */
5347 		if (status & HS_FFERM) {
5348 			/* ERROR: During chipset initialization */
5349 			/* Adapter failed to init, chipset, status reg
5350 			   <status> */
5351 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5352 					"0437 Adapter failed to init, "
5353 					"chipset, status reg x%x, "
5354 					"FW Data: A8 x%x AC x%x\n", status,
5355 					readl(phba->MBslimaddr + 0xa8),
5356 					readl(phba->MBslimaddr + 0xac));
5357 			phba->link_state = LPFC_HBA_ERROR;
5358 			return -EIO;
5359 		}
5360 
5361 		if (i <= 10)
5362 			msleep(10);
5363 		else if (i <= 100)
5364 			msleep(100);
5365 		else
5366 			msleep(1000);
5367 
5368 		if (i == 150) {
5369 			/* Do post */
5370 			phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5371 			lpfc_sli_brdrestart(phba);
5372 		}
5373 		/* Read the HBA Host Status Register */
5374 		if (lpfc_readl(phba->HSregaddr, &status))
5375 			return -EIO;
5376 	}
5377 
5378 	/* Check to see if any errors occurred during init */
5379 	if (status & HS_FFERM) {
5380 		/* ERROR: During chipset initialization */
5381 		/* Adapter failed to init, chipset, status reg <status> */
5382 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5383 				"0438 Adapter failed to init, chipset, "
5384 				"status reg x%x, "
5385 				"FW Data: A8 x%x AC x%x\n", status,
5386 				readl(phba->MBslimaddr + 0xa8),
5387 				readl(phba->MBslimaddr + 0xac));
5388 		phba->link_state = LPFC_HBA_ERROR;
5389 		return -EIO;
5390 	}
5391 
5392 	set_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
5393 
5394 	/* Clear all interrupt enable conditions */
5395 	writel(0, phba->HCregaddr);
5396 	readl(phba->HCregaddr); /* flush */
5397 
5398 	/* setup host attn register */
5399 	writel(0xffffffff, phba->HAregaddr);
5400 	readl(phba->HAregaddr); /* flush */
5401 	return 0;
5402 }
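
/*
 * Illustrative sketch (hypothetical, compiled out): the staged backoff
 * used by the init loop above, expressed as a helper returning the sleep
 * interval for a given attempt number.
 */
#if 0
static unsigned int example_backoff_ms(unsigned int attempt)
{
	if (attempt <= 10)
		return 10;	/* first ~100ms: poll quickly */
	if (attempt <= 100)
		return 100;	/* next ~9 seconds */
	return 1000;		/* thereafter, once per second */
}
#endif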
5403 
5404 /**
5405  * lpfc_sli_hbq_count - Get the number of HBQs to be configured
5406  *
5407  * This function calculates and returns the number of HBQs required to be
5408  * configured.
5409  **/
5410 int
5411 lpfc_sli_hbq_count(void)
5412 {
5413 	return ARRAY_SIZE(lpfc_hbq_defs);
5414 }
5415 
5416 /**
5417  * lpfc_sli_hbq_entry_count - Calculate total number of hbq entries
5418  *
5419  * This function adds the number of hbq entries in every HBQ to get
5420  * the total number of hbq entries required for the HBA and returns
5421  * the total count.
5422  **/
5423 static int
5424 lpfc_sli_hbq_entry_count(void)
5425 {
5426 	int  hbq_count = lpfc_sli_hbq_count();
5427 	int  count = 0;
5428 	int  i;
5429 
5430 	for (i = 0; i < hbq_count; ++i)
5431 		count += lpfc_hbq_defs[i]->entry_count;
5432 	return count;
5433 }
5434 
5435 /**
5436  * lpfc_sli_hbq_size - Calculate memory required for all hbq entries
5437  *
5438  * This function calculates amount of memory required for all hbq entries
5439  * to be configured and returns the total memory required.
5440  **/
5441 int
5442 lpfc_sli_hbq_size(void)
5443 {
5444 	return lpfc_sli_hbq_entry_count() * sizeof(struct lpfc_hbq_entry);
5445 }
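
/*
 * Worked example (hypothetical entry counts): with two HBQs of 256 and
 * 128 entries, lpfc_sli_hbq_entry_count() returns 384 and
 * lpfc_sli_hbq_size() returns 384 * sizeof(struct lpfc_hbq_entry) bytes
 * for the contiguous HBQ DMA region.
 */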
5446 
5447 /**
5448  * lpfc_sli_hbq_setup - configure and initialize HBQs
5449  * @phba: Pointer to HBA context object.
5450  *
5451  * This function is called during the SLI initialization to configure
5452  * all the HBQs and post buffers to the HBQ. The caller is not
5453  * required to hold any locks. This function will return zero if successful
5454  * else it will return negative error code.
5455  **/
5456 static int
5457 lpfc_sli_hbq_setup(struct lpfc_hba *phba)
5458 {
5459 	int  hbq_count = lpfc_sli_hbq_count();
5460 	LPFC_MBOXQ_t *pmb;
5461 	MAILBOX_t *pmbox;
5462 	uint32_t hbqno;
5463 	uint32_t hbq_entry_index;
5464 
5465 	/* Get a Mailbox buffer to setup mailbox commands for HBA
5466 	 * initialization.
5467 	 */
5468 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5469 
5470 	if (!pmb)
5471 		return -ENOMEM;
5472 
5473 	pmbox = &pmb->u.mb;
5474 
5475 	/* Initialize the struct lpfc_sli_hbq structure for each hbq */
5476 	phba->link_state = LPFC_INIT_MBX_CMDS;
5477 	phba->hbq_in_use = 1;
5478 
5479 	hbq_entry_index = 0;
5480 	for (hbqno = 0; hbqno < hbq_count; ++hbqno) {
5481 		phba->hbqs[hbqno].next_hbqPutIdx = 0;
5482 		phba->hbqs[hbqno].hbqPutIdx      = 0;
5483 		phba->hbqs[hbqno].local_hbqGetIdx   = 0;
5484 		phba->hbqs[hbqno].entry_count =
5485 			lpfc_hbq_defs[hbqno]->entry_count;
5486 		lpfc_config_hbq(phba, hbqno, lpfc_hbq_defs[hbqno],
5487 			hbq_entry_index, pmb);
5488 		hbq_entry_index += phba->hbqs[hbqno].entry_count;
5489 
5490 		if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
5491 			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
5492 			   mbxStatus <status>, ring <num> */
5493 
5494 			lpfc_printf_log(phba, KERN_ERR,
5495 					LOG_SLI | LOG_VPORT,
5496 					"1805 Adapter failed to init. "
5497 					"Data: x%x x%x x%x\n",
5498 					pmbox->mbxCommand,
5499 					pmbox->mbxStatus, hbqno);
5500 
5501 			phba->link_state = LPFC_HBA_ERROR;
5502 			mempool_free(pmb, phba->mbox_mem_pool);
5503 			return -ENXIO;
5504 		}
5505 	}
5506 	phba->hbq_count = hbq_count;
5507 
5508 	mempool_free(pmb, phba->mbox_mem_pool);
5509 
5510 	/* Initially populate or replenish the HBQs */
5511 	for (hbqno = 0; hbqno < hbq_count; ++hbqno)
5512 		lpfc_sli_hbqbuf_init_hbqs(phba, hbqno);
5513 	return 0;
5514 }
5515 
5516 /**
5517  * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
5518  * @phba: Pointer to HBA context object.
5519  *
5520  * This function is called during SLI4 initialization to configure
5521  * the ELS receive buffer queue and post buffers to it. The caller is not
5522  * required to hold any locks. This function will return zero if successful
5523  * else it will return negative error code.
5524  **/
5525 static int
5526 lpfc_sli4_rb_setup(struct lpfc_hba *phba)
5527 {
5528 	phba->hbq_in_use = 1;
5529 	/*
5530 	 * Specific case when MDS diagnostics are enabled and supported.
5531 	 * The receive buffer count is truncated to manage the incoming
5532 	 * traffic.
5533 	 */
5534 	if (phba->cfg_enable_mds_diags && phba->mds_diags_support)
5535 		phba->hbqs[LPFC_ELS_HBQ].entry_count =
5536 			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count >> 1;
5537 	else
5538 		phba->hbqs[LPFC_ELS_HBQ].entry_count =
5539 			lpfc_hbq_defs[LPFC_ELS_HBQ]->entry_count;
5540 	phba->hbq_count = 1;
5541 	lpfc_sli_hbqbuf_init_hbqs(phba, LPFC_ELS_HBQ);
5542 	/* Initially populate or replenish the HBQs */
5543 	return 0;
5544 }
5545 
5546 /**
5547  * lpfc_sli_config_port - Issue config port mailbox command
5548  * @phba: Pointer to HBA context object.
5549  * @sli_mode: SLI mode (2 or 3)
5550  *
5551  * This function is called by the sli initialization code path
5552  * to issue config_port mailbox command. This function restarts the
5553  * HBA firmware and issues a config_port mailbox command to configure
5554  * the SLI interface in the sli mode specified by sli_mode
5555  * variable. The caller is not required to hold any locks.
5556  * The function returns 0 if successful, else returns negative error
5557  * code.
5558  **/
5559 int
5560 lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
5561 {
5562 	LPFC_MBOXQ_t *pmb;
5563 	int resetcount = 0, rc = 0, done = 0;
5564 
5565 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5566 	if (!pmb) {
5567 		phba->link_state = LPFC_HBA_ERROR;
5568 		return -ENOMEM;
5569 	}
5570 
5571 	phba->sli_rev = sli_mode;
5572 	while (resetcount < 2 && !done) {
5573 		spin_lock_irq(&phba->hbalock);
5574 		phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
5575 		spin_unlock_irq(&phba->hbalock);
5576 		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
5577 		lpfc_sli_brdrestart(phba);
5578 		rc = lpfc_sli_chipset_init(phba);
5579 		if (rc)
5580 			break;
5581 
5582 		spin_lock_irq(&phba->hbalock);
5583 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
5584 		spin_unlock_irq(&phba->hbalock);
5585 		resetcount++;
5586 
5587 		/* Call pre CONFIG_PORT mailbox command initialization.  A
5588 		 * value of 0 means the call was successful.  Any other
5589 		 * nonzero value is a failure, but if ERESTART is returned,
5590 		 * the driver may reset the HBA and try again.
5591 		 */
5592 		rc = lpfc_config_port_prep(phba);
5593 		if (rc == -ERESTART) {
5594 			phba->link_state = LPFC_LINK_UNKNOWN;
5595 			continue;
5596 		} else if (rc)
5597 			break;
5598 
5599 		phba->link_state = LPFC_INIT_MBX_CMDS;
5600 		lpfc_config_port(phba, pmb);
5601 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
5602 		phba->sli3_options &= ~(LPFC_SLI3_NPIV_ENABLED |
5603 					LPFC_SLI3_HBQ_ENABLED |
5604 					LPFC_SLI3_CRP_ENABLED |
5605 					LPFC_SLI3_DSS_ENABLED);
5606 		if (rc != MBX_SUCCESS) {
5607 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5608 				"0442 Adapter failed to init, mbxCmd x%x "
5609 				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
5610 				pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
5611 			spin_lock_irq(&phba->hbalock);
5612 			phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
5613 			spin_unlock_irq(&phba->hbalock);
5614 			rc = -ENXIO;
5615 		} else {
5616 			/* Allow asynchronous mailbox command to go through */
5617 			spin_lock_irq(&phba->hbalock);
5618 			phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
5619 			spin_unlock_irq(&phba->hbalock);
5620 			done = 1;
5621 
5622 			if ((pmb->u.mb.un.varCfgPort.casabt == 1) &&
5623 			    (pmb->u.mb.un.varCfgPort.gasabt == 0))
5624 				lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
5625 					"3110 Port did not grant ASABT\n");
5626 		}
5627 	}
5628 	if (!done) {
5629 		rc = -EINVAL;
5630 		goto do_prep_failed;
5631 	}
5632 	if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
5633 		if (!pmb->u.mb.un.varCfgPort.cMA) {
5634 			rc = -ENXIO;
5635 			goto do_prep_failed;
5636 		}
5637 		if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
5638 			phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
5639 			phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
5640 			phba->max_vports = (phba->max_vpi > phba->max_vports) ?
5641 				phba->max_vpi : phba->max_vports;
5642 
5643 		} else
5644 			phba->max_vpi = 0;
5645 		if (pmb->u.mb.un.varCfgPort.gerbm)
5646 			phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
5647 		if (pmb->u.mb.un.varCfgPort.gcrp)
5648 			phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
5649 
5650 		phba->hbq_get = phba->mbox->us.s3_pgp.hbq_get;
5651 		phba->port_gp = phba->mbox->us.s3_pgp.port;
5652 
5653 		if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
5654 			if (pmb->u.mb.un.varCfgPort.gbg == 0) {
5655 				phba->cfg_enable_bg = 0;
5656 				phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
5657 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5658 						"0443 Adapter did not grant "
5659 						"BlockGuard\n");
5660 			}
5661 		}
5662 	} else {
5663 		phba->hbq_get = NULL;
5664 		phba->port_gp = phba->mbox->us.s2.port;
5665 		phba->max_vpi = 0;
5666 	}
5667 do_prep_failed:
5668 	mempool_free(pmb, phba->mbox_mem_pool);
5669 	return rc;
5670 }
5671 
5672 
5673 /**
5674  * lpfc_sli_hba_setup - SLI initialization function
5675  * @phba: Pointer to HBA context object.
5676  *
5677  * This function is the main SLI initialization function. This function
5678  * is called by the HBA initialization code, HBA reset code and HBA
5679  * error attention handler code. Caller is not required to hold any
5680  * locks. This function issues config_port mailbox command to configure
5681  * the SLI, setup iocb rings and HBQ rings. In the end the function
5682  * calls the config_port_post function to issue init_link mailbox
5683  * command and to start the discovery. The function will return zero
5684  * if successful, else it will return negative error code.
5685  **/
5686 int
5687 lpfc_sli_hba_setup(struct lpfc_hba *phba)
5688 {
5689 	int rc;
5690 	int  i;
5691 	int longs;
5692 
5693 	/* Enable ISR already does config_port because of config_msi mbx */
5694 	if (test_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag)) {
5695 		rc = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
5696 		if (rc)
5697 			return -EIO;
5698 		clear_bit(HBA_NEEDS_CFG_PORT, &phba->hba_flag);
5699 	}
5700 	phba->fcp_embed_io = 0;	/* SLI4 FC support only */
5701 
5702 	if (phba->sli_rev == 3) {
5703 		phba->iocb_cmd_size = SLI3_IOCB_CMD_SIZE;
5704 		phba->iocb_rsp_size = SLI3_IOCB_RSP_SIZE;
5705 	} else {
5706 		phba->iocb_cmd_size = SLI2_IOCB_CMD_SIZE;
5707 		phba->iocb_rsp_size = SLI2_IOCB_RSP_SIZE;
5708 		phba->sli3_options = 0;
5709 	}
5710 
5711 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
5712 			"0444 Firmware in SLI %x mode. Max_vpi %d\n",
5713 			phba->sli_rev, phba->max_vpi);
5714 	rc = lpfc_sli_ring_map(phba);
5715 
5716 	if (rc)
5717 		goto lpfc_sli_hba_setup_error;
5718 
5719 	/* Initialize VPIs. */
5720 	if (phba->sli_rev == LPFC_SLI_REV3) {
5721 		/*
5722 		 * The VPI bitmask and physical ID array are allocated
5723 		 * and initialized once only - at driver load.  A port
5724 		 * reset doesn't need to reinitialize this memory.
5725 		 */
5726 		if ((phba->vpi_bmask == NULL) && (phba->vpi_ids == NULL)) {
5727 			longs = (phba->max_vpi + BITS_PER_LONG) / BITS_PER_LONG;
5728 			phba->vpi_bmask = kcalloc(longs,
5729 						  sizeof(unsigned long),
5730 						  GFP_KERNEL);
5731 			if (!phba->vpi_bmask) {
5732 				rc = -ENOMEM;
5733 				goto lpfc_sli_hba_setup_error;
5734 			}
5735 
5736 			phba->vpi_ids = kcalloc(phba->max_vpi + 1,
5737 						sizeof(uint16_t),
5738 						GFP_KERNEL);
5739 			if (!phba->vpi_ids) {
5740 				kfree(phba->vpi_bmask);
5741 				rc = -ENOMEM;
5742 				goto lpfc_sli_hba_setup_error;
5743 			}
5744 			for (i = 0; i < phba->max_vpi; i++)
5745 				phba->vpi_ids[i] = i;
5746 		}
5747 	}
5748 
5749 	/* Init HBQs */
5750 	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
5751 		rc = lpfc_sli_hbq_setup(phba);
5752 		if (rc)
5753 			goto lpfc_sli_hba_setup_error;
5754 	}
5755 	spin_lock_irq(&phba->hbalock);
5756 	phba->sli.sli_flag |= LPFC_PROCESS_LA;
5757 	spin_unlock_irq(&phba->hbalock);
5758 
5759 	rc = lpfc_config_port_post(phba);
5760 	if (rc)
5761 		goto lpfc_sli_hba_setup_error;
5762 
5763 	return rc;
5764 
5765 lpfc_sli_hba_setup_error:
5766 	phba->link_state = LPFC_HBA_ERROR;
5767 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5768 			"0445 Firmware initialization failed\n");
5769 	return rc;
5770 }
5771 
5772 /**
5773  * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
5774  * @phba: Pointer to HBA context object.
5775  *
5776  * This function issue a dump mailbox command to read config region
5777  * 23 and parse the records in the region and populate driver
5778  * data structure.
5779  **/
5780 static int
5781 lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba)
5782 {
5783 	LPFC_MBOXQ_t *mboxq;
5784 	struct lpfc_dmabuf *mp;
5785 	struct lpfc_mqe *mqe;
5786 	uint32_t data_length;
5787 	int rc;
5788 
5789 	/* Program the default value of vlan_id and fc_map */
5790 	phba->valid_vlan = 0;
5791 	phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
5792 	phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
5793 	phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
5794 
5795 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5796 	if (!mboxq)
5797 		return -ENOMEM;
5798 
5799 	mqe = &mboxq->u.mqe;
5800 	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq)) {
5801 		rc = -ENOMEM;
5802 		goto out_free_mboxq;
5803 	}
5804 
5805 	mp = mboxq->ctx_buf;
5806 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5807 
5808 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
5809 			"(%d):2571 Mailbox cmd x%x Status x%x "
5810 			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5811 			"x%x x%x x%x x%x x%x x%x x%x x%x x%x "
5812 			"CQ: x%x x%x x%x x%x\n",
5813 			mboxq->vport ? mboxq->vport->vpi : 0,
5814 			bf_get(lpfc_mqe_command, mqe),
5815 			bf_get(lpfc_mqe_status, mqe),
5816 			mqe->un.mb_words[0], mqe->un.mb_words[1],
5817 			mqe->un.mb_words[2], mqe->un.mb_words[3],
5818 			mqe->un.mb_words[4], mqe->un.mb_words[5],
5819 			mqe->un.mb_words[6], mqe->un.mb_words[7],
5820 			mqe->un.mb_words[8], mqe->un.mb_words[9],
5821 			mqe->un.mb_words[10], mqe->un.mb_words[11],
5822 			mqe->un.mb_words[12], mqe->un.mb_words[13],
5823 			mqe->un.mb_words[14], mqe->un.mb_words[15],
5824 			mqe->un.mb_words[16], mqe->un.mb_words[50],
5825 			mboxq->mcqe.word0,
5826 			mboxq->mcqe.mcqe_tag0, 	mboxq->mcqe.mcqe_tag1,
5827 			mboxq->mcqe.trailer);
5828 
5829 	if (rc) {
5830 		rc = -EIO;
5831 		goto out_free_mboxq;
5832 	}
5833 	data_length = mqe->un.mb_words[5];
5834 	if (data_length > DMP_RGN23_SIZE) {
5835 		rc = -EIO;
5836 		goto out_free_mboxq;
5837 	}
5838 
5839 	lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
5840 	rc = 0;
5841 
5842 out_free_mboxq:
5843 	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
5844 	return rc;
5845 }
5846 
5847 /**
5848  * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
5849  * @phba: pointer to lpfc hba data structure.
5850  * @mboxq: pointer to the LPFC_MBOXQ_t structure.
5851  * @vpd: pointer to the memory to hold resulting port vpd data.
5852  * @vpd_size: On input, the number of bytes allocated to @vpd.
5853  *	      On output, the number of data bytes in @vpd.
5854  *
5855  * This routine executes a READ_REV SLI4 mailbox command.  In
5856  * addition, this routine gets the port vpd data.
5857  *
5858  * Return codes
5859  * 	0 - successful
5860  * 	-ENOMEM - could not allocate memory.
5861  **/
5862 static int
5863 lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
5864 		    uint8_t *vpd, uint32_t *vpd_size)
5865 {
5866 	int rc = 0;
5867 	uint32_t dma_size;
5868 	struct lpfc_dmabuf *dmabuf;
5869 	struct lpfc_mqe *mqe;
5870 
5871 	dmabuf = kzalloc_obj(struct lpfc_dmabuf);
5872 	if (!dmabuf)
5873 		return -ENOMEM;
5874 
5875 	/*
5876 	 * Get a DMA buffer for the vpd data resulting from the READ_REV
5877 	 * mailbox command.
5878 	 */
5879 	dma_size = *vpd_size;
5880 	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev, dma_size,
5881 					  &dmabuf->phys, GFP_KERNEL);
5882 	if (!dmabuf->virt) {
5883 		kfree(dmabuf);
5884 		return -ENOMEM;
5885 	}
5886 
5887 	/*
5888 	 * The SLI4 implementation of READ_REV conflicts at word1,
5889 	 * bits 31:16 and SLI4 adds vpd functionality not present
5890 	 * in SLI3.  This code corrects the conflicts.
5891 	 */
5892 	lpfc_read_rev(phba, mboxq);
5893 	mqe = &mboxq->u.mqe;
5894 	mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
5895 	mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
5896 	mqe->un.read_rev.word1 &= 0x0000FFFF;
5897 	bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
5898 	bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
5899 
5900 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5901 	if (rc) {
5902 		dma_free_coherent(&phba->pcidev->dev, dma_size,
5903 				  dmabuf->virt, dmabuf->phys);
5904 		kfree(dmabuf);
5905 		return -EIO;
5906 	}
5907 
5908 	/*
5909 	 * The available vpd length cannot be bigger than the
5910 	 * DMA buffer passed to the port.  Catch the less than
5911 	 * case and update the caller's size.
5912 	 */
5913 	if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
5914 		*vpd_size = mqe->un.read_rev.avail_vpd_len;
5915 
5916 	memcpy(vpd, dmabuf->virt, *vpd_size);
5917 
5918 	dma_free_coherent(&phba->pcidev->dev, dma_size,
5919 			  dmabuf->virt, dmabuf->phys);
5920 	kfree(dmabuf);
5921 	return 0;
5922 }
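
/*
 * Illustrative sketch (hypothetical helpers, compiled out): the DMA
 * buffer lifecycle used by READ_REV above -- allocate coherent memory,
 * hand its bus address to the command, free it on every exit path.
 */
#if 0
static struct lpfc_dmabuf *example_dmabuf_get(struct lpfc_hba *phba,
					      uint32_t size)
{
	struct lpfc_dmabuf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (!buf)
		return NULL;
	buf->virt = dma_alloc_coherent(&phba->pcidev->dev, size,
				       &buf->phys, GFP_KERNEL);
	if (!buf->virt) {
		kfree(buf);
		return NULL;
	}
	return buf;
}

static void example_dmabuf_put(struct lpfc_hba *phba,
			       struct lpfc_dmabuf *buf, uint32_t size)
{
	dma_free_coherent(&phba->pcidev->dev, size, buf->virt, buf->phys);
	kfree(buf);
}
#endif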
5923 
5924 /**
5925  * lpfc_sli4_get_ctl_attr - Retrieve SLI4 device controller attributes
5926  * @phba: pointer to lpfc hba data structure.
5927  *
5928  * This routine retrieves the controller attributes of the SLI4 device
5929  * this PCI function is attached to.
5930  *
5931  * Return codes
5932  *      0 - successful
5933  *      otherwise - failed to retrieve controller attributes
5934  **/
5935 static int
5936 lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
5937 {
5938 	LPFC_MBOXQ_t *mboxq;
5939 	struct lpfc_mbx_get_cntl_attributes *mbx_cntl_attr;
5940 	struct lpfc_controller_attribute *cntl_attr;
5941 	void *virtaddr = NULL;
5942 	uint32_t alloclen, reqlen;
5943 	uint32_t shdr_status, shdr_add_status;
5944 	union lpfc_sli4_cfg_shdr *shdr;
5945 	int rc;
5946 
5947 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
5948 	if (!mboxq)
5949 		return -ENOMEM;
5950 
5951 	/* Send COMMON_GET_CNTL_ATTRIBUTES mbox cmd */
5952 	reqlen = sizeof(struct lpfc_mbx_get_cntl_attributes);
5953 	alloclen = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
5954 			LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES, reqlen,
5955 			LPFC_SLI4_MBX_NEMBED);
5956 
5957 	if (alloclen < reqlen) {
5958 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
5959 				"3084 Allocated DMA memory size (%d) is "
5960 				"less than the requested DMA memory size "
5961 				"(%d)\n", alloclen, reqlen);
5962 		rc = -ENOMEM;
5963 		goto out_free_mboxq;
5964 	}
5965 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
5966 	virtaddr = mboxq->sge_array->addr[0];
5967 	mbx_cntl_attr = (struct lpfc_mbx_get_cntl_attributes *)virtaddr;
5968 	shdr = &mbx_cntl_attr->cfg_shdr;
5969 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
5970 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
5971 	if (shdr_status || shdr_add_status || rc) {
5972 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
5973 				"3085 Mailbox x%x (x%x/x%x) failed, "
5974 				"rc:x%x, status:x%x, add_status:x%x\n",
5975 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
5976 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
5977 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
5978 				rc, shdr_status, shdr_add_status);
5979 		rc = -ENXIO;
5980 		goto out_free_mboxq;
5981 	}
5982 
5983 	cntl_attr = &mbx_cntl_attr->cntl_attr;
5984 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_VAL;
5985 	phba->sli4_hba.lnk_info.lnk_tp =
5986 		bf_get(lpfc_cntl_attr_lnk_type, cntl_attr);
5987 	phba->sli4_hba.lnk_info.lnk_no =
5988 		bf_get(lpfc_cntl_attr_lnk_numb, cntl_attr);
5989 	phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
5990 	phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
5991 
5992 	memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str,
5993 		sizeof(phba->BIOSVersion));
5994 	phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0';
5995 
5996 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
5997 			"3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
5998 			"flash_id: x%02x, asic_rev: x%02x\n",
5999 			phba->sli4_hba.lnk_info.lnk_tp,
6000 			phba->sli4_hba.lnk_info.lnk_no,
6001 			phba->BIOSVersion, phba->sli4_hba.flash_id,
6002 			phba->sli4_hba.asic_rev);
6003 out_free_mboxq:
6004 	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6005 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
6006 	else
6007 		mempool_free(mboxq, phba->mbox_mem_pool);
6008 	return rc;
6009 }
6010 
6011 /**
6012  * lpfc_sli4_retrieve_pport_name - Retrieve SLI4 device physical port name
6013  * @phba: pointer to lpfc hba data structure.
6014  *
6015  * This routine retrieves the physical port name of the SLI4 device this
6016  * PCI function is attached to.
6017  *
6018  * Return codes
6019  *      0 - successful
6020  *      otherwise - failed to retrieve physical port name
6021  **/
6022 static int
6023 lpfc_sli4_retrieve_pport_name(struct lpfc_hba *phba)
6024 {
6025 	LPFC_MBOXQ_t *mboxq;
6026 	struct lpfc_mbx_get_port_name *get_port_name;
6027 	uint32_t shdr_status, shdr_add_status;
6028 	union lpfc_sli4_cfg_shdr *shdr;
6029 	char cport_name = 0;
6030 	int rc;
6031 
6032 	/* We assume nothing at this point */
6033 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6034 	phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_NON;
6035 
6036 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6037 	if (!mboxq)
6038 		return -ENOMEM;
6039 	/* obtain link type and link number via READ_CONFIG */
6040 	phba->sli4_hba.lnk_info.lnk_dv = LPFC_LNK_DAT_INVAL;
6041 	lpfc_sli4_read_config(phba);
6042 
6043 	if (phba->sli4_hba.fawwpn_flag & LPFC_FAWWPN_CONFIG)
6044 		phba->sli4_hba.fawwpn_flag |= LPFC_FAWWPN_FABRIC;
6045 
6046 	if (phba->sli4_hba.lnk_info.lnk_dv == LPFC_LNK_DAT_VAL)
6047 		goto retrieve_ppname;
6048 
6049 	/* obtain link type and link number via COMMON_GET_CNTL_ATTRIBUTES */
6050 	rc = lpfc_sli4_get_ctl_attr(phba);
6051 	if (rc)
6052 		goto out_free_mboxq;
6053 
6054 retrieve_ppname:
6055 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
6056 		LPFC_MBOX_OPCODE_GET_PORT_NAME,
6057 		sizeof(struct lpfc_mbx_get_port_name) -
6058 		sizeof(struct lpfc_sli4_cfg_mhdr),
6059 		LPFC_SLI4_MBX_EMBED);
6060 	get_port_name = &mboxq->u.mqe.un.get_port_name;
6061 	shdr = (union lpfc_sli4_cfg_shdr *)&get_port_name->header.cfg_shdr;
6062 	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_OPCODE_VERSION_1);
6063 	bf_set(lpfc_mbx_get_port_name_lnk_type, &get_port_name->u.request,
6064 		phba->sli4_hba.lnk_info.lnk_tp);
6065 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
6066 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6067 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6068 	if (shdr_status || shdr_add_status || rc) {
6069 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
6070 				"3087 Mailbox x%x (x%x/x%x) failed: "
6071 				"rc:x%x, status:x%x, add_status:x%x\n",
6072 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
6073 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
6074 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
6075 				rc, shdr_status, shdr_add_status);
6076 		rc = -ENXIO;
6077 		goto out_free_mboxq;
6078 	}
6079 	switch (phba->sli4_hba.lnk_info.lnk_no) {
6080 	case LPFC_LINK_NUMBER_0:
6081 		cport_name = bf_get(lpfc_mbx_get_port_name_name0,
6082 				&get_port_name->u.response);
6083 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6084 		break;
6085 	case LPFC_LINK_NUMBER_1:
6086 		cport_name = bf_get(lpfc_mbx_get_port_name_name1,
6087 				&get_port_name->u.response);
6088 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6089 		break;
6090 	case LPFC_LINK_NUMBER_2:
6091 		cport_name = bf_get(lpfc_mbx_get_port_name_name2,
6092 				&get_port_name->u.response);
6093 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6094 		break;
6095 	case LPFC_LINK_NUMBER_3:
6096 		cport_name = bf_get(lpfc_mbx_get_port_name_name3,
6097 				&get_port_name->u.response);
6098 		phba->sli4_hba.pport_name_sta = LPFC_SLI4_PPNAME_GET;
6099 		break;
6100 	default:
6101 		break;
6102 	}
6103 
6104 	if (phba->sli4_hba.pport_name_sta == LPFC_SLI4_PPNAME_GET) {
6105 		phba->Port[0] = cport_name;
6106 		phba->Port[1] = '\0';
6107 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6108 				"3091 SLI get port name: %s\n", phba->Port);
6109 	}
6110 
6111 out_free_mboxq:
6112 	if (bf_get(lpfc_mqe_command, &mboxq->u.mqe) == MBX_SLI4_CONFIG)
6113 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
6114 	else
6115 		mempool_free(mboxq, phba->mbox_mem_pool);
6116 	return rc;
6117 }
6118 
6119 /**
6120  * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
6121  * @phba: pointer to lpfc hba data structure.
6122  *
6123  * This routine is called to explicitly arm the SLI4 device's completion and
6124  * event queues.
6125  **/
6126 static void
6127 lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
6128 {
6129 	int qidx;
6130 	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
6131 	struct lpfc_sli4_hdw_queue *qp;
6132 	struct lpfc_queue *eq;
6133 
6134 	sli4_hba->sli4_write_cq_db(phba, sli4_hba->mbx_cq, 0, LPFC_QUEUE_REARM);
6135 	sli4_hba->sli4_write_cq_db(phba, sli4_hba->els_cq, 0, LPFC_QUEUE_REARM);
6136 	if (sli4_hba->nvmels_cq)
6137 		sli4_hba->sli4_write_cq_db(phba, sli4_hba->nvmels_cq, 0,
6138 					   LPFC_QUEUE_REARM);
6139 
6140 	if (sli4_hba->hdwq) {
6141 		/* Loop thru all Hardware Queues */
6142 		for (qidx = 0; qidx < phba->cfg_hdw_queue; qidx++) {
6143 			qp = &sli4_hba->hdwq[qidx];
6144 			/* ARM the corresponding CQ */
6145 			sli4_hba->sli4_write_cq_db(phba, qp->io_cq, 0,
6146 						LPFC_QUEUE_REARM);
6147 		}
6148 
6149 		/* Loop thru all IRQ vectors */
6150 		for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
6151 			eq = sli4_hba->hba_eq_hdl[qidx].eq;
6152 			/* ARM the corresponding EQ */
6153 			sli4_hba->sli4_write_eq_db(phba, eq,
6154 						   0, LPFC_QUEUE_REARM);
6155 		}
6156 	}
6157 
6158 	if (phba->nvmet_support) {
6159 		for (qidx = 0; qidx < phba->cfg_nvmet_mrq; qidx++) {
6160 			sli4_hba->sli4_write_cq_db(phba,
6161 				sli4_hba->nvmet_cqset[qidx], 0,
6162 				LPFC_QUEUE_REARM);
6163 		}
6164 	}
6165 }
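
/*
 * Note on "arming": each call above is a doorbell write that pops zero
 * entries and sets only the rearm flag, e.g.
 * sli4_write_cq_db(phba, cq, 0, LPFC_QUEUE_REARM), asking the hardware
 * to raise an interrupt on the next completion placed in the queue.
 */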
6166 
6167 /**
6168  * lpfc_sli4_get_avail_extnt_rsrc - Get available resource extent count.
6169  * @phba: Pointer to HBA context object.
6170  * @type: The resource extent type.
6171  * @extnt_count: buffer to hold port available extent count.
6172  * @extnt_size: buffer to hold element count per extent.
6173  *
6174  * This function calls the port and retrieves the number of available
6175  * extents and their size for a particular extent type.
6176  *
6177  * Returns: 0 if successful.  Nonzero otherwise.
6178  **/
6179 int
6180 lpfc_sli4_get_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type,
6181 			       uint16_t *extnt_count, uint16_t *extnt_size)
6182 {
6183 	int rc = 0;
6184 	uint32_t length;
6185 	uint32_t mbox_tmo;
6186 	struct lpfc_mbx_get_rsrc_extent_info *rsrc_info;
6187 	LPFC_MBOXQ_t *mbox;
6188 
6189 	*extnt_count = 0;
6190 	*extnt_size = 0;
6191 
6192 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6193 	if (!mbox)
6194 		return -ENOMEM;
6195 
6196 	/* Find out how many extents are available for this resource type */
6197 	length = (sizeof(struct lpfc_mbx_get_rsrc_extent_info) -
6198 		  sizeof(struct lpfc_sli4_cfg_mhdr));
6199 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6200 			 LPFC_MBOX_OPCODE_GET_RSRC_EXTENT_INFO,
6201 			 length, LPFC_SLI4_MBX_EMBED);
6202 
6203 	/* Send an extents count of 0 - the GET doesn't use it. */
6204 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6205 					LPFC_SLI4_MBX_EMBED);
6206 	if (unlikely(rc)) {
6207 		rc = -EIO;
6208 		goto err_exit;
6209 	}
6210 
6211 	if (!phba->sli4_hba.intr_enable)
6212 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6213 	else {
6214 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6215 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6216 	}
6217 	if (unlikely(rc)) {
6218 		rc = -EIO;
6219 		goto err_exit;
6220 	}
6221 
6222 	rsrc_info = &mbox->u.mqe.un.rsrc_extent_info;
6223 	if (bf_get(lpfc_mbox_hdr_status,
6224 		   &rsrc_info->header.cfg_shdr.response)) {
6225 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6226 				"2930 Failed to get resource extents "
6227 				"Status 0x%x Add'l Status 0x%x\n",
6228 				bf_get(lpfc_mbox_hdr_status,
6229 				       &rsrc_info->header.cfg_shdr.response),
6230 				bf_get(lpfc_mbox_hdr_add_status,
6231 				       &rsrc_info->header.cfg_shdr.response));
6232 		rc = -EIO;
6233 		goto err_exit;
6234 	}
6235 
6236 	*extnt_count = bf_get(lpfc_mbx_get_rsrc_extent_info_cnt,
6237 			      &rsrc_info->u.rsp);
6238 	*extnt_size = bf_get(lpfc_mbx_get_rsrc_extent_info_size,
6239 			     &rsrc_info->u.rsp);
6240 
6241 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
6242 			"3162 Retrieved extents type-%d from port: count:%d, "
6243 			"size:%d\n", type, *extnt_count, *extnt_size);
6244 
6245 err_exit:
6246 	mempool_free(mbox, phba->mbox_mem_pool);
6247 	return rc;
6248 }
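
/*
 * Usage sketch (hypothetical caller, compiled out): querying available
 * RPI extents and deriving the total element count from the response.
 */
#if 0
static int example_query_rpi_extents(struct lpfc_hba *phba)
{
	uint16_t extnt_count = 0, extnt_size = 0;
	int rc;

	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, LPFC_RSC_TYPE_FCOE_RPI,
					    &extnt_count, &extnt_size);
	if (rc)
		return rc;

	/* extnt_count extents of extnt_size elements each are available */
	return extnt_count * extnt_size;
}
#endif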
6249 
6250 /**
6251  * lpfc_sli4_chk_avail_extnt_rsrc - Check for available SLI4 resource extents.
6252  * @phba: Pointer to HBA context object.
6253  * @type: The extent type to check.
6254  *
6255  * This function reads the current available extents from the port and checks
6256  * if the extent count or extent size has changed since the last access.
6257  * Callers use this routine after a port reset to determine if there is an
6258  * extent reprovisioning requirement.
6259  *
6260  * Returns:
6261  *   -Error: error indicates problem.
6262  *   1: Extent count or size has changed.
6263  *   0: No changes.
6264  **/
6265 static int
6266 lpfc_sli4_chk_avail_extnt_rsrc(struct lpfc_hba *phba, uint16_t type)
6267 {
6268 	uint16_t curr_ext_cnt, rsrc_ext_cnt;
6269 	uint16_t size_diff, rsrc_ext_size;
6270 	int rc = 0;
6271 	struct lpfc_rsrc_blks *rsrc_entry;
6272 	struct list_head *rsrc_blk_list = NULL;
6273 
6274 	size_diff = 0;
6275 	curr_ext_cnt = 0;
6276 	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6277 					    &rsrc_ext_cnt,
6278 					    &rsrc_ext_size);
6279 	if (unlikely(rc))
6280 		return -EIO;
6281 
6282 	switch (type) {
6283 	case LPFC_RSC_TYPE_FCOE_RPI:
6284 		rsrc_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6285 		break;
6286 	case LPFC_RSC_TYPE_FCOE_VPI:
6287 		rsrc_blk_list = &phba->lpfc_vpi_blk_list;
6288 		break;
6289 	case LPFC_RSC_TYPE_FCOE_XRI:
6290 		rsrc_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6291 		break;
6292 	case LPFC_RSC_TYPE_FCOE_VFI:
6293 		rsrc_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6294 		break;
6295 	default:
6296 		break;
6297 	}
6298 
6299 	list_for_each_entry(rsrc_entry, rsrc_blk_list, list) {
6300 		curr_ext_cnt++;
6301 		if (rsrc_entry->rsrc_size != rsrc_ext_size)
6302 			size_diff++;
6303 	}
6304 
6305 	if (curr_ext_cnt != rsrc_ext_cnt || size_diff != 0)
6306 		rc = 1;
6307 
6308 	return rc;
6309 }
6310 
6311 /**
6312  * lpfc_sli4_cfg_post_extnts - Post a resource extents allocation request.
6313  * @phba: Pointer to HBA context object.
6314  * @extnt_cnt: number of extents to request.
6315  * @type: the extent type (rpi, xri, vfi, vpi).
6316  * @emb: buffer to hold either MBX_EMBED or MBX_NEMBED operation.
6317  * @mbox: pointer to the caller's allocated mailbox structure.
6318  *
6319  * This function executes the extents allocation request.  It also
6320  * takes care of the amount of memory needed to allocate or get the
6321  * allocated extents. It is the caller's responsibility to evaluate
6322  * the response.
6323  *
6324  * Returns:
6325  *   -Error:  Error value describes the condition found.
6326  *   0: if successful
6327  **/
6328 static int
6329 lpfc_sli4_cfg_post_extnts(struct lpfc_hba *phba, uint16_t extnt_cnt,
6330 			  uint16_t type, bool *emb, LPFC_MBOXQ_t *mbox)
6331 {
6332 	int rc = 0;
6333 	uint32_t req_len;
6334 	uint32_t emb_len;
6335 	uint32_t alloc_len, mbox_tmo;
6336 
6337 	/* Calculate the total requested length of the dma memory */
6338 	req_len = extnt_cnt * sizeof(uint16_t);
6339 
6340 	/*
6341 	 * Calculate the size of an embedded mailbox.  The uint32_t
6342 	 * accounts for the extents-specific word.
6343 	 */
6344 	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
6345 		sizeof(uint32_t);
6346 
6347 	/*
6348 	 * Presume the allocation and response will fit into an embedded
6349 	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
6350 	 */
6351 	*emb = LPFC_SLI4_MBX_EMBED;
6352 	if (req_len > emb_len) {
6353 		req_len = extnt_cnt * sizeof(uint16_t) +
6354 			sizeof(union lpfc_sli4_cfg_shdr) +
6355 			sizeof(uint32_t);
6356 		*emb = LPFC_SLI4_MBX_NEMBED;
6357 	}
6358 
6359 	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6360 				     LPFC_MBOX_OPCODE_ALLOC_RSRC_EXTENT,
6361 				     req_len, *emb);
6362 	if (alloc_len < req_len) {
6363 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6364 			"2982 Allocated DMA memory size (x%x) is "
6365 			"less than the requested DMA memory "
6366 			"size (x%x)\n", alloc_len, req_len);
6367 		return -ENOMEM;
6368 	}
6369 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, extnt_cnt, type, *emb);
6370 	if (unlikely(rc))
6371 		return -EIO;
6372 
6373 	if (!phba->sli4_hba.intr_enable)
6374 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6375 	else {
6376 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6377 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6378 	}
6379 
6380 	if (unlikely(rc))
6381 		rc = -EIO;
6382 	return rc;
6383 }
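
/*
 * Sizing sketch for the embedded/non-embedded decision above (the
 * emb_len value is illustrative, not taken from the headers): each
 * requested extent consumes sizeof(uint16_t) bytes of id space, so
 * with, say, emb_len == 200:
 *
 *	extnt_cnt =  64: req_len = 128 <= 200 -> LPFC_SLI4_MBX_EMBED
 *	extnt_cnt = 128: req_len = 256 >  200 -> LPFC_SLI4_MBX_NEMBED,
 *	    and req_len is recomputed to add the cfg_shdr and the
 *	    extents-specific word.
 */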
6384 
6385 /**
6386  * lpfc_sli4_alloc_extent - Allocate an SLI4 resource extent.
6387  * @phba: Pointer to HBA context object.
6388  * @type:  The resource extent type to allocate.
6389  *
6390  * This function allocates the number of elements for the specified
6391  * resource type.
6392  **/
6393 static int
6394 lpfc_sli4_alloc_extent(struct lpfc_hba *phba, uint16_t type)
6395 {
6396 	bool emb = false;
6397 	uint16_t rsrc_id_cnt, rsrc_cnt, rsrc_size;
6398 	uint16_t rsrc_id, rsrc_start, j, k;
6399 	uint16_t *ids;
6400 	int i, rc;
6401 	unsigned long longs;
6402 	unsigned long *bmask;
6403 	struct lpfc_rsrc_blks *rsrc_blks;
6404 	LPFC_MBOXQ_t *mbox;
6405 	uint32_t length;
6406 	struct lpfc_id_range *id_array = NULL;
6407 	void *virtaddr = NULL;
6408 	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
6409 	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
6410 	struct list_head *ext_blk_list;
6411 
6412 	rc = lpfc_sli4_get_avail_extnt_rsrc(phba, type,
6413 					    &rsrc_cnt,
6414 					    &rsrc_size);
6415 	if (unlikely(rc))
6416 		return -EIO;
6417 
6418 	if ((rsrc_cnt == 0) || (rsrc_size == 0)) {
6419 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6420 			"3009 No available Resource Extents "
6421 			"for resource type 0x%x: Count: 0x%x, "
6422 			"Size 0x%x\n", type, rsrc_cnt,
6423 			rsrc_size);
6424 		return -ENOMEM;
6425 	}
6426 
6427 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_INIT | LOG_SLI,
6428 			"2903 Post resource extents type-0x%x: "
6429 			"count:%d, size %d\n", type, rsrc_cnt, rsrc_size);
6430 
6431 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6432 	if (!mbox)
6433 		return -ENOMEM;
6434 
6435 	rc = lpfc_sli4_cfg_post_extnts(phba, rsrc_cnt, type, &emb, mbox);
6436 	if (unlikely(rc)) {
6437 		rc = -EIO;
6438 		goto err_exit;
6439 	}
6440 
6441 	/*
6442 	 * Figure out where the response is located.  Then get local pointers
6443 	 * to the response data.  The port does not guarantee that it will
6444 	 * honor the full requested extent count, so update the local variable
6445 	 * with the allocated count from the port.
6446 	 */
6447 	if (emb == LPFC_SLI4_MBX_EMBED) {
6448 		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
6449 		id_array = &rsrc_ext->u.rsp.id[0];
6450 		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
6451 	} else {
6452 		virtaddr = mbox->sge_array->addr[0];
6453 		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
6454 		rsrc_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
6455 		id_array = &n_rsrc->id;
6456 	}
6457 
6458 	longs = ((rsrc_cnt * rsrc_size) + BITS_PER_LONG - 1) / BITS_PER_LONG;
6459 	rsrc_id_cnt = rsrc_cnt * rsrc_size;
6460 
6461 	/*
6462 	 * Based on the resource size and count, correct the base and max
6463 	 * resource values.
6464 	 */
6465 	length = sizeof(struct lpfc_rsrc_blks);
6466 	switch (type) {
6467 	case LPFC_RSC_TYPE_FCOE_RPI:
6468 		phba->sli4_hba.rpi_bmask = kcalloc(longs,
6469 						   sizeof(unsigned long),
6470 						   GFP_KERNEL);
6471 		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
6472 			rc = -ENOMEM;
6473 			goto err_exit;
6474 		}
6475 		phba->sli4_hba.rpi_ids = kcalloc(rsrc_id_cnt,
6476 						 sizeof(uint16_t),
6477 						 GFP_KERNEL);
6478 		if (unlikely(!phba->sli4_hba.rpi_ids)) {
6479 			kfree(phba->sli4_hba.rpi_bmask);
6480 			rc = -ENOMEM;
6481 			goto err_exit;
6482 		}
6483 
6484 		/*
6485 		 * The next_rpi was initialized with the maximum available
6486 		 * count but the port may allocate a smaller number.  Catch
6487 		 * that case and update the next_rpi.
6488 		 */
6489 		phba->sli4_hba.next_rpi = rsrc_id_cnt;
6490 
6491 		/* Initialize local ptrs for common extent processing later. */
6492 		bmask = phba->sli4_hba.rpi_bmask;
6493 		ids = phba->sli4_hba.rpi_ids;
6494 		ext_blk_list = &phba->sli4_hba.lpfc_rpi_blk_list;
6495 		break;
6496 	case LPFC_RSC_TYPE_FCOE_VPI:
6497 		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
6498 					  GFP_KERNEL);
6499 		if (unlikely(!phba->vpi_bmask)) {
6500 			rc = -ENOMEM;
6501 			goto err_exit;
6502 		}
6503 		phba->vpi_ids = kcalloc(rsrc_id_cnt, sizeof(uint16_t),
6504 					 GFP_KERNEL);
6505 		if (unlikely(!phba->vpi_ids)) {
6506 			kfree(phba->vpi_bmask);
6507 			rc = -ENOMEM;
6508 			goto err_exit;
6509 		}
6510 
6511 		/* Initialize local ptrs for common extent processing later. */
6512 		bmask = phba->vpi_bmask;
6513 		ids = phba->vpi_ids;
6514 		ext_blk_list = &phba->lpfc_vpi_blk_list;
6515 		break;
6516 	case LPFC_RSC_TYPE_FCOE_XRI:
6517 		phba->sli4_hba.xri_bmask = kcalloc(longs,
6518 						   sizeof(unsigned long),
6519 						   GFP_KERNEL);
6520 		if (unlikely(!phba->sli4_hba.xri_bmask)) {
6521 			rc = -ENOMEM;
6522 			goto err_exit;
6523 		}
6524 		phba->sli4_hba.max_cfg_param.xri_used = 0;
6525 		phba->sli4_hba.xri_ids = kcalloc(rsrc_id_cnt,
6526 						 sizeof(uint16_t),
6527 						 GFP_KERNEL);
6528 		if (unlikely(!phba->sli4_hba.xri_ids)) {
6529 			kfree(phba->sli4_hba.xri_bmask);
6530 			rc = -ENOMEM;
6531 			goto err_exit;
6532 		}
6533 
6534 		/* Initialize local ptrs for common extent processing later. */
6535 		bmask = phba->sli4_hba.xri_bmask;
6536 		ids = phba->sli4_hba.xri_ids;
6537 		ext_blk_list = &phba->sli4_hba.lpfc_xri_blk_list;
6538 		break;
6539 	case LPFC_RSC_TYPE_FCOE_VFI:
6540 		phba->sli4_hba.vfi_bmask = kcalloc(longs,
6541 						   sizeof(unsigned long),
6542 						   GFP_KERNEL);
6543 		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
6544 			rc = -ENOMEM;
6545 			goto err_exit;
6546 		}
6547 		phba->sli4_hba.vfi_ids = kcalloc(rsrc_id_cnt,
6548 						 sizeof(uint16_t),
6549 						 GFP_KERNEL);
6550 		if (unlikely(!phba->sli4_hba.vfi_ids)) {
6551 			kfree(phba->sli4_hba.vfi_bmask);
6552 			rc = -ENOMEM;
6553 			goto err_exit;
6554 		}
6555 
6556 		/* Initialize local ptrs for common extent processing later. */
6557 		bmask = phba->sli4_hba.vfi_bmask;
6558 		ids = phba->sli4_hba.vfi_ids;
6559 		ext_blk_list = &phba->sli4_hba.lpfc_vfi_blk_list;
6560 		break;
6561 	default:
6562 		/* Unsupported resource type.  Fail the call. */
6563 		id_array = NULL;
6564 		bmask = NULL;
6565 		ids = NULL;
6566 		ext_blk_list = NULL;
		rc = -EIO;
6567 		goto err_exit;
6568 	}
6569 
6570 	/*
6571 	 * Complete initializing the extent configuration with the
6572 	 * allocated ids assigned to this function.  The bitmask serves
6573 	 * as an index into the array and manages the available ids.  The
6574 	 * array just stores the ids communicated to the port via the wqes.
6575 	 */
6576 	for (i = 0, j = 0, k = 0; i < rsrc_cnt; i++) {
6577 		if ((i % 2) == 0)
6578 			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_0,
6579 					 &id_array[k]);
6580 		else
6581 			rsrc_id = bf_get(lpfc_mbx_rsrc_id_word4_1,
6582 					 &id_array[k]);
6583 
6584 		rsrc_blks = kzalloc(length, GFP_KERNEL);
6585 		if (unlikely(!rsrc_blks)) {
6586 			rc = -ENOMEM;
6587 			kfree(bmask);
6588 			kfree(ids);
6589 			goto err_exit;
6590 		}
6591 		rsrc_blks->rsrc_start = rsrc_id;
6592 		rsrc_blks->rsrc_size = rsrc_size;
6593 		list_add_tail(&rsrc_blks->list, ext_blk_list);
6594 		rsrc_start = rsrc_id;
6595 		if ((type == LPFC_RSC_TYPE_FCOE_XRI) && (j == 0)) {
6596 			phba->sli4_hba.io_xri_start = rsrc_start +
6597 				lpfc_sli4_get_iocb_cnt(phba);
6598 		}
6599 
6600 		while (rsrc_id < (rsrc_start + rsrc_size)) {
6601 			ids[j] = rsrc_id;
6602 			rsrc_id++;
6603 			j++;
6604 		}
6605 		/* Entire word processed.  Get next word.*/
6606 		if ((i % 2) == 1)
6607 			k++;
6608 	}
6609  err_exit:
6610 	lpfc_sli4_mbox_cmd_free(phba, mbox);
6611 	return rc;
6612 }
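
/*
 * Worked example of the id_array unpacking loop above: each 32-bit
 * response word carries two 16-bit extent base ids, so word k holds
 * the bases for extents i = 2k and i = 2k + 1, and k only advances
 * after odd i.  With rsrc_cnt = 3 and rsrc_size = 4 and base ids of
 * 0, 4 and 8 (values illustrative), the loop expands
 *
 *	ids[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 }
 *
 * while the bitmask tracks which of those ids are in use.
 */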
6613 
6614 
6615 
6616 /**
6617  * lpfc_sli4_dealloc_extent - Deallocate an SLI4 resource extent.
6618  * @phba: Pointer to HBA context object.
6619  * @type: the extent's type.
6620  *
6621  * This function deallocates all extents of a particular resource type.
6622  * SLI4 does not allow for deallocating a particular extent range.  It
6623  * is the caller's responsibility to release all kernel memory resources.
6624  **/
6625 static int
6626 lpfc_sli4_dealloc_extent(struct lpfc_hba *phba, uint16_t type)
6627 {
6628 	int rc;
6629 	uint32_t length, mbox_tmo = 0;
6630 	LPFC_MBOXQ_t *mbox;
6631 	struct lpfc_mbx_dealloc_rsrc_extents *dealloc_rsrc;
6632 	struct lpfc_rsrc_blks *rsrc_blk, *rsrc_blk_next;
6633 
6634 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
6635 	if (!mbox)
6636 		return -ENOMEM;
6637 
6638 	/*
6639 	 * This function sends an embedded mailbox because it only sends
6640 	 * the resource type.  All extents of this type are released by the
6641 	 * port.
6642 	 */
6643 	length = (sizeof(struct lpfc_mbx_dealloc_rsrc_extents) -
6644 		  sizeof(struct lpfc_sli4_cfg_mhdr));
6645 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6646 			 LPFC_MBOX_OPCODE_DEALLOC_RSRC_EXTENT,
6647 			 length, LPFC_SLI4_MBX_EMBED);
6648 
6649 	/* Send an extents count of 0 - the dealloc doesn't use it. */
6650 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, 0, type,
6651 					LPFC_SLI4_MBX_EMBED);
6652 	if (unlikely(rc)) {
6653 		rc = -EIO;
6654 		goto out_free_mbox;
6655 	}
6656 	if (!phba->sli4_hba.intr_enable)
6657 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
6658 	else {
6659 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
6660 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
6661 	}
6662 	if (unlikely(rc)) {
6663 		rc = -EIO;
6664 		goto out_free_mbox;
6665 	}
6666 
6667 	dealloc_rsrc = &mbox->u.mqe.un.dealloc_rsrc_extents;
6668 	if (bf_get(lpfc_mbox_hdr_status,
6669 		   &dealloc_rsrc->header.cfg_shdr.response)) {
6670 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6671 				"2919 Failed to release resource extents "
6672 				"for type %d - Status 0x%x Add'l Status 0x%x. "
6673 				"Resource memory not released.\n",
6674 				type,
6675 				bf_get(lpfc_mbox_hdr_status,
6676 				    &dealloc_rsrc->header.cfg_shdr.response),
6677 				bf_get(lpfc_mbox_hdr_add_status,
6678 				    &dealloc_rsrc->header.cfg_shdr.response));
6679 		rc = -EIO;
6680 		goto out_free_mbox;
6681 	}
6682 
6683 	/* Release kernel memory resources for the specific type. */
6684 	switch (type) {
6685 	case LPFC_RSC_TYPE_FCOE_VPI:
6686 		kfree(phba->vpi_bmask);
6687 		kfree(phba->vpi_ids);
6688 		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6689 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6690 				    &phba->lpfc_vpi_blk_list, list) {
6691 			list_del_init(&rsrc_blk->list);
6692 			kfree(rsrc_blk);
6693 		}
6694 		phba->sli4_hba.max_cfg_param.vpi_used = 0;
6695 		break;
6696 	case LPFC_RSC_TYPE_FCOE_XRI:
6697 		kfree(phba->sli4_hba.xri_bmask);
6698 		kfree(phba->sli4_hba.xri_ids);
6699 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6700 				    &phba->sli4_hba.lpfc_xri_blk_list, list) {
6701 			list_del_init(&rsrc_blk->list);
6702 			kfree(rsrc_blk);
6703 		}
6704 		break;
6705 	case LPFC_RSC_TYPE_FCOE_VFI:
6706 		kfree(phba->sli4_hba.vfi_bmask);
6707 		kfree(phba->sli4_hba.vfi_ids);
6708 		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6709 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6710 				    &phba->sli4_hba.lpfc_vfi_blk_list, list) {
6711 			list_del_init(&rsrc_blk->list);
6712 			kfree(rsrc_blk);
6713 		}
6714 		break;
6715 	case LPFC_RSC_TYPE_FCOE_RPI:
6716 		/* RPI bitmask and physical id array are cleaned up earlier. */
6717 		list_for_each_entry_safe(rsrc_blk, rsrc_blk_next,
6718 				    &phba->sli4_hba.lpfc_rpi_blk_list, list) {
6719 			list_del_init(&rsrc_blk->list);
6720 			kfree(rsrc_blk);
6721 		}
6722 		break;
6723 	default:
6724 		break;
6725 	}
6726 
6727 	bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
6728 
6729  out_free_mbox:
6730 	mempool_free(mbox, phba->mbox_mem_pool);
6731 	return rc;
6732 }
6733 
6734 static void
6735 lpfc_set_features(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox,
6736 		  uint32_t feature)
6737 {
6738 	uint32_t len;
6739 	u32 sig_freq = 0;
6740 
6741 	len = sizeof(struct lpfc_mbx_set_feature) -
6742 		sizeof(struct lpfc_sli4_cfg_mhdr);
6743 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
6744 			 LPFC_MBOX_OPCODE_SET_FEATURES, len,
6745 			 LPFC_SLI4_MBX_EMBED);
6746 
6747 	switch (feature) {
6748 	case LPFC_SET_UE_RECOVERY:
6749 		bf_set(lpfc_mbx_set_feature_UER,
6750 		       &mbox->u.mqe.un.set_feature, 1);
6751 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_UE_RECOVERY;
6752 		mbox->u.mqe.un.set_feature.param_len = 8;
6753 		break;
6754 	case LPFC_SET_MDS_DIAGS:
6755 		bf_set(lpfc_mbx_set_feature_mds,
6756 		       &mbox->u.mqe.un.set_feature, 1);
6757 		bf_set(lpfc_mbx_set_feature_mds_deep_loopbk,
6758 		       &mbox->u.mqe.un.set_feature, 1);
6759 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_MDS_DIAGS;
6760 		mbox->u.mqe.un.set_feature.param_len = 8;
6761 		break;
6762 	case LPFC_SET_CGN_SIGNAL:
6763 		if (phba->cmf_active_mode == LPFC_CFG_OFF)
6764 			sig_freq = 0;
6765 		else
6766 			sig_freq = phba->cgn_sig_freq;
6767 
6768 		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ALARM) {
6769 			bf_set(lpfc_mbx_set_feature_CGN_alarm_freq,
6770 			       &mbox->u.mqe.un.set_feature, sig_freq);
6771 			bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6772 			       &mbox->u.mqe.un.set_feature, sig_freq);
6773 		}
6774 
6775 		if (phba->cgn_reg_signal == EDC_CG_SIG_WARN_ONLY)
6776 			bf_set(lpfc_mbx_set_feature_CGN_warn_freq,
6777 			       &mbox->u.mqe.un.set_feature, sig_freq);
6778 
6779 		if (phba->cmf_active_mode == LPFC_CFG_OFF ||
6780 		    phba->cgn_reg_signal == EDC_CG_SIG_NOTSUPPORTED)
6781 			sig_freq = 0;
6782 		else
6783 			sig_freq = lpfc_acqe_cgn_frequency;
6784 
6785 		bf_set(lpfc_mbx_set_feature_CGN_acqe_freq,
6786 		       &mbox->u.mqe.un.set_feature, sig_freq);
6787 
6788 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_CGN_SIGNAL;
6789 		mbox->u.mqe.un.set_feature.param_len = 12;
6790 		break;
6791 	case LPFC_SET_DUAL_DUMP:
6792 		bf_set(lpfc_mbx_set_feature_dd,
6793 		       &mbox->u.mqe.un.set_feature, LPFC_ENABLE_DUAL_DUMP);
6794 		bf_set(lpfc_mbx_set_feature_ddquery,
6795 		       &mbox->u.mqe.un.set_feature, 0);
6796 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_DUAL_DUMP;
6797 		mbox->u.mqe.un.set_feature.param_len = 4;
6798 		break;
6799 	case LPFC_SET_ENABLE_MI:
6800 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_MI;
6801 		mbox->u.mqe.un.set_feature.param_len = 4;
6802 		bf_set(lpfc_mbx_set_feature_milunq, &mbox->u.mqe.un.set_feature,
6803 		       phba->pport->cfg_lun_queue_depth);
6804 		bf_set(lpfc_mbx_set_feature_mi, &mbox->u.mqe.un.set_feature,
6805 		       phba->sli4_hba.pc_sli4_params.mi_ver);
6806 		break;
6807 	case LPFC_SET_LD_SIGNAL:
6808 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_LD_SIGNAL;
6809 		mbox->u.mqe.un.set_feature.param_len = 16;
6810 		bf_set(lpfc_mbx_set_feature_lds_qry,
6811 		       &mbox->u.mqe.un.set_feature, LPFC_QUERY_LDS_OP);
6812 		break;
6813 	case LPFC_SET_ENABLE_CMF:
6814 		mbox->u.mqe.un.set_feature.feature = LPFC_SET_ENABLE_CMF;
6815 		mbox->u.mqe.un.set_feature.param_len = 4;
6816 		bf_set(lpfc_mbx_set_feature_cmf,
6817 		       &mbox->u.mqe.un.set_feature, 1);
6818 		break;
6819 	}
6820 	return;
6821 }
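
/*
 * Usage sketch: lpfc_set_features() only builds the mailbox command;
 * callers allocate and issue it themselves, as lpfc_read_lds_params()
 * later in this file does for LPFC_SET_LD_SIGNAL:
 *
 *	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	if (!mboxq)
 *		return -ENOMEM;
 *	lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
 *	mboxq->vport = phba->pport;
 *	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
 *	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 */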
6822 
6823 /**
6824  * lpfc_ras_stop_fwlog: Disable FW logging by the adapter
6825  * @phba: Pointer to HBA context object.
6826  *
6827  * Disable FW logging into host memory on the adapter. To
6828  * be done before reading logs from the host memory.
6829  **/
6830 void
6831 lpfc_ras_stop_fwlog(struct lpfc_hba *phba)
6832 {
6833 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6834 
6835 	spin_lock_irq(&phba->ras_fwlog_lock);
6836 	ras_fwlog->state = INACTIVE;
6837 	spin_unlock_irq(&phba->ras_fwlog_lock);
6838 
6839 	/* Disable FW logging to host memory */
6840 	writel(LPFC_CTL_PDEV_CTL_DDL_RAS,
6841 	       phba->sli4_hba.conf_regs_memmap_p + LPFC_CTL_PDEV_CTL_OFFSET);
6842 
6843 	/* Wait 10ms for firmware to stop using DMA buffer */
6844 	usleep_range(10 * 1000, 20 * 1000);
6845 }
6846 
6847 /**
6848  * lpfc_sli4_ras_dma_free - Free memory allocated for FW logging.
6849  * @phba: Pointer to HBA context object.
6850  *
6851  * This function is called to free memory allocated for RAS FW logging
6852  * support in the driver.
6853  **/
6854 void
6855 lpfc_sli4_ras_dma_free(struct lpfc_hba *phba)
6856 {
6857 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6858 	struct lpfc_dmabuf *dmabuf, *next;
6859 
6860 	if (!list_empty(&ras_fwlog->fwlog_buff_list)) {
6861 		list_for_each_entry_safe(dmabuf, next,
6862 				    &ras_fwlog->fwlog_buff_list,
6863 				    list) {
6864 			list_del(&dmabuf->list);
6865 			dma_free_coherent(&phba->pcidev->dev,
6866 					  LPFC_RAS_MAX_ENTRY_SIZE,
6867 					  dmabuf->virt, dmabuf->phys);
6868 			kfree(dmabuf);
6869 		}
6870 	}
6871 
6872 	if (ras_fwlog->lwpd.virt) {
6873 		dma_free_coherent(&phba->pcidev->dev,
6874 				  sizeof(uint32_t) * 2,
6875 				  ras_fwlog->lwpd.virt,
6876 				  ras_fwlog->lwpd.phys);
6877 		ras_fwlog->lwpd.virt = NULL;
6878 	}
6879 
6880 	spin_lock_irq(&phba->ras_fwlog_lock);
6881 	ras_fwlog->state = INACTIVE;
6882 	spin_unlock_irq(&phba->ras_fwlog_lock);
6883 }
6884 
6885 /**
6886  * lpfc_sli4_ras_dma_alloc: Allocate memory for FW logging support
6887  * @phba: Pointer to HBA context object.
6888  * @fwlog_buff_count: Count of buffers to be created.
6889  *
6890  * This routine allocates DMA memory for the Log Write Position Data (LWPD)
6891  * and for the buffers posted to the adapter for FW log updates.
6892  * The buffer count is calculated from the module param ras_fwlog_buffsize;
6893  * the size of each buffer posted to the FW is 64K.
6894  **/
6895 
6896 static int
6897 lpfc_sli4_ras_dma_alloc(struct lpfc_hba *phba,
6898 			uint32_t fwlog_buff_count)
6899 {
6900 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6901 	struct lpfc_dmabuf *dmabuf;
6902 	int rc = 0, i = 0;
6903 
6904 	/* Initialize List */
6905 	INIT_LIST_HEAD(&ras_fwlog->fwlog_buff_list);
6906 
6907 	/* Allocate memory for the LWPD */
6908 	ras_fwlog->lwpd.virt = dma_alloc_coherent(&phba->pcidev->dev,
6909 					    sizeof(uint32_t) * 2,
6910 					    &ras_fwlog->lwpd.phys,
6911 					    GFP_KERNEL);
6912 	if (!ras_fwlog->lwpd.virt) {
6913 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6914 				"6185 LWPD Memory Alloc Failed\n");
6915 
6916 		return -ENOMEM;
6917 	}
6918 
6919 	ras_fwlog->fw_buffcount = fwlog_buff_count;
6920 	for (i = 0; i < ras_fwlog->fw_buffcount; i++) {
6921 		dmabuf = kzalloc_obj(struct lpfc_dmabuf);
6922 		if (!dmabuf) {
6923 			rc = -ENOMEM;
6924 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6925 					"6186 Memory Alloc failed FW logging");
6926 			goto free_mem;
6927 		}
6928 
6929 		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
6930 						  LPFC_RAS_MAX_ENTRY_SIZE,
6931 						  &dmabuf->phys, GFP_KERNEL);
6932 		if (!dmabuf->virt) {
6933 			kfree(dmabuf);
6934 			rc = -ENOMEM;
6935 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
6936 					"6187 DMA Alloc Failed FW logging");
6937 			goto free_mem;
6938 		}
6939 		dmabuf->buffer_tag = i;
6940 		list_add_tail(&dmabuf->list, &ras_fwlog->fwlog_buff_list);
6941 	}
6942 
6943 free_mem:
6944 	if (rc)
6945 		lpfc_sli4_ras_dma_free(phba);
6946 
6947 	return rc;
6948 }
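
/*
 * Buffer-count arithmetic (sketch; assumes the customary 256 KB value
 * for LPFC_RAS_MIN_BUFF_POST_SIZE): lpfc_sli4_ras_fwlog_init() below
 * derives fwlog_buff_count as
 *
 *	fwlog_buffsize    = LPFC_RAS_MIN_BUFF_POST_SIZE * cfg;
 *	fwlog_entry_count = fwlog_buffsize / LPFC_RAS_MAX_ENTRY_SIZE;
 *
 * so cfg_ras_fwlog_buffsize = 4 would yield 1 MB of log space split
 * into sixteen 64 KB DMA buffers.
 */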
6949 
6950 /**
6951  * lpfc_sli4_ras_mbox_cmpl: Completion handler for RAS MBX command
6952  * @phba: pointer to lpfc hba data structure.
6953  * @pmb: pointer to the driver internal queue element for mailbox command.
6954  *
6955  * Completion handler for driver's RAS MBX command to the device.
6956  **/
6957 static void
6958 lpfc_sli4_ras_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
6959 {
6960 	MAILBOX_t *mb;
6961 	union lpfc_sli4_cfg_shdr *shdr;
6962 	uint32_t shdr_status, shdr_add_status;
6963 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
6964 
6965 	mb = &pmb->u.mb;
6966 
6967 	shdr = (union lpfc_sli4_cfg_shdr *)
6968 		&pmb->u.mqe.un.ras_fwlog.header.cfg_shdr;
6969 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
6970 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
6971 
6972 	if (mb->mbxStatus != MBX_SUCCESS || shdr_status) {
6973 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
6974 				"6188 FW LOG mailbox "
6975 				"completed with status x%x add_status x%x,"
6976 				" mbx status x%x\n",
6977 				shdr_status, shdr_add_status, mb->mbxStatus);
6978 
6979 		ras_fwlog->ras_hwsupport = false;
6980 		goto disable_ras;
6981 	}
6982 
6983 	spin_lock_irq(&phba->ras_fwlog_lock);
6984 	ras_fwlog->state = ACTIVE;
6985 	spin_unlock_irq(&phba->ras_fwlog_lock);
6986 	mempool_free(pmb, phba->mbox_mem_pool);
6987 
6988 	return;
6989 
6990 disable_ras:
6991 	/* Free RAS DMA memory */
6992 	lpfc_sli4_ras_dma_free(phba);
6993 	mempool_free(pmb, phba->mbox_mem_pool);
6994 }
6995 
6996 /**
6997  * lpfc_sli4_ras_fwlog_init: Initialize memory and post RAS MBX command
6998  * @phba: pointer to lpfc hba data structure.
6999  * @fwlog_level: Logging verbosity level.
7000  * @fwlog_enable: Enable/Disable logging.
7001  *
7002  * Initialize memory and post mailbox command to enable FW logging in host
7003  * memory.
7004  **/
7005 int
7006 lpfc_sli4_ras_fwlog_init(struct lpfc_hba *phba,
7007 			 uint32_t fwlog_level,
7008 			 uint32_t fwlog_enable)
7009 {
7010 	struct lpfc_ras_fwlog *ras_fwlog = &phba->ras_fwlog;
7011 	struct lpfc_mbx_set_ras_fwlog *mbx_fwlog = NULL;
7012 	struct lpfc_dmabuf *dmabuf;
7013 	LPFC_MBOXQ_t *mbox;
7014 	uint32_t len = 0, fwlog_buffsize, fwlog_entry_count;
7015 	int rc = 0;
7016 
7017 	spin_lock_irq(&phba->ras_fwlog_lock);
7018 	ras_fwlog->state = INACTIVE;
7019 	spin_unlock_irq(&phba->ras_fwlog_lock);
7020 
7021 	fwlog_buffsize = (LPFC_RAS_MIN_BUFF_POST_SIZE *
7022 			  phba->cfg_ras_fwlog_buffsize);
7023 	fwlog_entry_count = (fwlog_buffsize/LPFC_RAS_MAX_ENTRY_SIZE);
7024 
7025 	/*
7026 	 * If re-enabling FW logging support, use the earlier allocated
7027 	 * DMA buffers while posting the MBX command.
7028 	 */
7029 	if (!ras_fwlog->lwpd.virt) {
7030 		rc = lpfc_sli4_ras_dma_alloc(phba, fwlog_entry_count);
7031 		if (rc) {
7032 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
7033 					"6189 FW Log Memory Allocation Failed");
7034 			return rc;
7035 		}
7036 	}
7037 
7038 	/* Setup Mailbox command */
7039 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7040 	if (!mbox) {
7041 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7042 				"6190 RAS MBX Alloc Failed");
7043 		rc = -ENOMEM;
7044 		goto mem_free;
7045 	}
7046 
7047 	ras_fwlog->fw_loglevel = fwlog_level;
7048 	len = (sizeof(struct lpfc_mbx_set_ras_fwlog) -
7049 		sizeof(struct lpfc_sli4_cfg_mhdr));
7050 
7051 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_LOWLEVEL,
7052 			 LPFC_MBOX_OPCODE_SET_DIAG_LOG_OPTION,
7053 			 len, LPFC_SLI4_MBX_EMBED);
7054 
7055 	mbx_fwlog = (struct lpfc_mbx_set_ras_fwlog *)&mbox->u.mqe.un.ras_fwlog;
7056 	bf_set(lpfc_fwlog_enable, &mbx_fwlog->u.request,
7057 	       fwlog_enable);
7058 	bf_set(lpfc_fwlog_loglvl, &mbx_fwlog->u.request,
7059 	       ras_fwlog->fw_loglevel);
7060 	bf_set(lpfc_fwlog_buffcnt, &mbx_fwlog->u.request,
7061 	       ras_fwlog->fw_buffcount);
7062 	bf_set(lpfc_fwlog_buffsz, &mbx_fwlog->u.request,
7063 	       LPFC_RAS_MAX_ENTRY_SIZE/SLI4_PAGE_SIZE);
7064 
7065 	/* Update DMA buffer address */
7066 	list_for_each_entry(dmabuf, &ras_fwlog->fwlog_buff_list, list) {
7067 		memset(dmabuf->virt, 0, LPFC_RAS_MAX_ENTRY_SIZE);
7068 
7069 		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_lo =
7070 			putPaddrLow(dmabuf->phys);
7071 
7072 		mbx_fwlog->u.request.buff_fwlog[dmabuf->buffer_tag].addr_hi =
7073 			putPaddrHigh(dmabuf->phys);
7074 	}
7075 
7076 	/* Update LWPD address */
7077 	mbx_fwlog->u.request.lwpd.addr_lo = putPaddrLow(ras_fwlog->lwpd.phys);
7078 	mbx_fwlog->u.request.lwpd.addr_hi = putPaddrHigh(ras_fwlog->lwpd.phys);
7079 
7080 	spin_lock_irq(&phba->ras_fwlog_lock);
7081 	ras_fwlog->state = REG_INPROGRESS;
7082 	spin_unlock_irq(&phba->ras_fwlog_lock);
7083 	mbox->vport = phba->pport;
7084 	mbox->mbox_cmpl = lpfc_sli4_ras_mbox_cmpl;
7085 
7086 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
7087 
7088 	if (rc == MBX_NOT_FINISHED) {
7089 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7090 				"6191 FW-Log Mailbox failed. "
7091 				"status %d mbxStatus : x%x", rc,
7092 				bf_get(lpfc_mqe_status, &mbox->u.mqe));
7093 		mempool_free(mbox, phba->mbox_mem_pool);
7094 		rc = -EIO;
7095 		goto mem_free;
7096 	} else
7097 		rc = 0;
7098 mem_free:
7099 	if (rc)
7100 		lpfc_sli4_ras_dma_free(phba);
7101 
7102 	return rc;
7103 }
7104 
7105 /**
7106  * lpfc_sli4_ras_setup - Check if RAS supported on the adapter
7107  * @phba: Pointer to HBA context object.
7108  *
7109  * Check if RAS is supported on the adapter and initialize it.
7110  **/
7111 void
7112 lpfc_sli4_ras_setup(struct lpfc_hba *phba)
7113 {
7114 	/* Check RAS FW Log needs to be enabled or not */
7115 	if (lpfc_check_fwlog_support(phba))
7116 		return;
7117 
7118 	lpfc_sli4_ras_fwlog_init(phba, phba->cfg_ras_fwlog_level,
7119 				 LPFC_RAS_ENABLE_LOGGING);
7120 }
7121 
7122 /**
7123  * lpfc_sli4_alloc_resource_identifiers - Allocate all SLI4 resource extents.
7124  * @phba: Pointer to HBA context object.
7125  *
7126  * This function allocates all SLI4 resource identifiers.
7127  **/
7128 int
7129 lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
7130 {
7131 	int i, rc, error = 0;
7132 	uint16_t count, base;
7133 	unsigned long longs;
7134 
7135 	if (!phba->sli4_hba.rpi_hdrs_in_use)
7136 		phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
7137 	if (phba->sli4_hba.extents_in_use) {
7138 		/*
7139 		 * The port supports resource extents. The XRI, VPI, VFI, RPI
7140 		 * resource extent count must be read and allocated before
7141 		 * provisioning the resource id arrays.
7142 		 */
7143 		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7144 		    LPFC_IDX_RSRC_RDY) {
7145 			/*
7146 			 * Extent-based resources are set - the driver could
7147 			 * be in a port reset. Figure out if any corrective
7148 			 * actions need to be taken.
7149 			 */
7150 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7151 						 LPFC_RSC_TYPE_FCOE_VFI);
7152 			if (rc != 0)
7153 				error++;
7154 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7155 						 LPFC_RSC_TYPE_FCOE_VPI);
7156 			if (rc != 0)
7157 				error++;
7158 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7159 						 LPFC_RSC_TYPE_FCOE_XRI);
7160 			if (rc != 0)
7161 				error++;
7162 			rc = lpfc_sli4_chk_avail_extnt_rsrc(phba,
7163 						 LPFC_RSC_TYPE_FCOE_RPI);
7164 			if (rc != 0)
7165 				error++;
7166 
7167 			/*
7168 			 * It's possible that the number of resources
7169 			 * provided to this port instance changed between
7170 			 * resets.  Detect this condition and reallocate
7171 			 * resources.  Otherwise, there is no action.
7172 			 */
7173 			if (error) {
7174 				lpfc_printf_log(phba, KERN_INFO,
7175 						LOG_MBOX | LOG_INIT,
7176 						"2931 Detected extent resource "
7177 						"change.  Reallocating all "
7178 						"extents.\n");
7179 				rc = lpfc_sli4_dealloc_extent(phba,
7180 						 LPFC_RSC_TYPE_FCOE_VFI);
7181 				rc = lpfc_sli4_dealloc_extent(phba,
7182 						 LPFC_RSC_TYPE_FCOE_VPI);
7183 				rc = lpfc_sli4_dealloc_extent(phba,
7184 						 LPFC_RSC_TYPE_FCOE_XRI);
7185 				rc = lpfc_sli4_dealloc_extent(phba,
7186 						 LPFC_RSC_TYPE_FCOE_RPI);
7187 			} else
7188 				return 0;
7189 		}
7190 
7191 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7192 		if (unlikely(rc))
7193 			goto err_exit;
7194 
7195 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7196 		if (unlikely(rc))
7197 			goto err_exit;
7198 
7199 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7200 		if (unlikely(rc))
7201 			goto err_exit;
7202 
7203 		rc = lpfc_sli4_alloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7204 		if (unlikely(rc))
7205 			goto err_exit;
7206 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7207 		       LPFC_IDX_RSRC_RDY);
7208 		return rc;
7209 	} else {
7210 		/*
7211 		 * The port does not support resource extents.  The XRI, VPI,
7212 		 * VFI, RPI resource ids were determined from READ_CONFIG.
7213 		 * Just allocate the bitmasks and provision the resource id
7214 		 * arrays.  If a port reset is active, the resources don't
7215 		 * need any action - just exit.
7216 		 */
7217 		if (bf_get(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags) ==
7218 		    LPFC_IDX_RSRC_RDY) {
7219 			lpfc_sli4_dealloc_resource_identifiers(phba);
7220 			lpfc_sli4_remove_rpis(phba);
7221 		}
7222 		/* RPIs. */
7223 		count = phba->sli4_hba.max_cfg_param.max_rpi;
7224 		if (count <= 0) {
7225 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7226 					"3279 Invalid provisioning of "
7227 					"rpi:%d\n", count);
7228 			rc = -EINVAL;
7229 			goto err_exit;
7230 		}
7231 		base = phba->sli4_hba.max_cfg_param.rpi_base;
7232 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7233 		phba->sli4_hba.rpi_bmask = kcalloc(longs,
7234 						   sizeof(unsigned long),
7235 						   GFP_KERNEL);
7236 		if (unlikely(!phba->sli4_hba.rpi_bmask)) {
7237 			rc = -ENOMEM;
7238 			goto err_exit;
7239 		}
7240 		phba->sli4_hba.rpi_ids = kcalloc(count, sizeof(uint16_t),
7241 						 GFP_KERNEL);
7242 		if (unlikely(!phba->sli4_hba.rpi_ids)) {
7243 			rc = -ENOMEM;
7244 			goto free_rpi_bmask;
7245 		}
7246 
7247 		for (i = 0; i < count; i++)
7248 			phba->sli4_hba.rpi_ids[i] = base + i;
7249 
7250 		/* VPIs. */
7251 		count = phba->sli4_hba.max_cfg_param.max_vpi;
7252 		if (count <= 0) {
7253 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7254 					"3280 Invalid provisioning of "
7255 					"vpi:%d\n", count);
7256 			rc = -EINVAL;
7257 			goto free_rpi_ids;
7258 		}
7259 		base = phba->sli4_hba.max_cfg_param.vpi_base;
7260 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7261 		phba->vpi_bmask = kcalloc(longs, sizeof(unsigned long),
7262 					  GFP_KERNEL);
7263 		if (unlikely(!phba->vpi_bmask)) {
7264 			rc = -ENOMEM;
7265 			goto free_rpi_ids;
7266 		}
7267 		phba->vpi_ids = kcalloc(count, sizeof(uint16_t),
7268 					GFP_KERNEL);
7269 		if (unlikely(!phba->vpi_ids)) {
7270 			rc = -ENOMEM;
7271 			goto free_vpi_bmask;
7272 		}
7273 
7274 		for (i = 0; i < count; i++)
7275 			phba->vpi_ids[i] = base + i;
7276 
7277 		/* XRIs. */
7278 		count = phba->sli4_hba.max_cfg_param.max_xri;
7279 		if (count <= 0) {
7280 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7281 					"3281 Invalid provisioning of "
7282 					"xri:%d\n", count);
7283 			rc = -EINVAL;
7284 			goto free_vpi_ids;
7285 		}
7286 		base = phba->sli4_hba.max_cfg_param.xri_base;
7287 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7288 		phba->sli4_hba.xri_bmask = kcalloc(longs,
7289 						   sizeof(unsigned long),
7290 						   GFP_KERNEL);
7291 		if (unlikely(!phba->sli4_hba.xri_bmask)) {
7292 			rc = -ENOMEM;
7293 			goto free_vpi_ids;
7294 		}
7295 		phba->sli4_hba.max_cfg_param.xri_used = 0;
7296 		phba->sli4_hba.xri_ids = kcalloc(count, sizeof(uint16_t),
7297 						 GFP_KERNEL);
7298 		if (unlikely(!phba->sli4_hba.xri_ids)) {
7299 			rc = -ENOMEM;
7300 			goto free_xri_bmask;
7301 		}
7302 
7303 		for (i = 0; i < count; i++)
7304 			phba->sli4_hba.xri_ids[i] = base + i;
7305 
7306 		/* VFIs. */
7307 		count = phba->sli4_hba.max_cfg_param.max_vfi;
7308 		if (count <= 0) {
7309 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7310 					"3282 Invalid provisioning of "
7311 					"vfi:%d\n", count);
7312 			rc = -EINVAL;
7313 			goto free_xri_ids;
7314 		}
7315 		base = phba->sli4_hba.max_cfg_param.vfi_base;
7316 		longs = (count + BITS_PER_LONG - 1) / BITS_PER_LONG;
7317 		phba->sli4_hba.vfi_bmask = kcalloc(longs,
7318 						   sizeof(unsigned long),
7319 						   GFP_KERNEL);
7320 		if (unlikely(!phba->sli4_hba.vfi_bmask)) {
7321 			rc = -ENOMEM;
7322 			goto free_xri_ids;
7323 		}
7324 		phba->sli4_hba.vfi_ids = kcalloc(count, sizeof(uint16_t),
7325 						 GFP_KERNEL);
7326 		if (unlikely(!phba->sli4_hba.vfi_ids)) {
7327 			rc = -ENOMEM;
7328 			goto free_vfi_bmask;
7329 		}
7330 
7331 		for (i = 0; i < count; i++)
7332 			phba->sli4_hba.vfi_ids[i] = base + i;
7333 
7334 		/*
7335 		 * Mark all resources ready.  An HBA reset doesn't need
7336 		 * to reset the initialization.
7337 		 */
7338 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags,
7339 		       LPFC_IDX_RSRC_RDY);
7340 		return 0;
7341 	}
7342 
7343  free_vfi_bmask:
7344 	kfree(phba->sli4_hba.vfi_bmask);
7345 	phba->sli4_hba.vfi_bmask = NULL;
7346  free_xri_ids:
7347 	kfree(phba->sli4_hba.xri_ids);
7348 	phba->sli4_hba.xri_ids = NULL;
7349  free_xri_bmask:
7350 	kfree(phba->sli4_hba.xri_bmask);
7351 	phba->sli4_hba.xri_bmask = NULL;
7352  free_vpi_ids:
7353 	kfree(phba->vpi_ids);
7354 	phba->vpi_ids = NULL;
7355  free_vpi_bmask:
7356 	kfree(phba->vpi_bmask);
7357 	phba->vpi_bmask = NULL;
7358  free_rpi_ids:
7359 	kfree(phba->sli4_hba.rpi_ids);
7360 	phba->sli4_hba.rpi_ids = NULL;
7361  free_rpi_bmask:
7362 	kfree(phba->sli4_hba.rpi_bmask);
7363 	phba->sli4_hba.rpi_bmask = NULL;
7364  err_exit:
7365 	return rc;
7366 }
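
/*
 * Consumption sketch (illustrative; the rpi allocator elsewhere in
 * this file follows this shape, with max_rpi standing in for the
 * provisioned count): a free slot is found in the bitmask, marked
 * in-use, and the port-visible id is read from the parallel array.
 *
 *	idx = find_first_zero_bit(phba->sli4_hba.rpi_bmask, max_rpi);
 *	if (idx < max_rpi) {
 *		set_bit(idx, phba->sli4_hba.rpi_bmask);
 *		rpi = phba->sli4_hba.rpi_ids[idx];
 *	}
 */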
7367 
7368 /**
7369  * lpfc_sli4_dealloc_resource_identifiers - Deallocate all SLI4 resource extents.
7370  * @phba: Pointer to HBA context object.
7371  *
7372  * This function deallocates all SLI4 resource identifiers that were
7373  * allocated for this port.
7374  **/
7375 int
7376 lpfc_sli4_dealloc_resource_identifiers(struct lpfc_hba *phba)
7377 {
7378 	if (phba->sli4_hba.extents_in_use) {
7379 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VPI);
7380 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_RPI);
7381 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_XRI);
7382 		lpfc_sli4_dealloc_extent(phba, LPFC_RSC_TYPE_FCOE_VFI);
7383 	} else {
7384 		kfree(phba->vpi_bmask);
7385 		phba->sli4_hba.max_cfg_param.vpi_used = 0;
7386 		kfree(phba->vpi_ids);
7387 		bf_set(lpfc_vpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7388 		kfree(phba->sli4_hba.xri_bmask);
7389 		kfree(phba->sli4_hba.xri_ids);
7390 		kfree(phba->sli4_hba.vfi_bmask);
7391 		kfree(phba->sli4_hba.vfi_ids);
7392 		bf_set(lpfc_vfi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7393 		bf_set(lpfc_idx_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
7394 	}
7395 
7396 	return 0;
7397 }
7398 
7399 /**
7400  * lpfc_sli4_get_allocated_extnts - Get the port's allocated extents.
7401  * @phba: Pointer to HBA context object.
7402  * @type: The resource extent type.
7403  * @extnt_cnt: buffer to hold port extent count response
7404  * @extnt_size: buffer to hold port extent size response.
7405  *
7406  * This function calls the port to read the host allocated extents
7407  * for a particular type.
7408  **/
7409 int
7410 lpfc_sli4_get_allocated_extnts(struct lpfc_hba *phba, uint16_t type,
7411 			       uint16_t *extnt_cnt, uint16_t *extnt_size)
7412 {
7413 	bool emb;
7414 	int rc = 0;
7415 	uint16_t curr_blks = 0;
7416 	uint32_t req_len, emb_len;
7417 	uint32_t alloc_len, mbox_tmo;
7418 	struct list_head *blk_list_head;
7419 	struct lpfc_rsrc_blks *rsrc_blk;
7420 	LPFC_MBOXQ_t *mbox;
7421 	void *virtaddr = NULL;
7422 	struct lpfc_mbx_nembed_rsrc_extent *n_rsrc;
7423 	struct lpfc_mbx_alloc_rsrc_extents *rsrc_ext;
7424 	union  lpfc_sli4_cfg_shdr *shdr;
7425 
7426 	switch (type) {
7427 	case LPFC_RSC_TYPE_FCOE_VPI:
7428 		blk_list_head = &phba->lpfc_vpi_blk_list;
7429 		break;
7430 	case LPFC_RSC_TYPE_FCOE_XRI:
7431 		blk_list_head = &phba->sli4_hba.lpfc_xri_blk_list;
7432 		break;
7433 	case LPFC_RSC_TYPE_FCOE_VFI:
7434 		blk_list_head = &phba->sli4_hba.lpfc_vfi_blk_list;
7435 		break;
7436 	case LPFC_RSC_TYPE_FCOE_RPI:
7437 		blk_list_head = &phba->sli4_hba.lpfc_rpi_blk_list;
7438 		break;
7439 	default:
7440 		return -EIO;
7441 	}
7442 
7443 	/* Count the number of extents currently allocated for this type. */
7444 	list_for_each_entry(rsrc_blk, blk_list_head, list) {
7445 		if (curr_blks == 0) {
7446 			/*
7447 			 * The GET_ALLOCATED mailbox does not return the size,
7448 			 * just the count.  The size should be just the size
7449 			 * stored in the current allocated block and all sizes
7450 			 * for an extent type are the same so set the return
7451 			 * value now.
7452 			 */
7453 			*extnt_size = rsrc_blk->rsrc_size;
7454 		}
7455 		curr_blks++;
7456 	}
7457 
7458 	/*
7459 	 * Calculate the size of an embedded mailbox.  The uint32_t
7460 	 * accounts for the extents-specific word.
7461 	 */
7462 	emb_len = sizeof(MAILBOX_t) - sizeof(struct mbox_header) -
7463 		sizeof(uint32_t);
7464 
7465 	/*
7466 	 * Presume the allocation and response will fit into an embedded
7467 	 * mailbox.  If not true, reconfigure to a non-embedded mailbox.
7468 	 */
7469 	emb = LPFC_SLI4_MBX_EMBED;
7470 	req_len = curr_blks * sizeof(uint16_t);
7471 	if (req_len > emb_len) {
7472 		req_len = curr_blks * sizeof(uint16_t) +
7473 			sizeof(union lpfc_sli4_cfg_shdr) +
7474 			sizeof(uint32_t);
7475 		emb = LPFC_SLI4_MBX_NEMBED;
7476 	}
7477 
7478 	mbox = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7479 	if (!mbox)
7480 		return -ENOMEM;
7481 	memset(mbox, 0, sizeof(LPFC_MBOXQ_t));
7482 
7483 	alloc_len = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7484 				     LPFC_MBOX_OPCODE_GET_ALLOC_RSRC_EXTENT,
7485 				     req_len, emb);
7486 	if (alloc_len < req_len) {
7487 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7488 			"2983 Allocated DMA memory size (x%x) is "
7489 			"less than the requested DMA memory "
7490 			"size (x%x)\n", alloc_len, req_len);
7491 		rc = -ENOMEM;
7492 		goto err_exit;
7493 	}
7494 	rc = lpfc_sli4_mbox_rsrc_extent(phba, mbox, curr_blks, type, emb);
7495 	if (unlikely(rc)) {
7496 		rc = -EIO;
7497 		goto err_exit;
7498 	}
7499 
7500 	if (!phba->sli4_hba.intr_enable)
7501 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
7502 	else {
7503 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
7504 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
7505 	}
7506 
7507 	if (unlikely(rc)) {
7508 		rc = -EIO;
7509 		goto err_exit;
7510 	}
7511 
7512 	/*
7513 	 * Figure out where the response is located.  Then get local pointers
7514 	 * to the response data.  The port does not guarantee that it will
7515 	 * honor the full requested extent count, so update the local variable
7516 	 * with the allocated count from the port.
7517 	 */
7518 	if (emb == LPFC_SLI4_MBX_EMBED) {
7519 		rsrc_ext = &mbox->u.mqe.un.alloc_rsrc_extents;
7520 		shdr = &rsrc_ext->header.cfg_shdr;
7521 		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, &rsrc_ext->u.rsp);
7522 	} else {
7523 		virtaddr = mbox->sge_array->addr[0];
7524 		n_rsrc = (struct lpfc_mbx_nembed_rsrc_extent *) virtaddr;
7525 		shdr = &n_rsrc->cfg_shdr;
7526 		*extnt_cnt = bf_get(lpfc_mbx_rsrc_cnt, n_rsrc);
7527 	}
7528 
7529 	if (bf_get(lpfc_mbox_hdr_status, &shdr->response)) {
7530 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7531 			"2984 Failed to read allocated resources "
7532 			"for type %d - Status 0x%x Add'l Status 0x%x.\n",
7533 			type,
7534 			bf_get(lpfc_mbox_hdr_status, &shdr->response),
7535 			bf_get(lpfc_mbox_hdr_add_status, &shdr->response));
7536 		rc = -EIO;
7537 		goto err_exit;
7538 	}
7539  err_exit:
7540 	lpfc_sli4_mbox_cmd_free(phba, mbox);
7541 	return rc;
7542 }
7543 
7544 /**
7545  * lpfc_sli4_repost_sgl_list - Repost the buffers sgl pages as block
7546  * @phba: pointer to lpfc hba data structure.
7547  * @sgl_list: linked link of sgl buffers to post
7548  * @cnt: number of linked list buffers
7549  *
7550  * This routine walks the list of buffers that have been allocated and
7551  * reposts them to the port by using SGL block post. This is needed after a
7552  * pci_function_reset/warm_start or start. It attempts to construct blocks
7553  * of buffer sgls which contain contiguous xris and uses the non-embedded
7554  * SGL block post mailbox commands to post them to the port. For a single
7555  * buffer sgl with a non-contiguous xri, if any, it uses the embedded SGL
7556  * post mailbox command for posting.
7557  *
7558  * Returns: 0 = success, non-zero failure.
7559  **/
7560 static int
7561 lpfc_sli4_repost_sgl_list(struct lpfc_hba *phba,
7562 			  struct list_head *sgl_list, int cnt)
7563 {
7564 	struct lpfc_sglq *sglq_entry = NULL;
7565 	struct lpfc_sglq *sglq_entry_next = NULL;
7566 	struct lpfc_sglq *sglq_entry_first = NULL;
7567 	int status = 0, total_cnt;
7568 	int post_cnt = 0, num_posted = 0, block_cnt = 0;
7569 	int last_xritag = NO_XRI;
7570 	LIST_HEAD(prep_sgl_list);
7571 	LIST_HEAD(blck_sgl_list);
7572 	LIST_HEAD(allc_sgl_list);
7573 	LIST_HEAD(post_sgl_list);
7574 	LIST_HEAD(free_sgl_list);
7575 
7576 	spin_lock_irq(&phba->hbalock);
7577 	spin_lock(&phba->sli4_hba.sgl_list_lock);
7578 	list_splice_init(sgl_list, &allc_sgl_list);
7579 	spin_unlock(&phba->sli4_hba.sgl_list_lock);
7580 	spin_unlock_irq(&phba->hbalock);
7581 
7582 	total_cnt = cnt;
7583 	list_for_each_entry_safe(sglq_entry, sglq_entry_next,
7584 				 &allc_sgl_list, list) {
7585 		list_del_init(&sglq_entry->list);
7586 		block_cnt++;
7587 		if ((last_xritag != NO_XRI) &&
7588 		    (sglq_entry->sli4_xritag != last_xritag + 1)) {
7589 			/* a hole in xri block, form a sgl posting block */
7590 			list_splice_init(&prep_sgl_list, &blck_sgl_list);
7591 			post_cnt = block_cnt - 1;
7592 			/* prepare list for next posting block */
7593 			list_add_tail(&sglq_entry->list, &prep_sgl_list);
7594 			block_cnt = 1;
7595 		} else {
7596 			/* prepare list for next posting block */
7597 			list_add_tail(&sglq_entry->list, &prep_sgl_list);
7598 			/* enough sgls for non-embed sgl mbox command */
7599 			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
7600 				list_splice_init(&prep_sgl_list,
7601 						 &blck_sgl_list);
7602 				post_cnt = block_cnt;
7603 				block_cnt = 0;
7604 			}
7605 		}
7606 		num_posted++;
7607 
7608 		/* keep track of last sgl's xritag */
7609 		last_xritag = sglq_entry->sli4_xritag;
7610 
7611 		/* end of repost sgl list condition for buffers */
7612 		if (num_posted == total_cnt) {
7613 			if (post_cnt == 0) {
7614 				list_splice_init(&prep_sgl_list,
7615 						 &blck_sgl_list);
7616 				post_cnt = block_cnt;
7617 			} else if (block_cnt == 1) {
7618 				status = lpfc_sli4_post_sgl(phba,
7619 						sglq_entry->phys, 0,
7620 						sglq_entry->sli4_xritag);
7621 				if (!status) {
7622 					/* successful, put sgl to posted list */
7623 					list_add_tail(&sglq_entry->list,
7624 						      &post_sgl_list);
7625 				} else {
7626 					/* Failure, put sgl to free list */
7627 					lpfc_printf_log(phba, KERN_WARNING,
7628 						LOG_SLI,
7629 						"3159 Failed to post "
7630 						"sgl, xritag:x%x\n",
7631 						sglq_entry->sli4_xritag);
7632 					list_add_tail(&sglq_entry->list,
7633 						      &free_sgl_list);
7634 					total_cnt--;
7635 				}
7636 			}
7637 		}
7638 
7639 		/* continue until a nembed page worth of sgls */
7640 		if (post_cnt == 0)
7641 			continue;
7642 
7643 		/* post the buffer list sgls as a block */
7644 		status = lpfc_sli4_post_sgl_list(phba, &blck_sgl_list,
7645 						 post_cnt);
7646 
7647 		if (!status) {
7648 			/* success, put sgl list to posted sgl list */
7649 			list_splice_init(&blck_sgl_list, &post_sgl_list);
7650 		} else {
7651 			/* Failure, put sgl list to free sgl list */
7652 			sglq_entry_first = list_first_entry(&blck_sgl_list,
7653 							    struct lpfc_sglq,
7654 							    list);
7655 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
7656 					"3160 Failed to post sgl-list, "
7657 					"xritag:x%x-x%x\n",
7658 					sglq_entry_first->sli4_xritag,
7659 					(sglq_entry_first->sli4_xritag +
7660 					 post_cnt - 1));
7661 			list_splice_init(&blck_sgl_list, &free_sgl_list);
7662 			total_cnt -= post_cnt;
7663 		}
7664 
7665 		/* don't reset xritag due to hole in xri block */
7666 		if (block_cnt == 0)
7667 			last_xritag = NO_XRI;
7668 
7669 		/* reset sgl post count for next round of posting */
7670 		post_cnt = 0;
7671 	}
7672 
7673 	/* free the sgls failed to post */
7674 	lpfc_free_sgl_list(phba, &free_sgl_list);
7675 
7676 	/* push sgls posted to the available list */
7677 	if (!list_empty(&post_sgl_list)) {
7678 		spin_lock_irq(&phba->hbalock);
7679 		spin_lock(&phba->sli4_hba.sgl_list_lock);
7680 		list_splice_init(&post_sgl_list, sgl_list);
7681 		spin_unlock(&phba->sli4_hba.sgl_list_lock);
7682 		spin_unlock_irq(&phba->hbalock);
7683 	} else {
7684 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7685 				"3161 Failure to post sgl to port,status %x "
7686 				"blkcnt %d totalcnt %d postcnt %d\n",
7687 				status, block_cnt, total_cnt, post_cnt);
7688 		return -EIO;
7689 	}
7690 
7691 	/* return the number of XRIs actually posted */
7692 	return total_cnt;
7693 }
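
/*
 * Block-forming example for the repost loop above (xritags are
 * illustrative): given sgls with xritags 100, 101, 102 and 200, the
 * hole at 103 closes the first block, so {100, 101, 102} goes out in
 * one non-embedded SGL block post and 200 starts a new block.  A block
 * also closes once it reaches LPFC_NEMBED_MBOX_SGL_CNT entries, and a
 * trailing singleton is posted with the embedded lpfc_sli4_post_sgl().
 */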
7694 
7695 /**
7696  * lpfc_sli4_repost_io_sgl_list - Repost all the allocated nvme buffer sgls
7697  * @phba: pointer to lpfc hba data structure.
7698  *
7699  * This routine walks the list of nvme buffers that have been allocated and
7700  * reposts them to the port by using SGL block post. This is needed after a
7701  * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
7702  * is responsible for moving all nvme buffers on the lpfc_abts_nvme_sgl_list
7703  * to the lpfc_io_buf_list. If the repost fails, reject all nvme buffers.
7704  *
7705  * Returns: 0 = success, non-zero failure.
7706  **/
7707 static int
7708 lpfc_sli4_repost_io_sgl_list(struct lpfc_hba *phba)
7709 {
7710 	LIST_HEAD(post_nblist);
7711 	int num_posted, rc = 0;
7712 
7713 	/* move all NVME buffers that need reposting onto a local list */
7714 	lpfc_io_buf_flush(phba, &post_nblist);
7715 
7716 	/* post the list of nvme buffer sgls to port if available */
7717 	if (!list_empty(&post_nblist)) {
7718 		num_posted = lpfc_sli4_post_io_sgl_list(
7719 			phba, &post_nblist, phba->sli4_hba.io_xri_cnt);
7720 		/* failed to post any nvme buffer, return error */
7721 		if (num_posted == 0)
7722 			rc = -EIO;
7723 	}
7724 	return rc;
7725 }
7726 
7727 static void
7728 lpfc_set_host_data(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
7729 {
7730 	uint32_t len;
7731 
7732 	len = sizeof(struct lpfc_mbx_set_host_data) -
7733 		sizeof(struct lpfc_sli4_cfg_mhdr);
7734 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
7735 			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
7736 			 LPFC_SLI4_MBX_EMBED);
7737 
7738 	mbox->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_OS_DRIVER_VERSION;
7739 	mbox->u.mqe.un.set_host_data.param_len =
7740 					LPFC_HOST_OS_DRIVER_VERSION_SIZE;
7741 	snprintf(mbox->u.mqe.un.set_host_data.un.data,
7742 		 LPFC_HOST_OS_DRIVER_VERSION_SIZE,
7743 		 "Linux %s v"LPFC_DRIVER_VERSION,
7744 		 test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? "FCoE" : "FC");
7745 }
7746 
7747 int
7748 lpfc_post_rq_buffer(struct lpfc_hba *phba, struct lpfc_queue *hrq,
7749 		    struct lpfc_queue *drq, int count, int idx)
7750 {
7751 	int rc, i;
7752 	struct lpfc_rqe hrqe;
7753 	struct lpfc_rqe drqe;
7754 	struct lpfc_rqb *rqbp;
7755 	unsigned long flags;
7756 	struct rqb_dmabuf *rqb_buffer;
7757 	LIST_HEAD(rqb_buf_list);
7758 
7759 	rqbp = hrq->rqbp;
7760 	for (i = 0; i < count; i++) {
7761 		spin_lock_irqsave(&phba->hbalock, flags);
7762 		/* If the RQ is already full, don't bother */
7763 		if (rqbp->buffer_count + i >= rqbp->entry_count - 1) {
7764 			spin_unlock_irqrestore(&phba->hbalock, flags);
7765 			break;
7766 		}
7767 		spin_unlock_irqrestore(&phba->hbalock, flags);
7768 
7769 		rqb_buffer = rqbp->rqb_alloc_buffer(phba);
7770 		if (!rqb_buffer)
7771 			break;
7772 		rqb_buffer->hrq = hrq;
7773 		rqb_buffer->drq = drq;
7774 		rqb_buffer->idx = idx;
7775 		list_add_tail(&rqb_buffer->hbuf.list, &rqb_buf_list);
7776 	}
7777 
7778 	spin_lock_irqsave(&phba->hbalock, flags);
7779 	while (!list_empty(&rqb_buf_list)) {
7780 		list_remove_head(&rqb_buf_list, rqb_buffer, struct rqb_dmabuf,
7781 				 hbuf.list);
7782 
7783 		hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
7784 		hrqe.address_hi = putPaddrHigh(rqb_buffer->hbuf.phys);
7785 		drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
7786 		drqe.address_hi = putPaddrHigh(rqb_buffer->dbuf.phys);
7787 		rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
7788 		if (rc < 0) {
7789 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
7790 					"6421 Cannot post to HRQ %d: %x %x %x "
7791 					"DRQ %x %x\n",
7792 					hrq->queue_id,
7793 					hrq->host_index,
7794 					hrq->hba_index,
7795 					hrq->entry_count,
7796 					drq->host_index,
7797 					drq->hba_index);
7798 			rqbp->rqb_free_buffer(phba, rqb_buffer);
7799 		} else {
7800 			list_add_tail(&rqb_buffer->hbuf.list,
7801 				      &rqbp->rqb_buffer_list);
7802 			rqbp->buffer_count++;
7803 		}
7804 	}
7805 	spin_unlock_irqrestore(&phba->hbalock, flags);
7806 	return 1;
7807 }
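
/*
 * RQ-pair sketch: each receive buffer above is posted as one RQE per
 * queue, header and data pointing into the same rqb_dmabuf:
 *
 *	hrqe.address_lo = putPaddrLow(rqb_buffer->hbuf.phys);
 *	drqe.address_lo = putPaddrLow(rqb_buffer->dbuf.phys);
 *	rc = lpfc_sli4_rq_put(hrq, drq, &hrqe, &drqe);
 *
 * so the port can land a received frame header in the HRQ buffer and
 * its payload in the paired DRQ buffer.
 */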
7808 
7809 static void
7810 lpfc_mbx_cmpl_read_lds_params(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7811 {
7812 	union lpfc_sli4_cfg_shdr *shdr;
7813 	u32 shdr_status, shdr_add_status;
7814 
7815 	shdr = (union lpfc_sli4_cfg_shdr *)
7816 		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7817 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7818 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7819 	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7820 		lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT | LOG_MBOX,
7821 				"4622 SET_FEATURE (x%x) mbox failed, "
7822 				"status x%x add_status x%x, mbx status x%x\n",
7823 				LPFC_SET_LD_SIGNAL, shdr_status,
7824 				shdr_add_status, pmb->u.mb.mbxStatus);
7825 		phba->degrade_activate_threshold = 0;
7826 		phba->degrade_deactivate_threshold = 0;
7827 		phba->fec_degrade_interval = 0;
7828 		goto out;
7829 	}
7830 
7831 	phba->degrade_activate_threshold = pmb->u.mqe.un.set_feature.word7;
7832 	phba->degrade_deactivate_threshold = pmb->u.mqe.un.set_feature.word8;
7833 	phba->fec_degrade_interval = pmb->u.mqe.un.set_feature.word10;
7834 
7835 	lpfc_printf_log(phba, KERN_INFO, LOG_LDS_EVENT,
7836 			"4624 Success: da x%x dd x%x interval x%x\n",
7837 			phba->degrade_activate_threshold,
7838 			phba->degrade_deactivate_threshold,
7839 			phba->fec_degrade_interval);
7840 out:
7841 	mempool_free(pmb, phba->mbox_mem_pool);
7842 }
7843 
7844 int
7845 lpfc_read_lds_params(struct lpfc_hba *phba)
7846 {
7847 	LPFC_MBOXQ_t *mboxq;
7848 	int rc;
7849 
7850 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7851 	if (!mboxq)
7852 		return -ENOMEM;
7853 
7854 	lpfc_set_features(phba, mboxq, LPFC_SET_LD_SIGNAL);
7855 	mboxq->vport = phba->pport;
7856 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_lds_params;
7857 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7858 	if (rc == MBX_NOT_FINISHED) {
7859 		mempool_free(mboxq, phba->mbox_mem_pool);
7860 		return -EIO;
7861 	}
7862 	return 0;
7863 }
7864 
7865 static void
7866 lpfc_mbx_cmpl_cgn_set_ftrs(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
7867 {
7868 	struct lpfc_vport *vport = pmb->vport;
7869 	union lpfc_sli4_cfg_shdr *shdr;
7870 	u32 shdr_status, shdr_add_status;
7871 	u32 sig, acqe;
7872 
7873 	/* Two outcomes. (1) Set features was successful and EDC negotiation
7874 	 * is done. (2) Mailbox failed, so fall back to FPIN support only.
7875 	 */
7876 	shdr = (union lpfc_sli4_cfg_shdr *)
7877 		&pmb->u.mqe.un.sli4_config.header.cfg_shdr;
7878 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
7879 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
7880 	if (shdr_status || shdr_add_status || pmb->u.mb.mbxStatus) {
7881 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
7882 				"2516 CGN SET_FEATURE mbox failed with "
7883 				"status x%x add_status x%x, mbx status x%x "
7884 				"Reset Congestion to FPINs only\n",
7885 				shdr_status, shdr_add_status,
7886 				pmb->u.mb.mbxStatus);
7887 		/* If there is a mbox error, move on to RDF */
7888 		phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7889 		phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7890 		goto out;
7891 	}
7892 
7893 	/* Zero out Congestion Signal ACQE counter */
7894 	phba->cgn_acqe_cnt = 0;
7895 
7896 	acqe = bf_get(lpfc_mbx_set_feature_CGN_acqe_freq,
7897 		      &pmb->u.mqe.un.set_feature);
7898 	sig = bf_get(lpfc_mbx_set_feature_CGN_warn_freq,
7899 		     &pmb->u.mqe.un.set_feature);
7900 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7901 			"4620 SET_FEATURES Success: Freq: %ds %dms "
7902 			" Reg: x%x x%x\n", acqe, sig,
7903 			phba->cgn_reg_signal, phba->cgn_reg_fpin);
7904 out:
7905 	mempool_free(pmb, phba->mbox_mem_pool);
7906 
7907 	/* Register for FPIN events from the fabric now that the
7908 	 * EDC common_set_features has completed.
7909 	 */
7910 	lpfc_issue_els_rdf(vport, 0);
7911 }
7912 
7913 int
7914 lpfc_config_cgn_signal(struct lpfc_hba *phba)
7915 {
7916 	LPFC_MBOXQ_t *mboxq;
7917 	u32 rc;
7918 
7919 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
7920 	if (!mboxq)
7921 		goto out_rdf;
7922 
7923 	lpfc_set_features(phba, mboxq, LPFC_SET_CGN_SIGNAL);
7924 	mboxq->vport = phba->pport;
7925 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_cgn_set_ftrs;
7926 
7927 	lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
7928 			"4621 SET_FEATURES: FREQ sig x%x acqe x%x: "
7929 			"Reg: x%x x%x\n",
7930 			phba->cgn_sig_freq, lpfc_acqe_cgn_frequency,
7931 			phba->cgn_reg_signal, phba->cgn_reg_fpin);
7932 
7933 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
7934 	if (rc == MBX_NOT_FINISHED)
7935 		goto out;
7936 	return 0;
7937 
7938 out:
7939 	mempool_free(mboxq, phba->mbox_mem_pool);
7940 out_rdf:
7941 	/* If there is a mbox error, move on to RDF */
7942 	phba->cgn_reg_fpin = LPFC_CGN_FPIN_WARN | LPFC_CGN_FPIN_ALARM;
7943 	phba->cgn_reg_signal = EDC_CG_SIG_NOTSUPPORTED;
7944 	lpfc_issue_els_rdf(phba->pport, 0);
7945 	return -EIO;
7946 }
7947 
7948 /**
7949  * lpfc_init_idle_stat_hb - Initialize idle_stat tracking
7950  * @phba: pointer to lpfc hba data structure.
7951  *
7952  * This routine initializes the per-eq idle_stat to dynamically dictate
7953  * polling decisions.
7954  *
7955  * Return codes:
7956  *   None
7957  **/
7958 static void lpfc_init_idle_stat_hb(struct lpfc_hba *phba)
7959 {
7960 	int i;
7961 	struct lpfc_sli4_hdw_queue *hdwq;
7962 	struct lpfc_queue *eq;
7963 	struct lpfc_idle_stat *idle_stat;
7964 	u64 wall;
7965 
7966 	for_each_present_cpu(i) {
7967 		hdwq = &phba->sli4_hba.hdwq[phba->sli4_hba.cpu_map[i].hdwq];
7968 		eq = hdwq->hba_eq;
7969 
7970 		/* Skip if we've already handled this eq's primary CPU */
7971 		if (eq->chann != i)
7972 			continue;
7973 
7974 		idle_stat = &phba->sli4_hba.idle_stat[i];
7975 
7976 		idle_stat->prev_idle = get_cpu_idle_time(i, &wall, 1);
7977 		idle_stat->prev_wall = wall;
7978 
7979 		if (phba->nvmet_support ||
7980 		    phba->cmf_active_mode != LPFC_CFG_OFF ||
7981 		    phba->intr_type != MSIX)
7982 			eq->poll_mode = LPFC_QUEUE_WORK;
7983 		else
7984 			eq->poll_mode = LPFC_THREADED_IRQ;
7985 	}
7986 
7987 	if (!phba->nvmet_support && phba->intr_type == MSIX)
7988 		schedule_delayed_work(&phba->idle_stat_delay_work,
7989 				      msecs_to_jiffies(LPFC_IDLE_STAT_DELAY));
7990 }
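
/*
 * Illustrative sketch (standalone userspace C, not driver code): models the
 * poll-mode decision made in lpfc_init_idle_stat_hb() above. An EQ is
 * serviced by a threaded IRQ only when the port is an initiator (no NVMET),
 * CMF is off, and MSI-X is in use; otherwise completions are deferred to a
 * work queue. All demo_* names below are hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_poll_mode { DEMO_QUEUE_WORK, DEMO_THREADED_IRQ };

static enum demo_poll_mode
demo_pick_poll_mode(bool nvmet_support, bool cmf_active, bool has_msix)
{
	/* Mirror the three conditions checked in the loop above */
	if (nvmet_support || cmf_active || !has_msix)
		return DEMO_QUEUE_WORK;
	return DEMO_THREADED_IRQ;
}

int main(void)
{
	printf("initiator, no CMF, MSI-X -> %s\n",
	       demo_pick_poll_mode(false, false, true) == DEMO_THREADED_IRQ ?
	       "THREADED_IRQ" : "QUEUE_WORK");
	printf("nvmet target             -> %s\n",
	       demo_pick_poll_mode(true, false, true) == DEMO_THREADED_IRQ ?
	       "THREADED_IRQ" : "QUEUE_WORK");
	return 0;
}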
7991 
7992 static void lpfc_sli4_dip(struct lpfc_hba *phba)
7993 {
7994 	uint32_t if_type;
7995 
7996 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
7997 	if (if_type == LPFC_SLI_INTF_IF_TYPE_2 ||
7998 	    if_type == LPFC_SLI_INTF_IF_TYPE_6) {
7999 		struct lpfc_register reg_data;
8000 
8001 		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
8002 			       &reg_data.word0))
8003 			return;
8004 
8005 		if (bf_get(lpfc_sliport_status_dip, &reg_data))
8006 			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8007 					"2904 Firmware Dump Image Present"
8008 					" on Adapter");
8009 	}
8010 }
8011 
8012 /**
8013  * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
8014  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8015  * @entries: Number of rx_info_entry objects to allocate in ring
8016  *
8017  * Return:
8018  * 0 - Success
8019  * ENOMEM - Failure to kmalloc
8020  **/
8021 int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
8022 				u32 entries)
8023 {
8024 	rx_monitor->ring = kmalloc_objs(struct rx_info_entry, entries);
8025 	if (!rx_monitor->ring)
8026 		return -ENOMEM;
8027 
8028 	rx_monitor->head_idx = 0;
8029 	rx_monitor->tail_idx = 0;
8030 	spin_lock_init(&rx_monitor->lock);
8031 	rx_monitor->entries = entries;
8032 
8033 	return 0;
8034 }
8035 
8036 /**
8037  * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
8038  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8039  *
8040  * Called after cancellation of cmf_timer.
8041  **/
8042 void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
8043 {
8044 	kfree(rx_monitor->ring);
8045 	rx_monitor->ring = NULL;
8046 	rx_monitor->entries = 0;
8047 	rx_monitor->head_idx = 0;
8048 	rx_monitor->tail_idx = 0;
8049 }
8050 
8051 /**
8052  * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
8053  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8054  * @entry: Pointer to rx_info_entry
8055  *
8056  * Used to insert an rx_info_entry into rx_monitor's ring.  Note that this is a
8057  * deep copy of the rx_info_entry, not a shallow copy of the rx_info_entry ptr.
8058  *
8059  * This is called from lpfc_cmf_timer, which is in timer/softirq context.
8060  *
8061  * In cases of old data overflow, we do a best effort of FIFO order.
8062  **/
8063 void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
8064 			    struct rx_info_entry *entry)
8065 {
8066 	struct rx_info_entry *ring = rx_monitor->ring;
8067 	u32 *head_idx = &rx_monitor->head_idx;
8068 	u32 *tail_idx = &rx_monitor->tail_idx;
8069 	spinlock_t *ring_lock = &rx_monitor->lock;
8070 	u32 ring_size = rx_monitor->entries;
8071 
8072 	spin_lock(ring_lock);
8073 	memcpy(&ring[*tail_idx], entry, sizeof(*entry));
8074 	*tail_idx = (*tail_idx + 1) % ring_size;
8075 
8076 	/* Best effort of FIFO saved data */
8077 	if (*tail_idx == *head_idx)
8078 		*head_idx = (*head_idx + 1) % ring_size;
8079 
8080 	spin_unlock(ring_lock);
8081 }
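
/*
 * Illustrative sketch (standalone userspace C, not driver code): a minimal
 * model of the ring insert above. The tail always advances; when it catches
 * the head, the head is pushed forward so the oldest entry is dropped,
 * giving the "best effort FIFO" behavior described in the kernel-doc.
 * Locking is omitted since this demo is single threaded; demo_* names are
 * hypothetical.
 */
#include <stdio.h>

#define DEMO_RING_SIZE 4

struct demo_ring {
	int buf[DEMO_RING_SIZE];
	unsigned int head, tail;
};

static void demo_record(struct demo_ring *r, int v)
{
	r->buf[r->tail] = v;			/* deep copy of the entry */
	r->tail = (r->tail + 1) % DEMO_RING_SIZE;
	if (r->tail == r->head)			/* full: drop oldest */
		r->head = (r->head + 1) % DEMO_RING_SIZE;
}

int main(void)
{
	struct demo_ring r = { .head = 0, .tail = 0 };
	int i;

	for (i = 1; i <= 6; i++)	/* overfill a 4-slot ring */
		demo_record(&r, i);
	while (r.head != r.tail) {	/* drain head..tail in FIFO order */
		printf("%d ", r.buf[r.head]);
		r.head = (r.head + 1) % DEMO_RING_SIZE;
	}
	printf("\n");			/* prints "4 5 6": oldest dropped */
	return 0;
}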
8082 
8083 /**
8084  * lpfc_rx_monitor_report - Read out rx_monitor's ring
8085  * @phba: Pointer to lpfc_hba object
8086  * @rx_monitor: Pointer to lpfc_rx_info_monitor object
8087  * @buf: Pointer to char buffer that will contain rx monitor info data
8088  * @buf_len: Length of buf, including the NUL terminator
8089  * @max_read_entries: Maximum number of entries to read out of ring
8090  *
8091  * Used to dump/read what's in rx_monitor's ring buffer.
8092  *
8093  * If buf is NULL || buf_len == 0, then it is implied that we want to log the
8094  * information to kmsg instead of filling out buf.
8095  *
8096  * Return:
8097  * Number of entries read out of the ring
8098  **/
8099 u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
8100 			   struct lpfc_rx_info_monitor *rx_monitor, char *buf,
8101 			   u32 buf_len, u32 max_read_entries)
8102 {
8103 	struct rx_info_entry *ring = rx_monitor->ring;
8104 	struct rx_info_entry *entry;
8105 	u32 *head_idx = &rx_monitor->head_idx;
8106 	u32 *tail_idx = &rx_monitor->tail_idx;
8107 	spinlock_t *ring_lock = &rx_monitor->lock;
8108 	u32 ring_size = rx_monitor->entries;
8109 	u32 cnt = 0;
8110 	char tmp[DBG_LOG_STR_SZ] = {0};
8111 	bool log_to_kmsg = !buf || !buf_len;
8112 
8113 	if (!log_to_kmsg) {
8114 		/* clear the buffer to be sure */
8115 		memset(buf, 0, buf_len);
8116 
8117 		scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
8118 					"%-8s%-8s%-8s%-16s\n",
8119 					"MaxBPI", "Tot_Data_CMF",
8120 					"Tot_Data_Cmd", "Tot_Data_Cmpl",
8121 					"Lat(us)", "Avg_IO", "Max_IO", "Bsy",
8122 					"IO_cnt", "Info", "BWutil(ms)");
8123 	}
8124 
8125 	/* Needs to be _irq because record is called from timer interrupt
8126 	 * context
8127 	 */
8128 	spin_lock_irq(ring_lock);
8129 	while (*head_idx != *tail_idx) {
8130 		entry = &ring[*head_idx];
8131 
8132 		/* Read out this entry's data. */
8133 		if (!log_to_kmsg) {
8134 			/* If !log_to_kmsg, then store to buf. */
8135 			scnprintf(tmp, sizeof(tmp),
8136 				  "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
8137 				  "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
8138 				  *head_idx, entry->max_bytes_per_interval,
8139 				  entry->cmf_bytes, entry->total_bytes,
8140 				  entry->rcv_bytes, entry->avg_io_latency,
8141 				  entry->avg_io_size, entry->max_read_cnt,
8142 				  entry->cmf_busy, entry->io_cnt,
8143 				  entry->cmf_info, entry->timer_utilization,
8144 				  entry->timer_interval);
8145 
8146 			/* Check for buffer overflow */
8147 			if ((strlen(buf) + strlen(tmp)) >= buf_len)
8148 				break;
8149 
8150 			/* Append entry's data to buffer */
8151 			strlcat(buf, tmp, buf_len);
8152 		} else {
8153 			lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
8154 					"4410 %02u: MBPI %llu Xmit %llu "
8155 					"Cmpl %llu Lat %llu ASz %llu Info %02u "
8156 					"BWUtil %u Int %u slot %u\n",
8157 					cnt, entry->max_bytes_per_interval,
8158 					entry->total_bytes, entry->rcv_bytes,
8159 					entry->avg_io_latency,
8160 					entry->avg_io_size, entry->cmf_info,
8161 					entry->timer_utilization,
8162 					entry->timer_interval, *head_idx);
8163 		}
8164 
8165 		*head_idx = (*head_idx + 1) % ring_size;
8166 
8167 		/* Don't feed more than max_read_entries */
8168 		cnt++;
8169 		if (cnt >= max_read_entries)
8170 			break;
8171 	}
8172 	spin_unlock_irq(ring_lock);
8173 
8174 	return cnt;
8175 }
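
/*
 * Illustrative sketch (standalone userspace C, not driver code): the bounded
 * read-out pattern used in lpfc_rx_monitor_report() above - format each
 * entry into a scratch string, stop early if appending would overflow the
 * caller's buffer, and never emit more than max_read entries. demo_* names
 * are hypothetical.
 */
#include <stdio.h>
#include <string.h>

static unsigned int demo_report(const int *ring, unsigned int head,
				unsigned int tail, unsigned int size,
				char *buf, size_t buf_len,
				unsigned int max_read)
{
	char tmp[32];
	unsigned int cnt = 0;

	memset(buf, 0, buf_len);
	while (head != tail) {
		snprintf(tmp, sizeof(tmp), "%03u: %d\n", head, ring[head]);
		if (strlen(buf) + strlen(tmp) >= buf_len)
			break;			/* would overflow: stop */
		strcat(buf, tmp);		/* safe: checked above */
		head = (head + 1) % size;
		if (++cnt >= max_read)		/* honor the read cap */
			break;
	}
	return cnt;
}

int main(void)
{
	int ring[4] = { 10, 20, 30, 40 };
	char buf[64];
	unsigned int n = demo_report(ring, 0, 3, 4, buf, sizeof(buf), 2);

	printf("read %u entries:\n%s", n, buf);
	return 0;
}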
8176 
8177 /**
8178  * lpfc_cmf_setup - Initialize CMF and MI support
8179  * @phba: Pointer to HBA context object.
8180  *
8181  * This is called from HBA setup during driver load or when the HBA
8182  * comes online. This does all the initialization to support CMF and MI.
8183  **/
8184 static int
8185 lpfc_cmf_setup(struct lpfc_hba *phba)
8186 {
8187 	LPFC_MBOXQ_t *mboxq;
8188 	struct lpfc_dmabuf *mp;
8189 	struct lpfc_pc_sli4_params *sli4_params;
8190 	int rc, cmf, mi_ver;
8191 
8192 	rc = lpfc_sli4_refresh_params(phba);
8193 	if (unlikely(rc))
8194 		return rc;
8195 
8196 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8197 	if (!mboxq)
8198 		return -ENOMEM;
8199 
8200 	sli4_params = &phba->sli4_hba.pc_sli4_params;
8201 
8202 	/* Always try to enable MI feature if we can */
8203 	if (sli4_params->mi_ver) {
8204 		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_MI);
8205 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8206 		mi_ver = bf_get(lpfc_mbx_set_feature_mi,
8207 				 &mboxq->u.mqe.un.set_feature);
8208 
8209 		if (rc == MBX_SUCCESS) {
8210 			if (mi_ver) {
8211 				lpfc_printf_log(phba,
8212 						KERN_WARNING, LOG_CGN_MGMT,
8213 						"6215 MI is enabled\n");
8214 				sli4_params->mi_ver = mi_ver;
8215 			} else {
8216 				lpfc_printf_log(phba,
8217 						KERN_WARNING, LOG_CGN_MGMT,
8218 						"6338 MI is disabled\n");
8219 				sli4_params->mi_ver = 0;
8220 			}
8221 		} else {
8222 			/* mi_ver is already set from GET_SLI4_PARAMETERS */
8223 			lpfc_printf_log(phba, KERN_INFO,
8224 					LOG_CGN_MGMT | LOG_INIT,
8225 					"6245 Enable MI Mailbox x%x (x%x/x%x) "
8226 					"failed, rc:x%x mi:x%x\n",
8227 					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8228 					lpfc_sli_config_mbox_subsys_get
8229 						(phba, mboxq),
8230 					lpfc_sli_config_mbox_opcode_get
8231 						(phba, mboxq),
8232 					rc, sli4_params->mi_ver);
8233 		}
8234 	} else {
8235 		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8236 				"6217 MI is disabled\n");
8237 	}
8238 
8239 	/* Ensure FDMI is enabled for MI if enable_mi is set */
8240 	if (sli4_params->mi_ver)
8241 		phba->cfg_fdmi_on = LPFC_FDMI_SUPPORT;
8242 
8243 	/* Always try to enable CMF feature if we can */
8244 	if (sli4_params->cmf) {
8245 		lpfc_set_features(phba, mboxq, LPFC_SET_ENABLE_CMF);
8246 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8247 		cmf = bf_get(lpfc_mbx_set_feature_cmf,
8248 			     &mboxq->u.mqe.un.set_feature);
8249 		if (rc == MBX_SUCCESS && cmf) {
8250 			lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8251 					"6218 CMF is enabled: mode %d\n",
8252 					phba->cmf_active_mode);
8253 		} else {
8254 			lpfc_printf_log(phba, KERN_WARNING,
8255 					LOG_CGN_MGMT | LOG_INIT,
8256 					"6219 Enable CMF Mailbox x%x (x%x/x%x) "
8257 					"failed, rc:x%x dd:x%x\n",
8258 					bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8259 					lpfc_sli_config_mbox_subsys_get
8260 						(phba, mboxq),
8261 					lpfc_sli_config_mbox_opcode_get
8262 						(phba, mboxq),
8263 					rc, cmf);
8264 			sli4_params->cmf = 0;
8265 			phba->cmf_active_mode = LPFC_CFG_OFF;
8266 			goto no_cmf;
8267 		}
8268 
8269 		/* Allocate Congestion Information Buffer */
8270 		if (!phba->cgn_i) {
8271 			mp = kmalloc_obj(*mp);
8272 			if (mp)
8273 				mp->virt = dma_alloc_coherent
8274 						(&phba->pcidev->dev,
8275 						sizeof(struct lpfc_cgn_info),
8276 						&mp->phys, GFP_KERNEL);
8277 			if (!mp || !mp->virt) {
8278 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8279 						"2640 Failed to alloc memory "
8280 						"for Congestion Info\n");
8281 				kfree(mp);
8282 				sli4_params->cmf = 0;
8283 				phba->cmf_active_mode = LPFC_CFG_OFF;
8284 				goto no_cmf;
8285 			}
8286 			phba->cgn_i = mp;
8287 
8288 			/* initialize congestion buffer info */
8289 			lpfc_init_congestion_buf(phba);
8290 			lpfc_init_congestion_stat(phba);
8291 
8292 			/* Zero out Congestion Signal counters */
8293 			atomic64_set(&phba->cgn_acqe_stat.alarm, 0);
8294 			atomic64_set(&phba->cgn_acqe_stat.warn, 0);
8295 		}
8296 
8297 		rc = lpfc_sli4_cgn_params_read(phba);
8298 		if (rc < 0) {
8299 			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8300 					"6242 Error reading Cgn Params (%d)\n",
8301 					rc);
8302 			/* Ensure CGN Mode is off */
8303 			sli4_params->cmf = 0;
8304 		} else if (!rc) {
8305 			lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT | LOG_INIT,
8306 					"6243 CGN Event empty object.\n");
8307 			/* Ensure CGN Mode is off */
8308 			sli4_params->cmf = 0;
8309 		}
8310 	} else {
8311 no_cmf:
8312 		lpfc_printf_log(phba, KERN_WARNING, LOG_CGN_MGMT,
8313 				"6220 CMF is disabled\n");
8314 	}
8315 
8316 	/* Only register congestion buffer with firmware if BOTH
8317 	 * CMF and E2E are enabled.
8318 	 */
8319 	if (sli4_params->cmf && sli4_params->mi_ver) {
8320 		rc = lpfc_reg_congestion_buf(phba);
8321 		if (rc) {
8322 			dma_free_coherent(&phba->pcidev->dev,
8323 					  sizeof(struct lpfc_cgn_info),
8324 					  phba->cgn_i->virt, phba->cgn_i->phys);
8325 			kfree(phba->cgn_i);
8326 			phba->cgn_i = NULL;
8327 			/* Ensure CGN Mode is off */
8328 			phba->cmf_active_mode = LPFC_CFG_OFF;
8329 			sli4_params->cmf = 0;
8330 			return 0;
8331 		}
8332 	}
8333 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8334 			"6470 Setup MI version %d CMF %d mode %d\n",
8335 			sli4_params->mi_ver, sli4_params->cmf,
8336 			phba->cmf_active_mode);
8337 
8338 	mempool_free(mboxq, phba->mbox_mem_pool);
8339 
8340 	/* Initialize atomic counters */
8341 	atomic_set(&phba->cgn_fabric_warn_cnt, 0);
8342 	atomic_set(&phba->cgn_fabric_alarm_cnt, 0);
8343 	atomic_set(&phba->cgn_sync_alarm_cnt, 0);
8344 	atomic_set(&phba->cgn_sync_warn_cnt, 0);
8345 	atomic_set(&phba->cgn_driver_evt_cnt, 0);
8346 	atomic_set(&phba->cgn_latency_evt_cnt, 0);
8347 	atomic64_set(&phba->cgn_latency_evt, 0);
8348 
8349 	phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
8350 
8351 	/* Allocate RX Monitor Buffer */
8352 	if (!phba->rx_monitor) {
8353 		phba->rx_monitor = kzalloc_obj(*phba->rx_monitor);
8354 
8355 		if (!phba->rx_monitor) {
8356 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8357 					"2644 Failed to alloc memory "
8358 					"for RX Monitor Buffer\n");
8359 			return -ENOMEM;
8360 		}
8361 
8362 		/* Instruct the rx_monitor object to instantiate its ring */
8363 		if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
8364 						LPFC_MAX_RXMONITOR_ENTRY)) {
8365 			kfree(phba->rx_monitor);
8366 			phba->rx_monitor = NULL;
8367 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
8368 					"2645 Failed to alloc memory "
8369 					"for RX Monitor's Ring\n");
8370 			return -ENOMEM;
8371 		}
8372 	}
8373 
8374 	return 0;
8375 }
8376 
8377 static int
8378 lpfc_set_host_tm(struct lpfc_hba *phba)
8379 {
8380 	LPFC_MBOXQ_t *mboxq;
8381 	uint32_t len, rc;
8382 	struct timespec64 cur_time;
8383 	struct tm broken;
8384 	uint32_t month, day, year;
8385 	uint32_t hour, minute, second;
8386 	struct lpfc_mbx_set_host_date_time *tm;
8387 
8388 	mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8389 	if (!mboxq)
8390 		return -ENOMEM;
8391 
8392 	len = sizeof(struct lpfc_mbx_set_host_data) -
8393 		sizeof(struct lpfc_sli4_cfg_mhdr);
8394 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
8395 			 LPFC_MBOX_OPCODE_SET_HOST_DATA, len,
8396 			 LPFC_SLI4_MBX_EMBED);
8397 
8398 	mboxq->u.mqe.un.set_host_data.param_id = LPFC_SET_HOST_DATE_TIME;
8399 	mboxq->u.mqe.un.set_host_data.param_len =
8400 			sizeof(struct lpfc_mbx_set_host_date_time);
8401 	tm = &mboxq->u.mqe.un.set_host_data.un.tm;
8402 	ktime_get_real_ts64(&cur_time);
8403 	time64_to_tm(cur_time.tv_sec, 0, &broken);
8404 	month = broken.tm_mon + 1;
8405 	day = broken.tm_mday;
8406 	year = broken.tm_year - 100;
8407 	hour = broken.tm_hour;
8408 	minute = broken.tm_min;
8409 	second = broken.tm_sec;
8410 	bf_set(lpfc_mbx_set_host_month, tm, month);
8411 	bf_set(lpfc_mbx_set_host_day, tm, day);
8412 	bf_set(lpfc_mbx_set_host_year, tm, year);
8413 	bf_set(lpfc_mbx_set_host_hour, tm, hour);
8414 	bf_set(lpfc_mbx_set_host_min, tm, minute);
8415 	bf_set(lpfc_mbx_set_host_sec, tm, second);
8416 
8417 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8418 	mempool_free(mboxq, phba->mbox_mem_pool);
8419 	return rc;
8420 }
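
/*
 * Illustrative sketch (standalone userspace C, not driver code): the same
 * broken-down-time conversion done in lpfc_set_host_tm() above, before the
 * fields are packed into the mailbox. tm_mon is 0-based so add 1, and
 * tm_year counts from 1900 so subtracting 100 yields a two-digit year
 * relative to 2000. gmtime_r() stands in for time64_to_tm(..., 0, ...).
 */
#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t now = time(NULL);
	struct tm broken;

	gmtime_r(&now, &broken);	/* UTC, no offset */
	printf("month %d day %d year %d %02d:%02d:%02d\n",
	       broken.tm_mon + 1, broken.tm_mday, broken.tm_year - 100,
	       broken.tm_hour, broken.tm_min, broken.tm_sec);
	return 0;
}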
8421 
8422 /**
8423  * lpfc_get_platform_uuid - Attempts to extract a platform uuid
8424  * @phba: pointer to lpfc hba data structure.
8425  *
8426  * This routine attempts to first read SMBIOS DMI data for the System
8427  * Information structure offset 08h called System UUID.  Else, no platform
8428  * UUID will be advertised.
8429  **/
8430 static void
8431 lpfc_get_platform_uuid(struct lpfc_hba *phba)
8432 {
8433 	int rc;
8434 	const char *uuid;
8435 	char pni[17] = {0}; /* 16 characters + '\0' */
8436 	bool is_ff = true, is_00 = true;
8437 	u8 i;
8438 
8439 	/* First attempt SMBIOS DMI */
8440 	uuid = dmi_get_system_info(DMI_PRODUCT_UUID);
8441 	if (uuid) {
8442 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8443 				"2088 SMBIOS UUID %s\n",
8444 				uuid);
8445 	} else {
8446 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8447 				"2099 Could not extract UUID\n");
8448 	}
8449 
8450 	if (uuid && uuid_is_valid(uuid)) {
8451 		/* Generate PNI from UUID format.
8452 		 *
8453 		 * 1.) Extract lower 64 bits from UUID format.
8454 		 * 2.) Set 3h for NAA Locally Assigned Name Identifier format.
8455 		 *
8456 		 * e.g. xxxxxxxx-xxxx-xxxx-yyyy-yyyyyyyyyyyy
8457 		 *
8458 		 * extract the yyyy-yyyyyyyyyyyy portion
8459 		 * final PNI   3yyyyyyyyyyyyyyy
8460 		 */
8461 		scnprintf(pni, sizeof(pni), "3%c%c%c%s",
8462 			  uuid[20], uuid[21], uuid[22], &uuid[24]);
8463 
8464 		/* Sanitize the converted PNI */
8465 		for (i = 1; i < 16 && (is_ff || is_00); i++) {
8466 			if (pni[i] != '0')
8467 				is_00 = false;
8468 			if (pni[i] != 'f' && pni[i] != 'F')
8469 				is_ff = false;
8470 		}
8471 
8472 		/* Convert from char* to unsigned long */
8473 		rc = kstrtoul(pni, 16, &phba->pni);
8474 		if (!rc && !is_ff && !is_00) {
8475 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8476 					"2100 PNI 0x%016lx\n", phba->pni);
8477 		} else {
8478 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8479 					"2101 PNI %s generation status %d\n",
8480 					pni, rc);
8481 			phba->pni = 0;
8482 		}
8483 	}
8484 }
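
/*
 * Illustrative sketch (standalone userspace C, not driver code): derives a
 * NAA-3 style PNI from a UUID string exactly as outlined in the comment in
 * lpfc_get_platform_uuid() above - keep the low 64 bits of the canonical
 * "8-4-4-4-12" form (chars at offsets 20-22 and 24 onward) and prepend '3'
 * for a Locally Assigned Name Identifier. The 16-hex-digit string then
 * parses into a 64-bit value. The sample UUID is made up.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *uuid = "12345678-9abc-def0-1122-334455667788";
	char pni[17];	/* 16 characters + '\0' */
	unsigned long long val;

	/* '3' + yyy (uuid[20..22]) + yyyyyyyyyyyy (uuid[24..35]) */
	snprintf(pni, sizeof(pni), "3%c%c%c%s",
		 uuid[20], uuid[21], uuid[22], &uuid[24]);
	val = strtoull(pni, NULL, 16);
	printf("PNI string %s -> 0x%016llx\n", pni, val);
	return 0;
}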
8485 
8486 /**
8487  * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
8488  * @phba: Pointer to HBA context object.
8489  *
8490  * This function is the main SLI4 device initialization PCI function. This
8491  * function is called by the HBA initialization code, HBA reset code and
8492  * HBA error attention handler code. Caller is not required to hold any
8493  * locks.
8494  **/
8495 int
8496 lpfc_sli4_hba_setup(struct lpfc_hba *phba)
8497 {
8498 	int rc, i, cnt, len, dd;
8499 	LPFC_MBOXQ_t *mboxq;
8500 	struct lpfc_mqe *mqe;
8501 	uint8_t *vpd;
8502 	uint32_t vpd_size;
8503 	uint32_t ftr_rsp = 0;
8504 	struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
8505 	struct lpfc_vport *vport = phba->pport;
8506 	struct lpfc_dmabuf *mp;
8507 	struct lpfc_rqb *rqbp;
8508 	u32 flg;
8509 
8510 	/* Perform a PCI function reset to start from clean */
8511 	rc = lpfc_pci_function_reset(phba);
8512 	if (unlikely(rc))
8513 		return -ENODEV;
8514 
8515 	/* Check the HBA Host Status Register for readiness */
8516 	rc = lpfc_sli4_post_status_check(phba);
8517 	if (unlikely(rc))
8518 		return -ENODEV;
8519 	else {
8520 		spin_lock_irq(&phba->hbalock);
8521 		phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
8522 		flg = phba->sli.sli_flag;
8523 		spin_unlock_irq(&phba->hbalock);
8524 		/* Allow a little time after setting SLI_ACTIVE for any polled
8525 		 * MBX commands to complete via BSG.
8526 		 */
8527 		for (i = 0; i < 50 && (flg & LPFC_SLI_MBOX_ACTIVE); i++) {
8528 			msleep(20);
8529 			spin_lock_irq(&phba->hbalock);
8530 			flg = phba->sli.sli_flag;
8531 			spin_unlock_irq(&phba->hbalock);
8532 		}
8533 	}
8534 	clear_bit(HBA_SETUP, &phba->hba_flag);
8535 
8536 	lpfc_sli4_dip(phba);
8537 
8538 	/*
8539 	 * Allocate a single mailbox container for initializing the
8540 	 * port.
8541 	 */
8542 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
8543 	if (!mboxq)
8544 		return -ENOMEM;
8545 
8546 	/* Issue READ_REV to collect vpd and FW information. */
8547 	vpd_size = SLI4_PAGE_SIZE;
8548 	vpd = kzalloc(vpd_size, GFP_KERNEL);
8549 	if (!vpd) {
8550 		rc = -ENOMEM;
8551 		goto out_free_mbox;
8552 	}
8553 
8554 	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
8555 	if (unlikely(rc)) {
8556 		kfree(vpd);
8557 		goto out_free_mbox;
8558 	}
8559 
8560 	mqe = &mboxq->u.mqe;
8561 	phba->sli_rev = bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev);
8562 	if (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev)) {
8563 		set_bit(HBA_FCOE_MODE, &phba->hba_flag);
8564 		phba->fcp_embed_io = 0;	/* SLI4 FC support only */
8565 	} else {
8566 		clear_bit(HBA_FCOE_MODE, &phba->hba_flag);
8567 	}
8568 
8569 	/* Obtain platform UUID, only for SLI4 FC adapters */
8570 	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag))
8571 		lpfc_get_platform_uuid(phba);
8572 
8573 	if (bf_get(lpfc_mbx_rd_rev_cee_ver, &mqe->un.read_rev) ==
8574 		LPFC_DCBX_CEE_MODE)
8575 		set_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
8576 	else
8577 		clear_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
8578 
8579 	clear_bit(HBA_IOQ_FLUSH, &phba->hba_flag);
8580 
8581 	if (phba->sli_rev != LPFC_SLI_REV4) {
8582 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8583 			"0376 READ_REV Error. SLI Level %d "
8584 			"FCoE enabled %d\n",
8585 			phba->sli_rev,
8586 			test_bit(HBA_FCOE_MODE, &phba->hba_flag) ? 1 : 0);
8587 		rc = -EIO;
8588 		kfree(vpd);
8589 		goto out_free_mbox;
8590 	}
8591 
8592 	rc = lpfc_set_host_tm(phba);
8593 	lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
8594 			"6468 Set host date / time: Status x%x:\n", rc);
8595 
8596 	/*
8597 	 * Continue initialization with default values even if the driver
8598 	 * failed to read the FCoE param config regions; only read the
8599 	 * parameters if the board is FCoE.
8600 	 */
8601 	if (test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
8602 	    lpfc_sli4_read_fcoe_params(phba))
8603 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_INIT,
8604 			"2570 Failed to read FCoE parameters\n");
8605 
8606 	/*
8607 	 * Retrieve the sli4 device physical port name; failure to do so
8608 	 * is considered non-fatal.
8609 	 */
8610 	rc = lpfc_sli4_retrieve_pport_name(phba);
8611 	if (!rc)
8612 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8613 				"3080 Successful retrieving SLI4 device "
8614 				"physical port name: %s.\n", phba->Port);
8615 
8616 	rc = lpfc_sli4_get_ctl_attr(phba);
8617 	if (!rc)
8618 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8619 				"8351 Successful retrieving SLI4 device "
8620 				"CTL ATTR\n");
8621 
8622 	/*
8623 	 * Evaluate the read rev and vpd data. Populate the driver
8624 	 * state with the results. If this routine fails, the failure
8625 	 * is not fatal as the driver will use generic values.
8626 	 */
8627 	rc = lpfc_parse_vpd(phba, vpd, vpd_size);
8628 	if (unlikely(!rc))
8629 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8630 				"0377 Error %d parsing vpd. "
8631 				"Using defaults.\n", rc);
8632 	kfree(vpd);
8633 
8634 	/* Save information as VPD data */
8635 	phba->vpd.rev.biuRev = mqe->un.read_rev.first_hw_rev;
8636 	phba->vpd.rev.smRev = mqe->un.read_rev.second_hw_rev;
8637 
8638 	/*
8639 	 * This is because first G7 ASIC doesn't support the standard
8640 	 * 0x5a NVME cmd descriptor type/subtype
8641 	 */
8642 	if ((bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8643 			LPFC_SLI_INTF_IF_TYPE_6) &&
8644 	    (phba->vpd.rev.biuRev == LPFC_G7_ASIC_1) &&
8645 	    (phba->vpd.rev.smRev == 0) &&
8646 	    (phba->cfg_nvme_embed_cmd == 1))
8647 		phba->cfg_nvme_embed_cmd = 0;
8648 
8649 	phba->vpd.rev.endecRev = mqe->un.read_rev.third_hw_rev;
8650 	phba->vpd.rev.fcphHigh = bf_get(lpfc_mbx_rd_rev_fcph_high,
8651 					 &mqe->un.read_rev);
8652 	phba->vpd.rev.fcphLow = bf_get(lpfc_mbx_rd_rev_fcph_low,
8653 				       &mqe->un.read_rev);
8654 	phba->vpd.rev.feaLevelHigh = bf_get(lpfc_mbx_rd_rev_ftr_lvl_high,
8655 					    &mqe->un.read_rev);
8656 	phba->vpd.rev.feaLevelLow = bf_get(lpfc_mbx_rd_rev_ftr_lvl_low,
8657 					   &mqe->un.read_rev);
8658 	phba->vpd.rev.sli1FwRev = mqe->un.read_rev.fw_id_rev;
8659 	memcpy(phba->vpd.rev.sli1FwName, mqe->un.read_rev.fw_name, 16);
8660 	phba->vpd.rev.sli2FwRev = mqe->un.read_rev.ulp_fw_id_rev;
8661 	memcpy(phba->vpd.rev.sli2FwName, mqe->un.read_rev.ulp_fw_name, 16);
8662 	phba->vpd.rev.opFwRev = mqe->un.read_rev.fw_id_rev;
8663 	memcpy(phba->vpd.rev.opFwName, mqe->un.read_rev.fw_name, 16);
8664 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
8665 			"(%d):0380 READ_REV Status x%x "
8666 			"fw_rev:%s fcphHi:%x fcphLo:%x flHi:%x flLo:%x\n",
8667 			mboxq->vport ? mboxq->vport->vpi : 0,
8668 			bf_get(lpfc_mqe_status, mqe),
8669 			phba->vpd.rev.opFwName,
8670 			phba->vpd.rev.fcphHigh, phba->vpd.rev.fcphLow,
8671 			phba->vpd.rev.feaLevelHigh, phba->vpd.rev.feaLevelLow);
8672 
8673 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
8674 	    LPFC_SLI_INTF_IF_TYPE_0) {
8675 		lpfc_set_features(phba, mboxq, LPFC_SET_UE_RECOVERY);
8676 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8677 		if (rc == MBX_SUCCESS) {
8678 			set_bit(HBA_RECOVERABLE_UE, &phba->hba_flag);
8679 			/* Set 1Sec interval to detect UE */
8680 			phba->eratt_poll_interval = 1;
8681 			phba->sli4_hba.ue_to_sr = bf_get(
8682 					lpfc_mbx_set_feature_UESR,
8683 					&mboxq->u.mqe.un.set_feature);
8684 			phba->sli4_hba.ue_to_rp = bf_get(
8685 					lpfc_mbx_set_feature_UERP,
8686 					&mboxq->u.mqe.un.set_feature);
8687 		}
8688 	}
8689 
8690 	if (phba->cfg_enable_mds_diags && phba->mds_diags_support) {
8691 		/* Enable MDS Diagnostics only if the SLI Port supports it */
8692 		lpfc_set_features(phba, mboxq, LPFC_SET_MDS_DIAGS);
8693 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8694 		if (rc != MBX_SUCCESS)
8695 			phba->mds_diags_support = 0;
8696 	}
8697 
8698 	/*
8699 	 * Discover the port's supported feature set and match it against the
8700 	 * host's requests.
8701 	 */
8702 	lpfc_request_features(phba, mboxq);
8703 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8704 	if (unlikely(rc)) {
8705 		rc = -EIO;
8706 		goto out_free_mbox;
8707 	}
8708 
8709 	/* Disable VMID if app header is not supported */
8710 	if (phba->cfg_vmid_app_header && !(bf_get(lpfc_mbx_rq_ftr_rsp_ashdr,
8711 						  &mqe->un.req_ftrs))) {
8712 		bf_set(lpfc_ftr_ashdr, &phba->sli4_hba.sli4_flags, 0);
8713 		phba->cfg_vmid_app_header = 0;
8714 		lpfc_printf_log(phba, KERN_DEBUG, LOG_SLI,
8715 				"1242 vmid feature not supported\n");
8716 	}
8717 
8718 	/*
8719 	 * The port must support FCP initiator mode as this is the
8720 	 * only mode running in the host.
8721 	 */
8722 	if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
8723 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8724 				"0378 No support for fcpi mode.\n");
8725 		ftr_rsp++;
8726 	}
8727 
8728 	/*
8729 	 * If the port cannot support the host's requested features
8730 	 * then turn off the global config parameters to disable the
8731 	 * feature in the driver.  This is not a fatal error.
8732 	 */
8733 	if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
8734 		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs))) {
8735 			phba->cfg_enable_bg = 0;
8736 			phba->sli3_options &= ~LPFC_SLI3_BG_ENABLED;
8737 			ftr_rsp++;
8738 		}
8739 	}
8740 
8741 	if (phba->max_vpi && phba->cfg_enable_npiv &&
8742 	    !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8743 		ftr_rsp++;
8744 
8745 	if (ftr_rsp) {
8746 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8747 				"0379 Feature Mismatch Data: x%08x %08x "
8748 				"x%x x%x x%x\n", mqe->un.req_ftrs.word2,
8749 				mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
8750 				phba->cfg_enable_npiv, phba->max_vpi);
8751 		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
8752 			phba->cfg_enable_bg = 0;
8753 		if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
8754 			phba->cfg_enable_npiv = 0;
8755 	}
8756 
8757 	/* These SLI3 features are assumed in SLI4 */
8758 	spin_lock_irq(&phba->hbalock);
8759 	phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
8760 	spin_unlock_irq(&phba->hbalock);
8761 
8762 	/* Always try to enable dual dump feature if we can */
8763 	lpfc_set_features(phba, mboxq, LPFC_SET_DUAL_DUMP);
8764 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8765 	dd = bf_get(lpfc_mbx_set_feature_dd, &mboxq->u.mqe.un.set_feature);
8766 	if ((rc == MBX_SUCCESS) && (dd == LPFC_ENABLE_DUAL_DUMP))
8767 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
8768 				"6448 Dual Dump is enabled\n");
8769 	else
8770 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI | LOG_INIT,
8771 				"6447 Dual Dump Mailbox x%x (x%x/x%x) failed, "
8772 				"rc:x%x dd:x%x\n",
8773 				bf_get(lpfc_mqe_command, &mboxq->u.mqe),
8774 				lpfc_sli_config_mbox_subsys_get(
8775 					phba, mboxq),
8776 				lpfc_sli_config_mbox_opcode_get(
8777 					phba, mboxq),
8778 				rc, dd);
8779 
8780 	/*
8781 	 * Allocate all resources (xri,rpi,vpi,vfi) now.  Subsequent
8782 	 * calls depend on these resources to complete port setup.
8783 	 */
8784 	rc = lpfc_sli4_alloc_resource_identifiers(phba);
8785 	if (rc) {
8786 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8787 				"2920 Failed to alloc Resource IDs "
8788 				"rc = x%x\n", rc);
8789 		goto out_free_mbox;
8790 	}
8791 
8792 	lpfc_sli4_node_rpi_restore(phba);
8793 
8794 	lpfc_set_host_data(phba, mboxq);
8795 
8796 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8797 	if (rc) {
8798 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
8799 				"2134 Failed to set host os driver version %x",
8800 				rc);
8801 	}
8802 
8803 	/* Read the port's service parameters. */
8804 	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
8805 	if (rc) {
8806 		phba->link_state = LPFC_HBA_ERROR;
8807 		rc = -ENOMEM;
8808 		goto out_free_mbox;
8809 	}
8810 
8811 	mboxq->vport = vport;
8812 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8813 	mp = mboxq->ctx_buf;
8814 	if (rc == MBX_SUCCESS) {
8815 		memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
8816 		rc = 0;
8817 	}
8818 
8819 	/*
8820 	 * This memory was allocated by the lpfc_read_sparam routine but is
8821 	 * no longer needed.  It is released and ctx_buf NULLed to prevent
8822 	 * unintended pointer access as the mbox is reused.
8823 	 */
8824 	lpfc_mbuf_free(phba, mp->virt, mp->phys);
8825 	kfree(mp);
8826 	mboxq->ctx_buf = NULL;
8827 	if (unlikely(rc)) {
8828 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8829 				"0382 READ_SPARAM command failed "
8830 				"status %d, mbxStatus x%x\n",
8831 				rc, bf_get(lpfc_mqe_status, mqe));
8832 		phba->link_state = LPFC_HBA_ERROR;
8833 		rc = -EIO;
8834 		goto out_free_mbox;
8835 	}
8836 
8837 	lpfc_update_vport_wwn(vport);
8838 
8839 	/* Update the fc_host data structures with new wwn. */
8840 	fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
8841 	fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
8842 
8843 	/* Create all the SLI4 queues */
8844 	rc = lpfc_sli4_queue_create(phba);
8845 	if (rc) {
8846 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8847 				"3089 Failed to allocate queues\n");
8848 		rc = -ENODEV;
8849 		goto out_free_mbox;
8850 	}
8851 	/* Set up all the queues to the device */
8852 	rc = lpfc_sli4_queue_setup(phba);
8853 	if (unlikely(rc)) {
8854 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8855 				"0381 Error %d during queue setup.\n", rc);
8856 		goto out_destroy_queue;
8857 	}
8858 	/* Initialize the driver internal SLI layer lists. */
8859 	lpfc_sli4_setup(phba);
8860 	lpfc_sli4_queue_init(phba);
8861 
8862 	/* update host els xri-sgl sizes and mappings */
8863 	rc = lpfc_sli4_els_sgl_update(phba);
8864 	if (unlikely(rc)) {
8865 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8866 				"1400 Failed to update xri-sgl size and "
8867 				"mapping: %d\n", rc);
8868 		goto out_destroy_queue;
8869 	}
8870 
8871 	/* register the els sgl pool to the port */
8872 	rc = lpfc_sli4_repost_sgl_list(phba, &phba->sli4_hba.lpfc_els_sgl_list,
8873 				       phba->sli4_hba.els_xri_cnt);
8874 	if (unlikely(rc < 0)) {
8875 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8876 				"0582 Error %d during els sgl post "
8877 				"operation\n", rc);
8878 		rc = -ENODEV;
8879 		goto out_destroy_queue;
8880 	}
8881 	phba->sli4_hba.els_xri_cnt = rc;
8882 
8883 	if (phba->nvmet_support) {
8884 		/* update host nvmet xri-sgl sizes and mappings */
8885 		rc = lpfc_sli4_nvmet_sgl_update(phba);
8886 		if (unlikely(rc)) {
8887 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8888 					"6308 Failed to update nvmet-sgl size "
8889 					"and mapping: %d\n", rc);
8890 			goto out_destroy_queue;
8891 		}
8892 
8893 		/* register the nvmet sgl pool to the port */
8894 		rc = lpfc_sli4_repost_sgl_list(
8895 			phba,
8896 			&phba->sli4_hba.lpfc_nvmet_sgl_list,
8897 			phba->sli4_hba.nvmet_xri_cnt);
8898 		if (unlikely(rc < 0)) {
8899 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8900 					"3117 Error %d during nvmet "
8901 					"sgl post\n", rc);
8902 			rc = -ENODEV;
8903 			goto out_destroy_queue;
8904 		}
8905 		phba->sli4_hba.nvmet_xri_cnt = rc;
8906 
8907 		/* We allocate an iocbq for every receive context SGL.
8908 		 * The additional allocation is for abort and ls handling.
8909 		 */
8910 		cnt = phba->sli4_hba.nvmet_xri_cnt +
8911 			phba->sli4_hba.max_cfg_param.max_xri;
8912 	} else {
8913 		/* update host common xri-sgl sizes and mappings */
8914 		rc = lpfc_sli4_io_sgl_update(phba);
8915 		if (unlikely(rc)) {
8916 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8917 					"6082 Failed to update nvme-sgl size "
8918 					"and mapping: %d\n", rc);
8919 			goto out_destroy_queue;
8920 		}
8921 
8922 		/* register the allocated common sgl pool to the port */
8923 		rc = lpfc_sli4_repost_io_sgl_list(phba);
8924 		if (unlikely(rc)) {
8925 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8926 					"6116 Error %d during nvme sgl post "
8927 					"operation\n", rc);
8928 			/* Some NVME buffers were moved to abort nvme list */
8929 			/* A pci function reset will repost them */
8930 			rc = -ENODEV;
8931 			goto out_destroy_queue;
8932 		}
8933 		/* Each lpfc_io_buf job structure has an iocbq element.
8934 		 * This cnt provides for abort, els, ct and ls requests.
8935 		 */
8936 		cnt = phba->sli4_hba.max_cfg_param.max_xri;
8937 	}
8938 
8939 	if (!phba->sli.iocbq_lookup) {
8940 		/* Initialize and populate the iocb list per host */
8941 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
8942 				"2821 initialize iocb list with %d entries\n",
8943 				cnt);
8944 		rc = lpfc_init_iocb_list(phba, cnt);
8945 		if (rc) {
8946 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8947 					"1413 Failed to init iocb list.\n");
8948 			goto out_destroy_queue;
8949 		}
8950 	}
8951 
8952 	if (phba->nvmet_support)
8953 		lpfc_nvmet_create_targetport(phba);
8954 
8955 	if (phba->nvmet_support && phba->cfg_nvmet_mrq) {
8956 		/* Post initial buffers to all RQs created */
8957 		for (i = 0; i < phba->cfg_nvmet_mrq; i++) {
8958 			rqbp = phba->sli4_hba.nvmet_mrq_hdr[i]->rqbp;
8959 			INIT_LIST_HEAD(&rqbp->rqb_buffer_list);
8960 			rqbp->rqb_alloc_buffer = lpfc_sli4_nvmet_alloc;
8961 			rqbp->rqb_free_buffer = lpfc_sli4_nvmet_free;
8962 			rqbp->entry_count = LPFC_NVMET_RQE_DEF_COUNT;
8963 			rqbp->buffer_count = 0;
8964 
8965 			lpfc_post_rq_buffer(
8966 				phba, phba->sli4_hba.nvmet_mrq_hdr[i],
8967 				phba->sli4_hba.nvmet_mrq_data[i],
8968 				phba->cfg_nvmet_mrq_post, i);
8969 		}
8970 	}
8971 
8972 	/* Post the rpi header region to the device. */
8973 	rc = lpfc_sli4_post_all_rpi_hdrs(phba);
8974 	if (unlikely(rc)) {
8975 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
8976 				"0393 Error %d during rpi post operation\n",
8977 				rc);
8978 		rc = -ENODEV;
8979 		goto out_free_iocblist;
8980 	}
8981 
8982 	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
8983 		if ((phba->nvmet_support == 0) || (phba->cfg_nvmet_mrq == 1)) {
8984 			/*
8985 			 * The FC Port needs to register FCFI (index 0)
8986 			 */
8987 			lpfc_reg_fcfi(phba, mboxq);
8988 			mboxq->vport = phba->pport;
8989 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
8990 			if (rc != MBX_SUCCESS)
8991 				goto out_unset_queue;
8992 			rc = 0;
8993 			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi,
8994 						&mboxq->u.mqe.un.reg_fcfi);
8995 		} else {
8996 			/* We are a NVME Target mode with MRQ > 1 */
8997 
8998 			/* First register the FCFI */
8999 			lpfc_reg_fcfi_mrq(phba, mboxq, 0);
9000 			mboxq->vport = phba->pport;
9001 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9002 			if (rc != MBX_SUCCESS)
9003 				goto out_unset_queue;
9004 			rc = 0;
9005 			phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_mrq_fcfi,
9006 						&mboxq->u.mqe.un.reg_fcfi_mrq);
9007 
9008 			/* Next register the MRQs */
9009 			lpfc_reg_fcfi_mrq(phba, mboxq, 1);
9010 			mboxq->vport = phba->pport;
9011 			rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9012 			if (rc != MBX_SUCCESS)
9013 				goto out_unset_queue;
9014 			rc = 0;
9015 		}
9016 		/* Check if the port is configured to be disabled */
9017 		lpfc_sli_read_link_ste(phba);
9018 	}
9019 
9020 	/* Don't post more new bufs if repost already recovered
9021 	 * the nvme sgls.
9022 	 */
9023 	if (phba->nvmet_support == 0) {
9024 		if (phba->sli4_hba.io_xri_cnt == 0) {
9025 			len = lpfc_new_io_buf(
9026 					      phba, phba->sli4_hba.io_xri_max);
9027 			if (len == 0) {
9028 				rc = -ENOMEM;
9029 				goto out_unset_queue;
9030 			}
9031 
9032 			if (phba->cfg_xri_rebalancing)
9033 				lpfc_create_multixri_pools(phba);
9034 		}
9035 	} else {
9036 		phba->cfg_xri_rebalancing = 0;
9037 	}
9038 
9039 	/* Allow asynchronous mailbox command to go through */
9040 	spin_lock_irq(&phba->hbalock);
9041 	phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9042 	spin_unlock_irq(&phba->hbalock);
9043 
9044 	/* Post receive buffers to the device */
9045 	lpfc_sli4_rb_setup(phba);
9046 
9047 	/* Reset HBA FCF states after HBA reset */
9048 	phba->fcf.fcf_flag = 0;
9049 	phba->fcf.current_rec.flag = 0;
9050 
9051 	/* Start the ELS watchdog timer */
9052 	mod_timer(&vport->els_tmofunc,
9053 			jiffies + secs_to_jiffies(phba->fc_ratov * 2));
9054 
9055 	/* Start heart beat timer */
9056 	mod_timer(&phba->hb_tmofunc,
9057 		  jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
9058 	clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
9059 	clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
9060 	phba->last_completion_time = jiffies;
9061 
9062 	/* start eq_delay heartbeat */
9063 	if (phba->cfg_auto_imax)
9064 		queue_delayed_work(phba->wq, &phba->eq_delay_work,
9065 				   msecs_to_jiffies(LPFC_EQ_DELAY_MSECS));
9066 
9067 	/* start per phba idle_stat_delay heartbeat */
9068 	lpfc_init_idle_stat_hb(phba);
9069 
9070 	/* Start error attention (ERATT) polling timer */
9071 	mod_timer(&phba->eratt_poll,
9072 		  jiffies + secs_to_jiffies(phba->eratt_poll_interval));
9073 
9074 	/*
9075 	 * The port is ready, set the host's link state to LINK_DOWN
9076 	 * in preparation for link interrupts.
9077 	 */
9078 	spin_lock_irq(&phba->hbalock);
9079 	phba->link_state = LPFC_LINK_DOWN;
9080 
9081 	/* Check if physical ports are trunked */
9082 	if (bf_get(lpfc_conf_trunk_port0, &phba->sli4_hba))
9083 		phba->trunk_link.link0.state = LPFC_LINK_DOWN;
9084 	if (bf_get(lpfc_conf_trunk_port1, &phba->sli4_hba))
9085 		phba->trunk_link.link1.state = LPFC_LINK_DOWN;
9086 	if (bf_get(lpfc_conf_trunk_port2, &phba->sli4_hba))
9087 		phba->trunk_link.link2.state = LPFC_LINK_DOWN;
9088 	if (bf_get(lpfc_conf_trunk_port3, &phba->sli4_hba))
9089 		phba->trunk_link.link3.state = LPFC_LINK_DOWN;
9090 	spin_unlock_irq(&phba->hbalock);
9091 
9092 	/* Arm the CQs and then EQs on device */
9093 	lpfc_sli4_arm_cqeq_intr(phba);
9094 
9095 	/* Indicate device interrupt mode */
9096 	phba->sli4_hba.intr_enable = 1;
9097 
9098 	/* Setup CMF after HBA is initialized */
9099 	lpfc_cmf_setup(phba);
9100 
9101 	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag) &&
9102 	    test_bit(LINK_DISABLED, &phba->hba_flag)) {
9103 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9104 				"3103 Adapter Link is disabled.\n");
9105 		lpfc_down_link(phba, mboxq);
9106 		rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
9107 		if (rc != MBX_SUCCESS) {
9108 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9109 					"3104 Adapter failed to issue "
9110 					"DOWN_LINK mbox cmd, rc:x%x\n", rc);
9111 			goto out_io_buff_free;
9112 		}
9113 	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
9114 		/* don't perform init_link on SLI4 FC port loopback test */
9115 		if (!(phba->link_flag & LS_LOOPBACK_MODE)) {
9116 			rc = phba->lpfc_hba_init_link(phba, MBX_NOWAIT);
9117 			if (rc)
9118 				goto out_io_buff_free;
9119 		}
9120 	}
9121 	mempool_free(mboxq, phba->mbox_mem_pool);
9122 
9123 	/* Enable RAS FW log support */
9124 	lpfc_sli4_ras_setup(phba);
9125 
9126 	set_bit(HBA_SETUP, &phba->hba_flag);
9127 	return rc;
9128 
9129 out_io_buff_free:
9130 	/* Free allocated IO Buffers */
9131 	lpfc_io_free(phba);
9132 out_unset_queue:
9133 	/* Unset all the queues set up in this routine when error out */
9134 	lpfc_sli4_queue_unset(phba);
9135 out_free_iocblist:
9136 	lpfc_free_iocb_list(phba);
9137 out_destroy_queue:
9138 	lpfc_sli4_queue_destroy(phba);
9139 	lpfc_stop_hba_timers(phba);
9140 out_free_mbox:
9141 	mempool_free(mboxq, phba->mbox_mem_pool);
9142 	return rc;
9143 }
9144 
9145 /**
9146  * lpfc_mbox_timeout - Timeout call back function for mbox timer
9147  * @t: Context to fetch pointer to hba structure from.
9148  *
9149  * This is the callback function for mailbox timer. The mailbox
9150  * timer is armed when a new mailbox command is issued and the timer
9151  * is deleted when the mailbox completes. The function is called by
9152  * the kernel timer code when a mailbox does not complete within
9153  * expected time. This function wakes up the worker thread to
9154  * process the mailbox timeout and returns. All the processing is
9155  * done by the worker thread function lpfc_mbox_timeout_handler.
9156  **/
9157 void
9158 lpfc_mbox_timeout(struct timer_list *t)
9159 {
9160 	struct lpfc_hba  *phba = timer_container_of(phba, t, sli.mbox_tmo);
9161 	unsigned long iflag;
9162 	uint32_t tmo_posted;
9163 
9164 	spin_lock_irqsave(&phba->pport->work_port_lock, iflag);
9165 	tmo_posted = phba->pport->work_port_events & WORKER_MBOX_TMO;
9166 	if (!tmo_posted)
9167 		phba->pport->work_port_events |= WORKER_MBOX_TMO;
9168 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflag);
9169 
9170 	if (!tmo_posted)
9171 		lpfc_worker_wake_up(phba);
9172 	return;
9173 }
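
/*
 * Illustrative sketch (standalone userspace C, not driver code): the "post
 * once" pattern used in lpfc_mbox_timeout() above - the timeout event bit
 * is set under a lock, and the worker is woken only on the 0 -> 1
 * transition, so repeated timer fires cannot queue duplicate work. The lock
 * is elided here because the demo is single threaded; demo_* names are
 * hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_WORKER_MBOX_TMO 0x1

static unsigned int demo_events;

static void demo_timer_fires(void)
{
	bool already_posted = demo_events & DEMO_WORKER_MBOX_TMO;

	if (!already_posted) {
		demo_events |= DEMO_WORKER_MBOX_TMO;
		printf("wake worker\n");	/* first fire only */
	} else {
		printf("already posted, no wake\n");
	}
}

int main(void)
{
	demo_timer_fires();	/* wake worker */
	demo_timer_fires();	/* already posted, no wake */
	return 0;
}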
9174 
9175 /**
9176  * lpfc_sli4_mbox_completions_pending - check to see if any mailbox completions
9177  *                                    are pending
9178  * @phba: Pointer to HBA context object.
9179  *
9180  * This function checks if any mailbox completions are present on the mailbox
9181  * completion queue.
9182  **/
9183 static bool
9184 lpfc_sli4_mbox_completions_pending(struct lpfc_hba *phba)
9185 {
9186 
9187 	uint32_t idx;
9188 	struct lpfc_queue *mcq;
9189 	struct lpfc_mcqe *mcqe;
9190 	bool pending_completions = false;
9191 	uint8_t	qe_valid;
9192 
9193 	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9194 		return false;
9195 
9196 	/* Check for completions on mailbox completion queue */
9197 
9198 	mcq = phba->sli4_hba.mbx_cq;
9199 	idx = mcq->hba_index;
9200 	qe_valid = mcq->qe_valid;
9201 	while (bf_get_le32(lpfc_cqe_valid,
9202 	       (struct lpfc_cqe *)lpfc_sli4_qe(mcq, idx)) == qe_valid) {
9203 		mcqe = (struct lpfc_mcqe *)(lpfc_sli4_qe(mcq, idx));
9204 		if (bf_get_le32(lpfc_trailer_completed, mcqe) &&
9205 		    (!bf_get_le32(lpfc_trailer_async, mcqe))) {
9206 			pending_completions = true;
9207 			break;
9208 		}
9209 		idx = (idx + 1) % mcq->entry_count;
9210 		if (mcq->hba_index == idx)
9211 			break;
9212 
9213 		/* if the index wrapped around, toggle the valid bit */
9214 		if (phba->sli4_hba.pc_sli4_params.cqav && !idx)
9215 			qe_valid = (qe_valid) ? 0 : 1;
9216 	}
9217 	return pending_completions;
9218 
9219 }
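
/*
 * Illustrative sketch (standalone userspace C, not driver code): the
 * valid-bit scan used above. Hardware writes CQEs with a phase bit that
 * flips on every wrap of the ring, so software can detect new entries
 * without a produced-count register: consume while the entry's bit matches
 * the expected phase, and toggle the expectation when the index wraps to 0.
 * demo_* names are hypothetical.
 */
#include <stdio.h>

#define DEMO_Q_DEPTH 4

struct demo_cqe { unsigned int valid_phase; int payload; };

int main(void)
{
	/* Ring written once through plus one wrapped entry (phase flipped) */
	struct demo_cqe q[DEMO_Q_DEPTH] = {
		{ 0, 50 },	/* index 0: second pass, phase 0 */
		{ 1, 20 },	/* indexes 1..3: first pass, phase 1 */
		{ 1, 30 },
		{ 1, 40 },
	};
	unsigned int idx = 1, phase = 1, scanned = 0;

	while (q[idx].valid_phase == phase && scanned < DEMO_Q_DEPTH) {
		printf("CQE[%u] payload %d\n", idx, q[idx].payload);
		idx = (idx + 1) % DEMO_Q_DEPTH;
		scanned++;
		if (idx == 0)		/* wrapped: expect flipped phase */
			phase ^= 1;
	}
	return 0;	/* prints payloads 20 30 40 50 in order */
}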
9220 
9221 /**
9222  * lpfc_sli4_process_missed_mbox_completions - process mbox completions
9223  *					      that were missed.
9224  * @phba: Pointer to HBA context object.
9225  *
9226  * For sli4, it is possible to miss an interrupt. As such mbox completions
9227  * may be missed, causing erroneous mailbox timeouts to occur. This function
9228  * checks to see if mbox completions are on the mailbox completion queue
9229  * and will process all the completions associated with the eq for the
9230  * mailbox completion queue.
9231  **/
9232 static bool
9233 lpfc_sli4_process_missed_mbox_completions(struct lpfc_hba *phba)
9234 {
9235 	struct lpfc_sli4_hba *sli4_hba = &phba->sli4_hba;
9236 	uint32_t eqidx;
9237 	struct lpfc_queue *fpeq = NULL;
9238 	struct lpfc_queue *eq;
9239 	bool mbox_pending;
9240 
9241 	if (unlikely(!phba) || (phba->sli_rev != LPFC_SLI_REV4))
9242 		return false;
9243 
9244 	/* Find the EQ associated with the mbox CQ */
9245 	if (sli4_hba->hdwq) {
9246 		for (eqidx = 0; eqidx < phba->cfg_irq_chann; eqidx++) {
9247 			eq = phba->sli4_hba.hba_eq_hdl[eqidx].eq;
9248 			if (eq && eq->queue_id == sli4_hba->mbx_cq->assoc_qid) {
9249 				fpeq = eq;
9250 				break;
9251 			}
9252 		}
9253 	}
9254 	if (!fpeq)
9255 		return false;
9256 
9257 	/* Turn off interrupts from this EQ */
9258 
9259 	sli4_hba->sli4_eq_clr_intr(fpeq);
9260 
9261 	/* Check to see if a mbox completion is pending */
9262 
9263 	mbox_pending = lpfc_sli4_mbox_completions_pending(phba);
9264 
9265 	/*
9266 	 * If a mbox completion is pending, process all the events on EQ
9267 	 * associated with the mbox completion queue (this could include
9268 	 * mailbox commands, async events, els commands, receive queue data
9269 	 * and fcp commands)
9270 	 */
9271 
9272 	if (mbox_pending)
9273 		/* process and rearm the EQ */
9274 		lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
9275 				     LPFC_QUEUE_WORK);
9276 	else
9277 		/* Always clear and re-arm the EQ */
9278 		sli4_hba->sli4_write_eq_db(phba, fpeq, 0, LPFC_QUEUE_REARM);
9279 
9280 	return mbox_pending;
9281 
9282 }
9283 
9284 /**
9285  * lpfc_mbox_timeout_handler - Worker thread function to handle mailbox timeout
9286  * @phba: Pointer to HBA context object.
9287  *
9288  * This function is called from worker thread when a mailbox command times out.
9289  * The caller is not required to hold any locks. This function will reset the
9290  * HBA and recover all the pending commands.
9291  **/
9292 void
9293 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
9294 {
9295 	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
9296 	MAILBOX_t *mb = NULL;
9297 
9298 	struct lpfc_sli *psli = &phba->sli;
9299 
9300 	/* If the mailbox completed, process the completion */
9301 	lpfc_sli4_process_missed_mbox_completions(phba);
9302 
9303 	if (!(psli->sli_flag & LPFC_SLI_ACTIVE))
9304 		return;
9305 
9306 	if (pmbox != NULL)
9307 		mb = &pmbox->u.mb;
9308 	/* Check the pmbox pointer first.  There is a race condition
9309 	 * between the mbox timeout handler getting executed in the
9310 	 * worklist and the mailbox actually completing. When this
9311 	 * race condition occurs, the mbox_active will be NULL.
9312 	 */
9313 	spin_lock_irq(&phba->hbalock);
9314 	if (pmbox == NULL) {
9315 		lpfc_printf_log(phba, KERN_WARNING,
9316 				LOG_MBOX | LOG_SLI,
9317 				"0353 Active Mailbox cleared - mailbox timeout "
9318 				"exiting\n");
9319 		spin_unlock_irq(&phba->hbalock);
9320 		return;
9321 	}
9322 
9323 	/* Mbox cmd <mbxCommand> timeout */
9324 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9325 			"0310 Mailbox command x%x timeout Data: x%x x%x x%px\n",
9326 			mb->mbxCommand,
9327 			phba->pport->port_state,
9328 			phba->sli.sli_flag,
9329 			phba->sli.mbox_active);
9330 	spin_unlock_irq(&phba->hbalock);
9331 
9332 	/* Setting state unknown so lpfc_sli_abort_iocb_ring
9333 	 * would get IOCB_ERROR from lpfc_sli_issue_iocb, allowing
9334 	 * it to fail all outstanding SCSI IO.
9335 	 */
9336 	set_bit(MBX_TMO_ERR, &phba->bit_flags);
9337 	spin_lock_irq(&phba->pport->work_port_lock);
9338 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
9339 	spin_unlock_irq(&phba->pport->work_port_lock);
9340 	spin_lock_irq(&phba->hbalock);
9341 	phba->link_state = LPFC_LINK_UNKNOWN;
9342 	psli->sli_flag &= ~LPFC_SLI_ACTIVE;
9343 	spin_unlock_irq(&phba->hbalock);
9344 
9345 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9346 			"0345 Resetting board due to mailbox timeout\n");
9347 
9348 	/* Reset the HBA device */
9349 	lpfc_reset_hba(phba);
9350 }
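
/*
 * Illustrative sketch (standalone userspace C, not driver code) of the
 * mailbox ownership contract documented for lpfc_sli_issue_mbox_s3() just
 * below: on MBX_BUSY or MBX_SUCCESS the SLI layer keeps the mailbox until
 * completion, so the caller must not free it; for any other return code,
 * ownership is handed back to the caller. demo_* names are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

enum demo_mbx_rc { DEMO_MBX_SUCCESS, DEMO_MBX_BUSY, DEMO_MBX_NOT_FINISHED };

static enum demo_mbx_rc demo_issue_mbox(void *mbox)
{
	(void)mbox;
	return DEMO_MBX_NOT_FINISHED;	/* pretend the issue failed */
}

int main(void)
{
	void *mbox = malloc(64);
	enum demo_mbx_rc rc;

	if (!mbox)
		return 1;
	rc = demo_issue_mbox(mbox);
	if (rc != DEMO_MBX_SUCCESS && rc != DEMO_MBX_BUSY) {
		free(mbox);	/* caller owns it again: clean up */
		printf("issue failed, mailbox freed by caller\n");
	}
	return 0;
}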
9351 
9352 /**
9353  * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
9354  * @phba: Pointer to HBA context object.
9355  * @pmbox: Pointer to mailbox object.
9356  * @flag: Flag indicating how the mailbox need to be processed.
9357  *
9358  * This function is called by discovery code and HBA management code
9359  * to submit a mailbox command to firmware with SLI-3 interface spec. This
9360  * function gets the hbalock to protect the data structures.
9361  * The mailbox command can be submitted in polling mode, in which case
9362  * this function will wait in a polling loop for the completion of the
9363  * mailbox.
9364  * If the mailbox is submitted in no_wait mode (not polling) the
9365  * function will submit the command and returns immediately without waiting
9366  * for the mailbox completion. The no_wait is supported only when HBA
9367  * is in SLI2/SLI3 mode - interrupts are enabled.
9368  * The SLI interface allows only one mailbox pending at a time. If the
9369  * mailbox is issued in polling mode and there is already a mailbox
9370  * pending, then the function will return an error. If the mailbox is issued
9371  * in NO_WAIT mode and there is a mailbox pending already, the function
9372  * will return MBX_BUSY after queuing the mailbox into mailbox queue.
9373  * The sli layer owns the mailbox object until the completion of mailbox
9374  * command if this function return MBX_BUSY or MBX_SUCCESS. For all other
9375  * return codes the caller owns the mailbox command after the return of
9376  * the function.
9377  **/
9378 static int
9379 lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
9380 		       uint32_t flag)
9381 {
9382 	MAILBOX_t *mbx;
9383 	struct lpfc_sli *psli = &phba->sli;
9384 	uint32_t status, evtctr;
9385 	uint32_t ha_copy, hc_copy;
9386 	int i;
9387 	unsigned long timeout;
9388 	unsigned long drvr_flag = 0;
9389 	uint32_t word0, ldata;
9390 	void __iomem *to_slim;
9391 	int processing_queue = 0;
9392 
9393 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
9394 	if (!pmbox) {
9395 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9396 		/* processing mbox queue from intr_handler */
9397 		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9398 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9399 			return MBX_SUCCESS;
9400 		}
9401 		processing_queue = 1;
9402 		pmbox = lpfc_mbox_get(phba);
9403 		if (!pmbox) {
9404 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9405 			return MBX_SUCCESS;
9406 		}
9407 	}
9408 
9409 	if (pmbox->mbox_cmpl && pmbox->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
9410 		pmbox->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
9411 		if (!pmbox->vport) {
9412 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9413 			lpfc_printf_log(phba, KERN_ERR,
9414 					LOG_MBOX | LOG_VPORT,
9415 					"1806 Mbox x%x failed. No vport\n",
9416 					pmbox->u.mb.mbxCommand);
9417 			dump_stack();
9418 			goto out_not_finished;
9419 		}
9420 	}
9421 
9422 	/* If the PCI channel is in offline state, do not post mbox. */
9423 	if (unlikely(pci_channel_offline(phba->pcidev))) {
9424 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9425 		goto out_not_finished;
9426 	}
9427 
9428 	/* If HBA has a deferred error attention, fail the iocb. */
9429 	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
9430 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9431 		goto out_not_finished;
9432 	}
9433 
9434 	psli = &phba->sli;
9435 
9436 	mbx = &pmbox->u.mb;
9437 	status = MBX_SUCCESS;
9438 
9439 	if (phba->link_state == LPFC_HBA_ERROR) {
9440 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9441 
9442 		/* Mbox command <mbxCommand> cannot issue */
9443 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9444 				"(%d):0311 Mailbox command x%x cannot "
9445 				"issue Data: x%x x%x\n",
9446 				pmbox->vport ? pmbox->vport->vpi : 0,
9447 				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9448 		goto out_not_finished;
9449 	}
9450 
9451 	if (mbx->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT) {
9452 		if (lpfc_readl(phba->HCregaddr, &hc_copy) ||
9453 			!(hc_copy & HC_MBINT_ENA)) {
9454 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9455 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9456 				"(%d):2528 Mailbox command x%x cannot "
9457 				"issue Data: x%x x%x\n",
9458 				pmbox->vport ? pmbox->vport->vpi : 0,
9459 				pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
9460 			goto out_not_finished;
9461 		}
9462 	}
9463 
9464 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9465 		/* Polling for a mbox command when another one is already active
9466 		 * is not allowed in SLI. Also, the driver must have established
9467 		 * SLI2 mode to queue and process multiple mbox commands.
9468 		 */
9469 
9470 		if (flag & MBX_POLL) {
9471 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9472 
9473 			/* Mbox command <mbxCommand> cannot issue */
9474 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9475 					"(%d):2529 Mailbox command x%x "
9476 					"cannot issue Data: x%x x%x\n",
9477 					pmbox->vport ? pmbox->vport->vpi : 0,
9478 					pmbox->u.mb.mbxCommand,
9479 					psli->sli_flag, flag);
9480 			goto out_not_finished;
9481 		}
9482 
9483 		if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
9484 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9485 			/* Mbox command <mbxCommand> cannot issue */
9486 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9487 					"(%d):2530 Mailbox command x%x "
9488 					"cannot issue Data: x%x x%x\n",
9489 					pmbox->vport ? pmbox->vport->vpi : 0,
9490 					pmbox->u.mb.mbxCommand,
9491 					psli->sli_flag, flag);
9492 			goto out_not_finished;
9493 		}
9494 
9495 		/* Another mailbox command is still being processed, queue this
9496 		 * command to be processed later.
9497 		 */
9498 		lpfc_mbox_put(phba, pmbox);
9499 
9500 		/* Mbox cmd issue - BUSY */
9501 		lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9502 				"(%d):0308 Mbox cmd issue - BUSY Data: "
9503 				"x%x x%x x%x x%x\n",
9504 				pmbox->vport ? pmbox->vport->vpi : 0xffffff,
9505 				mbx->mbxCommand,
9506 				phba->pport ? phba->pport->port_state : 0xff,
9507 				psli->sli_flag, flag);
9508 
9509 		psli->slistat.mbox_busy++;
9510 		spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9511 
9512 		if (pmbox->vport) {
9513 			lpfc_debugfs_disc_trc(pmbox->vport,
9514 				LPFC_DISC_TRC_MBOX_VPORT,
9515 				"MBOX Bsy vport:  cmd:x%x mb:x%x x%x",
9516 				(uint32_t)mbx->mbxCommand,
9517 				mbx->un.varWords[0], mbx->un.varWords[1]);
9518 		}
9519 		else {
9520 			lpfc_debugfs_disc_trc(phba->pport,
9521 				LPFC_DISC_TRC_MBOX,
9522 				"MBOX Bsy:        cmd:x%x mb:x%x x%x",
9523 				(uint32_t)mbx->mbxCommand,
9524 				mbx->un.varWords[0], mbx->un.varWords[1]);
9525 		}
9526 
9527 		return MBX_BUSY;
9528 	}
9529 
9530 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9531 
9532 	/* If we are not polling, we MUST be in SLI2 mode */
9533 	if (flag != MBX_POLL) {
9534 		if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
9535 		    (mbx->mbxCommand != MBX_KILL_BOARD)) {
9536 			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9537 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9538 			/* Mbox command <mbxCommand> cannot issue */
9539 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9540 					"(%d):2531 Mailbox command x%x "
9541 					"cannot issue Data: x%x x%x\n",
9542 					pmbox->vport ? pmbox->vport->vpi : 0,
9543 					pmbox->u.mb.mbxCommand,
9544 					psli->sli_flag, flag);
9545 			goto out_not_finished;
9546 		}
9547 		/* Set the timeout for the active mbox command */
9548 		timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox));
9549 		mod_timer(&psli->mbox_tmo, jiffies + timeout);
9550 	}
9551 
9552 	/* Mailbox cmd <cmd> issue */
9553 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
9554 			"(%d):0309 Mailbox cmd x%x issue Data: x%x x%x "
9555 			"x%x\n",
9556 			pmbox->vport ? pmbox->vport->vpi : 0,
9557 			mbx->mbxCommand,
9558 			phba->pport ? phba->pport->port_state : 0xff,
9559 			psli->sli_flag, flag);
9560 
9561 	if (mbx->mbxCommand != MBX_HEARTBEAT) {
9562 		if (pmbox->vport) {
9563 			lpfc_debugfs_disc_trc(pmbox->vport,
9564 				LPFC_DISC_TRC_MBOX_VPORT,
9565 				"MBOX Send vport: cmd:x%x mb:x%x x%x",
9566 				(uint32_t)mbx->mbxCommand,
9567 				mbx->un.varWords[0], mbx->un.varWords[1]);
9568 		} else {
9570 			lpfc_debugfs_disc_trc(phba->pport,
9571 				LPFC_DISC_TRC_MBOX,
9572 				"MBOX Send:       cmd:x%x mb:x%x x%x",
9573 				(uint32_t)mbx->mbxCommand,
9574 				mbx->un.varWords[0], mbx->un.varWords[1]);
9575 		}
9576 	}
9577 
9578 	psli->slistat.mbox_cmd++;
9579 	evtctr = psli->slistat.mbox_event;
9580 
9581 	/* next set own bit for the adapter and copy over command word */
9582 	mbx->mbxOwner = OWN_CHIP;
9583 
9584 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9585 		/* Populate mbox extension offset word. */
9586 		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len) {
9587 			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9588 				= (uint8_t *)phba->mbox_ext
9589 				  - (uint8_t *)phba->mbox;
9590 		}
9591 
9592 		/* Copy the mailbox extension data */
9593 		if (pmbox->in_ext_byte_len && pmbox->ext_buf) {
9594 			lpfc_sli_pcimem_bcopy(pmbox->ext_buf,
9595 					      (uint8_t *)phba->mbox_ext,
9596 					      pmbox->in_ext_byte_len);
9597 		}
9598 		/* Copy command data to host SLIM area */
9599 		lpfc_sli_pcimem_bcopy(mbx, phba->mbox, MAILBOX_CMD_SIZE);
9600 	} else {
9601 		/* Populate mbox extension offset word. */
9602 		if (pmbox->in_ext_byte_len || pmbox->out_ext_byte_len)
9603 			*(((uint32_t *)mbx) + pmbox->mbox_offset_word)
9604 				= MAILBOX_HBA_EXT_OFFSET;
9605 
9606 		/* Copy the mailbox extension data */
9607 		if (pmbox->in_ext_byte_len && pmbox->ext_buf)
9608 			lpfc_memcpy_to_slim(phba->MBslimaddr +
9609 				MAILBOX_HBA_EXT_OFFSET,
9610 				pmbox->ext_buf, pmbox->in_ext_byte_len);
9611 
9612 		if (mbx->mbxCommand == MBX_CONFIG_PORT)
9613 			/* copy command data into host mbox for cmpl */
9614 			lpfc_sli_pcimem_bcopy(mbx, phba->mbox,
9615 					      MAILBOX_CMD_SIZE);
9616 
9617 		/* First copy mbox command data to HBA SLIM, skip past first
9618 		   word */
9619 		to_slim = phba->MBslimaddr + sizeof (uint32_t);
9620 		lpfc_memcpy_to_slim(to_slim, &mbx->un.varWords[0],
9621 			    MAILBOX_CMD_SIZE - sizeof (uint32_t));
9622 
9623 		/* Next copy over first word, with mbxOwner set */
9624 		ldata = *((uint32_t *)mbx);
9625 		to_slim = phba->MBslimaddr;
9626 		writel(ldata, to_slim);
9627 		readl(to_slim); /* flush */
9628 
9629 		if (mbx->mbxCommand == MBX_CONFIG_PORT)
9630 			/* switch over to host mailbox */
9631 			psli->sli_flag |= LPFC_SLI_ACTIVE;
9632 	}
9633 
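	/*
	 * Ensure the mailbox/SLIM updates above are posted before the
	 * Chip Attention doorbell writes below.
	 */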
9634 	wmb();
9635 
9636 	switch (flag) {
9637 	case MBX_NOWAIT:
9638 		/* Set up reference to mailbox command */
9639 		psli->mbox_active = pmbox;
9640 		/* Interrupt board to do it */
9641 		writel(CA_MBATT, phba->CAregaddr);
9642 		readl(phba->CAregaddr); /* flush */
9643 		/* Don't wait for it to finish, just return */
9644 		break;
9645 
9646 	case MBX_POLL:
9647 		/* Set up null reference to mailbox command */
9648 		psli->mbox_active = NULL;
9649 		/* Interrupt board to do it */
9650 		writel(CA_MBATT, phba->CAregaddr);
9651 		readl(phba->CAregaddr); /* flush */
9652 
9653 		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9654 			/* First read mbox status word */
9655 			word0 = *((uint32_t *)phba->mbox);
9656 			word0 = le32_to_cpu(word0);
9657 		} else {
9658 			/* First read mbox status word */
9659 			if (lpfc_readl(phba->MBslimaddr, &word0)) {
9660 				spin_unlock_irqrestore(&phba->hbalock,
9661 						       drvr_flag);
9662 				goto out_not_finished;
9663 			}
9664 		}
9665 
9666 		/* Read the HBA Host Attention Register */
9667 		if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9668 			spin_unlock_irqrestore(&phba->hbalock,
9669 						       drvr_flag);
9670 			goto out_not_finished;
9671 		}
9672 		timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, pmbox)) + jiffies;
9673 		i = 0;
9674 		/* Wait for command to complete */
9675 		while (((word0 & OWN_CHIP) == OWN_CHIP) ||
9676 		       (!(ha_copy & HA_MBATT) &&
9677 			(phba->link_state > LPFC_WARM_START))) {
9678 			if (time_after(jiffies, timeout)) {
9679 				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9680 				spin_unlock_irqrestore(&phba->hbalock,
9681 						       drvr_flag);
9682 				goto out_not_finished;
9683 			}
9684 
9685 			/* Check if we took a mbox interrupt while we were
9686 			   polling */
9687 			if (((word0 & OWN_CHIP) != OWN_CHIP)
9688 			    && (evtctr != psli->slistat.mbox_event))
9689 				break;
9690 
9691 			if (i++ > 10) {
9692 				spin_unlock_irqrestore(&phba->hbalock,
9693 						       drvr_flag);
9694 				msleep(1);
9695 				spin_lock_irqsave(&phba->hbalock, drvr_flag);
9696 			}
9697 
9698 			if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9699 				/* First copy command data */
9700 				word0 = *((uint32_t *)phba->mbox);
9701 				word0 = le32_to_cpu(word0);
9702 				if (mbx->mbxCommand == MBX_CONFIG_PORT) {
9703 					MAILBOX_t *slimmb;
9704 					uint32_t slimword0;
9705 					/* Check real SLIM for any errors */
9706 					slimword0 = readl(phba->MBslimaddr);
9707 					slimmb = (MAILBOX_t *)&slimword0;
9708 					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
9709 					    && slimmb->mbxStatus) {
9710 						psli->sli_flag &=
9711 						    ~LPFC_SLI_ACTIVE;
9712 						word0 = slimword0;
9713 					}
9714 				}
9715 			} else {
9716 				/* First copy command data */
9717 				word0 = readl(phba->MBslimaddr);
9718 			}
9719 			/* Read the HBA Host Attention Register */
9720 			if (lpfc_readl(phba->HAregaddr, &ha_copy)) {
9721 				spin_unlock_irqrestore(&phba->hbalock,
9722 						       drvr_flag);
9723 				goto out_not_finished;
9724 			}
9725 		}
9726 
9727 		if (psli->sli_flag & LPFC_SLI_ACTIVE) {
9728 			/* copy results back to user */
9729 			lpfc_sli_pcimem_bcopy(phba->mbox, mbx,
9730 						MAILBOX_CMD_SIZE);
9731 			/* Copy the mailbox extension data */
9732 			if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
9733 				lpfc_sli_pcimem_bcopy(phba->mbox_ext,
9734 						      pmbox->ext_buf,
9735 						      pmbox->out_ext_byte_len);
9736 			}
9737 		} else {
9738 			/* First copy command data */
9739 			lpfc_memcpy_from_slim(mbx, phba->MBslimaddr,
9740 						MAILBOX_CMD_SIZE);
9741 			/* Copy the mailbox extension data */
9742 			if (pmbox->out_ext_byte_len && pmbox->ext_buf) {
9743 				lpfc_memcpy_from_slim(
9744 					pmbox->ext_buf,
9745 					phba->MBslimaddr +
9746 					MAILBOX_HBA_EXT_OFFSET,
9747 					pmbox->out_ext_byte_len);
9748 			}
9749 		}
9750 
9751 		writel(HA_MBATT, phba->HAregaddr);
9752 		readl(phba->HAregaddr); /* flush */
9753 
9754 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
9755 		status = mbx->mbxStatus;
9756 	}
9757 
9758 	spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
9759 	return status;
9760 
9761 out_not_finished:
9762 	if (processing_queue) {
9763 		pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
9764 		lpfc_mbox_cmpl_put(phba, pmbox);
9765 	}
9766 	return MBX_NOT_FINISHED;
9767 }
9768 
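/*
 * Example (illustrative sketch, not part of the driver): a typical polled
 * mailbox command as issued through the lpfc_sli_issue_mbox() jump-table
 * wrapper. lpfc_read_rev() is just one of many mailbox preparers; error
 * handling is reduced to the essentials.
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);
 *	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
 *		mempool_free(pmb, phba->mbox_mem_pool);
 *		return -EIO;
 *	}
 *	mempool_free(pmb, phba->mbox_mem_pool);
 */
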
9769 /**
9770  * lpfc_sli4_async_mbox_block - Block posting SLI4 asynchronous mailbox command
9771  * @phba: Pointer to HBA context object.
9772  *
9773  * The function blocks the posting of SLI4 asynchronous mailbox commands from
9774  * the driver internal pending mailbox queue. It then tries to wait out any
9775  * outstanding mailbox command before returning.
9776  *
9777  * Returns:
9778  * 	0 - the outstanding mailbox command completed (or none was active);
9779  * 	1 - the wait for the outstanding mailbox command timed out.
9780  **/
9781 static int
9782 lpfc_sli4_async_mbox_block(struct lpfc_hba *phba)
9783 {
9784 	struct lpfc_sli *psli = &phba->sli;
9785 	LPFC_MBOXQ_t *mboxq;
9786 	int rc = 0;
9787 	unsigned long timeout = 0;
9788 	u32 sli_flag;
9789 	u8 cmd, subsys, opcode;
9790 
9791 	/* Mark the asynchronous mailbox command posting as blocked */
9792 	spin_lock_irq(&phba->hbalock);
9793 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
9794 	/* Determine how long we might wait for the active mailbox
9795 	 * command to be gracefully completed by firmware.
9796 	 */
9797 	if (phba->sli.mbox_active)
9798 		timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
9799 						phba->sli.mbox_active)) + jiffies;
9800 	spin_unlock_irq(&phba->hbalock);
9801 
9802 	/* Make sure the mailbox is really active */
9803 	if (timeout)
9804 		lpfc_sli4_process_missed_mbox_completions(phba);
9805 
9806 	/* Wait for the outstanding mailbox command to complete */
9807 	while (phba->sli.mbox_active) {
9808 		/* Check active mailbox complete status every 2ms */
9809 		msleep(2);
9810 		if (time_after(jiffies, timeout)) {
9811 			/* Timeout, mark the outstanding cmd not complete */
9812 
9813 			/* Sanity check that sli.mbox_active has not completed
9814 			 * or been cancelled from another context during the
9815 			 * last 2ms sleep, so take the hbalock before logging.
9816 			 */
9817 			spin_lock_irq(&phba->hbalock);
9818 			if (phba->sli.mbox_active) {
9819 				mboxq = phba->sli.mbox_active;
9820 				cmd = mboxq->u.mb.mbxCommand;
9821 				subsys = lpfc_sli_config_mbox_subsys_get(phba,
9822 									 mboxq);
9823 				opcode = lpfc_sli_config_mbox_opcode_get(phba,
9824 									 mboxq);
9825 				sli_flag = psli->sli_flag;
9826 				spin_unlock_irq(&phba->hbalock);
9827 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9828 						"2352 Mailbox command x%x "
9829 						"(x%x/x%x) sli_flag x%x could "
9830 						"not complete\n",
9831 						cmd, subsys, opcode,
9832 						sli_flag);
9833 			} else {
9834 				spin_unlock_irq(&phba->hbalock);
9835 			}
9836 
9837 			rc = 1;
9838 			break;
9839 		}
9840 	}
9841 
9842 	/* Could not cleanly block async mailbox command, fail it */
9843 	if (rc) {
9844 		spin_lock_irq(&phba->hbalock);
9845 		psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9846 		spin_unlock_irq(&phba->hbalock);
9847 	}
9848 	return rc;
9849 }
9850 
9851 /**
9852  * lpfc_sli4_async_mbox_unblock - Unblock posting SLI4 async mailbox command
9853  * @phba: Pointer to HBA context object.
9854  *
9855  * The function unblocks and resumes posting of SLI4 asynchronous mailbox
9856  * commands from the driver internal pending mailbox queue. It makes sure
9857  * that there is no outstanding mailbox command before resuming posting
9858  * asynchronous mailbox commands. If, for any reason, there is an outstanding
9859  * mailbox command, it will try to wait it out before resuming asynchronous
9860  * mailbox command posting.
9861  **/
9862 static void
9863 lpfc_sli4_async_mbox_unblock(struct lpfc_hba *phba)
9864 {
9865 	struct lpfc_sli *psli = &phba->sli;
9866 
9867 	spin_lock_irq(&phba->hbalock);
9868 	if (!(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
9869 		/* Asynchronous mailbox posting is not blocked, do nothing */
9870 		spin_unlock_irq(&phba->hbalock);
9871 		return;
9872 	}
9873 
9874 	/* The outstanding synchronous mailbox command is guaranteed to be
9875 	 * done, either successfully or by timeout; after a timeout the
9876 	 * outstanding command is always removed, so just unblock posting of
9877 	 * async mailbox commands and resume.
9878 	 */
9879 	psli->sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
9880 	spin_unlock_irq(&phba->hbalock);
9881 
9882 	/* wake up worker thread to post asynchronous mailbox command */
9883 	lpfc_worker_wake_up(phba);
9884 }
9885 
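/*
 * Example (sketch): the block/unblock pair brackets a synchronous
 * operation that must not race with asynchronous mailbox posting,
 * mirroring what lpfc_sli_issue_mbox_s4() does for MBX_POLL:
 *
 *	if (lpfc_sli4_async_mbox_block(phba))
 *		return MBXERR_ERROR;
 *	rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
 *	lpfc_sli4_async_mbox_unblock(phba);
 */
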
9886 /**
9887  * lpfc_sli4_wait_bmbx_ready - Wait for bootstrap mailbox register ready
9888  * @phba: Pointer to HBA context object.
9889  * @mboxq: Pointer to mailbox object.
9890  *
9891  * The function waits for the bootstrap mailbox register ready bit from
9892  * the port for twice the regular mailbox command timeout value.
9893  * Returns:
9894  *      0 - no timeout on waiting for bootstrap mailbox register ready.
9895  *      MBXERR_ERROR - wait for bootstrap mailbox register timed out or port
9896  *                     is in an unrecoverable state.
9897  **/
9898 static int
9899 lpfc_sli4_wait_bmbx_ready(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9900 {
9901 	uint32_t db_ready;
9902 	unsigned long timeout;
9903 	struct lpfc_register bmbx_reg;
9904 	struct lpfc_register portstat_reg = {-1};
9905 
9906 	/* Sanity check - there is no point in waiting if the port is in an
9907 	 * unrecoverable state.
9908 	 */
9909 	if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) >=
9910 	    LPFC_SLI_INTF_IF_TYPE_2) {
9911 		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
9912 			       &portstat_reg.word0) ||
9913 		    lpfc_sli4_unrecoverable_port(&portstat_reg)) {
9914 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
9915 					"3858 Skipping bmbx ready because "
9916 					"Port Status x%x\n",
9917 					portstat_reg.word0);
9918 			return MBXERR_ERROR;
9919 		}
9920 	}
9921 
9922 	timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq)) + jiffies;
9923 
9924 	do {
9925 		bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
9926 		db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
9927 		if (!db_ready)
9928 			mdelay(2);
9929 
9930 		if (time_after(jiffies, timeout))
9931 			return MBXERR_ERROR;
9932 	} while (!db_ready);
9933 
9934 	return 0;
9935 }
9936 
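/*
 * The bounded wait above follows the usual poll-with-deadline idiom; a
 * minimal sketch with placeholder names (addr, ready_bit, tmo and reg are
 * illustrative, not driver symbols):
 *
 *	timeout = secs_to_jiffies(tmo) + jiffies;
 *	do {
 *		reg.word0 = readl(addr);
 *		if (!bf_get(ready_bit, &reg))
 *			mdelay(2);
 *		if (time_after(jiffies, timeout))
 *			return MBXERR_ERROR;
 *	} while (!bf_get(ready_bit, &reg));
 */
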
9937 /**
9938  * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
9939  * @phba: Pointer to HBA context object.
9940  * @mboxq: Pointer to mailbox object.
9941  *
9942  * The function posts a mailbox to the port.  The mailbox is expected
9943  * to be completely filled in and ready for the port to operate on it.
9944  * This routine executes a synchronous completion operation on the
9945  * mailbox by polling for its completion.
9946  *
9947  * The caller must not be holding any locks when calling this routine.
9948  *
9949  * Returns:
9950  *	MBX_SUCCESS - mailbox posted successfully
9951  *	Any of the MBX error values.
9952  **/
9953 static int
9954 lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
9955 {
9956 	int rc = MBX_SUCCESS;
9957 	unsigned long iflag;
9958 	uint32_t mcqe_status;
9959 	uint32_t mbx_cmnd;
9960 	struct lpfc_sli *psli = &phba->sli;
9961 	struct lpfc_mqe *mb = &mboxq->u.mqe;
9962 	struct lpfc_bmbx_create *mbox_rgn;
9963 	struct dma_address *dma_address;
9964 
9965 	/*
9966 	 * Only one mailbox can be active to the bootstrap mailbox region
9967 	 * at a time and there is no queueing provided.
9968 	 */
9969 	spin_lock_irqsave(&phba->hbalock, iflag);
9970 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
9971 		spin_unlock_irqrestore(&phba->hbalock, iflag);
9972 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
9973 				"(%d):2532 Mailbox command x%x (x%x/x%x) "
9974 				"cannot issue Data: x%x x%x\n",
9975 				mboxq->vport ? mboxq->vport->vpi : 0,
9976 				mboxq->u.mb.mbxCommand,
9977 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
9978 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
9979 				psli->sli_flag, MBX_POLL);
9980 		return MBXERR_ERROR;
9981 	}
9982 	/* The driver grabs the token and owns it until released */
9983 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
9984 	phba->sli.mbox_active = mboxq;
9985 	spin_unlock_irqrestore(&phba->hbalock, iflag);
9986 
9987 	/* Wait for the bootstrap mailbox register to become ready */
9988 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
9989 	if (rc)
9990 		goto exit;
9991 	/*
9992 	 * Initialize the bootstrap memory region to avoid stale data areas
9993 	 * in the mailbox post.  Then copy the caller's mailbox contents to
9994 	 * the bmbx mailbox region.
9995 	 */
9996 	mbx_cmnd = bf_get(lpfc_mqe_command, mb);
9997 	memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
9998 	lpfc_sli4_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
9999 			       sizeof(struct lpfc_mqe));
10000 
10001 	/* Post the high mailbox dma address to the port and wait for ready. */
10002 	dma_address = &phba->sli4_hba.bmbx.dma_address;
10003 	writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
10004 
10005 	/* Wait for the bootstrap mailbox register after the hi-address write */
10006 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
10007 	if (rc)
10008 		goto exit;
10009 
10010 	/* Post the low mailbox dma address to the port. */
10011 	writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
10012 
10013 	/* Wait for the bootstrap mailbox register after the low-address write */
10014 	rc = lpfc_sli4_wait_bmbx_ready(phba, mboxq);
10015 	if (rc)
10016 		goto exit;
10017 
10018 	/*
10019 	 * Read the CQ to ensure the mailbox has completed.
10020 	 * If so, update the mailbox status so that the upper layers
10021 	 * can complete the request normally.
10022 	 */
10023 	lpfc_sli4_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
10024 			       sizeof(struct lpfc_mqe));
10025 	mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
10026 	lpfc_sli4_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
10027 			       sizeof(struct lpfc_mcqe));
10028 	mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
10029 	/*
10030 	 * When the CQE status indicates a failure and the mailbox status
10031 	 * indicates success then copy the CQE status into the mailbox status
10032 	 * (and prefix it with x4000).
10033 	 */
10034 	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
10035 		if (bf_get(lpfc_mqe_status, mb) == MBX_SUCCESS)
10036 			bf_set(lpfc_mqe_status, mb,
10037 			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
10038 		rc = MBXERR_ERROR;
10039 	} else
10040 		lpfc_sli4_swap_str(phba, mboxq);
10041 
10042 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10043 			"(%d):0356 Mailbox cmd x%x (x%x/x%x) Status x%x "
10044 			"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
10045 			" x%x x%x CQ: x%x x%x x%x x%x\n",
10046 			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10047 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10048 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10049 			bf_get(lpfc_mqe_status, mb),
10050 			mb->un.mb_words[0], mb->un.mb_words[1],
10051 			mb->un.mb_words[2], mb->un.mb_words[3],
10052 			mb->un.mb_words[4], mb->un.mb_words[5],
10053 			mb->un.mb_words[6], mb->un.mb_words[7],
10054 			mb->un.mb_words[8], mb->un.mb_words[9],
10055 			mb->un.mb_words[10], mb->un.mb_words[11],
10056 			mb->un.mb_words[12], mboxq->mcqe.word0,
10057 			mboxq->mcqe.mcqe_tag0, mboxq->mcqe.mcqe_tag1,
10058 			mboxq->mcqe.trailer);
10059 exit:
10060 	/* We are holding the token; no lock needed to release it */
10061 	spin_lock_irqsave(&phba->hbalock, iflag);
10062 	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10063 	phba->sli.mbox_active = NULL;
10064 	spin_unlock_irqrestore(&phba->hbalock, iflag);
10065 	return rc;
10066 }
10067 
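/*
 * Sketch of the status merge above: a failing MCQE status (e.g. 0x2)
 * paired with a successful MQE status becomes mailbox status 0x4002, so
 * CQE-reported failures stay distinguishable from native mailbox errors:
 *
 *	bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
 */
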
10068 /**
10069  * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
10070  * @phba: Pointer to HBA context object.
10071  * @mboxq: Pointer to mailbox object.
10072  * @flag: Flag indicating how the mailbox need to be processed.
10073  *
10074  * This function is called by discovery code and HBA management code to submit
10075  * a mailbox command to firmware with SLI-4 interface spec.
10076  *
10077  * Return codes: regardless of the value returned, the caller owns the
10078  * mailbox command after the function returns.
10079  **/
10080 static int
10081 lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
10082 		       uint32_t flag)
10083 {
10084 	struct lpfc_sli *psli = &phba->sli;
10085 	unsigned long iflags;
10086 	int rc;
10087 
10088 	/* Dump the issued mailbox command if idiag dumping is set up */
10089 	lpfc_idiag_mbxacc_dump_issue_mbox(phba, &mboxq->u.mb);
10090 
10091 	rc = lpfc_mbox_dev_check(phba);
10092 	if (unlikely(rc)) {
10093 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10094 				"(%d):2544 Mailbox command x%x (x%x/x%x) "
10095 				"cannot issue Data: x%x x%x\n",
10096 				mboxq->vport ? mboxq->vport->vpi : 0,
10097 				mboxq->u.mb.mbxCommand,
10098 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10099 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10100 				psli->sli_flag, flag);
10101 		goto out_not_finished;
10102 	}
10103 
10104 	/* Detect polling mode and jump to a handler */
10105 	if (!phba->sli4_hba.intr_enable) {
10106 		if (flag == MBX_POLL)
10107 			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10108 		else
10109 			rc = -EIO;
10110 		if (rc != MBX_SUCCESS)
10111 			lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10112 					"(%d):2541 Mailbox command x%x "
10113 					"(x%x/x%x) failure: "
10114 					"mqe_sta: x%x mcqe_sta: x%x/x%x "
10115 					"Data: x%x x%x\n",
10116 					mboxq->vport ? mboxq->vport->vpi : 0,
10117 					mboxq->u.mb.mbxCommand,
10118 					lpfc_sli_config_mbox_subsys_get(phba,
10119 									mboxq),
10120 					lpfc_sli_config_mbox_opcode_get(phba,
10121 									mboxq),
10122 					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10123 					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10124 					bf_get(lpfc_mcqe_ext_status,
10125 					       &mboxq->mcqe),
10126 					psli->sli_flag, flag);
10127 		return rc;
10128 	} else if (flag == MBX_POLL) {
10129 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
10130 				"(%d):2542 Try to issue mailbox command "
10131 				"x%x (x%x/x%x) synchronously ahead of async "
10132 				"mailbox command queue: x%x x%x\n",
10133 				mboxq->vport ? mboxq->vport->vpi : 0,
10134 				mboxq->u.mb.mbxCommand,
10135 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10136 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10137 				psli->sli_flag, flag);
10138 		/* Try to block the asynchronous mailbox posting */
10139 		rc = lpfc_sli4_async_mbox_block(phba);
10140 		if (!rc) {
10141 			/* Successfully blocked, now issue sync mbox cmd */
10142 			rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
10143 			if (rc != MBX_SUCCESS)
10144 				lpfc_printf_log(phba, KERN_WARNING,
10145 					LOG_MBOX | LOG_SLI,
10146 					"(%d):2597 Sync Mailbox command "
10147 					"x%x (x%x/x%x) failure: "
10148 					"mqe_sta: x%x mcqe_sta: x%x/x%x "
10149 					"Data: x%x x%x\n",
10150 					mboxq->vport ? mboxq->vport->vpi : 0,
10151 					mboxq->u.mb.mbxCommand,
10152 					lpfc_sli_config_mbox_subsys_get(phba,
10153 									mboxq),
10154 					lpfc_sli_config_mbox_opcode_get(phba,
10155 									mboxq),
10156 					bf_get(lpfc_mqe_status, &mboxq->u.mqe),
10157 					bf_get(lpfc_mcqe_status, &mboxq->mcqe),
10158 					bf_get(lpfc_mcqe_ext_status,
10159 					       &mboxq->mcqe),
10160 					psli->sli_flag, flag);
10161 			/* Unblock the async mailbox posting afterward */
10162 			lpfc_sli4_async_mbox_unblock(phba);
10163 		}
10164 		return rc;
10165 	}
10166 
10167 	/* Now handle the interrupt-mode asynchronous mailbox command */
10168 	rc = lpfc_mbox_cmd_check(phba, mboxq);
10169 	if (rc) {
10170 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10171 				"(%d):2543 Mailbox command x%x (x%x/x%x) "
10172 				"cannot issue Data: x%x x%x\n",
10173 				mboxq->vport ? mboxq->vport->vpi : 0,
10174 				mboxq->u.mb.mbxCommand,
10175 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10176 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10177 				psli->sli_flag, flag);
10178 		goto out_not_finished;
10179 	}
10180 
10181 	/* Put the mailbox command into the driver internal FIFO */
10182 	psli->slistat.mbox_busy++;
10183 	spin_lock_irqsave(&phba->hbalock, iflags);
10184 	lpfc_mbox_put(phba, mboxq);
10185 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10186 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10187 			"(%d):0354 Mbox cmd issue - Enqueue Data: "
10188 			"x%x (x%x/x%x) x%x x%x x%x x%x\n",
10189 			mboxq->vport ? mboxq->vport->vpi : 0xffffff,
10190 			bf_get(lpfc_mqe_command, &mboxq->u.mqe),
10191 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10192 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10193 			mboxq->u.mb.un.varUnregLogin.rpi,
10194 			phba->pport->port_state,
10195 			psli->sli_flag, MBX_NOWAIT);
10196 	/* Wake up worker thread to transport mailbox command from head */
10197 	lpfc_worker_wake_up(phba);
10198 
10199 	return MBX_BUSY;
10200 
10201 out_not_finished:
10202 	return MBX_NOT_FINISHED;
10203 }
10204 
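/*
 * Example (sketch): asynchronous SLI-4 issue from the caller's side. The
 * command is queued (MBX_BUSY) and posted later by the worker thread; the
 * completion handler owns cleanup, so only MBX_NOT_FINISHED leaves the
 * mailbox with the caller:
 *
 *	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 *	if (rc == MBX_NOT_FINISHED)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */
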
10205 /**
10206  * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
10207  * @phba: Pointer to HBA context object.
10208  *
10209  * This function is called by worker thread to send a mailbox command to
10210  * SLI4 HBA firmware.
10211  *
10212  **/
10213 int
10214 lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
10215 {
10216 	struct lpfc_sli *psli = &phba->sli;
10217 	LPFC_MBOXQ_t *mboxq;
10218 	int rc = MBX_SUCCESS;
10219 	unsigned long iflags;
10220 	struct lpfc_mqe *mqe;
10221 	uint32_t mbx_cmnd;
10222 
10223 	/* Check interrupt mode before posting async mailbox command */
10224 	if (unlikely(!phba->sli4_hba.intr_enable))
10225 		return MBX_NOT_FINISHED;
10226 
10227 	/* Check for mailbox command service token */
10228 	spin_lock_irqsave(&phba->hbalock, iflags);
10229 	if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
10230 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10231 		return MBX_NOT_FINISHED;
10232 	}
10233 	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
10234 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10235 		return MBX_NOT_FINISHED;
10236 	}
10237 	if (unlikely(phba->sli.mbox_active)) {
10238 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10239 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10240 				"0384 There is pending active mailbox cmd\n");
10241 		return MBX_NOT_FINISHED;
10242 	}
10243 	/* Take the mailbox command service token */
10244 	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
10245 
10246 	/* Get the next mailbox command from head of queue */
10247 	mboxq = lpfc_mbox_get(phba);
10248 
10249 	/* If no more mailbox commands are waiting to post, we're done */
10250 	if (!mboxq) {
10251 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10252 		spin_unlock_irqrestore(&phba->hbalock, iflags);
10253 		return MBX_SUCCESS;
10254 	}
10255 	phba->sli.mbox_active = mboxq;
10256 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10257 
10258 	/* Check device readiness for posting mailbox command */
10259 	rc = lpfc_mbox_dev_check(phba);
10260 	if (unlikely(rc))
10261 		/* Driver clean routine will clean up pending mailbox */
10262 		goto out_not_finished;
10263 
10264 	/* Prepare the mbox command to be posted */
10265 	mqe = &mboxq->u.mqe;
10266 	mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
10267 
10268 	/* Start timer for the mbox_tmo and log some mailbox post messages */
10269 	mod_timer(&psli->mbox_tmo, (jiffies +
10270 		  secs_to_jiffies(lpfc_mbox_tmo_val(phba, mboxq))));
10271 
10272 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
10273 			"(%d):0355 Mailbox cmd x%x (x%x/x%x) issue Data: "
10274 			"x%x x%x\n",
10275 			mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
10276 			lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10277 			lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10278 			phba->pport->port_state, psli->sli_flag);
10279 
10280 	if (mbx_cmnd != MBX_HEARTBEAT) {
10281 		if (mboxq->vport) {
10282 			lpfc_debugfs_disc_trc(mboxq->vport,
10283 				LPFC_DISC_TRC_MBOX_VPORT,
10284 				"MBOX Send vport: cmd:x%x mb:x%x x%x",
10285 				mbx_cmnd, mqe->un.mb_words[0],
10286 				mqe->un.mb_words[1]);
10287 		} else {
10288 			lpfc_debugfs_disc_trc(phba->pport,
10289 				LPFC_DISC_TRC_MBOX,
10290 				"MBOX Send: cmd:x%x mb:x%x x%x",
10291 				mbx_cmnd, mqe->un.mb_words[0],
10292 				mqe->un.mb_words[1]);
10293 		}
10294 	}
10295 	psli->slistat.mbox_cmd++;
10296 
10297 	/* Post the mailbox command to the port */
10298 	rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
10299 	if (rc != MBX_SUCCESS) {
10300 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10301 				"(%d):2533 Mailbox command x%x (x%x/x%x) "
10302 				"cannot issue Data: x%x x%x\n",
10303 				mboxq->vport ? mboxq->vport->vpi : 0,
10304 				mboxq->u.mb.mbxCommand,
10305 				lpfc_sli_config_mbox_subsys_get(phba, mboxq),
10306 				lpfc_sli_config_mbox_opcode_get(phba, mboxq),
10307 				psli->sli_flag, MBX_NOWAIT);
10308 		goto out_not_finished;
10309 	}
10310 
10311 	return rc;
10312 
10313 out_not_finished:
10314 	spin_lock_irqsave(&phba->hbalock, iflags);
10315 	if (phba->sli.mbox_active) {
10316 		mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
10317 		__lpfc_mbox_cmpl_put(phba, mboxq);
10318 		/* Release the token */
10319 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
10320 		phba->sli.mbox_active = NULL;
10321 	}
10322 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10323 
10324 	return MBX_NOT_FINISHED;
10325 }
10326 
10327 /**
10328  * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
10329  * @phba: Pointer to HBA context object.
10330  * @pmbox: Pointer to mailbox object.
10331  * @flag: Flag indicating how the mailbox need to be processed.
10332  *
10333  * This routine wraps the actual SLI3 or SLI4 mailbox issuing routine from
10334  * the API jump table function pointer from the lpfc_hba struct.
10335  *
10336  * Return codes: regardless of the value returned, the caller owns the
10337  * mailbox command after the function returns.
10338  **/
10339 int
10340 lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
10341 {
10342 	return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
10343 }
10344 
10345 /**
10346  * lpfc_mbox_api_table_setup - Set up mbox api function jump table
10347  * @phba: The hba struct for which this call is being executed.
10348  * @dev_grp: The HBA PCI-Device group number.
10349  *
10350  * This routine sets up the mbox interface API function jump table in @phba
10351  * struct.
10352  * Returns: 0 - success, -ENODEV - failure.
10353  **/
10354 int
10355 lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
10356 {
10357 
10358 	switch (dev_grp) {
10359 	case LPFC_PCI_DEV_LP:
10360 		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
10361 		phba->lpfc_sli_handle_slow_ring_event =
10362 				lpfc_sli_handle_slow_ring_event_s3;
10363 		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
10364 		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
10365 		phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
10366 		break;
10367 	case LPFC_PCI_DEV_OC:
10368 		phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
10369 		phba->lpfc_sli_handle_slow_ring_event =
10370 				lpfc_sli_handle_slow_ring_event_s4;
10371 		phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
10372 		phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
10373 		phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
10374 		break;
10375 	default:
10376 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
10377 				"1420 Invalid HBA PCI-device group: 0x%x\n",
10378 				dev_grp);
10379 		return -ENODEV;
10380 	}
10381 	return 0;
10382 }
10383 
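/*
 * Sketch: once the jump table is installed, mailbox callers stay
 * SLI-revision agnostic; the wrapper dispatches to the _s3 or _s4
 * routine selected here:
 *
 *	rc = lpfc_mbox_api_table_setup(phba, LPFC_PCI_DEV_OC);
 *	if (!rc)
 *		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
 */
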
10384 /**
10385  * __lpfc_sli_ringtx_put - Add an iocb to the txq
10386  * @phba: Pointer to HBA context object.
10387  * @pring: Pointer to driver SLI ring object.
10388  * @piocb: Pointer to address of newly added command iocb.
10389  *
10390  * This function is called with hbalock held for SLI3 ports or
10391  * the ring lock held for SLI4 ports to add a command
10392  * iocb to the txq when SLI layer cannot submit the command iocb
10393  * to the ring.
10394  **/
10395 void
10396 __lpfc_sli_ringtx_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10397 		    struct lpfc_iocbq *piocb)
10398 {
10399 	if (phba->sli_rev == LPFC_SLI_REV4)
10400 		lockdep_assert_held(&pring->ring_lock);
10401 	else
10402 		lockdep_assert_held(&phba->hbalock);
10403 	/* Insert the caller's iocb in the txq tail for later processing. */
10404 	list_add_tail(&piocb->list, &pring->txq);
10405 }
10406 
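/*
 * Sketch of the locking contract stated above: an SLI-4 caller takes the
 * ring lock (an SLI-3 caller takes the hbalock) around the txq insert:
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	__lpfc_sli_ringtx_put(phba, pring, piocb);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 */
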
10407 /**
10408  * lpfc_sli_next_iocb - Get the next iocb in the txq
10409  * @phba: Pointer to HBA context object.
10410  * @pring: Pointer to driver SLI ring object.
10411  * @piocb: Pointer to address of newly added command iocb.
10412  *
10413  * This function is called with hbalock held before a new
10414  * iocb is submitted to the firmware. This function checks
10415  * txq to flush the iocbs in txq to Firmware before
10416  * submitting new iocbs to the Firmware.
10417  * If there are iocbs in the txq which need to be submitted
10418  * to firmware, lpfc_sli_next_iocb returns the first element
10419  * of the txq after dequeuing it from txq.
10420  * If there is no iocb in the txq, the function returns the original
10421  * *piocb and sets *piocb to NULL. The caller checks *piocb to find
10422  * out whether there are more commands in the txq.
10423  **/
10424 static struct lpfc_iocbq *
10425 lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
10426 		   struct lpfc_iocbq **piocb)
10427 {
10428 	struct lpfc_iocbq * nextiocb;
10429 
10430 	lockdep_assert_held(&phba->hbalock);
10431 
10432 	nextiocb = lpfc_sli_ringtx_get(phba, pring);
10433 	if (!nextiocb) {
10434 		nextiocb = *piocb;
10435 		*piocb = NULL;
10436 	}
10437 
10438 	return nextiocb;
10439 }
10440 
10441 /**
10442  * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
10443  * @phba: Pointer to HBA context object.
10444  * @ring_number: SLI ring number to issue iocb on.
10445  * @piocb: Pointer to command iocb.
10446  * @flag: Flag indicating if this command can be put into txq.
10447  *
10448  * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
10449  * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
10450  * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
10451  * flag is turned on, the function returns IOCB_ERROR. When the link is down,
10452  * this function allows only iocbs for posting buffers. This function finds
10453  * next available slot in the command ring and posts the command to the
10454  * available slot and writes the port attention register to request HBA start
10455  * processing new iocb. If there is no slot available in the ring and
10456  * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
10457  * the function returns IOCB_BUSY.
10458  *
10459  * This function is called with hbalock held. The function will return success
10460  * after it successfully submits the iocb to firmware or after adding it to
10461  * the txq.
10462  **/
10463 static int
10464 __lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
10465 		    struct lpfc_iocbq *piocb, uint32_t flag)
10466 {
10467 	struct lpfc_iocbq *nextiocb;
10468 	IOCB_t *iocb;
10469 	struct lpfc_sli_ring *pring = &phba->sli.sli3_ring[ring_number];
10470 
10471 	lockdep_assert_held(&phba->hbalock);
10472 
10473 	if (piocb->cmd_cmpl && (!piocb->vport) &&
10474 	   (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
10475 	   (piocb->iocb.ulpCommand != CMD_CLOSE_XRI_CN)) {
10476 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
10477 				"1807 IOCB x%x failed. No vport\n",
10478 				piocb->iocb.ulpCommand);
10479 		dump_stack();
10480 		return IOCB_ERROR;
10481 	}
10482 
10483 
10484 	/* If the PCI channel is in offline state, do not post iocbs. */
10485 	if (unlikely(pci_channel_offline(phba->pcidev)))
10486 		return IOCB_ERROR;
10487 
10488 	/* If HBA has a deferred error attention, fail the iocb. */
10489 	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
10490 		return IOCB_ERROR;
10491 
10492 	/*
10493 	 * We should never get an IOCB if we are in a < LINK_DOWN state
10494 	 */
10495 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
10496 		return IOCB_ERROR;
10497 
10498 	/*
10499 	 * Check to see if we are blocking IOCB processing because of an
10500 	 * outstanding event.
10501 	 */
10502 	if (unlikely(pring->flag & LPFC_STOP_IOCB_EVENT))
10503 		goto iocb_busy;
10504 
10505 	if (unlikely(phba->link_state == LPFC_LINK_DOWN)) {
10506 		/*
10507 		 * Only CREATE_XRI, CLOSE_XRI, and QUE_RING_BUF
10508 		 * can be issued if the link is not up.
10509 		 */
10510 		switch (piocb->iocb.ulpCommand) {
10511 		case CMD_QUE_RING_BUF_CN:
10512 		case CMD_QUE_RING_BUF64_CN:
10513 			/*
10514 			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
10515 			 * completion, cmd_cmpl MUST be 0.
10516 			 */
10517 			if (piocb->cmd_cmpl)
10518 				piocb->cmd_cmpl = NULL;
10519 			fallthrough;
10520 		case CMD_CREATE_XRI_CR:
10521 		case CMD_CLOSE_XRI_CN:
10522 		case CMD_CLOSE_XRI_CX:
10523 			break;
10524 		default:
10525 			goto iocb_busy;
10526 		}
10527 
10528 	/*
10529 	 * For FCP commands, we must be in a state where we can process link
10530 	 * attention events.
10531 	 */
10532 	} else if (unlikely(pring->ringno == LPFC_FCP_RING &&
10533 			    !(phba->sli.sli_flag & LPFC_PROCESS_LA))) {
10534 		goto iocb_busy;
10535 	}
10536 
10537 	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
10538 	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
10539 		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);
10540 
10541 	if (iocb)
10542 		lpfc_sli_update_ring(phba, pring);
10543 	else
10544 		lpfc_sli_update_full_ring(phba, pring);
10545 
10546 	if (!piocb)
10547 		return IOCB_SUCCESS;
10548 
10549 	goto out_busy;
10550 
10551  iocb_busy:
10552 	pring->stats.iocb_cmd_delay++;
10553 
10554  out_busy:
10555 
10556 	if (!(flag & SLI_IOCB_RET_IOCB)) {
10557 		__lpfc_sli_ringtx_put(phba, pring, piocb);
10558 		return IOCB_SUCCESS;
10559 	}
10560 
10561 	return IOCB_BUSY;
10562 }
10563 
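/*
 * Example (sketch): the flag argument decides busy handling. Without
 * SLI_IOCB_RET_IOCB a full ring queues the iocb to the txq and still
 * returns IOCB_SUCCESS; with it, the caller gets IOCB_BUSY back and must
 * requeue or release the iocb itself:
 *
 *	rc = __lpfc_sli_issue_iocb_s3(phba, LPFC_ELS_RING, piocb,
 *				      SLI_IOCB_RET_IOCB);
 *	if (rc == IOCB_BUSY)
 *		lpfc_sli_release_iocbq(phba, piocb);
 */
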
10564 /**
10565  * __lpfc_sli_issue_fcp_io_s3 - SLI3 device for sending fcp io iocb
10566  * @phba: Pointer to HBA context object.
10567  * @ring_number: SLI ring number to issue wqe on.
10568  * @piocb: Pointer to command iocb.
10569  * @flag: Flag indicating if this command can be put into txq.
10570  *
10571  * __lpfc_sli_issue_fcp_io_s3 is a wrapper function that invokes the lockless
10572  * function to send an iocb command to an HBA with SLI-3 interface spec.
10573  *
10574  * This function takes the hbalock before invoking the lockless version.
10575  * The function will return success after it successfully submits the iocb
10576  * to firmware or after adding it to the txq.
10577  **/
10578 static int
10579 __lpfc_sli_issue_fcp_io_s3(struct lpfc_hba *phba, uint32_t ring_number,
10580 			   struct lpfc_iocbq *piocb, uint32_t flag)
10581 {
10582 	unsigned long iflags;
10583 	int rc;
10584 
10585 	spin_lock_irqsave(&phba->hbalock, iflags);
10586 	rc = __lpfc_sli_issue_iocb_s3(phba, ring_number, piocb, flag);
10587 	spin_unlock_irqrestore(&phba->hbalock, iflags);
10588 
10589 	return rc;
10590 }
10591 
10592 /**
10593  * __lpfc_sli_issue_fcp_io_s4 - SLI4 device for sending fcp io wqe
10594  * @phba: Pointer to HBA context object.
10595  * @ring_number: SLI ring number to issue wqe on.
10596  * @piocb: Pointer to command iocb.
10597  * @flag: Flag indicating if this command can be put into txq.
10598  *
10599  * __lpfc_sli_issue_fcp_io_s4 is used by other functions in the driver to issue
10600  * a wqe command to an HBA with SLI-4 interface spec.
10601  *
10602  * This function is a lockless version. The function will return success
10603  * after it successfully submits the wqe to firmware or after adding it to
10604  * the txq.
10605  **/
10606 static int
10607 __lpfc_sli_issue_fcp_io_s4(struct lpfc_hba *phba, uint32_t ring_number,
10608 			   struct lpfc_iocbq *piocb, uint32_t flag)
10609 {
10610 	struct lpfc_io_buf *lpfc_cmd = piocb->io_buf;
10611 
10612 	lpfc_prep_embed_io(phba, lpfc_cmd);
10613 	return lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, piocb);
10614 }
10615 
10616 void
10617 lpfc_prep_embed_io(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_cmd)
10618 {
10619 	struct lpfc_iocbq *piocb = &lpfc_cmd->cur_iocbq;
10620 	union lpfc_wqe128 *wqe = &lpfc_cmd->cur_iocbq.wqe;
10621 	struct sli4_sge_le *sgl;
10622 	u32 type_size;
10623 
10624 	/* 128 byte wqe support here */
10625 	sgl = (struct sli4_sge_le *)lpfc_cmd->dma_sgl;
10626 
10627 	if (phba->fcp_embed_io) {
10628 		struct fcp_cmnd *fcp_cmnd;
10629 		u32 *ptr;
10630 
10631 		fcp_cmnd = lpfc_cmd->fcp_cmnd;
10632 
10633 		/* Word 0-2 - FCP_CMND */
10634 		type_size = le32_to_cpu(sgl->sge_len);
10635 		type_size |= ULP_BDE64_TYPE_BDE_IMMED;
10636 		wqe->generic.bde.tus.w = type_size;
10637 		wqe->generic.bde.addrHigh = 0;
10638 		wqe->generic.bde.addrLow =  72;  /* Word 18 */
10639 
10640 		bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10641 		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 0);
10642 
10643 		/* Word 18-29  FCP CMND Payload */
10644 		ptr = &wqe->words[18];
10645 		lpfc_sli_pcimem_bcopy(fcp_cmnd, ptr, le32_to_cpu(sgl->sge_len));
10646 	} else {
10647 		/* Word 0-2 - Inline BDE */
10648 		wqe->generic.bde.tus.f.bdeFlags =  BUFF_TYPE_BDE_64;
10649 		wqe->generic.bde.tus.f.bdeSize = le32_to_cpu(sgl->sge_len);
10650 		wqe->generic.bde.addrHigh = le32_to_cpu(sgl->addr_hi);
10651 		wqe->generic.bde.addrLow = le32_to_cpu(sgl->addr_lo);
10652 
10653 		/* Word 10 */
10654 		bf_set(wqe_dbde, &wqe->generic.wqe_com, 1);
10655 		bf_set(wqe_wqes, &wqe->generic.wqe_com, 0);
10656 	}
10657 
10658 	/* add the VMID tags as per switch response */
10659 	if (unlikely(piocb->cmd_flag & LPFC_IO_VMID)) {
10660 		if (phba->pport->vmid_flag & LPFC_VMID_TYPE_PRIO) {
10661 			bf_set(wqe_ccpe, &wqe->fcp_iwrite.wqe_com, 1);
10662 			bf_set(wqe_ccp, &wqe->fcp_iwrite.wqe_com,
10663 					(piocb->vmid_tag.cs_ctl_vmid));
10664 		} else if (phba->cfg_vmid_app_header) {
10665 			bf_set(wqe_appid, &wqe->fcp_iwrite.wqe_com, 1);
10666 			bf_set(wqe_wqes, &wqe->fcp_iwrite.wqe_com, 1);
10667 			wqe->words[31] = piocb->vmid_tag.app_id;
10668 		}
10669 	}
10670 }
10671 
10672 /**
10673  * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
10674  * @phba: Pointer to HBA context object.
10675  * @ring_number: SLI ring number to issue iocb on.
10676  * @piocb: Pointer to command iocb.
10677  * @flag: Flag indicating if this command can be put into txq.
10678  *
10679  * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
10680  * an iocb command to an HBA with SLI-4 interface spec.
10681  *
10682  * This function is called with ringlock held. The function will return success
10683  * after it successfully submits the iocb to firmware or after adding it to
10684  * the txq.
10685  **/
10686 static int
10687 __lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
10688 			 struct lpfc_iocbq *piocb, uint32_t flag)
10689 {
10690 	struct lpfc_sglq *sglq;
10691 	union lpfc_wqe128 *wqe;
10692 	struct lpfc_queue *wq;
10693 	struct lpfc_sli_ring *pring;
10694 	u32 ulp_command = get_job_cmnd(phba, piocb);
10695 
10696 	/* Get the WQ */
10697 	if ((piocb->cmd_flag & LPFC_IO_FCP) ||
10698 	    (piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
10699 		wq = phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq;
10700 	} else {
10701 		wq = phba->sli4_hba.els_wq;
10702 	}
10703 
10704 	/* Get corresponding ring */
10705 	pring = wq->pring;
10706 
10707 	/*
10708 	 * The WQE can be either 64 or 128 bytes.
10709 	 */
10710 
10711 	lockdep_assert_held(&pring->ring_lock);
10712 	wqe = &piocb->wqe;
10713 	if (piocb->sli4_xritag == NO_XRI) {
10714 		if (ulp_command == CMD_ABORT_XRI_CX)
10715 			sglq = NULL;
10716 		else {
10717 			sglq = __lpfc_sli_get_els_sglq(phba, piocb);
10718 			if (!sglq) {
10719 				if (!(flag & SLI_IOCB_RET_IOCB)) {
10720 					__lpfc_sli_ringtx_put(phba,
10721 							pring,
10722 							piocb);
10723 					return IOCB_SUCCESS;
10724 				} else {
10725 					return IOCB_BUSY;
10726 				}
10727 			}
10728 		}
10729 	} else if (piocb->cmd_flag & LPFC_IO_FCP) {
10730 		/* These IOs already have an XRI and a mapped sgl. */
10731 		sglq = NULL;
10732 	} else {
10734 		/*
10735 		 * This is a continuation of a command (CX), so this
10736 		 * sglq is on the active list.
10737 		 */
10738 		sglq = __lpfc_get_active_sglq(phba, piocb->sli4_lxritag);
10739 		if (!sglq)
10740 			return IOCB_ERROR;
10741 	}
10742 
10743 	if (sglq) {
10744 		piocb->sli4_lxritag = sglq->sli4_lxritag;
10745 		piocb->sli4_xritag = sglq->sli4_xritag;
10746 
10747 		/* For an ABTS sent by the initiator to a CT exchange, the
10748 		 * RX_ID field will be filled with the newly
10749 		 * allocated responder XRI.
10750 		 */
10751 		if (ulp_command == CMD_XMIT_BLS_RSP64_CX &&
10752 		    piocb->abort_bls == LPFC_ABTS_UNSOL_INT)
10753 			bf_set(xmit_bls_rsp64_rxid, &wqe->xmit_bls_rsp,
10754 			       piocb->sli4_xritag);
10755 
10756 		bf_set(wqe_xri_tag, &wqe->generic.wqe_com,
10757 		       piocb->sli4_xritag);
10758 
10759 		if (lpfc_wqe_bpl2sgl(phba, piocb, sglq) == NO_XRI)
10760 			return IOCB_ERROR;
10761 	}
10762 
10763 	if (lpfc_sli4_wq_put(wq, wqe))
10764 		return IOCB_ERROR;
10765 
10766 	lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
10767 
10768 	return 0;
10769 }
10770 
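/*
 * Sketch: __lpfc_sli_issue_iocb_s4() is the lockless core; callers
 * normally reach it through a locked path equivalent to:
 *
 *	spin_lock_irqsave(&pring->ring_lock, iflags);
 *	rc = __lpfc_sli_issue_iocb_s4(phba, ring_number, piocb, flag);
 *	spin_unlock_irqrestore(&pring->ring_lock, iflags);
 */
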
10771 /*
10772  * lpfc_sli_issue_fcp_io - Wrapper func for issuing fcp i/o
10773  *
10774  * This routine wraps the actual fcp i/o function, issuing a WQE for SLI-4
10775  * or an IOCB for SLI-3, through the function pointer from the lpfc_hba
10776  * struct.
10777  *
10778  * Return codes:
10779  * IOCB_ERROR - Error
10780  * IOCB_SUCCESS - Success
10781  * IOCB_BUSY - Busy
10782  **/
10783 int
10784 lpfc_sli_issue_fcp_io(struct lpfc_hba *phba, uint32_t ring_number,
10785 		      struct lpfc_iocbq *piocb, uint32_t flag)
10786 {
10787 	return phba->__lpfc_sli_issue_fcp_io(phba, ring_number, piocb, flag);
10788 }
10789 
10790 /*
10791  * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
10792  *
10793  * This routine wraps the actual lockless version of the IOCB issuing
10794  * function through the function pointer from the lpfc_hba struct.
10795  *
10796  * Return codes:
10797  * IOCB_ERROR - Error
10798  * IOCB_SUCCESS - Success
10799  * IOCB_BUSY - Busy
10800  **/
10801 int
10802 __lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
10803 		struct lpfc_iocbq *piocb, uint32_t flag)
10804 {
10805 	return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
10806 }
10807 
10808 static void
10809 __lpfc_sli_prep_els_req_rsp_s3(struct lpfc_iocbq *cmdiocbq,
10810 			       struct lpfc_vport *vport,
10811 			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10812 			       u32 elscmd, u8 tmo, u8 expect_rsp)
10813 {
10814 	struct lpfc_hba *phba = vport->phba;
10815 	IOCB_t *cmd;
10816 
10817 	cmd = &cmdiocbq->iocb;
10818 	memset(cmd, 0, sizeof(*cmd));
10819 
10820 	cmd->un.elsreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10821 	cmd->un.elsreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10822 	cmd->un.elsreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10823 
10824 	if (expect_rsp) {
10825 		cmd->un.elsreq64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
10826 		cmd->un.elsreq64.remoteID = did; /* DID */
10827 		cmd->ulpCommand = CMD_ELS_REQUEST64_CR;
10828 		cmd->ulpTimeout = tmo;
10829 	} else {
10830 		cmd->un.elsreq64.bdl.bdeSize = sizeof(struct ulp_bde64);
10831 		cmd->un.genreq64.xmit_els_remoteID = did; /* DID */
10832 		cmd->ulpCommand = CMD_XMIT_ELS_RSP64_CX;
10833 		cmd->ulpPU = PARM_NPIV_DID;
10834 	}
10835 	cmd->ulpBdeCount = 1;
10836 	cmd->ulpLe = 1;
10837 	cmd->ulpClass = CLASS3;
10838 
10839 	/* If we have NPIV enabled, we want to send ELS traffic by VPI. */
10840 	if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
10841 		if (expect_rsp) {
10842 			cmd->un.elsreq64.myID = vport->fc_myDID;
10843 
10844 			/* For ELS_REQUEST64_CR, use the VPI by default */
10845 			cmd->ulpContext = phba->vpi_ids[vport->vpi];
10846 		}
10847 
10848 		cmd->ulpCt_h = 0;
10849 		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10850 		if (elscmd == ELS_CMD_ECHO)
10851 			cmd->ulpCt_l = 0; /* context = invalid RPI */
10852 		else
10853 			cmd->ulpCt_l = 1; /* context = VPI */
10854 	}
10855 }
10856 
10857 static void
10858 __lpfc_sli_prep_els_req_rsp_s4(struct lpfc_iocbq *cmdiocbq,
10859 			       struct lpfc_vport *vport,
10860 			       struct lpfc_dmabuf *bmp, u16 cmd_size, u32 did,
10861 			       u32 elscmd, u8 tmo, u8 expect_rsp)
10862 {
10863 	struct lpfc_hba  *phba = vport->phba;
10864 	union lpfc_wqe128 *wqe;
10865 	struct ulp_bde64_le *bde;
10866 	u8 els_id;
10867 
10868 	wqe = &cmdiocbq->wqe;
10869 	memset(wqe, 0, sizeof(*wqe));
10870 
10871 	/* Word 0 - 2 BDE */
10872 	bde = (struct ulp_bde64_le *)&wqe->generic.bde;
10873 	bde->addr_low = cpu_to_le32(putPaddrLow(bmp->phys));
10874 	bde->addr_high = cpu_to_le32(putPaddrHigh(bmp->phys));
10875 	bde->type_size = cpu_to_le32(cmd_size);
10876 	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
10877 
10878 	if (expect_rsp) {
10879 		bf_set(wqe_cmnd, &wqe->els_req.wqe_com, CMD_ELS_REQUEST64_WQE);
10880 
10881 		/* Transfer length */
10882 		wqe->els_req.payload_len = cmd_size;
10883 		wqe->els_req.max_response_payload_len = FCELSSIZE;
10884 
10885 		/* DID */
10886 		bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
10887 
10888 		/* Word 11 - ELS_ID */
10889 		switch (elscmd) {
10890 		case ELS_CMD_PLOGI:
10891 			els_id = LPFC_ELS_ID_PLOGI;
10892 			break;
10893 		case ELS_CMD_FLOGI:
10894 			els_id = LPFC_ELS_ID_FLOGI;
10895 			break;
10896 		case ELS_CMD_LOGO:
10897 			els_id = LPFC_ELS_ID_LOGO;
10898 			break;
10899 		case ELS_CMD_FDISC:
10900 			if (!vport->fc_myDID) {
10901 				els_id = LPFC_ELS_ID_FDISC;
10902 				break;
10903 			}
10904 			fallthrough;
10905 		default:
10906 			els_id = LPFC_ELS_ID_DEFAULT;
10907 			break;
10908 		}
10909 
10910 		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
10911 	} else {
10912 		/* DID */
10913 		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest, did);
10914 
10915 		/* Transfer length */
10916 		wqe->xmit_els_rsp.response_payload_len = cmd_size;
10917 
10918 		bf_set(wqe_cmnd, &wqe->xmit_els_rsp.wqe_com,
10919 		       CMD_XMIT_ELS_RSP64_WQE);
10920 	}
10921 
10922 	bf_set(wqe_tmo, &wqe->generic.wqe_com, tmo);
10923 	bf_set(wqe_reqtag, &wqe->generic.wqe_com, cmdiocbq->iotag);
10924 	bf_set(wqe_class, &wqe->generic.wqe_com, CLASS3);
10925 
10926 	/* If we have NPIV enabled, we want to send ELS traffic by VPI.
10927 	 * For SLI4, since the driver controls VPIs we also want to include
10928 	 * all ELS pt2pt protocol traffic as well.
10929 	 */
10930 	if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) ||
10931 	    test_bit(FC_PT2PT, &vport->fc_flag)) {
10932 		if (expect_rsp) {
10933 			bf_set(els_req64_sid, &wqe->els_req, vport->fc_myDID);
10934 
10935 			/* For ELS_REQUEST64_WQE, use the VPI by default */
10936 			bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
10937 			       phba->vpi_ids[vport->vpi]);
10938 		}
10939 
10940 		/* The CT field must be 0=INVALID_RPI for the ECHO cmd */
10941 		if (elscmd == ELS_CMD_ECHO)
10942 			bf_set(wqe_ct, &wqe->generic.wqe_com, 0);
10943 		else
10944 			bf_set(wqe_ct, &wqe->generic.wqe_com, 1);
10945 	}
10946 }
10947 
10948 void
10949 lpfc_sli_prep_els_req_rsp(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
10950 			  struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
10951 			  u16 cmd_size, u32 did, u32 elscmd, u8 tmo,
10952 			  u8 expect_rsp)
10953 {
10954 	phba->__lpfc_sli_prep_els_req_rsp(cmdiocbq, vport, bmp, cmd_size, did,
10955 					  elscmd, tmo, expect_rsp);
10956 }
10957 
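/*
 * Example (sketch): preparing an ELS request through the jump-table
 * wrapper above; the values are illustrative (a PLOGI expecting a
 * response):
 *
 *	lpfc_sli_prep_els_req_rsp(phba, elsiocb, vport, bmp, cmd_size,
 *				  did, ELS_CMD_PLOGI, tmo, 1);
 */
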
10958 static void
10959 __lpfc_sli_prep_gen_req_s3(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10960 			   u16 rpi, u32 num_entry, u8 tmo)
10961 {
10962 	IOCB_t *cmd;
10963 
10964 	cmd = &cmdiocbq->iocb;
10965 	memset(cmd, 0, sizeof(*cmd));
10966 
10967 	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
10968 	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
10969 	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
10970 	cmd->un.genreq64.bdl.bdeSize = num_entry * sizeof(struct ulp_bde64);
10971 
10972 	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
10973 	cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
10974 	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
10975 
10976 	cmd->ulpContext = rpi;
10977 	cmd->ulpClass = CLASS3;
10978 	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
10979 	cmd->ulpBdeCount = 1;
10980 	cmd->ulpLe = 1;
10981 	cmd->ulpOwner = OWN_CHIP;
10982 	cmd->ulpTimeout = tmo;
10983 }
10984 
10985 static void
10986 __lpfc_sli_prep_gen_req_s4(struct lpfc_iocbq *cmdiocbq, struct lpfc_dmabuf *bmp,
10987 			   u16 rpi, u32 num_entry, u8 tmo)
10988 {
10989 	union lpfc_wqe128 *cmdwqe;
10990 	struct ulp_bde64_le *bde, *bpl;
10991 	u32 xmit_len = 0, total_len = 0, size, type, i;
10992 
10993 	cmdwqe = &cmdiocbq->wqe;
10994 	memset(cmdwqe, 0, sizeof(*cmdwqe));
10995 
10996 	/* Calculate total_len and xmit_len */
10997 	bpl = (struct ulp_bde64_le *)bmp->virt;
10998 	for (i = 0; i < num_entry; i++) {
10999 		size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
11000 		total_len += size;
11001 	}
11002 	for (i = 0; i < num_entry; i++) {
11003 		size = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_SIZE_MASK;
11004 		type = le32_to_cpu(bpl[i].type_size) & ULP_BDE64_TYPE_MASK;
11005 		if (type != ULP_BDE64_TYPE_BDE_64)
11006 			break;
11007 		xmit_len += size;
11008 	}
11009 
11010 	/* Words 0 - 2 */
11011 	bde = (struct ulp_bde64_le *)&cmdwqe->generic.bde;
11012 	bde->addr_low = bpl->addr_low;
11013 	bde->addr_high = bpl->addr_high;
11014 	bde->type_size = cpu_to_le32(xmit_len);
11015 	bde->type_size |= cpu_to_le32(ULP_BDE64_TYPE_BDE_64);
11016 
11017 	/* Word 3 */
11018 	cmdwqe->gen_req.request_payload_len = xmit_len;
11019 
11020 	/* Word 5 */
11021 	bf_set(wqe_type, &cmdwqe->gen_req.wge_ctl, FC_TYPE_CT);
11022 	bf_set(wqe_rctl, &cmdwqe->gen_req.wge_ctl, FC_RCTL_DD_UNSOL_CTL);
11023 	bf_set(wqe_si, &cmdwqe->gen_req.wge_ctl, 1);
11024 	bf_set(wqe_la, &cmdwqe->gen_req.wge_ctl, 1);
11025 
11026 	/* Word 6 */
11027 	bf_set(wqe_ctxt_tag, &cmdwqe->gen_req.wqe_com, rpi);
11028 
11029 	/* Word 7 */
11030 	bf_set(wqe_tmo, &cmdwqe->gen_req.wqe_com, tmo);
11031 	bf_set(wqe_class, &cmdwqe->gen_req.wqe_com, CLASS3);
11032 	bf_set(wqe_cmnd, &cmdwqe->gen_req.wqe_com, CMD_GEN_REQUEST64_CR);
11033 	bf_set(wqe_ct, &cmdwqe->gen_req.wqe_com, SLI4_CT_RPI);
11034 
11035 	/* Word 12 */
11036 	cmdwqe->gen_req.max_response_payload_len = total_len - xmit_len;
11037 }
11038 
11039 void
11040 lpfc_sli_prep_gen_req(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11041 		      struct lpfc_dmabuf *bmp, u16 rpi, u32 num_entry, u8 tmo)
11042 {
11043 	phba->__lpfc_sli_prep_gen_req(cmdiocbq, bmp, rpi, num_entry, tmo);
11044 }
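/*
 * Editorial sketch, not driver code: a hypothetical caller that has already
 * built a BPL in @bmp and allocated @cmdiocbq could use the wrapper above to
 * build a GEN_REQUEST64 without knowing whether the port is SLI3 or SLI4.
 * The helper name and the error mapping below are illustrative assumptions.
 */
#if 0
static int lpfc_example_send_gen_req(struct lpfc_hba *phba,
				     struct lpfc_iocbq *cmdiocbq,
				     struct lpfc_dmabuf *bmp,
				     u16 rpi, u32 num_entry, u8 tmo)
{
	int rc;

	/* Dispatches to __lpfc_sli_prep_gen_req_s3 or _s4 via the table */
	lpfc_sli_prep_gen_req(phba, cmdiocbq, bmp, rpi, num_entry, tmo);

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	return (rc == IOCB_ERROR) ? -EIO : 0;
}
#endif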
11045 
11046 static void
11047 __lpfc_sli_prep_xmit_seq64_s3(struct lpfc_iocbq *cmdiocbq,
11048 			      struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11049 			      u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11050 {
11051 	IOCB_t *icmd;
11052 
11053 	icmd = &cmdiocbq->iocb;
11054 	memset(icmd, 0, sizeof(*icmd));
11055 
11056 	icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
11057 	icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
11058 	icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
11059 	icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
11060 	icmd->un.xseq64.w5.hcsw.Fctl = LA;
11061 	if (last_seq)
11062 		icmd->un.xseq64.w5.hcsw.Fctl |= LS;
11063 	icmd->un.xseq64.w5.hcsw.Dfctl = 0;
11064 	icmd->un.xseq64.w5.hcsw.Rctl = rctl;
11065 	icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
11066 
11067 	icmd->ulpBdeCount = 1;
11068 	icmd->ulpLe = 1;
11069 	icmd->ulpClass = CLASS3;
11070 
11071 	switch (cr_cx_cmd) {
11072 	case CMD_XMIT_SEQUENCE64_CR:
11073 		icmd->ulpContext = rpi;
11074 		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
11075 		break;
11076 	case CMD_XMIT_SEQUENCE64_CX:
11077 		icmd->ulpContext = ox_id;
11078 		icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
11079 		break;
11080 	default:
11081 		break;
11082 	}
11083 }
11084 
11085 static void
11086 __lpfc_sli_prep_xmit_seq64_s4(struct lpfc_iocbq *cmdiocbq,
11087 			      struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11088 			      u32 full_size, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11089 {
11090 	union lpfc_wqe128 *wqe;
11091 	struct ulp_bde64 *bpl;
11092 
11093 	wqe = &cmdiocbq->wqe;
11094 	memset(wqe, 0, sizeof(*wqe));
11095 
11096 	/* Words 0 - 2 */
11097 	bpl = (struct ulp_bde64 *)bmp->virt;
11098 	wqe->xmit_sequence.bde.addrHigh = bpl->addrHigh;
11099 	wqe->xmit_sequence.bde.addrLow = bpl->addrLow;
11100 	wqe->xmit_sequence.bde.tus.w = bpl->tus.w;
11101 
11102 	/* Word 5 */
11103 	bf_set(wqe_ls, &wqe->xmit_sequence.wge_ctl, last_seq);
11104 	bf_set(wqe_la, &wqe->xmit_sequence.wge_ctl, 1);
11105 	bf_set(wqe_dfctl, &wqe->xmit_sequence.wge_ctl, 0);
11106 	bf_set(wqe_rctl, &wqe->xmit_sequence.wge_ctl, rctl);
11107 	bf_set(wqe_type, &wqe->xmit_sequence.wge_ctl, FC_TYPE_CT);
11108 
11109 	/* Word 6 */
11110 	bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com, rpi);
11111 
11112 	bf_set(wqe_cmnd, &wqe->xmit_sequence.wqe_com,
11113 	       CMD_XMIT_SEQUENCE64_WQE);
11114 
11115 	/* Word 7 */
11116 	bf_set(wqe_class, &wqe->xmit_sequence.wqe_com, CLASS3);
11117 
11118 	/* Word 9 */
11119 	bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com, ox_id);
11120 
11121 	if (cmdiocbq->cmd_flag & (LPFC_IO_LIBDFC | LPFC_IO_LOOPBACK)) {
11122 		/* Word 10 */
11123 		if (cmdiocbq->cmd_flag & LPFC_IO_VMID) {
11124 			bf_set(wqe_appid, &wqe->xmit_sequence.wqe_com, 1);
11125 			bf_set(wqe_wqes, &wqe->xmit_sequence.wqe_com, 1);
11126 			wqe->words[31] = LOOPBACK_SRC_APPID;
11127 		}
11128 
11129 		/* Word 12 */
11130 		wqe->xmit_sequence.xmit_len = full_size;
11131 	} else {
11132 		wqe->xmit_sequence.xmit_len =
11133 			wqe->xmit_sequence.bde.tus.f.bdeSize;
11134 	}
11135 }
11136 
11137 void
11138 lpfc_sli_prep_xmit_seq64(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11139 			 struct lpfc_dmabuf *bmp, u16 rpi, u16 ox_id,
11140 			 u32 num_entry, u8 rctl, u8 last_seq, u8 cr_cx_cmd)
11141 {
11142 	phba->__lpfc_sli_prep_xmit_seq64(cmdiocbq, bmp, rpi, ox_id, num_entry,
11143 					 rctl, last_seq, cr_cx_cmd);
11144 }
11145 
11146 static void
11147 __lpfc_sli_prep_abort_xri_s3(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11148 			     u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11149 			     bool wqec)
11150 {
11151 	IOCB_t *icmd;
11152 
11153 	icmd = &cmdiocbq->iocb;
11154 	memset(icmd, 0, sizeof(*icmd));
11155 
11156 	/* Word 5 */
11157 	icmd->un.acxri.abortContextTag = ulp_context;
11158 	icmd->un.acxri.abortIoTag = iotag;
11159 
11160 	if (ia) {
11161 		/* Word 7 */
11162 		icmd->ulpCommand = CMD_CLOSE_XRI_CN;
11163 	} else {
11164 		/* Word 3 */
11165 		icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
11166 
11167 		/* Word 7 */
11168 		icmd->ulpClass = ulp_class;
11169 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
11170 	}
11171 
11172 	/* Word 7 */
11173 	icmd->ulpLe = 1;
11174 }
11175 
11176 static void
11177 __lpfc_sli_prep_abort_xri_s4(struct lpfc_iocbq *cmdiocbq, u16 ulp_context,
11178 			     u16 iotag, u8 ulp_class, u16 cqid, bool ia,
11179 			     bool wqec)
11180 {
11181 	union lpfc_wqe128 *wqe;
11182 
11183 	wqe = &cmdiocbq->wqe;
11184 	memset(wqe, 0, sizeof(*wqe));
11185 
11186 	/* Word 3 */
11187 	bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
11188 	if (ia)
11189 		bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
11190 	else
11191 		bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
11192 
11193 	/* Word 7 */
11194 	bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_WQE);
11195 
11196 	/* Word 8 */
11197 	wqe->abort_cmd.wqe_com.abort_tag = ulp_context;
11198 
11199 	/* Word 9 */
11200 	bf_set(wqe_reqtag, &wqe->abort_cmd.wqe_com, iotag);
11201 
11202 	/* Word 10 */
11203 	bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
11204 
11205 	/* Word 11 */
11206 	if (wqec)
11207 		bf_set(wqe_wqec, &wqe->abort_cmd.wqe_com, 1);
11208 	bf_set(wqe_cqid, &wqe->abort_cmd.wqe_com, cqid);
11209 	bf_set(wqe_cmd_type, &wqe->abort_cmd.wqe_com, OTHER_COMMAND);
11210 }
11211 
11212 void
11213 lpfc_sli_prep_abort_xri(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocbq,
11214 			u16 ulp_context, u16 iotag, u8 ulp_class, u16 cqid,
11215 			bool ia, bool wqec)
11216 {
11217 	phba->__lpfc_sli_prep_abort_xri(cmdiocbq, ulp_context, iotag, ulp_class,
11218 					cqid, ia, wqec);
11219 }
11220 
11221 /**
11222  * lpfc_sli_api_table_setup - Set up sli api function jump table
11223  * @phba: The hba struct for which this call is being executed.
11224  * @dev_grp: The HBA PCI-Device group number.
11225  *
11226  * This routine sets up the SLI interface API function jump table in @phba
11227  * struct.
11228  * Returns: 0 - success, -ENODEV - failure.
11229  **/
11230 int
11231 lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
11232 {
11233 
11234 	switch (dev_grp) {
11235 	case LPFC_PCI_DEV_LP:
11236 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
11237 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
11238 		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s3;
11239 		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s3;
11240 		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s3;
11241 		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s3;
11242 		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s3;
11243 		break;
11244 	case LPFC_PCI_DEV_OC:
11245 		phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
11246 		phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
11247 		phba->__lpfc_sli_issue_fcp_io = __lpfc_sli_issue_fcp_io_s4;
11248 		phba->__lpfc_sli_prep_els_req_rsp = __lpfc_sli_prep_els_req_rsp_s4;
11249 		phba->__lpfc_sli_prep_gen_req = __lpfc_sli_prep_gen_req_s4;
11250 		phba->__lpfc_sli_prep_xmit_seq64 = __lpfc_sli_prep_xmit_seq64_s4;
11251 		phba->__lpfc_sli_prep_abort_xri = __lpfc_sli_prep_abort_xri_s4;
11252 		break;
11253 	default:
11254 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
11255 				"1419 Invalid HBA PCI-device group: 0x%x\n",
11256 				dev_grp);
11257 		return -ENODEV;
11258 	}
11259 	return 0;
11260 }
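/*
 * Editorial sketch, not driver code: the jump table above is selected once at
 * attach time, after which every SLI-revision-specific operation is reached
 * through the phba function pointers. The hypothetical probe-path fragment
 * below illustrates that flow.
 */
#if 0
static int lpfc_example_setup_api(struct lpfc_hba *phba, uint8_t dev_grp)
{
	int rc;

	rc = lpfc_sli_api_table_setup(phba, dev_grp);
	if (rc)
		return rc;	/* -ENODEV for an unknown device group */

	/*
	 * From here on, calls such as phba->__lpfc_sli_prep_gen_req(...)
	 * dispatch to the _s3 or _s4 implementation chosen above.
	 */
	return 0;
}
#endif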
11261 
11262 /**
11263  * lpfc_sli4_calc_ring - Calculates which ring to use
11264  * @phba: Pointer to HBA context object.
11265  * @piocb: Pointer to command iocb.
11266  *
11267  * For SLI4 only, FCP IO can be deferred to one of many WQs, based on
11268  * hba_wqidx, thus we need to calculate the corresponding ring.
11269  * Since ABORTS must go on the same WQ as the command they are
11270  * aborting, we use the command's hba_wqidx.
11271  */
11272 struct lpfc_sli_ring *
11273 lpfc_sli4_calc_ring(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
11274 {
11275 	struct lpfc_io_buf *lpfc_cmd;
11276 
11277 	if (piocb->cmd_flag & (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
11278 		if (unlikely(!phba->sli4_hba.hdwq))
11279 			return NULL;
11280 		/*
11281 		 * for abort iocb hba_wqidx should already
11282 		 * be setup based on what work queue we used.
11283 		 */
11284 		if (!(piocb->cmd_flag & LPFC_USE_FCPWQIDX)) {
11285 			lpfc_cmd = piocb->io_buf;
11286 			piocb->hba_wqidx = lpfc_cmd->hdwq_no;
11287 		}
11288 		return phba->sli4_hba.hdwq[piocb->hba_wqidx].io_wq->pring;
11289 	} else {
11290 		if (unlikely(!phba->sli4_hba.els_wq))
11291 			return NULL;
11292 		piocb->hba_wqidx = 0;
11293 		return phba->sli4_hba.els_wq->pring;
11294 	}
11295 }
11296 
11297 inline void lpfc_sli4_poll_eq(struct lpfc_queue *eq)
11298 {
11299 	struct lpfc_hba *phba = eq->phba;
11300 
11301 	/*
11302 	 * Unlocking an irq is one of the entry points to check
11303 	 * for re-schedule, but we are good on the io submission
11304 	 * path as the midlayer does a get_cpu to glue us in. Flush
11305 	 * out the invalidation queue so we can see the updated
11306 	 * value of the flag.
11307 	 */
11308 	smp_rmb();
11309 
11310 	if (READ_ONCE(eq->mode) == LPFC_EQ_POLL)
11311 		/* We will likely not get the completion for the caller
11312 		 * during this iteration, but that is fine.
11313 		 * Future IOs coming on this eq should be able to
11314 		 * pick it up.  As for the case of single IOs, they
11315 		 * will be handled through a sched from the polling timer
11316 		 * function, which is currently triggered every 1 msec.
11317 		 */
11318 		lpfc_sli4_process_eq(phba, eq, LPFC_QUEUE_NOARM,
11319 				     LPFC_QUEUE_WORK);
11320 }
11321 
11322 /**
11323  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
11324  * @phba: Pointer to HBA context object.
11325  * @ring_number: Ring number
11326  * @piocb: Pointer to command iocb.
11327  * @flag: Flag indicating if this command can be put into txq.
11328  *
11329  * lpfc_sli_issue_iocb is a wrapper around the __lpfc_sli_issue_iocb
11330  * function. It takes the appropriate lock (the ring_lock for SLI4,
11331  * the hbalock otherwise), calls __lpfc_sli_issue_iocb, and returns
11332  * the error returned by __lpfc_sli_issue_iocb. This wrapper is used by
11333  * functions which do not hold the lock themselves.
11334  **/
11335 int
11336 lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
11337 		    struct lpfc_iocbq *piocb, uint32_t flag)
11338 {
11339 	struct lpfc_sli_ring *pring;
11340 	struct lpfc_queue *eq;
11341 	unsigned long iflags;
11342 	int rc;
11343 
11344 	/* If the PCI channel is in offline state, do not post iocbs. */
11345 	if (unlikely(pci_channel_offline(phba->pcidev)))
11346 		return IOCB_ERROR;
11347 
11348 	if (phba->sli_rev == LPFC_SLI_REV4) {
11349 		lpfc_sli_prep_wqe(phba, piocb);
11350 
11351 		eq = phba->sli4_hba.hdwq[piocb->hba_wqidx].hba_eq;
11352 
11353 		pring = lpfc_sli4_calc_ring(phba, piocb);
11354 		if (unlikely(pring == NULL))
11355 			return IOCB_ERROR;
11356 
11357 		spin_lock_irqsave(&pring->ring_lock, iflags);
11358 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11359 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
11360 
11361 		lpfc_sli4_poll_eq(eq);
11362 	} else {
11363 		/* For now, SLI2/3 will still use hbalock */
11364 		spin_lock_irqsave(&phba->hbalock, iflags);
11365 		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
11366 		spin_unlock_irqrestore(&phba->hbalock, iflags);
11367 	}
11368 	return rc;
11369 }
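/*
 * Editorial sketch, not driver code: a hypothetical caller showing how the
 * return codes of lpfc_sli_issue_iocb() are typically consumed. With
 * SLI_IOCB_RET_IOCB the iocb is handed back when the queue is full instead
 * of being parked on the txq; the errno mapping below is an assumption.
 */
#if 0
static int lpfc_example_post_iocb(struct lpfc_hba *phba,
				  struct lpfc_iocbq *piocb)
{
	int rc;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, piocb,
				 SLI_IOCB_RET_IOCB);
	switch (rc) {
	case IOCB_SUCCESS:	/* queued to the WQ/ring */
		return 0;
	case IOCB_BUSY:		/* returned to the caller, retry later */
		return -EBUSY;
	default:		/* IOCB_ERROR and friends */
		return -EIO;
	}
}
#endif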
11370 
11371 /**
11372  * lpfc_extra_ring_setup - Extra ring setup function
11373  * @phba: Pointer to HBA context object.
11374  *
11375  * This function is called while driver attaches with the
11376  * HBA to setup the extra ring. The extra ring is used
11377  * only when driver needs to support target mode functionality
11378  * or IP over FC functionalities.
11379  *
11380  * This function is called with no lock held. SLI3 only.
11381  **/
11382 static int
11383 lpfc_extra_ring_setup(struct lpfc_hba *phba)
11384 {
11385 	struct lpfc_sli *psli;
11386 	struct lpfc_sli_ring *pring;
11387 
11388 	psli = &phba->sli;
11389 
11390 	/* Adjust cmd/rsp ring iocb entries more evenly */
11391 
11392 	/* Take some away from the FCP ring */
11393 	pring = &psli->sli3_ring[LPFC_FCP_RING];
11394 	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11395 	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11396 	pring->sli.sli3.numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11397 	pring->sli.sli3.numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11398 
11399 	/* and give them to the extra ring */
11400 	pring = &psli->sli3_ring[LPFC_EXTRA_RING];
11401 
11402 	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11403 	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11404 	pring->sli.sli3.numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11405 	pring->sli.sli3.numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11406 
11407 	/* Setup default profile for this ring */
11408 	pring->iotag_max = 4096;
11409 	pring->num_mask = 1;
11410 	pring->prt[0].profile = 0;      /* Mask 0 */
11411 	pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
11412 	pring->prt[0].type = phba->cfg_multi_ring_type;
11413 	pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
11414 	return 0;
11415 }
11416 
11417 static void
11418 lpfc_sli_post_recovery_event(struct lpfc_hba *phba,
11419 			     struct lpfc_nodelist *ndlp)
11420 {
11421 	unsigned long iflags;
11422 	struct lpfc_work_evt  *evtp = &ndlp->recovery_evt;
11423 
11424 	/* Hold a node reference for outstanding queued work */
11425 	if (!lpfc_nlp_get(ndlp))
11426 		return;
11427 
11428 	spin_lock_irqsave(&phba->hbalock, iflags);
11429 	if (!list_empty(&evtp->evt_listp)) {
11430 		spin_unlock_irqrestore(&phba->hbalock, iflags);
11431 		lpfc_nlp_put(ndlp);
11432 		return;
11433 	}
11434 
11435 	evtp->evt_arg1 = ndlp;
11436 	evtp->evt = LPFC_EVT_RECOVER_PORT;
11437 	list_add_tail(&evtp->evt_listp, &phba->work_list);
11438 	spin_unlock_irqrestore(&phba->hbalock, iflags);
11439 
11440 	lpfc_worker_wake_up(phba);
11441 }
11442 
11443 /* lpfc_sli_abts_err_handler - handle a failed ABTS request from an SLI3 port.
11444  * @phba: Pointer to HBA context object.
11445  * @iocbq: Pointer to iocb object.
11446  *
11447  * The async_event handler calls this routine when it receives
11448  * an ASYNC_STATUS_CN event from the port.  The port generates
11449  * this event when an Abort Sequence request to an rport fails
11450  * twice in succession.  The abort could have been originated by the
11451  * driver or by the port.  The ABTS could have been for an ELS
11452  * or FCP IO.  The port only generates this event when an ABTS
11453  * fails to complete after one retry.
11454  */
11455 static void
11456 lpfc_sli_abts_err_handler(struct lpfc_hba *phba,
11457 			  struct lpfc_iocbq *iocbq)
11458 {
11459 	struct lpfc_nodelist *ndlp = NULL;
11460 	uint16_t rpi = 0, vpi = 0;
11461 	struct lpfc_vport *vport = NULL;
11462 
11463 	/* The rpi in the ulpContext is vport-sensitive. */
11464 	vpi = iocbq->iocb.un.asyncstat.sub_ctxt_tag;
11465 	rpi = iocbq->iocb.ulpContext;
11466 
11467 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11468 			"3092 Port generated ABTS async event "
11469 			"on vpi %d rpi %d status 0x%x\n",
11470 			vpi, rpi, iocbq->iocb.ulpStatus);
11471 
11472 	vport = lpfc_find_vport_by_vpid(phba, vpi);
11473 	if (!vport)
11474 		goto err_exit;
11475 	ndlp = lpfc_findnode_rpi(vport, rpi);
11476 	if (!ndlp)
11477 		goto err_exit;
11478 
11479 	if (iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
11480 		lpfc_sli_abts_recover_port(vport, ndlp);
11481 	return;
11482 
11483  err_exit:
11484 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11485 			"3095 Event Context not found, no "
11486 			"action on vpi %d rpi %d status 0x%x, reason 0x%x\n",
11487 			vpi, rpi, iocbq->iocb.ulpStatus,
11488 			iocbq->iocb.ulpContext);
11489 }
11490 
11491 /* lpfc_sli4_abts_err_handler - handle a failed ABTS request from an SLI4 port.
11492  * @phba: pointer to HBA context object.
11493  * @ndlp: nodelist pointer for the impacted rport.
11494  * @axri: pointer to the wcqe containing the failed exchange.
11495  *
11496  * The driver calls this routine when it receives an ABORT_XRI_FCP CQE from the
11497  * port.  The port generates this event when an abort exchange request to an
11498  * rport fails twice in succession with no reply.  The abort could have been
11499  * originated by the driver or by the port.  The ABTS could have been for an ELS or FCP IO.
11500  */
11501 void
11502 lpfc_sli4_abts_err_handler(struct lpfc_hba *phba,
11503 			   struct lpfc_nodelist *ndlp,
11504 			   struct sli4_wcqe_xri_aborted *axri)
11505 {
11506 	uint32_t ext_status = 0;
11507 
11508 	if (!ndlp) {
11509 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
11510 				"3115 Node Context not found, driver "
11511 				"ignoring abts err event\n");
11512 		return;
11513 	}
11514 
11515 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
11516 			"3116 Port generated FCP XRI ABORT event on "
11517 			"vpi %d rpi %d xri x%x status 0x%x parameter x%x\n",
11518 			ndlp->vport->vpi, phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
11519 			bf_get(lpfc_wcqe_xa_xri, axri),
11520 			bf_get(lpfc_wcqe_xa_status, axri),
11521 			axri->parameter);
11522 
11523 	/*
11524 	 * Catch the ABTS protocol failure case.  Older OCe FW releases returned
11525 	 * LOCAL_REJECT and 0 for a failed ABTS exchange and later OCe and
11526 	 * LPe FW releases returned LOCAL_REJECT and SEQUENCE_TIMEOUT.
11527 	 */
11528 	ext_status = axri->parameter & IOERR_PARAM_MASK;
11529 	if ((bf_get(lpfc_wcqe_xa_status, axri) == IOSTAT_LOCAL_REJECT) &&
11530 	    ((ext_status == IOERR_SEQUENCE_TIMEOUT) || (ext_status == 0)))
11531 		lpfc_sli_post_recovery_event(phba, ndlp);
11532 }
11533 
11534 /**
11535  * lpfc_sli_async_event_handler - ASYNC iocb handler function
11536  * @phba: Pointer to HBA context object.
11537  * @pring: Pointer to driver SLI ring object.
11538  * @iocbq: Pointer to iocb object.
11539  *
11540  * This function is called by the slow ring event handler
11541  * function when there is an ASYNC event iocb in the ring.
11542  * This function is called with no lock held.
11543  * Currently this function handles only temperature related
11544  * ASYNC events. The function decodes the temperature sensor
11545  * event message and posts events for the management applications.
11546  **/
11547 static void
11548 lpfc_sli_async_event_handler(struct lpfc_hba *phba,
11549 	struct lpfc_sli_ring *pring, struct lpfc_iocbq *iocbq)
11550 {
11551 	IOCB_t *icmd;
11552 	uint16_t evt_code;
11553 	struct temp_event temp_event_data;
11554 	struct Scsi_Host *shost;
11555 	uint32_t *iocb_w;
11556 
11557 	icmd = &iocbq->iocb;
11558 	evt_code = icmd->un.asyncstat.evt_code;
11559 
11560 	switch (evt_code) {
11561 	case ASYNC_TEMP_WARN:
11562 	case ASYNC_TEMP_SAFE:
11563 		temp_event_data.data = (uint32_t) icmd->ulpContext;
11564 		temp_event_data.event_type = FC_REG_TEMPERATURE_EVENT;
11565 		if (evt_code == ASYNC_TEMP_WARN) {
11566 			temp_event_data.event_code = LPFC_THRESHOLD_TEMP;
11567 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11568 				"0347 Adapter is very hot, please take "
11569 				"corrective action. temperature : %d Celsius\n",
11570 				(uint32_t) icmd->ulpContext);
11571 		} else {
11572 			temp_event_data.event_code = LPFC_NORMAL_TEMP;
11573 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11574 				"0340 Adapter temperature is OK now. "
11575 				"temperature : %d Celsius\n",
11576 				(uint32_t) icmd->ulpContext);
11577 		}
11578 
11579 		/* Send temperature change event to applications */
11580 		shost = lpfc_shost_from_vport(phba->pport);
11581 		fc_host_post_vendor_event(shost, fc_get_event_number(),
11582 			sizeof(temp_event_data), (char *) &temp_event_data,
11583 			LPFC_NL_VENDOR_ID);
11584 		break;
11585 	case ASYNC_STATUS_CN:
11586 		lpfc_sli_abts_err_handler(phba, iocbq);
11587 		break;
11588 	default:
11589 		iocb_w = (uint32_t *) icmd;
11590 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
11591 			"0346 Ring %d handler: unexpected ASYNC_STATUS"
11592 			" evt_code 0x%x\n"
11593 			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
11594 			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
11595 			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
11596 			"W12 0x%08x W13 0x%08x W14 0x%08x W15 0x%08x\n",
11597 			pring->ringno, icmd->un.asyncstat.evt_code,
11598 			iocb_w[0], iocb_w[1], iocb_w[2], iocb_w[3],
11599 			iocb_w[4], iocb_w[5], iocb_w[6], iocb_w[7],
11600 			iocb_w[8], iocb_w[9], iocb_w[10], iocb_w[11],
11601 			iocb_w[12], iocb_w[13], iocb_w[14], iocb_w[15]);
11602 
11603 		break;
11604 	}
11605 }
11606 
11607 
11608 /**
11609  * lpfc_sli4_setup - SLI ring setup function
11610  * @phba: Pointer to HBA context object.
11611  *
11612  * lpfc_sli4_setup sets up the rings of the SLI interface with
11613  * the number of iocbs per ring and iotags. This function is
11614  * called while the driver attaches to the HBA and before the
11615  * interrupts are enabled, so there is no need for locking.
11616  *
11617  * This function always returns 0.
11618  **/
11619 int
11620 lpfc_sli4_setup(struct lpfc_hba *phba)
11621 {
11622 	struct lpfc_sli_ring *pring;
11623 
11624 	pring = phba->sli4_hba.els_wq->pring;
11625 	pring->num_mask = LPFC_MAX_RING_MASK;
11626 	pring->prt[0].profile = 0;	/* Mask 0 */
11627 	pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11628 	pring->prt[0].type = FC_TYPE_ELS;
11629 	pring->prt[0].lpfc_sli_rcv_unsol_event =
11630 	    lpfc_els_unsol_event;
11631 	pring->prt[1].profile = 0;	/* Mask 1 */
11632 	pring->prt[1].rctl = FC_RCTL_ELS_REP;
11633 	pring->prt[1].type = FC_TYPE_ELS;
11634 	pring->prt[1].lpfc_sli_rcv_unsol_event =
11635 	    lpfc_els_unsol_event;
11636 	pring->prt[2].profile = 0;	/* Mask 2 */
11637 	/* NameServer Inquiry */
11638 	pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11639 	/* NameServer */
11640 	pring->prt[2].type = FC_TYPE_CT;
11641 	pring->prt[2].lpfc_sli_rcv_unsol_event =
11642 	    lpfc_ct_unsol_event;
11643 	pring->prt[3].profile = 0;	/* Mask 3 */
11644 	/* NameServer response */
11645 	pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11646 	/* NameServer */
11647 	pring->prt[3].type = FC_TYPE_CT;
11648 	pring->prt[3].lpfc_sli_rcv_unsol_event =
11649 	    lpfc_ct_unsol_event;
11650 	return 0;
11651 }
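/*
 * Editorial note: each prt[] entry above acts as a receive filter. An
 * incoming unsolicited frame is matched on its R_CTL/TYPE pair and handed to
 * the matching entry's lpfc_sli_rcv_unsol_event handler: ELS request/reply
 * frames go to lpfc_els_unsol_event, CT (NameServer) frames to
 * lpfc_ct_unsol_event.
 */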
11652 
11653 /**
11654  * lpfc_sli_setup - SLI ring setup function
11655  * @phba: Pointer to HBA context object.
11656  *
11657  * lpfc_sli_setup sets up the rings of the SLI interface with
11658  * the number of iocbs per ring and iotags. This function is
11659  * called while the driver attaches to the HBA and before the
11660  * interrupts are enabled, so there is no need for locking.
11661  *
11662  * This function always returns 0. SLI3 only.
11663  **/
11664 int
11665 lpfc_sli_setup(struct lpfc_hba *phba)
11666 {
11667 	int i, totiocbsize = 0;
11668 	struct lpfc_sli *psli = &phba->sli;
11669 	struct lpfc_sli_ring *pring;
11670 
11671 	psli->num_rings = MAX_SLI3_CONFIGURED_RINGS;
11672 	psli->sli_flag = 0;
11673 
11674 	psli->iocbq_lookup = NULL;
11675 	psli->iocbq_lookup_len = 0;
11676 	psli->last_iotag = 0;
11677 
11678 	for (i = 0; i < psli->num_rings; i++) {
11679 		pring = &psli->sli3_ring[i];
11680 		switch (i) {
11681 		case LPFC_FCP_RING:	/* ring 0 - FCP */
11682 			/* numCiocb and numRiocb are used in config_port */
11683 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
11684 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
11685 			pring->sli.sli3.numCiocb +=
11686 				SLI2_IOCB_CMD_R1XTRA_ENTRIES;
11687 			pring->sli.sli3.numRiocb +=
11688 				SLI2_IOCB_RSP_R1XTRA_ENTRIES;
11689 			pring->sli.sli3.numCiocb +=
11690 				SLI2_IOCB_CMD_R3XTRA_ENTRIES;
11691 			pring->sli.sli3.numRiocb +=
11692 				SLI2_IOCB_RSP_R3XTRA_ENTRIES;
11693 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11694 							SLI3_IOCB_CMD_SIZE :
11695 							SLI2_IOCB_CMD_SIZE;
11696 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11697 							SLI3_IOCB_RSP_SIZE :
11698 							SLI2_IOCB_RSP_SIZE;
11699 			pring->iotag_ctr = 0;
11700 			pring->iotag_max =
11701 			    (phba->cfg_hba_queue_depth * 2);
11702 			pring->fast_iotag = pring->iotag_max;
11703 			pring->num_mask = 0;
11704 			break;
11705 		case LPFC_EXTRA_RING:	/* ring 1 - EXTRA */
11706 			/* numCiocb and numRiocb are used in config_port */
11707 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
11708 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
11709 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11710 							SLI3_IOCB_CMD_SIZE :
11711 							SLI2_IOCB_CMD_SIZE;
11712 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11713 							SLI3_IOCB_RSP_SIZE :
11714 							SLI2_IOCB_RSP_SIZE;
11715 			pring->iotag_max = phba->cfg_hba_queue_depth;
11716 			pring->num_mask = 0;
11717 			break;
11718 		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
11719 			/* numCiocb and numRiocb are used in config_port */
11720 			pring->sli.sli3.numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
11721 			pring->sli.sli3.numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
11722 			pring->sli.sli3.sizeCiocb = (phba->sli_rev == 3) ?
11723 							SLI3_IOCB_CMD_SIZE :
11724 							SLI2_IOCB_CMD_SIZE;
11725 			pring->sli.sli3.sizeRiocb = (phba->sli_rev == 3) ?
11726 							SLI3_IOCB_RSP_SIZE :
11727 							SLI2_IOCB_RSP_SIZE;
11728 			pring->fast_iotag = 0;
11729 			pring->iotag_ctr = 0;
11730 			pring->iotag_max = 4096;
11731 			pring->lpfc_sli_rcv_async_status =
11732 				lpfc_sli_async_event_handler;
11733 			pring->num_mask = LPFC_MAX_RING_MASK;
11734 			pring->prt[0].profile = 0;	/* Mask 0 */
11735 			pring->prt[0].rctl = FC_RCTL_ELS_REQ;
11736 			pring->prt[0].type = FC_TYPE_ELS;
11737 			pring->prt[0].lpfc_sli_rcv_unsol_event =
11738 			    lpfc_els_unsol_event;
11739 			pring->prt[1].profile = 0;	/* Mask 1 */
11740 			pring->prt[1].rctl = FC_RCTL_ELS_REP;
11741 			pring->prt[1].type = FC_TYPE_ELS;
11742 			pring->prt[1].lpfc_sli_rcv_unsol_event =
11743 			    lpfc_els_unsol_event;
11744 			pring->prt[2].profile = 0;	/* Mask 2 */
11745 			/* NameServer Inquiry */
11746 			pring->prt[2].rctl = FC_RCTL_DD_UNSOL_CTL;
11747 			/* NameServer */
11748 			pring->prt[2].type = FC_TYPE_CT;
11749 			pring->prt[2].lpfc_sli_rcv_unsol_event =
11750 			    lpfc_ct_unsol_event;
11751 			pring->prt[3].profile = 0;	/* Mask 3 */
11752 			/* NameServer response */
11753 			pring->prt[3].rctl = FC_RCTL_DD_SOL_CTL;
11754 			/* NameServer */
11755 			pring->prt[3].type = FC_TYPE_CT;
11756 			pring->prt[3].lpfc_sli_rcv_unsol_event =
11757 			    lpfc_ct_unsol_event;
11758 			break;
11759 		}
11760 		totiocbsize += (pring->sli.sli3.numCiocb *
11761 			pring->sli.sli3.sizeCiocb) +
11762 			(pring->sli.sli3.numRiocb * pring->sli.sli3.sizeRiocb);
11763 	}
11764 	if (totiocbsize > MAX_SLIM_IOCB_SIZE) {
11765 		/* Too many cmd / rsp ring entries in SLI2 SLIM */
11766 		printk(KERN_ERR "%d:0462 Too many cmd / rsp ring entries in "
11767 		       "SLI2 SLIM Data: x%x x%lx\n",
11768 		       phba->brd_no, totiocbsize,
11769 		       (unsigned long) MAX_SLIM_IOCB_SIZE);
11770 	}
11771 	if (phba->cfg_multi_ring_support == 2)
11772 		lpfc_extra_ring_setup(phba);
11773 
11774 	return 0;
11775 }
11776 
11777 /**
11778  * lpfc_sli4_queue_init - Queue initialization function
11779  * @phba: Pointer to HBA context object.
11780  *
11781  * lpfc_sli4_queue_init sets up mailbox queues and iocb queues for each
11782  * ring. This function also initializes ring indices of each ring.
11783  * This function is called during the initialization of the SLI
11784  * interface of an HBA.
11785  * This function is called with no lock held and does not
11786  * return a value.
11787  **/
11788 void
11789 lpfc_sli4_queue_init(struct lpfc_hba *phba)
11790 {
11791 	struct lpfc_sli *psli;
11792 	struct lpfc_sli_ring *pring;
11793 	int i;
11794 
11795 	psli = &phba->sli;
11796 	spin_lock_irq(&phba->hbalock);
11797 	INIT_LIST_HEAD(&psli->mboxq);
11798 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
11799 	/* Initialize list headers for txq and txcmplq as doubly linked lists */
11800 	for (i = 0; i < phba->cfg_hdw_queue; i++) {
11801 		pring = phba->sli4_hba.hdwq[i].io_wq->pring;
11802 		pring->flag = 0;
11803 		pring->ringno = LPFC_FCP_RING;
11804 		pring->txcmplq_cnt = 0;
11805 		INIT_LIST_HEAD(&pring->txq);
11806 		INIT_LIST_HEAD(&pring->txcmplq);
11807 		INIT_LIST_HEAD(&pring->iocb_continueq);
11808 		spin_lock_init(&pring->ring_lock);
11809 	}
11810 	pring = phba->sli4_hba.els_wq->pring;
11811 	pring->flag = 0;
11812 	pring->ringno = LPFC_ELS_RING;
11813 	pring->txcmplq_cnt = 0;
11814 	INIT_LIST_HEAD(&pring->txq);
11815 	INIT_LIST_HEAD(&pring->txcmplq);
11816 	INIT_LIST_HEAD(&pring->iocb_continueq);
11817 	spin_lock_init(&pring->ring_lock);
11818 
11819 	if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
11820 		pring = phba->sli4_hba.nvmels_wq->pring;
11821 		pring->flag = 0;
11822 		pring->ringno = LPFC_ELS_RING;
11823 		pring->txcmplq_cnt = 0;
11824 		INIT_LIST_HEAD(&pring->txq);
11825 		INIT_LIST_HEAD(&pring->txcmplq);
11826 		INIT_LIST_HEAD(&pring->iocb_continueq);
11827 		spin_lock_init(&pring->ring_lock);
11828 	}
11829 
11830 	spin_unlock_irq(&phba->hbalock);
11831 }
11832 
11833 /**
11834  * lpfc_sli_queue_init - Queue initialization function
11835  * @phba: Pointer to HBA context object.
11836  *
11837  * lpfc_sli_queue_init sets up mailbox queues and iocb queues for each
11838  * ring. This function also initializes ring indices of each ring.
11839  * This function is called during the initialization of the SLI
11840  * interface of an HBA.
11841  * This function is called with no lock held and does not
11842  * return a value.
11843  **/
11844 void
11845 lpfc_sli_queue_init(struct lpfc_hba *phba)
11846 {
11847 	struct lpfc_sli *psli;
11848 	struct lpfc_sli_ring *pring;
11849 	int i;
11850 
11851 	psli = &phba->sli;
11852 	spin_lock_irq(&phba->hbalock);
11853 	INIT_LIST_HEAD(&psli->mboxq);
11854 	INIT_LIST_HEAD(&psli->mboxq_cmpl);
11855 	/* Initialize list headers for txq and txcmplq as doubly linked lists */
11856 	for (i = 0; i < psli->num_rings; i++) {
11857 		pring = &psli->sli3_ring[i];
11858 		pring->ringno = i;
11859 		pring->sli.sli3.next_cmdidx  = 0;
11860 		pring->sli.sli3.local_getidx = 0;
11861 		pring->sli.sli3.cmdidx = 0;
11862 		INIT_LIST_HEAD(&pring->iocb_continueq);
11863 		INIT_LIST_HEAD(&pring->iocb_continue_saveq);
11864 		INIT_LIST_HEAD(&pring->postbufq);
11865 		pring->flag = 0;
11866 		INIT_LIST_HEAD(&pring->txq);
11867 		INIT_LIST_HEAD(&pring->txcmplq);
11868 		spin_lock_init(&pring->ring_lock);
11869 	}
11870 	spin_unlock_irq(&phba->hbalock);
11871 }
11872 
11873 /**
11874  * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
11875  * @phba: Pointer to HBA context object.
11876  *
11877  * This routine flushes the mailbox command subsystem. It will unconditionally
11878  * flush all the mailbox commands in the three possible stages in the mailbox
11879  * command sub-system: pending mailbox command queue; the outstanding mailbox
11880  * command; and the completed mailbox command queue. It is the caller's responsibility
11881  * to make sure that the driver is in the proper state to flush the mailbox
11882  * command sub-system. Namely, the posting of mailbox commands into the
11883  * pending mailbox command queue from the various clients must be stopped;
11884  * either the HBA is in a state in which it will never work on the outstanding
11885  * mailbox command (such as in EEH or ERATT conditions) or the outstanding
11886  * mailbox command has been completed.
11887  **/
11888 static void
11889 lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
11890 {
11891 	LIST_HEAD(completions);
11892 	struct lpfc_sli *psli = &phba->sli;
11893 	LPFC_MBOXQ_t *pmb;
11894 	unsigned long iflag;
11895 
11896 	/* Disable softirqs, including timers from obtaining phba->hbalock */
11897 	local_bh_disable();
11898 
11899 	/* Flush all the mailbox commands in the mbox system */
11900 	spin_lock_irqsave(&phba->hbalock, iflag);
11901 
11902 	/* The pending mailbox command queue */
11903 	list_splice_init(&phba->sli.mboxq, &completions);
11904 	/* The outstanding active mailbox command */
11905 	if (psli->mbox_active) {
11906 		list_add_tail(&psli->mbox_active->list, &completions);
11907 		psli->mbox_active = NULL;
11908 		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
11909 	}
11910 	/* The completed mailbox command queue */
11911 	list_splice_init(&phba->sli.mboxq_cmpl, &completions);
11912 	spin_unlock_irqrestore(&phba->hbalock, iflag);
11913 
11914 	/* Enable softirqs again, done with phba->hbalock */
11915 	local_bh_enable();
11916 
11917 	/* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
11918 	while (!list_empty(&completions)) {
11919 		list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
11920 		pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
11921 		if (pmb->mbox_cmpl)
11922 			pmb->mbox_cmpl(phba, pmb);
11923 	}
11924 }
11925 
11926 /**
11927  * lpfc_sli_host_down - Vport cleanup function
11928  * @vport: Pointer to virtual port object.
11929  *
11930  * lpfc_sli_host_down is called to clean up the resources
11931  * associated with a vport before destroying virtual
11932  * port data structures.
11933  * This function does the following operations:
11934  * - Free discovery resources associated with this virtual
11935  *   port.
11936  * - Free iocbs associated with this virtual port in
11937  *   the txq.
11938  * - Send abort for all iocb commands associated with this
11939  *   vport in txcmplq.
11940  *
11941  * This function is called with no lock held and always returns 1.
11942  **/
11943 int
11944 lpfc_sli_host_down(struct lpfc_vport *vport)
11945 {
11946 	LIST_HEAD(completions);
11947 	struct lpfc_hba *phba = vport->phba;
11948 	struct lpfc_sli *psli = &phba->sli;
11949 	struct lpfc_queue *qp = NULL;
11950 	struct lpfc_sli_ring *pring;
11951 	struct lpfc_iocbq *iocb, *next_iocb;
11952 	int i;
11953 	unsigned long flags = 0;
11954 	uint16_t prev_pring_flag;
11955 
11956 	lpfc_cleanup_discovery_resources(vport);
11957 
11958 	spin_lock_irqsave(&phba->hbalock, flags);
11959 
11960 	/*
11961 	 * Error everything on the txq since these iocbs
11962 	 * have not been given to the FW yet.
11963 	 * Also issue ABTS for everything on the txcmplq
11964 	 */
11965 	if (phba->sli_rev != LPFC_SLI_REV4) {
11966 		for (i = 0; i < psli->num_rings; i++) {
11967 			pring = &psli->sli3_ring[i];
11968 			prev_pring_flag = pring->flag;
11969 			/* Only slow rings */
11970 			if (pring->ringno == LPFC_ELS_RING) {
11971 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
11972 				/* Set the lpfc data pending flag */
11973 				set_bit(LPFC_DATA_READY, &phba->data_flags);
11974 			}
11975 			list_for_each_entry_safe(iocb, next_iocb,
11976 						 &pring->txq, list) {
11977 				if (iocb->vport != vport)
11978 					continue;
11979 				list_move_tail(&iocb->list, &completions);
11980 			}
11981 			list_for_each_entry_safe(iocb, next_iocb,
11982 						 &pring->txcmplq, list) {
11983 				if (iocb->vport != vport)
11984 					continue;
11985 				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
11986 							   NULL);
11987 			}
11988 			pring->flag = prev_pring_flag;
11989 		}
11990 	} else {
11991 		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
11992 			pring = qp->pring;
11993 			if (!pring)
11994 				continue;
11995 			if (pring == phba->sli4_hba.els_wq->pring) {
11996 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
11997 				/* Set the lpfc data pending flag */
11998 				set_bit(LPFC_DATA_READY, &phba->data_flags);
11999 			}
12000 			prev_pring_flag = pring->flag;
12001 			spin_lock(&pring->ring_lock);
12002 			list_for_each_entry_safe(iocb, next_iocb,
12003 						 &pring->txq, list) {
12004 				if (iocb->vport != vport)
12005 					continue;
12006 				list_move_tail(&iocb->list, &completions);
12007 			}
12008 			spin_unlock(&pring->ring_lock);
12009 			list_for_each_entry_safe(iocb, next_iocb,
12010 						 &pring->txcmplq, list) {
12011 				if (iocb->vport != vport)
12012 					continue;
12013 				lpfc_sli_issue_abort_iotag(phba, pring, iocb,
12014 							   NULL);
12015 			}
12016 			pring->flag = prev_pring_flag;
12017 		}
12018 	}
12019 	spin_unlock_irqrestore(&phba->hbalock, flags);
12020 
12021 	/* Make sure HBA is alive */
12022 	lpfc_issue_hb_tmo(phba);
12023 
12024 	/* Cancel all the IOCBs from the completions list */
12025 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12026 			      IOERR_SLI_DOWN);
12027 	return 1;
12028 }
12029 
12030 /**
12031  * lpfc_sli_hba_down - Resource cleanup function for the HBA
12032  * @phba: Pointer to HBA context object.
12033  *
12034  * This function cleans up all iocb, buffers, mailbox commands
12035  * while shutting down the HBA. This function is called with no
12036  * lock held and always returns 1.
12037  * This function does the following to cleanup driver resources:
12038  * - Free discovery resources for each virtual port
12039  * - Cleanup any pending fabric iocbs
12040  * - Iterate through the iocb txq and free each entry
12041  *   in the list.
12042  * - Free up any buffer posted to the HBA
12043  * - Free mailbox commands in the mailbox queue.
12044  **/
12045 int
12046 lpfc_sli_hba_down(struct lpfc_hba *phba)
12047 {
12048 	LIST_HEAD(completions);
12049 	struct lpfc_sli *psli = &phba->sli;
12050 	struct lpfc_queue *qp = NULL;
12051 	struct lpfc_sli_ring *pring;
12052 	struct lpfc_dmabuf *buf_ptr;
12053 	unsigned long flags = 0;
12054 	int i;
12055 
12056 	/* Shutdown the mailbox command sub-system */
12057 	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);
12058 
12059 	lpfc_hba_down_prep(phba);
12060 
12061 	/* Disable softirqs, including timers from obtaining phba->hbalock */
12062 	local_bh_disable();
12063 
12064 	lpfc_fabric_abort_hba(phba);
12065 
12066 	spin_lock_irqsave(&phba->hbalock, flags);
12067 
12068 	/*
12069 	 * Error everything on the txq since these iocbs
12070 	 * have not been given to the FW yet.
12071 	 */
12072 	if (phba->sli_rev != LPFC_SLI_REV4) {
12073 		for (i = 0; i < psli->num_rings; i++) {
12074 			pring = &psli->sli3_ring[i];
12075 			/* Only slow rings */
12076 			if (pring->ringno == LPFC_ELS_RING) {
12077 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
12078 				/* Set the lpfc data pending flag */
12079 				set_bit(LPFC_DATA_READY, &phba->data_flags);
12080 			}
12081 			list_splice_init(&pring->txq, &completions);
12082 		}
12083 	} else {
12084 		list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12085 			pring = qp->pring;
12086 			if (!pring)
12087 				continue;
12088 			spin_lock(&pring->ring_lock);
12089 			list_splice_init(&pring->txq, &completions);
12090 			spin_unlock(&pring->ring_lock);
12091 			if (pring == phba->sli4_hba.els_wq->pring) {
12092 				pring->flag |= LPFC_DEFERRED_RING_EVENT;
12093 				/* Set the lpfc data pending flag */
12094 				set_bit(LPFC_DATA_READY, &phba->data_flags);
12095 			}
12096 		}
12097 	}
12098 	spin_unlock_irqrestore(&phba->hbalock, flags);
12099 
12100 	/* Cancel all the IOCBs from the completions list */
12101 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
12102 			      IOERR_SLI_DOWN);
12103 
12104 	spin_lock_irqsave(&phba->hbalock, flags);
12105 	list_splice_init(&phba->elsbuf, &completions);
12106 	phba->elsbuf_cnt = 0;
12107 	phba->elsbuf_prev_cnt = 0;
12108 	spin_unlock_irqrestore(&phba->hbalock, flags);
12109 
12110 	while (!list_empty(&completions)) {
12111 		list_remove_head(&completions, buf_ptr,
12112 			struct lpfc_dmabuf, list);
12113 		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
12114 		kfree(buf_ptr);
12115 	}
12116 
12117 	/* Enable softirqs again, done with phba->hbalock */
12118 	local_bh_enable();
12119 
12120 	/* Return any active mbox cmds */
12121 	timer_delete_sync(&psli->mbox_tmo);
12122 
12123 	spin_lock_irqsave(&phba->pport->work_port_lock, flags);
12124 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
12125 	spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
12126 
12127 	return 1;
12128 }
12129 
12130 /**
12131  * lpfc_sli_pcimem_bcopy - SLI memory copy function
12132  * @srcp: Source memory pointer.
12133  * @destp: Destination memory pointer.
12134  * @cnt: Number of bytes to be copied.
12135  *
12136  * This function is used for copying data between driver memory
12137  * and the SLI memory. This function also changes the endianness
12138  * of each word if native endianness is different from SLI
12139  * endianness. This function can be called with or without
12140  * lock.
12141  **/
12142 void
12143 lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
12144 {
12145 	uint32_t *src = srcp;
12146 	uint32_t *dest = destp;
12147 	uint32_t ldata;
12148 	int i;
12149 
12150 	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12151 		ldata = *src;
12152 		ldata = le32_to_cpu(ldata);
12153 		*dest = ldata;
12154 		src++;
12155 		dest++;
12156 	}
12157 }
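/*
 * Editorial sketch, not driver code: @cnt is a byte count, so a caller
 * copying a whole mailbox image word-by-word with the little-endian swap
 * would look like the hypothetical fragment below (buffer names assumed).
 */
#if 0
	MAILBOX_t src_mb, dst_mb;

	lpfc_sli_pcimem_bcopy(&src_mb, &dst_mb, sizeof(MAILBOX_t));
#endif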
12158 
12159 
12160 /**
12161  * lpfc_sli_bemem_bcopy - SLI memory copy function
12162  * @srcp: Source memory pointer.
12163  * @destp: Destination memory pointer.
12164  * @cnt: Number of bytes to be copied.
12165  *
12166  * This function is used for copying data between a data structure
12167  * with big endian representation to local endianness.
12168  * This function can be called with or without lock.
12169  **/
12170 void
12171 lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
12172 {
12173 	uint32_t *src = srcp;
12174 	uint32_t *dest = destp;
12175 	uint32_t ldata;
12176 	int i;
12177 
12178 	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
12179 		ldata = *src;
12180 		ldata = be32_to_cpu(ldata);
12181 		*dest = ldata;
12182 		src++;
12183 		dest++;
12184 	}
12185 }
12186 
12187 /**
12188  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
12189  * @phba: Pointer to HBA context object.
12190  * @pring: Pointer to driver SLI ring object.
12191  * @mp: Pointer to driver buffer object.
12192  *
12193  * This function is called with no lock held.
12194  * It always return zero after adding the buffer to the postbufq
12195  * buffer list.
12196  **/
12197 int
12198 lpfc_sli_ringpostbuf_put(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12199 			 struct lpfc_dmabuf *mp)
12200 {
12201 	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
12202 	   later */
12203 	spin_lock_irq(&phba->hbalock);
12204 	list_add_tail(&mp->list, &pring->postbufq);
12205 	pring->postbufq_cnt++;
12206 	spin_unlock_irq(&phba->hbalock);
12207 	return 0;
12208 }
12209 
12210 /**
12211  * lpfc_sli_get_buffer_tag - allocates a tag for a CMD_QUE_XRI64_CX buffer
12212  * @phba: Pointer to HBA context object.
12213  *
12214  * When HBQ is enabled, buffers are searched based on tags. This function
12215  * allocates a tag for a buffer posted using the CMD_QUE_XRI64_CX iocb. The
12216  * tag is bitwise OR-ed with QUE_BUFTAG_BIT to make sure that the tag
12217  * does not conflict with tags of buffer posted for unsolicited events.
12218  * The function returns the allocated tag. The function is called with
12219  * no locks held.
12220  **/
12221 uint32_t
12222 lpfc_sli_get_buffer_tag(struct lpfc_hba *phba)
12223 {
12224 	spin_lock_irq(&phba->hbalock);
12225 	phba->buffer_tag_count++;
12226 	/*
12227 	 * Always set the QUE_BUFTAG_BIT to distinguish this tag
12228 	 * from a tag assigned by HBQ.
12229 	 */
12230 	phba->buffer_tag_count |= QUE_BUFTAG_BIT;
12231 	spin_unlock_irq(&phba->hbalock);
12232 	return phba->buffer_tag_count;
12233 }
12234 
12235 /**
12236  * lpfc_sli_ring_taggedbuf_get - find HBQ buffer associated with given tag
12237  * @phba: Pointer to HBA context object.
12238  * @pring: Pointer to driver SLI ring object.
12239  * @tag: Buffer tag.
12240  *
12241  * Buffers posted using CMD_QUE_XRI64_CX iocb are in pring->postbufq
12242  * list. After HBA DMA data to these buffers, CMD_IOCB_RET_XRI64_CX
12243  * iocb is posted to the response ring with the tag of the buffer.
12244  * This function searches the pring->postbufq list using the tag
12245  * to find the buffer associated with the CMD_IOCB_RET_XRI64_CX
12246  * iocb. If the buffer is found, the lpfc_dmabuf object of the
12247  * buffer is returned to the caller; otherwise NULL is returned.
12248  * This function is called with no lock held.
12249  **/
12250 struct lpfc_dmabuf *
12251 lpfc_sli_ring_taggedbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12252 			uint32_t tag)
12253 {
12254 	struct lpfc_dmabuf *mp, *next_mp;
12255 	struct list_head *slp = &pring->postbufq;
12256 
12257 	/* Search postbufq, from the beginning, looking for a match on tag */
12258 	spin_lock_irq(&phba->hbalock);
12259 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12260 		if (mp->buffer_tag == tag) {
12261 			list_del_init(&mp->list);
12262 			pring->postbufq_cnt--;
12263 			spin_unlock_irq(&phba->hbalock);
12264 			return mp;
12265 		}
12266 	}
12267 
12268 	spin_unlock_irq(&phba->hbalock);
12269 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12270 			"0402 Cannot find virtual addr for buffer tag on "
12271 			"ring %d Data x%lx x%px x%px x%x\n",
12272 			pring->ringno, (unsigned long) tag,
12273 			slp->next, slp->prev, pring->postbufq_cnt);
12274 
12275 	return NULL;
12276 }
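/*
 * Editorial sketch, not driver code: the usual life cycle of a
 * CMD_QUE_XRI64_CX buffer pairs the three helpers above. The function name
 * below is hypothetical; the field and helper names are from this file.
 */
#if 0
static struct lpfc_dmabuf *
lpfc_example_buffer_roundtrip(struct lpfc_hba *phba,
			      struct lpfc_sli_ring *pring,
			      struct lpfc_dmabuf *mp)
{
	uint32_t tag;

	tag = lpfc_sli_get_buffer_tag(phba);	/* QUE_BUFTAG_BIT is set */
	mp->buffer_tag = tag;
	lpfc_sli_ringpostbuf_put(phba, pring, mp);

	/* The tag travels in the CMD_QUE_XRI64_CX iocb; when the matching
	 * CMD_IOCB_RET_XRI64_CX arrives, the buffer is looked up by tag:
	 */
	return lpfc_sli_ring_taggedbuf_get(phba, pring, tag);
}
#endif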
12277 
12278 /**
12279  * lpfc_sli_ringpostbuf_get - search buffers for unsolicited CT and ELS events
12280  * @phba: Pointer to HBA context object.
12281  * @pring: Pointer to driver SLI ring object.
12282  * @phys: DMA address of the buffer.
12283  *
12284  * This function searches the buffer list using the dma_address
12285  * of unsolicited event to find the driver's lpfc_dmabuf object
12286  * corresponding to the dma_address. The function returns the
12287  * lpfc_dmabuf object if a buffer is found; otherwise it returns NULL.
12288  * This function is called by the ct and els unsolicited event
12289  * handlers to get the buffer associated with the unsolicited
12290  * event.
12291  *
12292  * This function is called with no lock held.
12293  **/
12294 struct lpfc_dmabuf *
12295 lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12296 			 dma_addr_t phys)
12297 {
12298 	struct lpfc_dmabuf *mp, *next_mp;
12299 	struct list_head *slp = &pring->postbufq;
12300 
12301 	/* Search postbufq, from the beginning, looking for a match on phys */
12302 	spin_lock_irq(&phba->hbalock);
12303 	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
12304 		if (mp->phys == phys) {
12305 			list_del_init(&mp->list);
12306 			pring->postbufq_cnt--;
12307 			spin_unlock_irq(&phba->hbalock);
12308 			return mp;
12309 		}
12310 	}
12311 
12312 	spin_unlock_irq(&phba->hbalock);
12313 	lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
12314 			"0410 Cannot find virtual addr for mapped buf on "
12315 			"ring %d Data x%llx x%px x%px x%x\n",
12316 			pring->ringno, (unsigned long long)phys,
12317 			slp->next, slp->prev, pring->postbufq_cnt);
12318 	return NULL;
12319 }
12320 
12321 /**
12322  * lpfc_sli_abort_els_cmpl - Completion handler for the els abort iocbs
12323  * @phba: Pointer to HBA context object.
12324  * @cmdiocb: Pointer to driver command iocb object.
12325  * @rspiocb: Pointer to driver response iocb object.
12326  *
12327  * This function is the completion handler for the abort iocbs for
12328  * ELS commands. This function is called from the ELS ring event
12329  * handler with no lock held. This function frees memory resources
12330  * associated with the abort iocb.
12331  **/
12332 static void
12333 lpfc_sli_abort_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12334 			struct lpfc_iocbq *rspiocb)
12335 {
12336 	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
12337 	u32 ulp_word4 = get_job_word4(phba, rspiocb);
12338 	u8 cmnd = get_job_cmnd(phba, cmdiocb);
12339 
12340 	if (ulp_status) {
12341 		/*
12342 		 * Assume that the port already completed and returned, or
12343 		 * will return the iocb. Just Log the message.
12344 		 */
12345 		if (phba->sli_rev < LPFC_SLI_REV4) {
12346 			if (cmnd == CMD_ABORT_XRI_CX &&
12347 			    ulp_status == IOSTAT_LOCAL_REJECT &&
12348 			    ulp_word4 == IOERR_ABORT_REQUESTED) {
12349 				goto release_iocb;
12350 			}
12351 		}
12352 	}
12353 
12354 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS | LOG_SLI,
12355 			"0327 Abort els iocb complete x%px with io cmd xri %x "
12356 			"abort tag x%x abort status %x abort code %x\n",
12357 			cmdiocb, get_job_abtsiotag(phba, cmdiocb),
12358 			(phba->sli_rev == LPFC_SLI_REV4) ?
12359 			get_wqe_reqtag(cmdiocb) :
12360 			cmdiocb->iocb.ulpIoTag,
12361 			ulp_status, ulp_word4);
12362 release_iocb:
12363 	lpfc_sli_release_iocbq(phba, cmdiocb);
12364 	return;
12365 }
12366 
12367 /**
12368  * lpfc_ignore_els_cmpl - Completion handler for aborted ELS command
12369  * @phba: Pointer to HBA context object.
12370  * @cmdiocb: Pointer to driver command iocb object.
12371  * @rspiocb: Pointer to driver response iocb object.
12372  *
12373  * The function is called from SLI ring event handler with no
12374  * lock held. This function is the completion handler for ELS commands
12375  * which are aborted. The function frees memory resources used for
12376  * the aborted ELS commands.
12377  **/
12378 void
12379 lpfc_ignore_els_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12380 		     struct lpfc_iocbq *rspiocb)
12381 {
12382 	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
12383 	IOCB_t *irsp;
12384 	LPFC_MBOXQ_t *mbox;
12385 	u32 ulp_command, ulp_status, ulp_word4, iotag;
12386 
12387 	ulp_command = get_job_cmnd(phba, cmdiocb);
12388 	ulp_status = get_job_ulpstatus(phba, rspiocb);
12389 	ulp_word4 = get_job_word4(phba, rspiocb);
12390 
12391 	if (phba->sli_rev == LPFC_SLI_REV4) {
12392 		iotag = get_wqe_reqtag(cmdiocb);
12393 	} else {
12394 		irsp = &rspiocb->iocb;
12395 		iotag = irsp->ulpIoTag;
12396 
12397 		/* It is possible a PLOGI_RJT for NPIV ports to get aborted.
12398 		 * The MBX_REG_LOGIN64 mbox command is freed back to the
12399 		 * mbox_mem_pool here.
12400 		 */
12401 		if (cmdiocb->context_un.mbox) {
12402 			mbox = cmdiocb->context_un.mbox;
12403 			lpfc_mbox_rsrc_cleanup(phba, mbox, MBOX_THD_UNLOCKED);
12404 			cmdiocb->context_un.mbox = NULL;
12405 		}
12406 	}
12407 
12408 	/* ELS cmd tag <ulpIoTag> completes */
12409 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
12410 			"0139 Ignoring ELS cmd code x%x ref cnt x%x Data: "
12411 			"x%x x%x x%x x%px\n",
12412 			ulp_command, kref_read(&cmdiocb->ndlp->kref),
12413 			ulp_status, ulp_word4, iotag, cmdiocb->ndlp);
12414 	/*
12415 	 * Deref the ndlp after free_iocb. sli_release_iocb will access the ndlp
12416 	 * if exchange is busy.
12417 	 */
12418 	if (ulp_command == CMD_GEN_REQUEST64_CR)
12419 		lpfc_ct_free_iocb(phba, cmdiocb);
12420 	else
12421 		lpfc_els_free_iocb(phba, cmdiocb);
12422 
12423 	lpfc_nlp_put(ndlp);
12424 }
12425 
12426 /**
12427  * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
12428  * @phba: Pointer to HBA context object.
12429  * @pring: Pointer to driver SLI ring object.
12430  * @cmdiocb: Pointer to driver command iocb object.
12431  * @cmpl: completion function.
12432  *
12433  * This function issues an abort iocb for the provided command iocb. In case
12434  * of unloading, the abort iocb will not be issued to commands on the ELS
12435  * ring. Instead, the completion function is changed for those commands
12436  * so that nothing happens when they finish. This function is called with
12437  * the hbalock held and no ring_lock held (SLI4). The function returns
12438  * IOCB_ABORTING when the command iocb is itself an abort or is already
12439  * being aborted.
12440  **/
12441 int
12442 lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
12443 			   struct lpfc_iocbq *cmdiocb, void *cmpl)
12444 {
12445 	struct lpfc_vport *vport = cmdiocb->vport;
12446 	struct lpfc_iocbq *abtsiocbp;
12447 	int retval = IOCB_ERROR;
12448 	unsigned long iflags;
12449 	struct lpfc_nodelist *ndlp = NULL;
12450 	u32 ulp_command = get_job_cmnd(phba, cmdiocb);
12451 	u16 ulp_context, iotag;
12452 	bool ia;
12453 
12454 	/*
12455 	 * There are certain command types we don't want to abort.  And we
12456 	 * don't want to abort commands that are already in the process of
12457 	 * being aborted.
12458 	 */
12459 	if (ulp_command == CMD_ABORT_XRI_WQE ||
12460 	    ulp_command == CMD_ABORT_XRI_CN ||
12461 	    ulp_command == CMD_CLOSE_XRI_CN ||
12462 	    cmdiocb->cmd_flag & LPFC_DRIVER_ABORTED)
12463 		return IOCB_ABORTING;
12464 
12465 	if (!pring) {
12466 		if (cmdiocb->cmd_flag & LPFC_IO_FABRIC)
12467 			cmdiocb->fabric_cmd_cmpl = lpfc_ignore_els_cmpl;
12468 		else
12469 			cmdiocb->cmd_cmpl = lpfc_ignore_els_cmpl;
12470 		return retval;
12471 	}
12472 
12473 	/*
12474 	 * Always abort the outstanding WQE and set the IA bit correctly
12475 	 * for the context.  This is necessary for correctly removing
12476 	 * outstanding ndlp reference counts when the CQE completes with
12477 	 * the XB bit set.
12478 	 */
12479 	abtsiocbp = __lpfc_sli_get_iocbq(phba);
12480 	if (abtsiocbp == NULL)
12481 		return IOCB_NORESOURCE;
12482 
12483 	/* This signals the response to set the correct status
12484 	 * before calling the completion handler
12485 	 */
12486 	cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
12487 
12488 	if (phba->sli_rev == LPFC_SLI_REV4) {
12489 		ulp_context = cmdiocb->sli4_xritag;
12490 		iotag = abtsiocbp->iotag;
12491 	} else {
12492 		iotag = cmdiocb->iocb.ulpIoTag;
12493 		if (pring->ringno == LPFC_ELS_RING) {
12494 			ndlp = cmdiocb->ndlp;
12495 			ulp_context = ndlp->nlp_rpi;
12496 		} else {
12497 			ulp_context = cmdiocb->iocb.ulpContext;
12498 		}
12499 	}
12500 
12501 	/* Just close the exchange under certain conditions. */
12502 	if (test_bit(FC_UNLOADING, &vport->load_flag) ||
12503 	    phba->link_state < LPFC_LINK_UP ||
12504 	    (phba->sli_rev == LPFC_SLI_REV4 &&
12505 	     phba->sli4_hba.link_state.status == LPFC_FC_LA_TYPE_LINK_DOWN) ||
12506 	    (phba->link_flag & LS_EXTERNAL_LOOPBACK))
12507 		ia = true;
12508 	else
12509 		ia = false;
12510 
12511 	lpfc_sli_prep_abort_xri(phba, abtsiocbp, ulp_context, iotag,
12512 				cmdiocb->iocb.ulpClass,
12513 				LPFC_WQE_CQ_ID_DEFAULT, ia, false);
12514 
12515 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
12516 	abtsiocbp->hba_wqidx = cmdiocb->hba_wqidx;
12517 	if (cmdiocb->cmd_flag & LPFC_IO_FCP)
12518 		abtsiocbp->cmd_flag |= (LPFC_IO_FCP | LPFC_USE_FCPWQIDX);
12519 
12520 	if (cmdiocb->cmd_flag & LPFC_IO_FOF)
12521 		abtsiocbp->cmd_flag |= LPFC_IO_FOF;
12522 
12523 	if (cmpl)
12524 		abtsiocbp->cmd_cmpl = cmpl;
12525 	else
12526 		abtsiocbp->cmd_cmpl = lpfc_sli_abort_els_cmpl;
12527 	abtsiocbp->vport = vport;
12528 
12529 	if (phba->sli_rev == LPFC_SLI_REV4) {
12530 		pring = lpfc_sli4_calc_ring(phba, abtsiocbp);
12531 		if (unlikely(pring == NULL))
12532 			goto abort_iotag_exit;
12533 		/* Note: both hbalock and ring_lock need to be held here */
12534 		spin_lock_irqsave(&pring->ring_lock, iflags);
12535 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12536 			abtsiocbp, 0);
12537 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
12538 	} else {
12539 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
12540 			abtsiocbp, 0);
12541 	}
12542 
12543 abort_iotag_exit:
12544 
12545 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
12546 			 "0339 Abort IO XRI x%x, Original iotag x%x, "
12547 			 "abort tag x%x Cmdjob : x%px Abortjob : x%px "
12548 			 "retval x%x : IA %d cmd_cmpl %ps\n",
12549 			 ulp_context, (phba->sli_rev == LPFC_SLI_REV4) ?
12550 			 cmdiocb->iotag : iotag, iotag, cmdiocb, abtsiocbp,
12551 			 retval, ia, abtsiocbp->cmd_cmpl);
12552 	if (retval) {
12553 		cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
12554 		__lpfc_sli_release_iocbq(phba, abtsiocbp);
12555 	}
12556 
12557 	/*
12558 	 * Caller to this routine should check for IOCB_ERROR
12559 	 * and handle it properly.  This routine no longer removes
12560 	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
12561 	 */
12562 	return retval;
12563 }
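/*
 * Usage sketch (illustrative only, not driver code): aborting one outstanding
 * FCP iocb the way the abort sweeps below do. @iocbq and @pring are assumed
 * to have been looked up already; error handling is elided.
 *
 *	unsigned long iflags;
 *	int rc;
 *
 *	spin_lock_irqsave(&phba->hbalock, iflags);
 *	if (phba->sli_rev == LPFC_SLI_REV4)
 *		pring = lpfc_sli4_calc_ring(phba, iocbq);
 *	rc = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
 *					lpfc_sli_abort_fcp_cmpl);
 *	spin_unlock_irqrestore(&phba->hbalock, iflags);
 *	if (rc != IOCB_SUCCESS && rc != IOCB_ABORTING)
 *		rc = IOCB_ERROR;	// abort not issued; caller recovers
 */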
12564 
12565 /**
12566  * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
12567  * @phba: pointer to lpfc HBA data structure.
12568  *
12569  * This routine will abort all pending and outstanding iocbs to an HBA.
12570  **/
12571 void
12572 lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
12573 {
12574 	struct lpfc_sli *psli = &phba->sli;
12575 	struct lpfc_sli_ring *pring;
12576 	struct lpfc_queue *qp = NULL;
12577 	int i;
12578 
12579 	if (phba->sli_rev != LPFC_SLI_REV4) {
12580 		for (i = 0; i < psli->num_rings; i++) {
12581 			pring = &psli->sli3_ring[i];
12582 			lpfc_sli_abort_iocb_ring(phba, pring);
12583 		}
12584 		return;
12585 	}
12586 	list_for_each_entry(qp, &phba->sli4_hba.lpfc_wq_list, wq_list) {
12587 		pring = qp->pring;
12588 		if (!pring)
12589 			continue;
12590 		lpfc_sli_abort_iocb_ring(phba, pring);
12591 	}
12592 }
12593 
12594 /**
12595  * lpfc_sli_validate_fcp_iocb_for_abort - filter iocbs appropriate for FCP aborts
12596  * @iocbq: Pointer to iocb object.
12597  * @vport: Pointer to driver virtual port object.
12598  *
12599  * This function acts as an iocb filter for functions which abort FCP iocbs.
12600  *
12601  * Return values
12602  * -ENODEV, if a null iocb or vport ptr is encountered
12603  * -EINVAL, if the iocb is not an FCP I/O, is not on the TX cmpl queue, is
12604  *          already marked as driver-aborted, or is an abort iocb itself
12605  * 0, if the iocb passes the criteria for aborting the FCP I/O
12606  **/
12607 static int
12608 lpfc_sli_validate_fcp_iocb_for_abort(struct lpfc_iocbq *iocbq,
12609 				     struct lpfc_vport *vport)
12610 {
12611 	u8 ulp_command;
12612 
12613 	/* No null ptr vports */
12614 	if (!iocbq || iocbq->vport != vport)
12615 		return -ENODEV;
12616 
12617 	/* The iocb must be for an FCP IO, must already be on the TX cmpl queue,
12618 	 * and must be neither premarked as driver-aborted nor an ABORT iocb itself
12619 	 */
12620 	ulp_command = get_job_cmnd(vport->phba, iocbq);
12621 	if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12622 	    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ) ||
12623 	    (iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12624 	    (ulp_command == CMD_ABORT_XRI_CN ||
12625 	     ulp_command == CMD_CLOSE_XRI_CN ||
12626 	     ulp_command == CMD_ABORT_XRI_WQE))
12627 		return -EINVAL;
12628 
12629 	return 0;
12630 }
12631 
12632 /**
12633  * lpfc_sli_validate_fcp_iocb - validate commands associated with a SCSI target
12634  * @iocbq: Pointer to driver iocb object.
12635  * @vport: Pointer to driver virtual port object.
12636  * @tgt_id: SCSI ID of the target.
12637  * @lun_id: LUN ID of the scsi device.
12638  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST
12639  *
12640  * This function acts as an iocb filter for validating a lun/SCSI target/SCSI
12641  * host.
12642  *
12643  * It will return
12644  * 0 if the filtering criteria are met for the given iocb and will return
12645  * 1 if the filtering criteria are not met.
12646  * If ctx_cmd == LPFC_CTX_LUN, the function returns 0 only if the
12647  * given iocb is for the SCSI device specified by vport, tgt_id and
12648  * lun_id parameter.
12649  * If ctx_cmd == LPFC_CTX_TGT,  the function returns 0 only if the
12650  * given iocb is for the SCSI target specified by vport and tgt_id
12651  * parameters.
12652  * If ctx_cmd == LPFC_CTX_HOST, the function returns 0 only if the
12653  * given iocb is for the SCSI host associated with the given vport.
12654  * This function is called with no locks held.
12655  **/
12656 static int
12657 lpfc_sli_validate_fcp_iocb(struct lpfc_iocbq *iocbq, struct lpfc_vport *vport,
12658 			   uint16_t tgt_id, uint64_t lun_id,
12659 			   lpfc_ctx_cmd ctx_cmd)
12660 {
12661 	struct lpfc_io_buf *lpfc_cmd;
12662 	int rc = 1;
12663 
12664 	lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12665 
12666 	if (lpfc_cmd->pCmd == NULL)
12667 		return rc;
12668 
12669 	switch (ctx_cmd) {
12670 	case LPFC_CTX_LUN:
12671 		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12672 		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id) &&
12673 		    (scsilun_to_int(&lpfc_cmd->fcp_cmnd->fcp_lun) == lun_id))
12674 			rc = 0;
12675 		break;
12676 	case LPFC_CTX_TGT:
12677 		if ((lpfc_cmd->rdata) && (lpfc_cmd->rdata->pnode) &&
12678 		    (lpfc_cmd->rdata->pnode->nlp_sid == tgt_id))
12679 			rc = 0;
12680 		break;
12681 	case LPFC_CTX_HOST:
12682 		rc = 0;
12683 		break;
12684 	default:
12685 		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
12686 			__func__, ctx_cmd);
12687 		break;
12688 	}
12689 
12690 	return rc;
12691 }
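/*
 * Illustrative ordering sketch (not driver code): callers that abort FCP
 * I/Os must run lpfc_sli_validate_fcp_iocb_for_abort() before
 * lpfc_sli_validate_fcp_iocb() while walking the iotag lookup table:
 *
 *	for (i = 1; i <= phba->sli.last_iotag; i++) {
 *		iocbq = phba->sli.iocbq_lookup[i];
 *		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
 *			continue;
 *		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id,
 *					       lun_id, LPFC_CTX_LUN))
 *			continue;
 *		// iocbq is an abortable FCP I/O for this LUN
 *	}
 */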
12692 
12693 /**
12694  * lpfc_sli_sum_iocb - Function to count the number of FCP iocbs pending
12695  * @vport: Pointer to virtual port.
12696  * @tgt_id: SCSI ID of the target.
12697  * @lun_id: LUN ID of the scsi device.
12698  * @ctx_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12699  *
12700  * This function returns number of FCP commands pending for the vport.
12701  * When ctx_cmd == LPFC_CTX_LUN, the function returns number of FCP
12702  * commands pending on the vport associated with SCSI device specified
12703  * by tgt_id and lun_id parameters.
12704  * When ctx_cmd == LPFC_CTX_TGT, the function returns number of FCP
12705  * commands pending on the vport associated with SCSI target specified
12706  * by tgt_id parameter.
12707  * When ctx_cmd == LPFC_CTX_HOST, the function returns number of FCP
12708  * commands pending on the vport.
12709  * This function returns the number of iocbs which satisfy the filter.
12710  * This function is called without any lock held.
12711  **/
12712 int
12713 lpfc_sli_sum_iocb(struct lpfc_vport *vport, uint16_t tgt_id, uint64_t lun_id,
12714 		  lpfc_ctx_cmd ctx_cmd)
12715 {
12716 	struct lpfc_hba *phba = vport->phba;
12717 	struct lpfc_iocbq *iocbq;
12718 	int sum, i;
12719 	unsigned long iflags;
12720 	u8 ulp_command;
12721 
12722 	spin_lock_irqsave(&phba->hbalock, iflags);
12723 	for (i = 1, sum = 0; i <= phba->sli.last_iotag; i++) {
12724 		iocbq = phba->sli.iocbq_lookup[i];
12725 
12726 		if (!iocbq || iocbq->vport != vport)
12727 			continue;
12728 		if (!(iocbq->cmd_flag & LPFC_IO_FCP) ||
12729 		    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ))
12730 			continue;
12731 
12732 		/* Count outstanding aborts as well */
12733 		ulp_command = get_job_cmnd(phba, iocbq);
12734 		if (ulp_command == CMD_ABORT_XRI_CN ||
12735 		    ulp_command == CMD_CLOSE_XRI_CN ||
12736 		    ulp_command == CMD_ABORT_XRI_WQE) {
12737 			sum++;
12738 			continue;
12739 		}
12740 
12741 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12742 					       ctx_cmd) == 0)
12743 			sum++;
12744 	}
12745 	spin_unlock_irqrestore(&phba->hbalock, iflags);
12746 
12747 	return sum;
12748 }
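/*
 * Usage sketch (illustrative only): a reset path can poll this count to wait
 * for outstanding FCP I/O on a target to drain. The retry bound and sleep
 * interval below are arbitrary example values.
 *
 *	int cnt, wait = 0;
 *
 *	while ((cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id,
 *					LPFC_CTX_TGT)) && wait++ < 60)
 *		msleep(500);
 *	if (cnt)
 *		lpfc_printf_vlog(vport, KERN_ERR, LOG_SLI,
 *				 "I/O did not drain, cnt %d\n", cnt);
 */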
12749 
12750 /**
12751  * lpfc_sli_abort_fcp_cmpl - Completion handler function for aborted FCP IOCBs
12752  * @phba: Pointer to HBA context object
12753  * @cmdiocb: Pointer to command iocb object.
12754  * @rspiocb: Pointer to response iocb object.
12755  *
12756  * This function is called when an aborted FCP iocb completes. This
12757  * function is called by the ring event handler with no lock held.
12758  * This function frees the iocb.
12759  **/
12760 void
12761 lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
12762 			struct lpfc_iocbq *rspiocb)
12763 {
12764 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
12765 			"3096 ABORT_XRI_CX completing on rpi x%x "
12766 			"original iotag x%x, abort cmd iotag x%x "
12767 			"status 0x%x, reason 0x%x\n",
12768 			(phba->sli_rev == LPFC_SLI_REV4) ?
12769 			cmdiocb->sli4_xritag :
12770 			cmdiocb->iocb.un.acxri.abortContextTag,
12771 			get_job_abtsiotag(phba, cmdiocb),
12772 			cmdiocb->iotag, get_job_ulpstatus(phba, rspiocb),
12773 			get_job_word4(phba, rspiocb));
12774 	lpfc_sli_release_iocbq(phba, cmdiocb);
12775 	return;
12776 }
12777 
12778 /**
12779  * lpfc_sli_abort_iocb - issue abort for all commands on a host/target/LUN
12780  * @vport: Pointer to virtual port.
12781  * @tgt_id: SCSI ID of the target.
12782  * @lun_id: LUN ID of the scsi device.
12783  * @abort_cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12784  *
12785  * This function sends an abort command for every SCSI command
12786  * associated with the given virtual port pending on the ring
12787  * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12788  * lpfc_sli_validate_fcp_iocb function.  The ordering for validation before
12789  * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12790  * followed by lpfc_sli_validate_fcp_iocb.
12791  *
12792  * When abort_cmd == LPFC_CTX_LUN, the function sends abort only to the
12793  * FCP iocbs associated with lun specified by tgt_id and lun_id
12794  * parameters
12795  * When abort_cmd == LPFC_CTX_TGT, the function sends abort only to the
12796  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12797  * When abort_cmd == LPFC_CTX_HOST, the function sends abort to all
12798  * FCP iocbs associated with virtual port.
12799  * The pring used for SLI3 is sli3_ring[LPFC_FCP_RING], for SLI4
12800  * lpfc_sli4_calc_ring is used.
12801  * This function returns number of iocbs it failed to abort.
12802  * This function is called with no locks held.
12803  **/
12804 int
12805 lpfc_sli_abort_iocb(struct lpfc_vport *vport, u16 tgt_id, u64 lun_id,
12806 		    lpfc_ctx_cmd abort_cmd)
12807 {
12808 	struct lpfc_hba *phba = vport->phba;
12809 	struct lpfc_sli_ring *pring = NULL;
12810 	struct lpfc_iocbq *iocbq;
12811 	int errcnt = 0, ret_val = 0;
12812 	unsigned long iflags;
12813 	int i;
12814 
12815 	/* all I/Os are in process of being flushed */
12816 	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
12817 		return errcnt;
12818 
12819 	for (i = 1; i <= phba->sli.last_iotag; i++) {
12820 		iocbq = phba->sli.iocbq_lookup[i];
12821 
12822 		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12823 			continue;
12824 
12825 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12826 					       abort_cmd) != 0)
12827 			continue;
12828 
12829 		spin_lock_irqsave(&phba->hbalock, iflags);
12830 		if (phba->sli_rev == LPFC_SLI_REV3) {
12831 			pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
12832 		} else if (phba->sli_rev == LPFC_SLI_REV4) {
12833 			pring = lpfc_sli4_calc_ring(phba, iocbq);
12834 		}
12835 		ret_val = lpfc_sli_issue_abort_iotag(phba, pring, iocbq,
12836 						     lpfc_sli_abort_fcp_cmpl);
12837 		spin_unlock_irqrestore(&phba->hbalock, iflags);
12838 		if (ret_val != IOCB_SUCCESS)
12839 			errcnt++;
12840 	}
12841 
12842 	return errcnt;
12843 }
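/*
 * Usage sketch (illustrative only): a LUN reset handler can sweep all
 * outstanding FCP I/O for one LUN; a non-zero return means some aborts were
 * not issued and the caller must escalate recovery.
 *
 *	if (lpfc_sli_abort_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN))
 *		status = FAILED;	// hypothetical SCSI EH status
 */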
12844 
12845 /**
12846  * lpfc_sli_abort_taskmgmt - issue abort for all commands on a host/target/LUN
12847  * @vport: Pointer to virtual port.
12848  * @pring: Pointer to driver SLI ring object.
12849  * @tgt_id: SCSI ID of the target.
12850  * @lun_id: LUN ID of the scsi device.
12851  * @cmd: LPFC_CTX_LUN/LPFC_CTX_TGT/LPFC_CTX_HOST.
12852  *
12853  * This function sends an abort command for every SCSI command
12854  * associated with the given virtual port pending on the ring
12855  * filtered by lpfc_sli_validate_fcp_iocb_for_abort and then
12856  * lpfc_sli_validate_fcp_iocb function.  The ordering for validation before
12857  * submitting abort iocbs must be lpfc_sli_validate_fcp_iocb_for_abort
12858  * followed by lpfc_sli_validate_fcp_iocb.
12859  *
12860  * When taskmgmt_cmd == LPFC_CTX_LUN, the function sends abort only to the
12861  * FCP iocbs associated with lun specified by tgt_id and lun_id
12862  * parameters
12863  * When taskmgmt_cmd == LPFC_CTX_TGT, the function sends abort only to the
12864  * FCP iocbs associated with SCSI target specified by tgt_id parameter.
12865  * When taskmgmt_cmd == LPFC_CTX_HOST, the function sends abort to all
12866  * FCP iocbs associated with virtual port.
12867  * This function returns number of iocbs it aborted .
12868  * This function is called with no locks held right after a taskmgmt
12869  * command is sent.
12870  **/
12871 int
12872 lpfc_sli_abort_taskmgmt(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
12873 			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
12874 {
12875 	struct lpfc_hba *phba = vport->phba;
12876 	struct lpfc_io_buf *lpfc_cmd;
12877 	struct lpfc_iocbq *abtsiocbq;
12878 	struct lpfc_nodelist *ndlp = NULL;
12879 	struct lpfc_iocbq *iocbq;
12880 	int sum, i, ret_val;
12881 	unsigned long iflags;
12882 	struct lpfc_sli_ring *pring_s4 = NULL;
12883 	u16 ulp_context, iotag, cqid = LPFC_WQE_CQ_ID_DEFAULT;
12884 	bool ia;
12885 
12886 	/* all I/Os are in process of being flushed */
12887 	if (test_bit(HBA_IOQ_FLUSH, &phba->hba_flag))
12888 		return 0;
12889 
12890 	sum = 0;
12891 
12892 	spin_lock_irqsave(&phba->hbalock, iflags);
12893 	for (i = 1; i <= phba->sli.last_iotag; i++) {
12894 		iocbq = phba->sli.iocbq_lookup[i];
12895 
12896 		if (lpfc_sli_validate_fcp_iocb_for_abort(iocbq, vport))
12897 			continue;
12898 
12899 		if (lpfc_sli_validate_fcp_iocb(iocbq, vport, tgt_id, lun_id,
12900 					       cmd) != 0)
12901 			continue;
12902 
12903 		/* Guard against IO completion being called at the same time */
12904 		lpfc_cmd = container_of(iocbq, struct lpfc_io_buf, cur_iocbq);
12905 		spin_lock(&lpfc_cmd->buf_lock);
12906 
12907 		if (!lpfc_cmd->pCmd) {
12908 			spin_unlock(&lpfc_cmd->buf_lock);
12909 			continue;
12910 		}
12911 
12912 		if (phba->sli_rev == LPFC_SLI_REV4) {
12913 			pring_s4 =
12914 			    phba->sli4_hba.hdwq[iocbq->hba_wqidx].io_wq->pring;
12915 			if (!pring_s4) {
12916 				spin_unlock(&lpfc_cmd->buf_lock);
12917 				continue;
12918 			}
12919 			/* Note: both hbalock and ring_lock must be set here */
12920 			spin_lock(&pring_s4->ring_lock);
12921 		}
12922 
12923 		/*
12924 		 * If the iocbq is already being aborted, don't take a second
12925 		 * action, but do count it.
12926 		 */
12927 		if ((iocbq->cmd_flag & LPFC_DRIVER_ABORTED) ||
12928 		    !(iocbq->cmd_flag & LPFC_IO_ON_TXCMPLQ)) {
12929 			if (phba->sli_rev == LPFC_SLI_REV4)
12930 				spin_unlock(&pring_s4->ring_lock);
12931 			spin_unlock(&lpfc_cmd->buf_lock);
12932 			continue;
12933 		}
12934 
12935 		/* issue ABTS for this IOCB based on iotag */
12936 		abtsiocbq = __lpfc_sli_get_iocbq(phba);
12937 		if (!abtsiocbq) {
12938 			if (phba->sli_rev == LPFC_SLI_REV4)
12939 				spin_unlock(&pring_s4->ring_lock);
12940 			spin_unlock(&lpfc_cmd->buf_lock);
12941 			continue;
12942 		}
12943 
12944 		if (phba->sli_rev == LPFC_SLI_REV4) {
12945 			iotag = abtsiocbq->iotag;
12946 			ulp_context = iocbq->sli4_xritag;
12947 			cqid = lpfc_cmd->hdwq->io_cq_map;
12948 		} else {
12949 			iotag = iocbq->iocb.ulpIoTag;
12950 			if (pring->ringno == LPFC_ELS_RING) {
12951 				ndlp = iocbq->ndlp;
12952 				ulp_context = ndlp->nlp_rpi;
12953 			} else {
12954 				ulp_context = iocbq->iocb.ulpContext;
12955 			}
12956 		}
12957 
12958 		ndlp = lpfc_cmd->rdata->pnode;
12959 
12960 		if (lpfc_is_link_up(phba) &&
12961 		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE) &&
12962 		    !(phba->link_flag & LS_EXTERNAL_LOOPBACK))
12963 			ia = false;
12964 		else
12965 			ia = true;
12966 
12967 		lpfc_sli_prep_abort_xri(phba, abtsiocbq, ulp_context, iotag,
12968 					iocbq->iocb.ulpClass, cqid,
12969 					ia, false);
12970 
12971 		abtsiocbq->vport = vport;
12972 
12973 		/* ABTS WQE must go to the same WQ as the WQE to be aborted */
12974 		abtsiocbq->hba_wqidx = iocbq->hba_wqidx;
12975 		if (iocbq->cmd_flag & LPFC_IO_FCP)
12976 			abtsiocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
12977 		if (iocbq->cmd_flag & LPFC_IO_FOF)
12978 			abtsiocbq->cmd_flag |= LPFC_IO_FOF;
12979 
12980 		/* Setup callback routine and issue the command. */
12981 		abtsiocbq->cmd_cmpl = lpfc_sli_abort_fcp_cmpl;
12982 
12983 		/*
12984 		 * Indicate the IO is being aborted by the driver and set
12985 		 * the caller's flag into the aborted IO.
12986 		 */
12987 		iocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
12988 
12989 		if (phba->sli_rev == LPFC_SLI_REV4) {
12990 			ret_val = __lpfc_sli_issue_iocb(phba, pring_s4->ringno,
12991 							abtsiocbq, 0);
12992 			spin_unlock(&pring_s4->ring_lock);
12993 		} else {
12994 			ret_val = __lpfc_sli_issue_iocb(phba, pring->ringno,
12995 							abtsiocbq, 0);
12996 		}
12997 
12998 		spin_unlock(&lpfc_cmd->buf_lock);
12999 
13000 		if (ret_val == IOCB_ERROR)
13001 			__lpfc_sli_release_iocbq(phba, abtsiocbq);
13002 		else
13003 			sum++;
13004 	}
13005 	spin_unlock_irqrestore(&phba->hbalock, iflags);
13006 	return sum;
13007 }
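/*
 * Usage sketch (illustrative only): after a task management command such as
 * a LUN reset has been sent, the remaining I/Os can be swept and the count
 * polled until it drains; the retry bound below is an arbitrary example.
 *
 *	int wait = 0, cnt;
 *
 *	cnt = lpfc_sli_abort_taskmgmt(vport, pring, tgt_id, lun_id,
 *				      LPFC_CTX_LUN);
 *	while (cnt && wait++ < 60) {
 *		msleep(500);
 *		cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, LPFC_CTX_LUN);
 *	}
 */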
13008 
13009 /**
13010  * lpfc_sli_wake_iocb_wait - lpfc_sli_issue_iocb_wait's completion handler
13011  * @phba: Pointer to HBA context object.
13012  * @cmdiocbq: Pointer to command iocb.
13013  * @rspiocbq: Pointer to response iocb.
13014  *
13015  * This function is the completion handler for iocbs issued using the
13016  * lpfc_sli_issue_iocb_wait function. This function is called by the
13017  * ring event handler function without any lock held. This function
13018  * can be called from both worker thread context and interrupt
13019  * context. This function can also be called from another thread which
13020  * cleans up the SLI layer objects.
13021  * This function copies the contents of the response iocb to the
13022  * response iocb memory object provided by the caller of
13023  * lpfc_sli_issue_iocb_wait and then wakes up the thread which
13024  * sleeps for the iocb completion.
13025  **/
13026 static void
13027 lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
13028 			struct lpfc_iocbq *cmdiocbq,
13029 			struct lpfc_iocbq *rspiocbq)
13030 {
13031 	wait_queue_head_t *pdone_q;
13032 	unsigned long iflags;
13033 	struct lpfc_io_buf *lpfc_cmd;
13034 	size_t offset = offsetof(struct lpfc_iocbq, wqe);
13035 
13036 	spin_lock_irqsave(&phba->hbalock, iflags);
13037 	if (cmdiocbq->cmd_flag & LPFC_IO_WAKE_TMO) {
13038 
13039 		/*
13040 		 * A time out has occurred for the iocb.  If a time out
13041 		 * completion handler has been supplied, call it.  Otherwise,
13042 		 * just free the iocbq.
13043 		 */
13044 
13045 		spin_unlock_irqrestore(&phba->hbalock, iflags);
13046 		cmdiocbq->cmd_cmpl = cmdiocbq->wait_cmd_cmpl;
13047 		cmdiocbq->wait_cmd_cmpl = NULL;
13048 		if (cmdiocbq->cmd_cmpl)
13049 			cmdiocbq->cmd_cmpl(phba, cmdiocbq, NULL);
13050 		else
13051 			lpfc_sli_release_iocbq(phba, cmdiocbq);
13052 		return;
13053 	}
13054 
13055 	/* Copy the contents of the local rspiocb into the caller's buffer. */
13056 	cmdiocbq->cmd_flag |= LPFC_IO_WAKE;
13057 	if (cmdiocbq->rsp_iocb && rspiocbq)
13058 		memcpy((char *)cmdiocbq->rsp_iocb + offset,
13059 		       (char *)rspiocbq + offset, sizeof(*rspiocbq) - offset);
13060 
13061 	/* Set the exchange busy flag for task management commands */
13062 	if ((cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
13063 	    !(cmdiocbq->cmd_flag & LPFC_IO_LIBDFC)) {
13064 		lpfc_cmd = container_of(cmdiocbq, struct lpfc_io_buf,
13065 					cur_iocbq);
13066 		if (rspiocbq && (rspiocbq->cmd_flag & LPFC_EXCHANGE_BUSY))
13067 			lpfc_cmd->flags |= LPFC_SBUF_XBUSY;
13068 		else
13069 			lpfc_cmd->flags &= ~LPFC_SBUF_XBUSY;
13070 	}
13071 
13072 	pdone_q = cmdiocbq->context_un.wait_queue;
13073 	if (pdone_q)
13074 		wake_up(pdone_q);
13075 	spin_unlock_irqrestore(&phba->hbalock, iflags);
13076 	return;
13077 }
13078 
13079 /**
13080  * lpfc_chk_iocb_flg - Test IOCB flag with lock held.
13081  * @phba: Pointer to HBA context object.
13082  * @piocbq: Pointer to command iocb.
13083  * @flag: Flag to test.
13084  *
13085  * This routine grabs the hbalock and then tests the cmd_flag to
13086  * see if the passed in flag is set.
13087  * Returns:
13088  * 1 if flag is set.
13089  * 0 if flag is not set.
13090  **/
13091 static int
13092 lpfc_chk_iocb_flg(struct lpfc_hba *phba,
13093 		 struct lpfc_iocbq *piocbq, uint32_t flag)
13094 {
13095 	unsigned long iflags;
13096 	int ret;
13097 
13098 	spin_lock_irqsave(&phba->hbalock, iflags);
13099 	ret = piocbq->cmd_flag & flag;
13100 	spin_unlock_irqrestore(&phba->hbalock, iflags);
13101 	return ret;
13102 
13103 }
13104 
13105 /**
13106  * lpfc_sli_issue_iocb_wait - Synchronous function to issue iocb commands
13107  * @phba: Pointer to HBA context object.
13108  * @ring_number: Ring number
13109  * @piocb: Pointer to command iocb.
13110  * @prspiocbq: Pointer to response iocb.
13111  * @timeout: Timeout in number of seconds.
13112  *
13113  * This function issues the iocb to firmware and waits for the
13114  * iocb to complete. The cmd_cmpl field of the shall be used
13115  * to handle iocbs which time out. If the field is NULL, the
13116  * function shall free the iocbq structure.  If more clean up is
13117  * needed, the caller is expected to provide a completion function
13118  * that will provide the needed clean up.  If the iocb command is
13119  * not completed within timeout seconds, the function will either
13120  * free the iocbq structure (if cmd_cmpl == NULL) or execute the
13121  * completion function set in the cmd_cmpl field and then return
13122  * a status of IOCB_TIMEDOUT.  The caller should not free the iocb
13123  * resources if this function returns IOCB_TIMEDOUT.
13124  * The function waits for the iocb completion using an
13125  * non-interruptible wait.
13126  * This function will sleep while waiting for iocb completion.
13127  * So, this function should not be called from any context which
13128  * does not allow sleeping. Due to the same reason, this function
13129  * cannot be called with interrupt disabled.
13130  * This function assumes that the iocb completions occur while
13131  * this function sleeps. So, this function cannot be called from
13132  * the thread which processes iocb completions for this ring.
13133  * This function clears the cmd_flag of the iocb object before
13134  * issuing the iocb and the iocb completion handler sets this
13135  * flag and wakes this thread when the iocb completes.
13136  * The contents of the response iocb will be copied to prspiocbq
13137  * by the completion handler when the command completes.
13138  * This function returns IOCB_SUCCESS when success.
13139  * This function is called with no lock held.
13140  **/
13141 int
13142 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
13143 			 uint32_t ring_number,
13144 			 struct lpfc_iocbq *piocb,
13145 			 struct lpfc_iocbq *prspiocbq,
13146 			 uint32_t timeout)
13147 {
13148 	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(done_q);
13149 	long timeleft, timeout_req = 0;
13150 	int retval = IOCB_SUCCESS;
13151 	uint32_t creg_val;
13152 	struct lpfc_iocbq *iocb;
13153 	int txq_cnt = 0;
13154 	int txcmplq_cnt = 0;
13155 	struct lpfc_sli_ring *pring;
13156 	unsigned long iflags;
13157 	bool iocb_completed = true;
13158 
13159 	if (phba->sli_rev >= LPFC_SLI_REV4) {
13160 		lpfc_sli_prep_wqe(phba, piocb);
13161 
13162 		pring = lpfc_sli4_calc_ring(phba, piocb);
13163 	} else
13164 		pring = &phba->sli.sli3_ring[ring_number];
13165 	/*
13166 	 * If the caller has provided a response iocbq buffer, then rsp_iocb
13167 	 * must be NULL; otherwise it is an error.
13168 	 */
13169 	if (prspiocbq) {
13170 		if (piocb->rsp_iocb)
13171 			return IOCB_ERROR;
13172 		piocb->rsp_iocb = prspiocbq;
13173 	}
13174 
13175 	piocb->wait_cmd_cmpl = piocb->cmd_cmpl;
13176 	piocb->cmd_cmpl = lpfc_sli_wake_iocb_wait;
13177 	piocb->context_un.wait_queue = &done_q;
13178 	piocb->cmd_flag &= ~(LPFC_IO_WAKE | LPFC_IO_WAKE_TMO);
13179 
13180 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13181 		if (lpfc_readl(phba->HCregaddr, &creg_val))
13182 			return IOCB_ERROR;
13183 		creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
13184 		writel(creg_val, phba->HCregaddr);
13185 		readl(phba->HCregaddr); /* flush */
13186 	}
13187 
13188 	retval = lpfc_sli_issue_iocb(phba, ring_number, piocb,
13189 				     SLI_IOCB_RET_IOCB);
13190 	if (retval == IOCB_SUCCESS) {
13191 		timeout_req = secs_to_jiffies(timeout);
13192 		timeleft = wait_event_timeout(done_q,
13193 				lpfc_chk_iocb_flg(phba, piocb, LPFC_IO_WAKE),
13194 				timeout_req);
13195 		spin_lock_irqsave(&phba->hbalock, iflags);
13196 		if (!(piocb->cmd_flag & LPFC_IO_WAKE)) {
13197 
13198 			/*
13199 			 * IOCB timed out.  Inform the wake iocb wait
13200 			 * completion function and set local status
13201 			 */
13202 
13203 			iocb_completed = false;
13204 			piocb->cmd_flag |= LPFC_IO_WAKE_TMO;
13205 		}
13206 		spin_unlock_irqrestore(&phba->hbalock, iflags);
13207 		if (iocb_completed) {
13208 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13209 					"0331 IOCB wake signaled\n");
13210 			/* Note: we are not indicating if the IOCB has a success
13211 			 * status or not - that's for the caller to check.
13212 			 * IOCB_SUCCESS means just that the command was sent and
13213 			 * completed. Not that it completed successfully.
13214 			 */
13215 		} else if (timeleft == 0) {
13216 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13217 					"0338 IOCB wait timeout error - no "
13218 					"wake response Data x%x\n", timeout);
13219 			retval = IOCB_TIMEDOUT;
13220 		} else {
13221 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13222 					"0330 IOCB wake NOT set, "
13223 					"Data x%x x%lx\n",
13224 					timeout, (timeleft / jiffies));
13225 			retval = IOCB_TIMEDOUT;
13226 		}
13227 	} else if (retval == IOCB_BUSY) {
13228 		if (phba->cfg_log_verbose & LOG_SLI) {
13229 			list_for_each_entry(iocb, &pring->txq, list) {
13230 				txq_cnt++;
13231 			}
13232 			list_for_each_entry(iocb, &pring->txcmplq, list) {
13233 				txcmplq_cnt++;
13234 			}
13235 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13236 				"2818 Max IOCBs %d txq cnt %d txcmplq cnt %d\n",
13237 				phba->iocb_cnt, txq_cnt, txcmplq_cnt);
13238 		}
13239 		return retval;
13240 	} else {
13241 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
13242 				"0332 IOCB wait issue failed, Data x%x\n",
13243 				retval);
13244 		retval = IOCB_ERROR;
13245 	}
13246 
13247 	if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
13248 		if (lpfc_readl(phba->HCregaddr, &creg_val))
13249 			return IOCB_ERROR;
13250 		creg_val &= ~(HC_R0INT_ENA << LPFC_FCP_RING);
13251 		writel(creg_val, phba->HCregaddr);
13252 		readl(phba->HCregaddr); /* flush */
13253 	}
13254 
13255 	if (prspiocbq)
13256 		piocb->rsp_iocb = NULL;
13257 
13258 	piocb->context_un.wait_queue = NULL;
13259 	piocb->cmd_cmpl = NULL;
13260 	return retval;
13261 }
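/*
 * Usage sketch (illustrative only, error handling elided): issue an ELS iocb
 * synchronously and inspect the copied response. On IOCB_TIMEDOUT the command
 * iocb now belongs to its completion handler and must not be released here.
 *
 *	struct lpfc_iocbq *rspiocbq = lpfc_sli_get_iocbq(phba);
 *	u32 ulp_status;
 *	int rc;
 *
 *	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
 *				      rspiocbq, 30);
 *	if (rc == IOCB_SUCCESS)
 *		ulp_status = get_job_ulpstatus(phba, rspiocbq);
 *	if (rc != IOCB_TIMEDOUT)
 *		lpfc_sli_release_iocbq(phba, cmdiocbq);
 *	lpfc_sli_release_iocbq(phba, rspiocbq);
 */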
13262 
13263 /**
13264  * lpfc_sli_issue_mbox_wait - Synchronous function to issue mailbox
13265  * @phba: Pointer to HBA context object.
13266  * @pmboxq: Pointer to driver mailbox object.
13267  * @timeout: Timeout in number of seconds.
13268  *
13269  * This function issues the mailbox to firmware and waits for the
13270  * mailbox command to complete. If the mailbox command is not
13271  * completed within timeout seconds, it returns MBX_TIMEOUT.
13272  * The function waits for the mailbox completion using an
13273  * interruptible wait. If the thread is woken up due to a
13274  * signal, MBX_TIMEOUT error is returned to the caller. Caller
13275  * should not free the mailbox resources, if this function returns
13276  * MBX_TIMEOUT.
13277  * This function will sleep while waiting for mailbox completion.
13278  * So, this function should not be called from any context which
13279  * does not allow sleeping. Due to the same reason, this function
13280  * cannot be called with interrupt disabled.
13281  * This function assumes that the mailbox completion occurs while
13282  * this function sleeps. So, this function cannot be called from
13283  * the worker thread which processes mailbox completion.
13284  * This function is called in the context of HBA management
13285  * applications.
13286  * This function returns MBX_SUCCESS when successful.
13287  * This function is called with no lock held.
13288  **/
13289 int
13290 lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
13291 			 uint32_t timeout)
13292 {
13293 	struct completion mbox_done;
13294 	int retval;
13295 	unsigned long flag;
13296 
13297 	pmboxq->mbox_flag &= ~LPFC_MBX_WAKE;
13298 	/* setup wake call as the mailbox callback */
13299 	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
13300 
13301 	/* setup ctx_u field to pass wait_queue pointer to wake function  */
13302 	init_completion(&mbox_done);
13303 	pmboxq->ctx_u.mbox_wait = &mbox_done;
13304 	/* now issue the command */
13305 	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
13306 	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
13307 		wait_for_completion_timeout(&mbox_done, secs_to_jiffies(timeout));
13308 
13309 		spin_lock_irqsave(&phba->hbalock, flag);
13310 		pmboxq->ctx_u.mbox_wait = NULL;
13311 		/*
13312 		 * if the LPFC_MBX_WAKE flag is set, the mailbox completed;
13313 		 * otherwise, do not free the resources.
13314 		 */
13315 		if (pmboxq->mbox_flag & LPFC_MBX_WAKE) {
13316 			retval = MBX_SUCCESS;
13317 		} else {
13318 			retval = MBX_TIMEOUT;
13319 			pmboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
13320 		}
13321 		spin_unlock_irqrestore(&phba->hbalock, flag);
13322 	}
13323 	return retval;
13324 }
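/*
 * Usage sketch (illustrative only): issue a READ_REV mailbox command
 * synchronously. On MBX_TIMEOUT the mailbox is handed off to the default
 * completion handler and must not be freed by the caller.
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	int rc;
 *
 *	if (!pmb)
 *		return -ENOMEM;
 *	lpfc_read_rev(phba, pmb);
 *	rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 *	if (rc != MBX_TIMEOUT)
 *		mempool_free(pmb, phba->mbox_mem_pool);
 */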
13325 
13326 /**
13327  * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
13328  * @phba: Pointer to HBA context.
13329  * @mbx_action: Mailbox shutdown options.
13330  *
13331  * This function is called to shutdown the driver's mailbox sub-system.
13332  * It first marks the mailbox sub-system as blocked to prevent
13333  * asynchronous mailbox commands from being issued off the pending mailbox
13334  * command queue. If the mailbox command sub-system shutdown is due to
13335  * HBA error conditions such as EEH or ERATT, this routine shall invoke
13336  * the mailbox sub-system flush routine to forcefully bring down the
13337  * mailbox sub-system. Otherwise, if it is due to normal condition (such
13338  * as with offline or HBA function reset), this routine will wait for the
13339  * outstanding mailbox command to complete before invoking the mailbox
13340  * sub-system flush routine to gracefully bring down mailbox sub-system.
13341  **/
13342 void
13343 lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
13344 {
13345 	struct lpfc_sli *psli = &phba->sli;
13346 	unsigned long timeout;
13347 
13348 	if (mbx_action == LPFC_MBX_NO_WAIT) {
13349 		/* delay 100ms for port state */
13350 		msleep(100);
13351 		lpfc_sli_mbox_sys_flush(phba);
13352 		return;
13353 	}
13354 	timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
13355 
13356 	/* Disable softirqs, including timers from obtaining phba->hbalock */
13357 	local_bh_disable();
13358 
13359 	spin_lock_irq(&phba->hbalock);
13360 	psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
13361 
13362 	if (psli->sli_flag & LPFC_SLI_ACTIVE) {
13363 		/* Determine how long we might wait for the active mailbox
13364 		 * command to be gracefully completed by firmware.
13365 		 */
13366 		if (phba->sli.mbox_active)
13367 			timeout = secs_to_jiffies(lpfc_mbox_tmo_val(phba,
13368 						phba->sli.mbox_active)) + jiffies;
13369 		spin_unlock_irq(&phba->hbalock);
13370 
13371 		/* Enable softirqs again, done with phba->hbalock */
13372 		local_bh_enable();
13373 
13374 		while (phba->sli.mbox_active) {
13375 			/* Check active mailbox complete status every 2ms */
13376 			msleep(2);
13377 			if (time_after(jiffies, timeout))
13378 				/* Timeout, let the mailbox flush routine
13379 				 * forcefully release the active mailbox command
13380 				 */
13381 				break;
13382 		}
13383 	} else {
13384 		spin_unlock_irq(&phba->hbalock);
13385 
13386 		/* Enable softirqs again, done with phba->hbalock */
13387 		local_bh_enable();
13388 	}
13389 
13390 	lpfc_sli_mbox_sys_flush(phba);
13391 }
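/*
 * Usage sketch (illustrative only): graceful teardown waits for the active
 * mailbox command, while EEH/ERATT error paths flush immediately.
 * LPFC_MBX_WAIT is assumed to be the blocking option defined alongside
 * LPFC_MBX_NO_WAIT.
 *
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_WAIT);	// normal offline
 *	...
 *	lpfc_sli_mbox_sys_shutdown(phba, LPFC_MBX_NO_WAIT);	// EEH/ERATT
 */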
13392 
13393 /**
13394  * lpfc_sli_eratt_read - read sli-3 error attention events
13395  * @phba: Pointer to HBA context.
13396  *
13397  * This function is called to read the SLI3 device error attention registers
13398  * for possible error attention events. The caller must hold the hostlock
13399  * with spin_lock_irq().
13400  *
13401  * This function returns 1 when there is Error Attention in the Host Attention
13402  * Register and returns 0 otherwise.
13403  **/
13404 static int
13405 lpfc_sli_eratt_read(struct lpfc_hba *phba)
13406 {
13407 	uint32_t ha_copy;
13408 
13409 	/* Read chip Host Attention (HA) register */
13410 	if (lpfc_readl(phba->HAregaddr, &ha_copy))
13411 		goto unplug_err;
13412 
13413 	if (ha_copy & HA_ERATT) {
13414 		/* Read host status register to retrieve error event */
13415 		if (lpfc_sli_read_hs(phba))
13416 			goto unplug_err;
13417 
13418 		/* Check if a deferred error condition is active */
13419 		if ((HS_FFER1 & phba->work_hs) &&
13420 		    ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13421 		      HS_FFER6 | HS_FFER7 | HS_FFER8) & phba->work_hs)) {
13422 			set_bit(DEFER_ERATT, &phba->hba_flag);
13423 			/* Clear all interrupt enable conditions */
13424 			writel(0, phba->HCregaddr);
13425 			readl(phba->HCregaddr);
13426 		}
13427 
13428 		/* Set the driver HA work bitmap */
13429 		phba->work_ha |= HA_ERATT;
13430 		/* Indicate polling handles this ERATT */
13431 		set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13432 		return 1;
13433 	}
13434 	return 0;
13435 
13436 unplug_err:
13437 	/* Set the driver HS work bitmap */
13438 	phba->work_hs |= UNPLUG_ERR;
13439 	/* Set the driver HA work bitmap */
13440 	phba->work_ha |= HA_ERATT;
13441 	/* Indicate polling handles this ERATT */
13442 	set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13443 	return 1;
13444 }
13445 
13446 /**
13447  * lpfc_sli4_eratt_read - read sli-4 error attention events
13448  * @phba: Pointer to HBA context.
13449  *
13450  * This function is called to read the SLI4 device error attention registers
13451  * for possible error attention events. The caller must hold the hostlock
13452  * with spin_lock_irq().
13453  *
13454  * This function returns 1 when there is Error Attention in the Host Attention
13455  * Register and returns 0 otherwise.
13456  **/
13457 static int
13458 lpfc_sli4_eratt_read(struct lpfc_hba *phba)
13459 {
13460 	uint32_t uerr_sta_hi, uerr_sta_lo;
13461 	uint32_t if_type, portsmphr;
13462 	struct lpfc_register portstat_reg;
13463 	u32 logmask;
13464 
13465 	/*
13466 	 * For now, use the SLI4 device internal unrecoverable error
13467 	 * registers for error attention. This can be changed later.
13468 	 */
13469 	if_type = bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf);
13470 	switch (if_type) {
13471 	case LPFC_SLI_INTF_IF_TYPE_0:
13472 		if (lpfc_readl(phba->sli4_hba.u.if_type0.UERRLOregaddr,
13473 			&uerr_sta_lo) ||
13474 			lpfc_readl(phba->sli4_hba.u.if_type0.UERRHIregaddr,
13475 			&uerr_sta_hi)) {
13476 			phba->work_hs |= UNPLUG_ERR;
13477 			phba->work_ha |= HA_ERATT;
13478 			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13479 			return 1;
13480 		}
13481 		if ((~phba->sli4_hba.ue_mask_lo & uerr_sta_lo) ||
13482 		    (~phba->sli4_hba.ue_mask_hi & uerr_sta_hi)) {
13483 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13484 					"1423 HBA Unrecoverable error: "
13485 					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
13486 					"ue_mask_lo_reg=0x%x, "
13487 					"ue_mask_hi_reg=0x%x\n",
13488 					uerr_sta_lo, uerr_sta_hi,
13489 					phba->sli4_hba.ue_mask_lo,
13490 					phba->sli4_hba.ue_mask_hi);
13491 			phba->work_status[0] = uerr_sta_lo;
13492 			phba->work_status[1] = uerr_sta_hi;
13493 			phba->work_ha |= HA_ERATT;
13494 			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13495 			return 1;
13496 		}
13497 		break;
13498 	case LPFC_SLI_INTF_IF_TYPE_2:
13499 	case LPFC_SLI_INTF_IF_TYPE_6:
13500 		if (lpfc_readl(phba->sli4_hba.u.if_type2.STATUSregaddr,
13501 			&portstat_reg.word0) ||
13502 			lpfc_readl(phba->sli4_hba.PSMPHRregaddr,
13503 			&portsmphr)){
13504 			phba->work_hs |= UNPLUG_ERR;
13505 			phba->work_ha |= HA_ERATT;
13506 			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13507 			return 1;
13508 		}
13509 		if (bf_get(lpfc_sliport_status_err, &portstat_reg)) {
13510 			phba->work_status[0] =
13511 				readl(phba->sli4_hba.u.if_type2.ERR1regaddr);
13512 			phba->work_status[1] =
13513 				readl(phba->sli4_hba.u.if_type2.ERR2regaddr);
13514 			logmask = LOG_TRACE_EVENT;
13515 			if (phba->work_status[0] ==
13516 				SLIPORT_ERR1_REG_ERR_CODE_2 &&
13517 			    phba->work_status[1] == SLIPORT_ERR2_REG_FW_RESTART)
13518 				logmask = LOG_SLI;
13519 			lpfc_printf_log(phba, KERN_ERR, logmask,
13520 					"2885 Port Status Event: "
13521 					"port status reg 0x%x, "
13522 					"port smphr reg 0x%x, "
13523 					"error 1=0x%x, error 2=0x%x\n",
13524 					portstat_reg.word0,
13525 					portsmphr,
13526 					phba->work_status[0],
13527 					phba->work_status[1]);
13528 			phba->work_ha |= HA_ERATT;
13529 			set_bit(HBA_ERATT_HANDLED, &phba->hba_flag);
13530 			return 1;
13531 		}
13532 		break;
13533 	case LPFC_SLI_INTF_IF_TYPE_1:
13534 	default:
13535 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13536 				"2886 HBA Error Attention on unsupported "
13537 				"if type %d.", if_type);
13538 		return 1;
13539 	}
13540 
13541 	return 0;
13542 }
13543 
13544 /**
13545  * lpfc_sli_check_eratt - check error attention events
13546  * @phba: Pointer to HBA context.
13547  *
13548  * This function is called from timer soft interrupt context to check HBA's
13549  * error attention register bit for error attention events.
13550  *
13551  * This function returns 1 when there is Error Attention in the Host Attention
13552  * Register and returns 0 otherwise.
13553  **/
13554 int
13555 lpfc_sli_check_eratt(struct lpfc_hba *phba)
13556 {
13557 	uint32_t ha_copy;
13558 
13559 	/* If somebody is waiting to handle an eratt, don't process it
13560 	 * here. The brdkill function will do this.
13561 	 */
13562 	if (phba->link_flag & LS_IGNORE_ERATT)
13563 		return 0;
13564 
13565 	/* Check if interrupt handler handles this ERATT */
13566 	if (test_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
13567 		/* Interrupt handler has handled ERATT */
13568 		return 0;
13569 
13570 	/*
13571 	 * If there is deferred error attention, do not check for error
13572 	 * attention
13573 	 */
13574 	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
13575 		return 0;
13576 
13577 	spin_lock_irq(&phba->hbalock);
13578 	/* If PCI channel is offline, don't process it */
13579 	if (unlikely(pci_channel_offline(phba->pcidev))) {
13580 		spin_unlock_irq(&phba->hbalock);
13581 		return 0;
13582 	}
13583 
13584 	switch (phba->sli_rev) {
13585 	case LPFC_SLI_REV2:
13586 	case LPFC_SLI_REV3:
13587 		/* Read chip Host Attention (HA) register */
13588 		ha_copy = lpfc_sli_eratt_read(phba);
13589 		break;
13590 	case LPFC_SLI_REV4:
13591 		/* Read device Unrecoverable Error (UERR) registers */
13592 		ha_copy = lpfc_sli4_eratt_read(phba);
13593 		break;
13594 	default:
13595 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13596 				"0299 Invalid SLI revision (%d)\n",
13597 				phba->sli_rev);
13598 		ha_copy = 0;
13599 		break;
13600 	}
13601 	spin_unlock_irq(&phba->hbalock);
13602 
13603 	return ha_copy;
13604 }
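/*
 * Usage sketch (illustrative only): the timer-based poll path checks for
 * error attention and defers the actual handling to the worker thread,
 * roughly:
 *
 *	if (lpfc_sli_check_eratt(phba))
 *		lpfc_worker_wake_up(phba);
 */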
13605 
13606 /**
13607  * lpfc_intr_state_check - Check device state for interrupt handling
13608  * @phba: Pointer to HBA context.
13609  *
13610  * This inline routine checks whether a device or its PCI slot is in a state
13611  * in which the interrupt should be handled.
13612  *
13613  * This function returns 0 if the device or the PCI slot is in a state that
13614  * interrupt should be handled, otherwise -EIO.
13615  */
13616 static inline int
13617 lpfc_intr_state_check(struct lpfc_hba *phba)
13618 {
13619 	/* If the pci channel is offline, ignore all the interrupts */
13620 	if (unlikely(pci_channel_offline(phba->pcidev)))
13621 		return -EIO;
13622 
13623 	/* Update device level interrupt statistics */
13624 	phba->sli.slistat.sli_intr++;
13625 
13626 	/* Ignore all interrupts during initialization. */
13627 	if (unlikely(phba->link_state < LPFC_LINK_DOWN))
13628 		return -EIO;
13629 
13630 	return 0;
13631 }
13632 
13633 /**
13634  * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
13635  * @irq: Interrupt number.
13636  * @dev_id: The device context pointer.
13637  *
13638  * This function is directly called from the PCI layer as an interrupt
13639  * service routine when device with SLI-3 interface spec is enabled with
13640  * MSI-X multi-message interrupt mode and there are slow-path events in
13641  * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
13642  * interrupt mode, this function is called as part of the device-level
13643  * interrupt handler. When the PCI slot is in error recovery or the HBA
13644  * is undergoing initialization, the interrupt handler will not process
13645  * the interrupt. The link attention and ELS ring attention events are
13646  * handled by the worker thread. The interrupt handler signals the worker
13647  * thread and returns for these events. This function is called without
13648  * any lock held. It gets the hbalock to access and update SLI data
13649  * structures.
13650  *
13651  * This function returns IRQ_HANDLED when interrupt is handled else it
13652  * returns IRQ_NONE.
13653  **/
13654 irqreturn_t
13655 lpfc_sli_sp_intr_handler(int irq, void *dev_id)
13656 {
13657 	struct lpfc_hba  *phba;
13658 	uint32_t ha_copy, hc_copy;
13659 	uint32_t work_ha_copy;
13660 	unsigned long status;
13661 	unsigned long iflag;
13662 	uint32_t control;
13663 
13664 	MAILBOX_t *mbox, *pmbox;
13665 	struct lpfc_vport *vport;
13666 	struct lpfc_nodelist *ndlp;
13667 	struct lpfc_dmabuf *mp;
13668 	LPFC_MBOXQ_t *pmb;
13669 	int rc;
13670 
13671 	/*
13672 	 * Get the driver's phba structure from the dev_id and
13673 	 * assume the HBA is not interrupting.
13674 	 */
13675 	phba = (struct lpfc_hba *)dev_id;
13676 
13677 	if (unlikely(!phba))
13678 		return IRQ_NONE;
13679 
13680 	/*
13681 	 * Stuff needs to be attended to when this function is invoked as an
13682 	 * individual interrupt handler in MSI-X multi-message interrupt mode
13683 	 */
13684 	if (phba->intr_type == MSIX) {
13685 		/* Check device state for handling interrupt */
13686 		if (lpfc_intr_state_check(phba))
13687 			return IRQ_NONE;
13688 		/* Need to read HA REG for slow-path events */
13689 		spin_lock_irqsave(&phba->hbalock, iflag);
13690 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
13691 			goto unplug_error;
13692 		/* If somebody is waiting to handle an eratt don't process it
13693 		 * here. The brdkill function will do this.
13694 		 */
13695 		if (phba->link_flag & LS_IGNORE_ERATT)
13696 			ha_copy &= ~HA_ERATT;
13697 		/* Check the need for handling ERATT in interrupt handler */
13698 		if (ha_copy & HA_ERATT) {
13699 			if (test_and_set_bit(HBA_ERATT_HANDLED,
13700 					     &phba->hba_flag))
13701 				/* ERATT polling has handled ERATT */
13702 				ha_copy &= ~HA_ERATT;
13703 		}
13704 
13705 		/*
13706 		 * If there is deferred error attention, do not check for any
13707 		 * interrupt.
13708 		 */
13709 		if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
13710 			spin_unlock_irqrestore(&phba->hbalock, iflag);
13711 			return IRQ_NONE;
13712 		}
13713 
13714 		/* Clear up only attention source related to slow-path */
13715 		if (lpfc_readl(phba->HCregaddr, &hc_copy))
13716 			goto unplug_error;
13717 
13718 		writel(hc_copy & ~(HC_MBINT_ENA | HC_R2INT_ENA |
13719 			HC_LAINT_ENA | HC_ERINT_ENA),
13720 			phba->HCregaddr);
13721 		writel((ha_copy & (HA_MBATT | HA_R2_CLR_MSK)),
13722 			phba->HAregaddr);
13723 		writel(hc_copy, phba->HCregaddr);
13724 		readl(phba->HAregaddr); /* flush */
13725 		spin_unlock_irqrestore(&phba->hbalock, iflag);
13726 	} else
13727 		ha_copy = phba->ha_copy;
13728 
13729 	work_ha_copy = ha_copy & phba->work_ha_mask;
13730 
13731 	if (work_ha_copy) {
13732 		if (work_ha_copy & HA_LATT) {
13733 			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
13734 				/*
13735 				 * Turn off Link Attention interrupts
13736 				 * until CLEAR_LA done
13737 				 */
13738 				spin_lock_irqsave(&phba->hbalock, iflag);
13739 				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
13740 				if (lpfc_readl(phba->HCregaddr, &control))
13741 					goto unplug_error;
13742 				control &= ~HC_LAINT_ENA;
13743 				writel(control, phba->HCregaddr);
13744 				readl(phba->HCregaddr); /* flush */
13745 				spin_unlock_irqrestore(&phba->hbalock, iflag);
13746 			}
13747 			else
13748 				work_ha_copy &= ~HA_LATT;
13749 		}
13750 
13751 		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
13752 			/*
13753 			 * Turn off Slow Rings interrupts, LPFC_ELS_RING is
13754 			 * the only slow ring.
13755 			 */
13756 			status = (work_ha_copy &
13757 				(HA_RXMASK  << (4*LPFC_ELS_RING)));
13758 			status >>= (4*LPFC_ELS_RING);
13759 			if (status & HA_RXMASK) {
13760 				spin_lock_irqsave(&phba->hbalock, iflag);
13761 				if (lpfc_readl(phba->HCregaddr, &control))
13762 					goto unplug_error;
13763 
13764 				lpfc_debugfs_slow_ring_trc(phba,
13765 				"ISR slow ring:   ctl:x%x stat:x%x isrcnt:x%x",
13766 				control, status,
13767 				(uint32_t)phba->sli.slistat.sli_intr);
13768 
13769 				if (control & (HC_R0INT_ENA << LPFC_ELS_RING)) {
13770 					lpfc_debugfs_slow_ring_trc(phba,
13771 						"ISR Disable ring:"
13772 						"pwork:x%x hawork:x%x wait:x%x",
13773 						phba->work_ha, work_ha_copy,
13774 						(uint32_t)((unsigned long)
13775 						&phba->work_waitq));
13776 
13777 					control &=
13778 					    ~(HC_R0INT_ENA << LPFC_ELS_RING);
13779 					writel(control, phba->HCregaddr);
13780 					readl(phba->HCregaddr); /* flush */
13781 				}
13782 				else {
13783 					lpfc_debugfs_slow_ring_trc(phba,
13784 						"ISR slow ring:   pwork:"
13785 						"x%x hawork:x%x wait:x%x",
13786 						phba->work_ha, work_ha_copy,
13787 						(uint32_t)((unsigned long)
13788 						&phba->work_waitq));
13789 				}
13790 				spin_unlock_irqrestore(&phba->hbalock, iflag);
13791 			}
13792 		}
13793 		spin_lock_irqsave(&phba->hbalock, iflag);
13794 		if (work_ha_copy & HA_ERATT) {
13795 			if (lpfc_sli_read_hs(phba))
13796 				goto unplug_error;
13797 			/*
13798 			 * Check if a deferred error condition
13799 			 * is active
13800 			 */
13801 			if ((HS_FFER1 & phba->work_hs) &&
13802 				((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
13803 				  HS_FFER6 | HS_FFER7 | HS_FFER8) &
13804 				  phba->work_hs)) {
13805 				set_bit(DEFER_ERATT, &phba->hba_flag);
13806 				/* Clear all interrupt enable conditions */
13807 				writel(0, phba->HCregaddr);
13808 				readl(phba->HCregaddr);
13809 			}
13810 		}
13811 
13812 		if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
13813 			pmb = phba->sli.mbox_active;
13814 			pmbox = &pmb->u.mb;
13815 			mbox = phba->mbox;
13816 			vport = pmb->vport;
13817 
13818 			/* First check out the status word */
13819 			lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof(uint32_t));
13820 			if (pmbox->mbxOwner != OWN_HOST) {
13821 				spin_unlock_irqrestore(&phba->hbalock, iflag);
13822 				/*
13823 				 * Stray Mailbox Interrupt, mbxCommand <cmd>
13824 				 * mbxStatus <status>
13825 				 */
13826 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
13827 						"(%d):0304 Stray Mailbox "
13828 						"Interrupt mbxCommand x%x "
13829 						"mbxStatus x%x\n",
13830 						(vport ? vport->vpi : 0),
13831 						pmbox->mbxCommand,
13832 						pmbox->mbxStatus);
13833 				/* clear mailbox attention bit */
13834 				work_ha_copy &= ~HA_MBATT;
13835 			} else {
13836 				phba->sli.mbox_active = NULL;
13837 				spin_unlock_irqrestore(&phba->hbalock, iflag);
13838 				phba->last_completion_time = jiffies;
13839 				timer_delete(&phba->sli.mbox_tmo);
13840 				if (pmb->mbox_cmpl) {
13841 					lpfc_sli_pcimem_bcopy(mbox, pmbox,
13842 							MAILBOX_CMD_SIZE);
13843 					if (pmb->out_ext_byte_len &&
13844 						pmb->ext_buf)
13845 						lpfc_sli_pcimem_bcopy(
13846 						phba->mbox_ext,
13847 						pmb->ext_buf,
13848 						pmb->out_ext_byte_len);
13849 				}
13850 				if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
13851 					pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
13852 
13853 					lpfc_debugfs_disc_trc(vport,
13854 						LPFC_DISC_TRC_MBOX_VPORT,
13855 						"MBOX dflt rpi: : "
13856 						"status:x%x rpi:x%x",
13857 						(uint32_t)pmbox->mbxStatus,
13858 						pmbox->un.varWords[0], 0);
13859 
13860 					if (!pmbox->mbxStatus) {
13861 						mp = pmb->ctx_buf;
13862 						ndlp = pmb->ctx_ndlp;
13863 
13864 						/* Reg_LOGIN of dflt RPI was
13865 						 * successful. new lets get
13866 						 * rid of the RPI using the
13867 						 * same mbox buffer.
13868 						 */
13869 						lpfc_unreg_login(phba,
13870 							vport->vpi,
13871 							pmbox->un.varWords[0],
13872 							pmb);
13873 						pmb->mbox_cmpl =
13874 							lpfc_mbx_cmpl_dflt_rpi;
13875 						pmb->ctx_buf = mp;
13876 						pmb->ctx_ndlp = ndlp;
13877 						pmb->vport = vport;
13878 						rc = lpfc_sli_issue_mbox(phba,
13879 								pmb,
13880 								MBX_NOWAIT);
13881 						if (rc != MBX_BUSY)
13882 							lpfc_printf_log(phba,
13883 							KERN_ERR,
13884 							LOG_TRACE_EVENT,
13885 							"0350 rc should have"
13886 							"been MBX_BUSY\n");
13887 						if (rc != MBX_NOT_FINISHED)
13888 							goto send_current_mbox;
13889 					}
13890 				}
13891 				spin_lock_irqsave(
13892 						&phba->pport->work_port_lock,
13893 						iflag);
13894 				phba->pport->work_port_events &=
13895 					~WORKER_MBOX_TMO;
13896 				spin_unlock_irqrestore(
13897 						&phba->pport->work_port_lock,
13898 						iflag);
13899 
13900 				/* Do NOT queue MBX_HEARTBEAT to the worker
13901 				 * thread for processing.
13902 				 */
13903 				if (pmbox->mbxCommand == MBX_HEARTBEAT) {
13904 					/* Process mbox now */
13905 					phba->sli.mbox_active = NULL;
13906 					phba->sli.sli_flag &=
13907 						~LPFC_SLI_MBOX_ACTIVE;
13908 					if (pmb->mbox_cmpl)
13909 						pmb->mbox_cmpl(phba, pmb);
13910 				} else {
13911 					/* Queue to worker thread to process */
13912 					lpfc_mbox_cmpl_put(phba, pmb);
13913 				}
13914 			}
13915 		} else
13916 			spin_unlock_irqrestore(&phba->hbalock, iflag);
13917 
13918 		if ((work_ha_copy & HA_MBATT) &&
13919 		    (phba->sli.mbox_active == NULL)) {
13920 send_current_mbox:
13921 			/* Process next mailbox command if there is one */
13922 			do {
13923 				rc = lpfc_sli_issue_mbox(phba, NULL,
13924 							 MBX_NOWAIT);
13925 			} while (rc == MBX_NOT_FINISHED);
13926 			if (rc != MBX_SUCCESS)
13927 				lpfc_printf_log(phba, KERN_ERR,
13928 						LOG_TRACE_EVENT,
13929 						"0349 rc should be "
13930 						"MBX_SUCCESS\n");
13931 		}
13932 
13933 		spin_lock_irqsave(&phba->hbalock, iflag);
13934 		phba->work_ha |= work_ha_copy;
13935 		spin_unlock_irqrestore(&phba->hbalock, iflag);
13936 		lpfc_worker_wake_up(phba);
13937 	}
13938 	return IRQ_HANDLED;
13939 unplug_error:
13940 	spin_unlock_irqrestore(&phba->hbalock, iflag);
13941 	return IRQ_HANDLED;
13942 
13943 } /* lpfc_sli_sp_intr_handler */
13944 
13945 /**
13946  * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
13947  * @irq: Interrupt number.
13948  * @dev_id: The device context pointer.
13949  *
13950  * This function is directly called from the PCI layer as an interrupt
13951  * service routine when device with SLI-3 interface spec is enabled with
13952  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
13953  * ring event in the HBA. However, when the device is enabled with either
13954  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
13955  * device-level interrupt handler. When the PCI slot is in error recovery
13956  * or the HBA is undergoing initialization, the interrupt handler will not
13957  * process the interrupt. The SCSI FCP fast-path ring events are handled in
13958  * the interrupt context. This function is called without any lock held.
13959  * It gets the hbalock to access and update SLI data structures.
13960  *
13961  * This function returns IRQ_HANDLED when interrupt is handled else it
13962  * returns IRQ_NONE.
13963  **/
13964 irqreturn_t
13965 lpfc_sli_fp_intr_handler(int irq, void *dev_id)
13966 {
13967 	struct lpfc_hba  *phba;
13968 	uint32_t ha_copy;
13969 	unsigned long status;
13970 	unsigned long iflag;
13971 	struct lpfc_sli_ring *pring;
13972 
13973 	/* Get the driver's phba structure from the dev_id and
13974 	 * assume the HBA is not interrupting.
13975 	 */
13976 	phba = (struct lpfc_hba *) dev_id;
13977 
13978 	if (unlikely(!phba))
13979 		return IRQ_NONE;
13980 
13981 	/*
13982 	 * Stuff needs to be attended to when this function is invoked as an
13983 	 * individual interrupt handler in MSI-X multi-message interrupt mode
13984 	 */
13985 	if (phba->intr_type == MSIX) {
13986 		/* Check device state for handling interrupt */
13987 		if (lpfc_intr_state_check(phba))
13988 			return IRQ_NONE;
13989 		/* Need to read HA REG for FCP ring and other ring events */
13990 		if (lpfc_readl(phba->HAregaddr, &ha_copy))
13991 			return IRQ_HANDLED;
13992 
13993 		/*
13994 		 * If there is deferred error attention, do not check for
13995 		 * any interrupt.
13996 		 */
13997 		if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag)))
13998 			return IRQ_NONE;
13999 
14000 		/* Clear up only attention source related to fast-path */
14001 		spin_lock_irqsave(&phba->hbalock, iflag);
14002 		writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
14003 			phba->HAregaddr);
14004 		readl(phba->HAregaddr); /* flush */
14005 		spin_unlock_irqrestore(&phba->hbalock, iflag);
14006 	} else
14007 		ha_copy = phba->ha_copy;
14008 
14009 	/*
14010 	 * Process all events on FCP ring. Take the optimized path for FCP IO.
14011 	 */
14012 	ha_copy &= ~(phba->work_ha_mask);
14013 
14014 	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
14015 	status >>= (4*LPFC_FCP_RING);
14016 	pring = &phba->sli.sli3_ring[LPFC_FCP_RING];
14017 	if (status & HA_RXMASK)
14018 		lpfc_sli_handle_fast_ring_event(phba, pring, status);
14019 
14020 	if (phba->cfg_multi_ring_support == 2) {
14021 		/*
14022 		 * Process all events on extra ring. Take the optimized path
14023 		 * for extra ring IO.
14024 		 */
14025 		status = (ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
14026 		status >>= (4*LPFC_EXTRA_RING);
14027 		if (status & HA_RXMASK) {
14028 			lpfc_sli_handle_fast_ring_event(phba,
14029 					&phba->sli.sli3_ring[LPFC_EXTRA_RING],
14030 					status);
14031 		}
14032 	}
14033 	return IRQ_HANDLED;
14034 }  /* lpfc_sli_fp_intr_handler */
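/*
 * Editor's note (illustrative sketch): the fast-path handler above decodes
 * per-ring attention status by isolating a 4-bit nibble of the HA register
 * per SLI-3 ring and masking it with HA_RXMASK. A standalone rendering of
 * that decode; the ex_*/EX_* names and the mask value are hypothetical,
 * not the driver's.
 */
#include <stdint.h>

#define EX_HA_RXMASK 0xfUL	/* hypothetical stand-in for HA_RXMASK */

static inline unsigned long ex_ring_status(uint32_t ha_copy, int ring)
{
	/* Shift the ring's nibble down to bits 3:0, then mask it */
	return (ha_copy >> (4 * ring)) & EX_HA_RXMASK;
}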
14035 
14036 /**
14037  * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
14038  * @irq: Interrupt number.
14039  * @dev_id: The device context pointer.
14040  *
14041  * This function is the HBA device-level interrupt handler to device with
14042  * SLI-3 interface spec, called from the PCI layer when either MSI or
14043  * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
14044  * requires driver attention. This function invokes the slow-path interrupt
14045  * attention handling function and fast-path interrupt attention handling
14046  * function in turn to process the relevant HBA attention events. This
14047  * function is called without any lock held. It gets the hbalock to access
14048  * and update SLI data structures.
14049  *
14050  * This function returns IRQ_HANDLED when interrupt is handled, else it
14051  * returns IRQ_NONE.
14052  **/
14053 irqreturn_t
14054 lpfc_sli_intr_handler(int irq, void *dev_id)
14055 {
14056 	struct lpfc_hba  *phba;
14057 	irqreturn_t sp_irq_rc, fp_irq_rc;
14058 	unsigned long status1, status2;
14059 	uint32_t hc_copy;
14060 
14061 	/*
14062 	 * Get the driver's phba structure from the dev_id and
14063 	 * assume the HBA is not interrupting.
14064 	 */
14065 	phba = (struct lpfc_hba *) dev_id;
14066 
14067 	if (unlikely(!phba))
14068 		return IRQ_NONE;
14069 
14070 	/* Check device state for handling interrupt */
14071 	if (lpfc_intr_state_check(phba))
14072 		return IRQ_NONE;
14073 
14074 	spin_lock(&phba->hbalock);
14075 	if (lpfc_readl(phba->HAregaddr, &phba->ha_copy)) {
14076 		spin_unlock(&phba->hbalock);
14077 		return IRQ_HANDLED;
14078 	}
14079 
14080 	if (unlikely(!phba->ha_copy)) {
14081 		spin_unlock(&phba->hbalock);
14082 		return IRQ_NONE;
14083 	} else if (phba->ha_copy & HA_ERATT) {
14084 		if (test_and_set_bit(HBA_ERATT_HANDLED, &phba->hba_flag))
14085 			/* ERATT polling has handled ERATT */
14086 			phba->ha_copy &= ~HA_ERATT;
14087 	}
14088 
14089 	/*
14090 	 * If there is deferred error attention, do not check for any interrupt.
14091 	 */
14092 	if (unlikely(test_bit(DEFER_ERATT, &phba->hba_flag))) {
14093 		spin_unlock(&phba->hbalock);
14094 		return IRQ_NONE;
14095 	}
14096 
14097 	/* Clear attention sources except link and error attentions */
14098 	if (lpfc_readl(phba->HCregaddr, &hc_copy)) {
14099 		spin_unlock(&phba->hbalock);
14100 		return IRQ_HANDLED;
14101 	}
14102 	writel(hc_copy & ~(HC_MBINT_ENA | HC_R0INT_ENA | HC_R1INT_ENA
14103 		| HC_R2INT_ENA | HC_LAINT_ENA | HC_ERINT_ENA),
14104 		phba->HCregaddr);
14105 	writel((phba->ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
14106 	writel(hc_copy, phba->HCregaddr);
14107 	readl(phba->HAregaddr); /* flush */
14108 	spin_unlock(&phba->hbalock);
14109 
14110 	/*
14111 	 * Invokes slow-path host attention interrupt handling as appropriate.
14112 	 */
14113 
14114 	/* status of events with mailbox and link attention */
14115 	status1 = phba->ha_copy & (HA_MBATT | HA_LATT | HA_ERATT);
14116 
14117 	/* status of events with ELS ring */
14118 	status2 = (phba->ha_copy & (HA_RXMASK  << (4*LPFC_ELS_RING)));
14119 	status2 >>= (4*LPFC_ELS_RING);
14120 
14121 	if (status1 || (status2 & HA_RXMASK))
14122 		sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
14123 	else
14124 		sp_irq_rc = IRQ_NONE;
14125 
14126 	/*
14127 	 * Invoke fast-path host attention interrupt handling as appropriate.
14128 	 */
14129 
14130 	/* status of events with FCP ring */
14131 	status1 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
14132 	status1 >>= (4*LPFC_FCP_RING);
14133 
14134 	/* status of events with extra ring */
14135 	if (phba->cfg_multi_ring_support == 2) {
14136 		status2 = (phba->ha_copy & (HA_RXMASK << (4*LPFC_EXTRA_RING)));
14137 		status2 >>= (4*LPFC_EXTRA_RING);
14138 	} else
14139 		status2 = 0;
14140 
14141 	if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
14142 		fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
14143 	else
14144 		fp_irq_rc = IRQ_NONE;
14145 
14146 	/* Return device-level interrupt handling status */
14147 	return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
14148 }  /* lpfc_sli_intr_handler */
14149 
14150 /**
14151  * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
14152  * @phba: pointer to lpfc hba data structure.
14153  *
14154  * This routine is invoked by the worker thread to process all the pending
14155  * SLI4 els abort xri events.
14156  **/
14157 void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
14158 {
14159 	struct lpfc_cq_event *cq_event;
14160 	unsigned long iflags;
14161 
14162 	/* First, declare the els xri abort event has been handled */
14163 	clear_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
14164 
14165 	/* Now, handle all the els xri abort events */
14166 	spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14167 	while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
14168 		/* Get the first event from the head of the event queue */
14169 		list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
14170 				 cq_event, struct lpfc_cq_event, list);
14171 		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14172 				       iflags);
14173 		/* Notify aborted XRI for ELS work queue */
14174 		lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
14175 
14176 		/* Free the event processed back to the free pool */
14177 		lpfc_sli4_cq_event_release(phba, cq_event);
14178 		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14179 				  iflags);
14180 	}
14181 	spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock, iflags);
14182 }
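/*
 * Editor's note (illustrative sketch): the drain loop above follows a common
 * kernel shape -- hold the list lock only while detaching one entry, drop it
 * around the handler, then retake it before testing the list again. The same
 * shape with generic list helpers and hypothetical ex_* names:
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct ex_event {
	struct list_head list;
	/* payload omitted */
};

struct ex_ctx {
	spinlock_t lock;
	struct list_head events;
};

static void ex_handle(struct ex_event *evt)
{
	/* runs without the list lock held */
}

static void ex_drain_events(struct ex_ctx *ctx)
{
	struct ex_event *evt;
	unsigned long flags;

	spin_lock_irqsave(&ctx->lock, flags);
	while (!list_empty(&ctx->events)) {
		evt = list_first_entry(&ctx->events, struct ex_event, list);
		list_del_init(&evt->list);
		spin_unlock_irqrestore(&ctx->lock, flags);

		ex_handle(evt);

		spin_lock_irqsave(&ctx->lock, flags);
	}
	spin_unlock_irqrestore(&ctx->lock, flags);
}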
14183 
14184 /**
14185  * lpfc_sli4_els_preprocess_rspiocbq - Get response iocbq from els wcqe
14186  * @phba: Pointer to HBA context object.
14187  * @irspiocbq: Pointer to work-queue completion queue entry.
14188  *
14189  * This routine handles an ELS work-queue completion event and constructs
14190  * a pseudo response ELS IOCBQ from the SLI4 ELS WCQE for the common
14191  * discovery engine to handle.
14192  *
14193  * Return: Pointer to the receive IOCBQ, NULL otherwise.
14194  **/
14195 static struct lpfc_iocbq *
14196 lpfc_sli4_els_preprocess_rspiocbq(struct lpfc_hba *phba,
14197 				  struct lpfc_iocbq *irspiocbq)
14198 {
14199 	struct lpfc_sli_ring *pring;
14200 	struct lpfc_iocbq *cmdiocbq;
14201 	struct lpfc_wcqe_complete *wcqe;
14202 	unsigned long iflags;
14203 
14204 	pring = lpfc_phba_elsring(phba);
14205 	if (unlikely(!pring))
14206 		return NULL;
14207 
14208 	wcqe = &irspiocbq->cq_event.cqe.wcqe_cmpl;
14209 	spin_lock_irqsave(&pring->ring_lock, iflags);
14210 	pring->stats.iocb_event++;
14211 	/* Look up the ELS command IOCB and create pseudo response IOCB */
14212 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
14213 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
14214 	if (unlikely(!cmdiocbq)) {
14215 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
14216 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14217 				"0386 ELS complete with no corresponding "
14218 				"cmdiocb: 0x%x 0x%x 0x%x 0x%x\n",
14219 				wcqe->word0, wcqe->total_data_placed,
14220 				wcqe->parameter, wcqe->word3);
14221 		lpfc_sli_release_iocbq(phba, irspiocbq);
14222 		return NULL;
14223 	}
14224 
14225 	memcpy(&irspiocbq->wqe, &cmdiocbq->wqe, sizeof(union lpfc_wqe128));
14226 	memcpy(&irspiocbq->wcqe_cmpl, wcqe, sizeof(*wcqe));
14227 
14228 	/* Put the iocb back on the txcmplq */
14229 	lpfc_sli_ringtxcmpl_put(phba, pring, cmdiocbq);
14230 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
14231 
14232 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
14233 		spin_lock_irqsave(&phba->hbalock, iflags);
14234 		irspiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
14235 		spin_unlock_irqrestore(&phba->hbalock, iflags);
14236 	}
14237 
14238 	return irspiocbq;
14239 }
14240 
14241 inline struct lpfc_cq_event *
14242 lpfc_cq_event_setup(struct lpfc_hba *phba, void *entry, int size)
14243 {
14244 	struct lpfc_cq_event *cq_event;
14245 
14246 	/* Allocate a new internal CQ_EVENT entry */
14247 	cq_event = lpfc_sli4_cq_event_alloc(phba);
14248 	if (!cq_event) {
14249 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14250 				"0602 Failed to alloc CQ_EVENT entry\n");
14251 		return NULL;
14252 	}
14253 
14254 	/* Move the CQE into the event */
14255 	memcpy(&cq_event->cqe, entry, size);
14256 	return cq_event;
14257 }
14258 
14259 /**
14260  * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
14261  * @phba: Pointer to HBA context object.
14262  * @mcqe: Pointer to mailbox completion queue entry.
14263  *
14264  * This routine processes a mailbox completion queue entry carrying an
14265  * asynchronous event.
14266  *
14267  * Return: true if work posted to worker thread, otherwise false.
14268  **/
14269 static bool
14270 lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14271 {
14272 	struct lpfc_cq_event *cq_event;
14273 	unsigned long iflags;
14274 
14275 	lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14276 			"0392 Async Event: word0:x%x, word1:x%x, "
14277 			"word2:x%x, word3:x%x\n", mcqe->word0,
14278 			mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
14279 
14280 	cq_event = lpfc_cq_event_setup(phba, mcqe, sizeof(struct lpfc_mcqe));
14281 	if (!cq_event)
14282 		return false;
14283 
14284 	spin_lock_irqsave(&phba->sli4_hba.asynce_list_lock, iflags);
14285 	list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
14286 	spin_unlock_irqrestore(&phba->sli4_hba.asynce_list_lock, iflags);
14287 
14288 	/* Set the async event flag */
14289 	set_bit(ASYNC_EVENT, &phba->hba_flag);
14290 
14291 	return true;
14292 }
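/*
 * Editor's note: this routine is the producer half of the list-plus-flag
 * handoff whose consumer shape appears in
 * lpfc_sli4_els_xri_abort_event_proc() above -- copy the CQE out of queue
 * memory, link it on a locked list, set a hba_flag bit, and let the worker
 * thread drain it outside interrupt context.
 */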
14293 
14294 /**
14295  * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
14296  * @phba: Pointer to HBA context object.
14297  * @mcqe: Pointer to mailbox completion queue entry.
14298  *
14299  * This routine processes a mailbox completion queue entry carrying a
14300  * mailbox completion event.
14301  *
14302  * Return: true if work posted to worker thread, otherwise false.
14303  **/
14304 static bool
14305 lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
14306 {
14307 	uint32_t mcqe_status;
14308 	MAILBOX_t *mbox, *pmbox;
14309 	struct lpfc_mqe *mqe;
14310 	struct lpfc_vport *vport;
14311 	struct lpfc_nodelist *ndlp;
14312 	struct lpfc_dmabuf *mp;
14313 	unsigned long iflags;
14314 	LPFC_MBOXQ_t *pmb;
14315 	bool workposted = false;
14316 	int rc;
14317 
14318 	/* Not a mailbox-complete MCQE: exit after releasing any consumed MQ entry */
14319 	if (!bf_get(lpfc_trailer_completed, mcqe))
14320 		goto out_no_mqe_complete;
14321 
14322 	/* Get the reference to the active mbox command */
14323 	spin_lock_irqsave(&phba->hbalock, iflags);
14324 	pmb = phba->sli.mbox_active;
14325 	spin_unlock_irqrestore(&phba->hbalock, iflags);
14326 	if (unlikely(!pmb)) {
14327 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14328 				"1832 No pending MBOX command to handle, "
14329 				"mcqe: x%08x x%08x x%08x x%08x\n",
14330 				mcqe->word0, mcqe->mcqe_tag0,
14331 				mcqe->mcqe_tag1, mcqe->trailer);
14332 		goto out_no_mqe_complete;
14333 	}
14334 	mqe = &pmb->u.mqe;
14335 	pmbox = (MAILBOX_t *)&pmb->u.mqe;
14336 	mbox = phba->mbox;
14337 	vport = pmb->vport;
14338 
14339 	/* Reset heartbeat timer */
14340 	phba->last_completion_time = jiffies;
14341 	timer_delete(&phba->sli.mbox_tmo);
14342 
14343 	/* Move mbox data to caller's mailbox region, do endian swapping */
14344 	if (pmb->mbox_cmpl && mbox)
14345 		lpfc_sli4_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
14346 
14347 	/*
14348 	 * For mcqe errors, conditionally move a modified error code to
14349 	 * the mbox so that the error will not be missed.
14350 	 */
14351 	mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
14352 	if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
14353 		if (bf_get(lpfc_mqe_status, mqe) == MBX_SUCCESS)
14354 			bf_set(lpfc_mqe_status, mqe,
14355 			       (LPFC_MBX_ERROR_RANGE | mcqe_status));
14356 	}
14357 	if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
14358 		pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
14359 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
14360 				      "MBOX dflt rpi: status:x%x rpi:x%x",
14361 				      mcqe_status,
14362 				      pmbox->un.varWords[0], 0);
14363 		if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
14364 			mp = pmb->ctx_buf;
14365 			ndlp = pmb->ctx_ndlp;
14366 
14367 			/* Reg_LOGIN of dflt RPI was successful. Mark the
14368 			 * node as having an UNREG_LOGIN in progress to stop
14369 			 * an unsolicited PLOGI from the same NPortId from
14370 			 * starting another mailbox transaction.
14371 			 */
14372 			set_bit(NLP_UNREG_INP, &ndlp->nlp_flag);
14373 			lpfc_unreg_login(phba, vport->vpi,
14374 					 pmbox->un.varWords[0], pmb);
14375 			pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
14376 			pmb->ctx_buf = mp;
14377 
14378 			/* No reference taken here.  This is a default
14379 			 * RPI reg/immediate unreg cycle. The reference was
14380 			 * taken in the reg rpi path and is released when
14381 			 * this mailbox completes.
14382 			 */
14383 			pmb->ctx_ndlp = ndlp;
14384 			pmb->vport = vport;
14385 			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
14386 			if (rc != MBX_BUSY)
14387 				lpfc_printf_log(phba, KERN_ERR,
14388 						LOG_TRACE_EVENT,
14389 						"0385 rc should "
14390 						"have been MBX_BUSY\n");
14391 			if (rc != MBX_NOT_FINISHED)
14392 				goto send_current_mbox;
14393 		}
14394 	}
14395 	spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
14396 	phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
14397 	spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
14398 
14399 	/* Do NOT queue MBX_HEARTBEAT to the worker thread for processing. */
14400 	if (pmbox->mbxCommand == MBX_HEARTBEAT) {
14401 		spin_lock_irqsave(&phba->hbalock, iflags);
14402 		/* Release the mailbox command posting token */
14403 		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14404 		phba->sli.mbox_active = NULL;
14405 		if (bf_get(lpfc_trailer_consumed, mcqe))
14406 			lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14407 		spin_unlock_irqrestore(&phba->hbalock, iflags);
14408 
14409 		/* Post the next mbox command, if there is one */
14410 		lpfc_sli4_post_async_mbox(phba);
14411 
14412 		/* Process cmpl now */
14413 		if (pmb->mbox_cmpl)
14414 			pmb->mbox_cmpl(phba, pmb);
14415 		return false;
14416 	}
14417 
14418 	/* There is mailbox completion work to queue to the worker thread */
14419 	spin_lock_irqsave(&phba->hbalock, iflags);
14420 	__lpfc_mbox_cmpl_put(phba, pmb);
14421 	phba->work_ha |= HA_MBATT;
14422 	spin_unlock_irqrestore(&phba->hbalock, iflags);
14423 	workposted = true;
14424 
14425 send_current_mbox:
14426 	spin_lock_irqsave(&phba->hbalock, iflags);
14427 	/* Release the mailbox command posting token */
14428 	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
14429 	/* Clearing the active mailbox pointer must stay in sync with the flag clear */
14430 	phba->sli.mbox_active = NULL;
14431 	if (bf_get(lpfc_trailer_consumed, mcqe))
14432 		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14433 	spin_unlock_irqrestore(&phba->hbalock, iflags);
14434 	/* Wake up worker thread to post the next pending mailbox command */
14435 	lpfc_worker_wake_up(phba);
14436 	return workposted;
14437 
14438 out_no_mqe_complete:
14439 	spin_lock_irqsave(&phba->hbalock, iflags);
14440 	if (bf_get(lpfc_trailer_consumed, mcqe))
14441 		lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
14442 	spin_unlock_irqrestore(&phba->hbalock, iflags);
14443 	return false;
14444 }
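/*
 * Editor's note (illustrative sketch): the status-folding rule used above.
 * If the MCQE reports an error while the mailbox payload still reads
 * success, the CQE status is folded into the mailbox status word, offset
 * into a reserved error range so the two status spaces cannot collide.
 * The EX_* constants are illustrative, not the driver's values.
 */
#include <stdint.h>

#define EX_MBX_SUCCESS		0x0
#define EX_CQE_STATUS_SUCCESS	0x0
#define EX_MBX_ERROR_RANGE	0x4000	/* stand-in for LPFC_MBX_ERROR_RANGE */

static uint16_t ex_fold_status(uint16_t cqe_status, uint16_t mqe_status)
{
	if (cqe_status != EX_CQE_STATUS_SUCCESS &&
	    mqe_status == EX_MBX_SUCCESS)
		return EX_MBX_ERROR_RANGE | cqe_status;
	return mqe_status;
}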
14445 
14446 /**
14447  * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
14448  * @phba: Pointer to HBA context object.
14449  * @cq: Pointer to associated CQ
14450  * @cqe: Pointer to mailbox completion queue entry.
14451  *
14452  * This routine processes a mailbox completion queue entry. It invokes the
14453  * proper mailbox completion or asynchronous event handling routine
14454  * according to the MCQE's async bit.
14455  *
14456  * Return: true if work posted to worker thread, otherwise false.
14457  **/
14458 static bool
14459 lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14460 			 struct lpfc_cqe *cqe)
14461 {
14462 	struct lpfc_mcqe mcqe;
14463 	bool workposted;
14464 
14465 	cq->CQ_mbox++;
14466 
14467 	/* Copy the mailbox MCQE and convert endian order as needed */
14468 	lpfc_sli4_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
14469 
14470 	/* Invoke the proper event handling routine */
14471 	if (!bf_get(lpfc_trailer_async, &mcqe))
14472 		workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
14473 	else
14474 		workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
14475 	return workposted;
14476 }
14477 
14478 /**
14479  * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
14480  * @phba: Pointer to HBA context object.
14481  * @cq: Pointer to associated CQ
14482  * @wcqe: Pointer to work-queue completion queue entry.
14483  *
14484  * This routine handles an ELS work-queue completion event.
14485  *
14486  * Return: true if work posted to worker thread, otherwise false.
14487  **/
14488 static bool
14489 lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14490 			     struct lpfc_wcqe_complete *wcqe)
14491 {
14492 	struct lpfc_iocbq *irspiocbq;
14493 	unsigned long iflags;
14494 	struct lpfc_sli_ring *pring = cq->pring;
14495 	int txq_cnt = 0;
14496 	int txcmplq_cnt = 0;
14497 
14498 	/* Check for response status */
14499 	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
14500 		/* Log the error status */
14501 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14502 				"0357 ELS CQE error: status=x%x: "
14503 				"CQE: %08x %08x %08x %08x\n",
14504 				bf_get(lpfc_wcqe_c_status, wcqe),
14505 				wcqe->word0, wcqe->total_data_placed,
14506 				wcqe->parameter, wcqe->word3);
14507 	}
14508 
14509 	/* Get an irspiocbq for later ELS response processing use */
14510 	irspiocbq = lpfc_sli_get_iocbq(phba);
14511 	if (!irspiocbq) {
14512 		if (!list_empty(&pring->txq))
14513 			txq_cnt++;
14514 		if (!list_empty(&pring->txcmplq))
14515 			txcmplq_cnt++;
14516 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14517 			"0387 NO IOCBQ data: txq_cnt=%d iocb_cnt=%d "
14518 			"els_txcmplq_cnt=%d\n",
14519 			txq_cnt, phba->iocb_cnt,
14520 			txcmplq_cnt);
14521 		return false;
14522 	}
14523 
14524 	/* Save off the slow-path queue event for work thread to process */
14525 	memcpy(&irspiocbq->cq_event.cqe.wcqe_cmpl, wcqe, sizeof(*wcqe));
14526 	spin_lock_irqsave(&phba->hbalock, iflags);
14527 	list_add_tail(&irspiocbq->cq_event.list,
14528 		      &phba->sli4_hba.sp_queue_event);
14529 	spin_unlock_irqrestore(&phba->hbalock, iflags);
14530 	set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
14531 
14532 	return true;
14533 }
14534 
14535 /**
14536  * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
14537  * @phba: Pointer to HBA context object.
14538  * @wcqe: Pointer to work-queue completion queue entry.
14539  *
14540  * This routine handles a slow-path WQ entry consumed event by invoking the
14541  * proper WQ release routine on the slow-path WQ.
14542  **/
14543 static void
14544 lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
14545 			     struct lpfc_wcqe_release *wcqe)
14546 {
14547 	/* sanity check on queue memory */
14548 	if (unlikely(!phba->sli4_hba.els_wq))
14549 		return;
14550 	/* Check for the slow-path ELS work queue */
14551 	if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
14552 		lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
14553 				     bf_get(lpfc_wcqe_r_wqe_index, wcqe));
14554 	else
14555 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
14556 				"2579 Slow-path wqe consume event carries "
14557 				"miss-matched qid: wcqe-qid=x%x, sp-qid=x%x\n",
14558 				bf_get(lpfc_wcqe_r_wqe_index, wcqe),
14559 				phba->sli4_hba.els_wq->queue_id);
14560 }
14561 
14562 /**
14563  * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle a xri abort event
14564  * @phba: Pointer to HBA context object.
14565  * @cq: Pointer to a WQ completion queue.
14566  * @wcqe: Pointer to work-queue completion queue entry.
14567  *
14568  * This routine handles an XRI abort event.
14569  *
14570  * Return: true if work posted to worker thread, otherwise false.
14571  **/
14572 static bool
14573 lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
14574 				   struct lpfc_queue *cq,
14575 				   struct sli4_wcqe_xri_aborted *wcqe)
14576 {
14577 	bool workposted = false;
14578 	struct lpfc_cq_event *cq_event;
14579 	unsigned long iflags;
14580 
14581 	switch (cq->subtype) {
14582 	case LPFC_IO:
14583 		lpfc_sli4_io_xri_aborted(phba, wcqe, cq->hdwq);
14584 		if (phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME) {
14585 			/* Notify aborted XRI for NVME work queue */
14586 			if (phba->nvmet_support)
14587 				lpfc_sli4_nvmet_xri_aborted(phba, wcqe);
14588 		}
14589 		workposted = false;
14590 		break;
14591 	case LPFC_NVME_LS: /* NVME LS uses ELS resources */
14592 	case LPFC_ELS:
14593 		cq_event = lpfc_cq_event_setup(phba, wcqe, sizeof(*wcqe));
14594 		if (!cq_event) {
14595 			workposted = false;
14596 			break;
14597 		}
14598 		cq_event->hdwq = cq->hdwq;
14599 		spin_lock_irqsave(&phba->sli4_hba.els_xri_abrt_list_lock,
14600 				  iflags);
14601 		list_add_tail(&cq_event->list,
14602 			      &phba->sli4_hba.sp_els_xri_aborted_work_queue);
14603 		/* Set the els xri abort event flag */
14604 		set_bit(ELS_XRI_ABORT_EVENT, &phba->hba_flag);
14605 		spin_unlock_irqrestore(&phba->sli4_hba.els_xri_abrt_list_lock,
14606 				       iflags);
14607 		workposted = true;
14608 		break;
14609 	default:
14610 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14611 				"0603 Invalid CQ subtype %d: "
14612 				"%08x %08x %08x %08x\n",
14613 				cq->subtype, wcqe->word0, wcqe->parameter,
14614 				wcqe->word2, wcqe->word3);
14615 		workposted = false;
14616 		break;
14617 	}
14618 	return workposted;
14619 }
14620 
14621 #define FC_RCTL_MDS_DIAGS	0xF4
14622 
14623 /**
14624  * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
14625  * @phba: Pointer to HBA context object.
14626  * @rcqe: Pointer to receive-queue completion queue entry.
14627  *
14628  * This routine processes a receive-queue completion queue entry.
14629  *
14630  * Return: true if work posted to worker thread, otherwise false.
14631  **/
14632 static bool
14633 lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_rcqe *rcqe)
14634 {
14635 	bool workposted = false;
14636 	struct fc_frame_header *fc_hdr;
14637 	struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
14638 	struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
14639 	struct lpfc_nvmet_tgtport *tgtp;
14640 	struct hbq_dmabuf *dma_buf;
14641 	uint32_t status, rq_id;
14642 	unsigned long iflags;
14643 
14644 	/* sanity check on queue memory */
14645 	if (unlikely(!hrq) || unlikely(!drq))
14646 		return workposted;
14647 
14648 	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
14649 		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
14650 	else
14651 		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
14652 	if (rq_id != hrq->queue_id)
14653 		goto out;
14654 
14655 	status = bf_get(lpfc_rcqe_status, rcqe);
14656 	switch (status) {
14657 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
14658 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14659 				"2537 Receive Frame Truncated!!\n");
14660 		fallthrough;
14661 	case FC_STATUS_RQ_SUCCESS:
14662 		spin_lock_irqsave(&phba->hbalock, iflags);
14663 		lpfc_sli4_rq_release(hrq, drq);
14664 		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14665 		if (!dma_buf) {
14666 			hrq->RQ_no_buf_found++;
14667 			spin_unlock_irqrestore(&phba->hbalock, iflags);
14668 			goto out;
14669 		}
14670 		hrq->RQ_rcv_buf++;
14671 		hrq->RQ_buf_posted--;
14672 		memcpy(&dma_buf->cq_event.cqe.rcqe_cmpl, rcqe, sizeof(*rcqe));
14673 
14674 		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
14675 
14676 		if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
14677 		    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
14678 			spin_unlock_irqrestore(&phba->hbalock, iflags);
14679 			/* Handle MDS Loopback frames */
14680 			if  (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
14681 				lpfc_sli4_handle_mds_loopback(phba->pport,
14682 							      dma_buf);
14683 			else
14684 				lpfc_in_buf_free(phba, &dma_buf->dbuf);
14685 			break;
14686 		}
14687 
14688 		/* save off the frame for the work thread to process */
14689 		list_add_tail(&dma_buf->cq_event.list,
14690 			      &phba->sli4_hba.sp_queue_event);
14691 		spin_unlock_irqrestore(&phba->hbalock, iflags);
14692 		/* Frame received */
14693 		set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
14694 		workposted = true;
14695 		break;
14696 	case FC_STATUS_INSUFF_BUF_FRM_DISC:
14697 		if (phba->nvmet_support) {
14698 			tgtp = phba->targetport->private;
14699 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14700 					"6402 RQE Error x%x, posted %d err_cnt "
14701 					"%d: %x %x %x\n",
14702 					status, hrq->RQ_buf_posted,
14703 					hrq->RQ_no_posted_buf,
14704 					atomic_read(&tgtp->rcv_fcp_cmd_in),
14705 					atomic_read(&tgtp->rcv_fcp_cmd_out),
14706 					atomic_read(&tgtp->xmt_fcp_release));
14707 		}
14708 		hrq->RQ_discard_frm++;
14709 		fallthrough;
14710 	case FC_STATUS_INSUFF_BUF_NEED_BUF:
14711 		/* Unexpected event - bump the counter for support. */
14712 		hrq->RQ_no_posted_buf++;
14713 
14714 		lpfc_log_msg(phba, KERN_WARNING,
14715 			     LOG_ELS | LOG_DISCOVERY | LOG_SLI,
14716 			     "6423 RQE completion Status x%x, needed x%x "
14717 			     "discarded x%x\n", status,
14718 			     hrq->RQ_no_posted_buf - hrq->RQ_discard_frm,
14719 			     hrq->RQ_discard_frm);
14720 
14721 		/* For SLI3, post more buffers if possible. No action for SLI4;
14722 		 * SLI4 reposts immediately after processing the RQE.
14723 		 */
14724 		set_bit(HBA_POST_RECEIVE_BUFFER, &phba->hba_flag);
14725 		workposted = true;
14726 		break;
14727 	case FC_STATUS_RQ_DMA_FAILURE:
14728 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14729 				"2564 RQE DMA Error x%x, x%08x x%08x x%08x "
14730 				"x%08x\n",
14731 				status, rcqe->word0, rcqe->word1,
14732 				rcqe->word2, rcqe->word3);
14733 
14734 		/* If IV set, no further recovery */
14735 		if (bf_get(lpfc_rcqe_iv, rcqe))
14736 			break;
14737 
14738 		/* recycle consumed resource */
14739 		spin_lock_irqsave(&phba->hbalock, iflags);
14740 		lpfc_sli4_rq_release(hrq, drq);
14741 		dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
14742 		if (!dma_buf) {
14743 			hrq->RQ_no_buf_found++;
14744 			spin_unlock_irqrestore(&phba->hbalock, iflags);
14745 			break;
14746 		}
14747 		hrq->RQ_rcv_buf++;
14748 		hrq->RQ_buf_posted--;
14749 		spin_unlock_irqrestore(&phba->hbalock, iflags);
14750 		lpfc_in_buf_free(phba, &dma_buf->dbuf);
14751 		break;
14752 	default:
14753 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14754 				"2565 Unexpected RQE Status x%x, w0-3 x%08x "
14755 				"x%08x x%08x x%08x\n",
14756 				status, rcqe->word0, rcqe->word1,
14757 				rcqe->word2, rcqe->word3);
14758 		break;
14759 	}
14760 out:
14761 	return workposted;
14762 }
14763 
14764 /**
14765  * lpfc_sli4_sp_handle_cqe - Process a slow path completion queue entry
14766  * @phba: Pointer to HBA context object.
14767  * @cq: Pointer to the completion queue.
14768  * @cqe: Pointer to a completion queue entry.
14769  *
14770  * This routine processes a slow-path work-queue or receive-queue
14771  * completion queue entry.
14772  *
14773  * Return: true if work posted to worker thread, otherwise false.
14774  **/
14775 static bool
14776 lpfc_sli4_sp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
14777 			 struct lpfc_cqe *cqe)
14778 {
14779 	struct lpfc_cqe cqevt;
14780 	bool workposted = false;
14781 
14782 	/* Copy the work queue CQE and convert endian order if needed */
14783 	lpfc_sli4_pcimem_bcopy(cqe, &cqevt, sizeof(struct lpfc_cqe));
14784 
14785 	/* Check and process for different type of WCQE and dispatch */
14786 	switch (bf_get(lpfc_cqe_code, &cqevt)) {
14787 	case CQE_CODE_COMPL_WQE:
14788 		/* Process the WQ/RQ complete event */
14789 		phba->last_completion_time = jiffies;
14790 		workposted = lpfc_sli4_sp_handle_els_wcqe(phba, cq,
14791 				(struct lpfc_wcqe_complete *)&cqevt);
14792 		break;
14793 	case CQE_CODE_RELEASE_WQE:
14794 		/* Process the WQ release event */
14795 		lpfc_sli4_sp_handle_rel_wcqe(phba,
14796 				(struct lpfc_wcqe_release *)&cqevt);
14797 		break;
14798 	case CQE_CODE_XRI_ABORTED:
14799 		/* Process the WQ XRI abort event */
14800 		phba->last_completion_time = jiffies;
14801 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
14802 				(struct sli4_wcqe_xri_aborted *)&cqevt);
14803 		break;
14804 	case CQE_CODE_RECEIVE:
14805 	case CQE_CODE_RECEIVE_V1:
14806 		/* Process the RQ event */
14807 		phba->last_completion_time = jiffies;
14808 		workposted = lpfc_sli4_sp_handle_rcqe(phba,
14809 				(struct lpfc_rcqe *)&cqevt);
14810 		break;
14811 	default:
14812 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14813 				"0388 Not a valid WCQE code: x%x\n",
14814 				bf_get(lpfc_cqe_code, &cqevt));
14815 		break;
14816 	}
14817 	return workposted;
14818 }
14819 
14820 /**
14821  * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
14822  * @phba: Pointer to HBA context object.
14823  * @eqe: Pointer to fast-path event queue entry.
14824  * @speq: Pointer to slow-path event queue.
14825  *
14826  * This routine processes an event queue entry from the slow-path event
14827  * queue. It checks the MajorCode and MinorCode to determine whether this
14828  * is a completion event on a completion queue; if not, an error is logged
14829  * and the routine returns. Otherwise, it finds the corresponding
14830  * completion queue, processes all the entries on that completion queue,
14831  * rearms the completion queue, and returns.
14832  *
14833  **/
14834 static void
14835 lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
14836 	struct lpfc_queue *speq)
14837 {
14838 	struct lpfc_queue *cq = NULL, *childq;
14839 	uint16_t cqid;
14840 	int ret = 0;
14841 
14842 	/* Get the reference to the corresponding CQ */
14843 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
14844 
14845 	list_for_each_entry(childq, &speq->child_list, list) {
14846 		if (childq->queue_id == cqid) {
14847 			cq = childq;
14848 			break;
14849 		}
14850 	}
14851 	if (unlikely(!cq)) {
14852 		if (phba->sli.sli_flag & LPFC_SLI_ACTIVE)
14853 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14854 					"0365 Slow-path CQ identifier "
14855 					"(%d) does not exist\n", cqid);
14856 		return;
14857 	}
14858 
14859 	/* Save EQ associated with this CQ */
14860 	cq->assoc_qp = speq;
14861 
14862 	if (is_kdump_kernel())
14863 		ret = queue_work(phba->wq, &cq->spwork);
14864 	else
14865 		ret = queue_work_on(cq->chann, phba->wq, &cq->spwork);
14866 
14867 	if (!ret)
14868 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
14869 				"0390 Cannot schedule queue work "
14870 				"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
14871 				cqid, cq->queue_id, raw_smp_processor_id());
14872 }
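/*
 * Editor's note: the queue_work()/queue_work_on() split above recurs
 * throughout this file. kdump kernels typically boot with a single CPU
 * online, so pinning the work item to cq->chann could select an offline
 * CPU; the unpinned variant lets the workqueue pick a live one.
 */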
14873 
14874 /**
14875  * __lpfc_sli4_process_cq - Process elements of a CQ
14876  * @phba: Pointer to HBA context object.
14877  * @cq: Pointer to CQ to be processed
14878  * @handler: Routine to process each cqe
14879  * @delay: Pointer to usdelay to set in case of rescheduling of the handler
14880  *
14881  * This routine processes completion queue entries in a CQ. While a valid
14882  * queue element is found, the handler is called. During processing checks
14883  * are made for periodic doorbell writes to let the hardware know of
14884  * element consumption.
14885  *
14886  * If the max limit on cqes to process is hit, or there are no more valid
14887  * entries, the loop stops. If we processed a sufficient number of elements,
14888  * meaning there is sufficient load, rather than rearming and generating
14889  * another interrupt, a cq rescheduling delay will be set. A delay of 0
14890  * indicates no rescheduling.
14891  *
14892  * Returns true if work was scheduled, false otherwise.
14893  **/
14894 static bool
14895 __lpfc_sli4_process_cq(struct lpfc_hba *phba, struct lpfc_queue *cq,
14896 	bool (*handler)(struct lpfc_hba *, struct lpfc_queue *,
14897 			struct lpfc_cqe *), unsigned long *delay)
14898 {
14899 	struct lpfc_cqe *cqe;
14900 	bool workposted = false;
14901 	int count = 0, consumed = 0;
14902 	bool arm = true;
14903 
14904 	/* default - no reschedule */
14905 	*delay = 0;
14906 
14907 	if (cmpxchg(&cq->queue_claimed, 0, 1) != 0)
14908 		goto rearm_and_exit;
14909 
14910 	/* Process all the entries to the CQ */
14911 	cq->q_flag = 0;
14912 	cqe = lpfc_sli4_cq_get(cq);
14913 	while (cqe) {
14914 		workposted |= handler(phba, cq, cqe);
14915 		__lpfc_sli4_consume_cqe(phba, cq, cqe);
14916 
14917 		consumed++;
14918 		if (!(++count % cq->max_proc_limit))
14919 			break;
14920 
14921 		if (!(count % cq->notify_interval)) {
14922 			phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14923 						LPFC_QUEUE_NOARM);
14924 			consumed = 0;
14925 			cq->assoc_qp->q_flag |= HBA_EQ_DELAY_CHK;
14926 		}
14927 
14928 		if (count == LPFC_NVMET_CQ_NOTIFY)
14929 			cq->q_flag |= HBA_NVMET_CQ_NOTIFY;
14930 
14931 		cqe = lpfc_sli4_cq_get(cq);
14932 	}
14933 	if (count >= phba->cfg_cq_poll_threshold) {
14934 		*delay = 1;
14935 		arm = false;
14936 	}
14937 
14938 	/* Track the max number of CQEs processed in 1 EQ */
14939 	if (count > cq->CQ_max_cqe)
14940 		cq->CQ_max_cqe = count;
14941 
14942 	cq->assoc_qp->EQ_cqe_cnt += count;
14943 
14944 	/* Catch the no cq entry condition */
14945 	if (unlikely(count == 0))
14946 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
14947 				"0369 No entry from completion queue "
14948 				"qid=%d\n", cq->queue_id);
14949 
14950 	xchg(&cq->queue_claimed, 0);
14951 
14952 rearm_and_exit:
14953 	phba->sli4_hba.sli4_write_cq_db(phba, cq, consumed,
14954 			arm ?  LPFC_QUEUE_REARM : LPFC_QUEUE_NOARM);
14955 
14956 	return workposted;
14957 }
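/*
 * Editor's note (illustrative sketch): a condensed rendering of the
 * consume/notify cadence above, with hypothetical ex_* types and helpers.
 * CQEs are consumed one at a time; every notify_interval entries the
 * doorbell is rung without rearming so the hardware can recycle slots, and
 * the final doorbell write decides whether to rearm based on observed load.
 */
#include <stdbool.h>
#include <stddef.h>

struct ex_cq {
	int max_proc_limit;	/* hard cap on entries per invocation */
	int notify_interval;	/* doorbell cadence while consuming */
	int poll_threshold;	/* at/above this, poll instead of rearm */
};

/* assumed provided: next unprocessed CQE or NULL */
void *ex_next_valid(struct ex_cq *q);
/* assumed provided: per-entry handler, returns "work posted" */
bool ex_handle(struct ex_cq *q, void *cqe);
/* assumed provided: doorbell write, optionally rearming the CQ */
void ex_write_db(struct ex_cq *q, int consumed, bool arm);

static bool ex_process_cq(struct ex_cq *q)
{
	void *cqe;
	int count = 0, consumed = 0;
	bool arm = true, work = false;

	while ((cqe = ex_next_valid(q)) != NULL) {
		work |= ex_handle(q, cqe);
		consumed++;
		if (!(++count % q->max_proc_limit))
			break;			/* bound time in this context */
		if (!(count % q->notify_interval)) {
			ex_write_db(q, consumed, false);	/* no rearm yet */
			consumed = 0;
		}
	}
	if (count >= q->poll_threshold)
		arm = false;			/* heavy load: poll again later */
	ex_write_db(q, consumed, arm);		/* final doorbell */
	return work;
}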
14958 
14959 /**
14960  * __lpfc_sli4_sp_process_cq - Process a slow-path event queue entry
14961  * @cq: pointer to CQ to process
14962  *
14963  * This routine calls the cq processing routine with a handler specific
14964  * to the type of queue bound to it.
14965  *
14966  * The CQ routine returns two values: the first is the calling status,
14967  * which indicates whether work was queued to the background discovery
14968  * thread. If true, the routine should wakeup the discovery thread;
14969  * the second is the delay parameter. If non-zero, rather than rearming
14970  * the CQ and yet another interrupt, the CQ handler should be queued so
14971  * that it is processed in a subsequent polling action. The value of
14972  * the delay indicates when to reschedule it.
14973  **/
14974 static void
14975 __lpfc_sli4_sp_process_cq(struct lpfc_queue *cq)
14976 {
14977 	struct lpfc_hba *phba = cq->phba;
14978 	unsigned long delay;
14979 	bool workposted = false;
14980 	int ret = 0;
14981 
14982 	/* Process and rearm the CQ */
14983 	switch (cq->type) {
14984 	case LPFC_MCQ:
14985 		workposted |= __lpfc_sli4_process_cq(phba, cq,
14986 						lpfc_sli4_sp_handle_mcqe,
14987 						&delay);
14988 		break;
14989 	case LPFC_WCQ:
14990 		if (cq->subtype == LPFC_IO)
14991 			workposted |= __lpfc_sli4_process_cq(phba, cq,
14992 						lpfc_sli4_fp_handle_cqe,
14993 						&delay);
14994 		else
14995 			workposted |= __lpfc_sli4_process_cq(phba, cq,
14996 						lpfc_sli4_sp_handle_cqe,
14997 						&delay);
14998 		break;
14999 	default:
15000 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15001 				"0370 Invalid completion queue type (%d)\n",
15002 				cq->type);
15003 		return;
15004 	}
15005 
15006 	if (delay) {
15007 		if (is_kdump_kernel())
15008 			ret = queue_delayed_work(phba->wq, &cq->sched_spwork,
15009 						delay);
15010 		else
15011 			ret = queue_delayed_work_on(cq->chann, phba->wq,
15012 						&cq->sched_spwork, delay);
15013 		if (!ret)
15014 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15015 				"0394 Cannot schedule queue work "
15016 				"for cqid=%d on CPU %d\n",
15017 				cq->queue_id, cq->chann);
15018 	}
15019 
15020 	/* wake up the worker thread if there is work to be done */
15021 	if (workposted)
15022 		lpfc_worker_wake_up(phba);
15023 }
15024 
15025 /**
15026  * lpfc_sli4_sp_process_cq - slow-path work handler when started by
15027  *   interrupt
15028  * @work: pointer to work element
15029  *
15030  * Translates from the work handler and calls the slow-path handler.
15031  **/
15032 static void
15033 lpfc_sli4_sp_process_cq(struct work_struct *work)
15034 {
15035 	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, spwork);
15036 
15037 	__lpfc_sli4_sp_process_cq(cq);
15038 }
15039 
15040 /**
15041  * lpfc_sli4_dly_sp_process_cq - slow-path work handler when started by timer
15042  * @work: pointer to work element
15043  *
15044  * Translates from the work handler and calls the slow-path handler.
15045  **/
15046 static void
15047 lpfc_sli4_dly_sp_process_cq(struct work_struct *work)
15048 {
15049 	struct lpfc_queue *cq = container_of(to_delayed_work(work),
15050 					struct lpfc_queue, sched_spwork);
15051 
15052 	__lpfc_sli4_sp_process_cq(cq);
15053 }
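/*
 * Editor's note (illustrative sketch): both work handlers above recover the
 * owning queue with container_of(); the delayed variant must first map the
 * embedded work_struct back to its delayed_work wrapper via
 * to_delayed_work(). A sketch using a hypothetical ex_queue type:
 */
#include <linux/workqueue.h>

struct ex_queue {
	struct work_struct work;	/* immediate scheduling */
	struct delayed_work dwork;	/* timer-deferred scheduling */
};

static void ex_process(struct ex_queue *q)
{
	/* shared processing for both entry points */
}

static void ex_work_fn(struct work_struct *work)
{
	ex_process(container_of(work, struct ex_queue, work));
}

static void ex_dly_work_fn(struct work_struct *work)
{
	ex_process(container_of(to_delayed_work(work),
				struct ex_queue, dwork));
}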
15054 
15055 /**
15056  * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
15057  * @phba: Pointer to HBA context object.
15058  * @cq: Pointer to associated CQ
15059  * @wcqe: Pointer to work-queue completion queue entry.
15060  *
15061  * This routine processes a fast-path work-queue completion entry from the
15062  * fast-path event queue for FCP command response completion.
15063  **/
15064 static void
15065 lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15066 			     struct lpfc_wcqe_complete *wcqe)
15067 {
15068 	struct lpfc_sli_ring *pring = cq->pring;
15069 	struct lpfc_iocbq *cmdiocbq;
15070 	unsigned long iflags;
15071 
15072 	/* Check for response status */
15073 	if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
15074 		/* If resource errors reported from HBA, reduce queue
15075 		 * depth of the SCSI device.
15076 		 */
15077 		if (((bf_get(lpfc_wcqe_c_status, wcqe) ==
15078 		     IOSTAT_LOCAL_REJECT)) &&
15079 		    ((wcqe->parameter & IOERR_PARAM_MASK) ==
15080 		     IOERR_NO_RESOURCES))
15081 			phba->lpfc_rampdown_queue_depth(phba);
15082 
15083 		/* Log the cmpl status */
15084 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
15085 				"0373 FCP CQE cmpl: status=x%x: "
15086 				"CQE: %08x %08x %08x %08x\n",
15087 				bf_get(lpfc_wcqe_c_status, wcqe),
15088 				wcqe->word0, wcqe->total_data_placed,
15089 				wcqe->parameter, wcqe->word3);
15090 	}
15091 
15092 	/* Look up the FCP command IOCB and create pseudo response IOCB */
15093 	spin_lock_irqsave(&pring->ring_lock, iflags);
15094 	pring->stats.iocb_event++;
15095 	cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
15096 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15097 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
15098 	if (unlikely(!cmdiocbq)) {
15099 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15100 				"0374 FCP complete with no corresponding "
15101 				"cmdiocb: iotag (%d)\n",
15102 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15103 		return;
15104 	}
15105 #ifdef CONFIG_SCSI_LPFC_DEBUG_FS
15106 	cmdiocbq->isr_timestamp = cq->isr_timestamp;
15107 #endif
15108 	if (bf_get(lpfc_wcqe_c_xb, wcqe)) {
15109 		spin_lock_irqsave(&phba->hbalock, iflags);
15110 		cmdiocbq->cmd_flag |= LPFC_EXCHANGE_BUSY;
15111 		spin_unlock_irqrestore(&phba->hbalock, iflags);
15112 	}
15113 
15114 	if (cmdiocbq->cmd_cmpl) {
15115 		/* For FCP the flag is cleared in cmd_cmpl */
15116 		if (!(cmdiocbq->cmd_flag & LPFC_IO_FCP) &&
15117 		    cmdiocbq->cmd_flag & LPFC_DRIVER_ABORTED) {
15118 			spin_lock_irqsave(&phba->hbalock, iflags);
15119 			cmdiocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
15120 			spin_unlock_irqrestore(&phba->hbalock, iflags);
15121 		}
15122 
15123 		/* Pass the cmd_iocb and the wcqe to the upper layer */
15124 		memcpy(&cmdiocbq->wcqe_cmpl, wcqe,
15125 		       sizeof(struct lpfc_wcqe_complete));
15126 		cmdiocbq->cmd_cmpl(phba, cmdiocbq, cmdiocbq);
15127 	} else {
15128 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15129 				"0375 FCP cmdiocb not callback function "
15130 				"iotag: (%d)\n",
15131 				bf_get(lpfc_wcqe_c_request_tag, wcqe));
15132 	}
15133 }
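/*
 * Editor's note (illustrative sketch): lpfc_sli_iocbq_lookup_by_tag(), used
 * above, resolves a completion back to its originating command via the
 * request tag echoed in the WCQE. The core idea -- a bounds-checked table
 * indexed by tag -- rendered with hypothetical ex_* names:
 */
#include <stddef.h>
#include <stdint.h>

struct ex_cmd;

struct ex_ring {
	struct ex_cmd **lookup;		/* tag -> in-flight command, or NULL */
	uint32_t lookup_len;		/* number of table slots */
};

static struct ex_cmd *ex_lookup_by_tag(struct ex_ring *ring, uint32_t tag)
{
	if (tag >= ring->lookup_len)
		return NULL;		/* stale or corrupt tag */
	return ring->lookup[tag];
}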
15134 
15135 /**
15136  * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
15137  * @phba: Pointer to HBA context object.
15138  * @cq: Pointer to completion queue.
15139  * @wcqe: Pointer to work-queue completion queue entry.
15140  *
15141  * This routine handles a fast-path WQ entry consumed event by invoking the
15142  * proper WQ release routine on the matching fast-path WQ.
15143  **/
15144 static void
15145 lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15146 			     struct lpfc_wcqe_release *wcqe)
15147 {
15148 	struct lpfc_queue *childwq;
15149 	bool wqid_matched = false;
15150 	uint16_t hba_wqid;
15151 
15152 	/* Check for fast-path FCP work queue release */
15153 	hba_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
15154 	list_for_each_entry(childwq, &cq->child_list, list) {
15155 		if (childwq->queue_id == hba_wqid) {
15156 			lpfc_sli4_wq_release(childwq,
15157 					bf_get(lpfc_wcqe_r_wqe_index, wcqe));
15158 			if (childwq->q_flag & HBA_NVMET_WQFULL)
15159 				lpfc_nvmet_wqfull_process(phba, childwq);
15160 			wqid_matched = true;
15161 			break;
15162 		}
15163 	}
15164 	/* Report warning log message if no match found */
15165 	if (!wqid_matched)
15166 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15167 				"2580 Fast-path wqe consume event carries "
15168 				"miss-matched qid: wcqe-qid=x%x\n", hba_wqid);
15169 }
15170 
15171 /**
15172  * lpfc_sli4_nvmet_handle_rcqe - Process a receive-queue completion queue entry
15173  * @phba: Pointer to HBA context object.
15174  * @cq: Pointer to completion queue.
15175  * @rcqe: Pointer to receive-queue completion queue entry.
15176  *
15177  * This routine processes a receive-queue completion queue entry.
15178  *
15179  * Return: true if work posted to worker thread, otherwise false.
15180  **/
15181 static bool
15182 lpfc_sli4_nvmet_handle_rcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15183 			    struct lpfc_rcqe *rcqe)
15184 {
15185 	bool workposted = false;
15186 	struct lpfc_queue *hrq;
15187 	struct lpfc_queue *drq;
15188 	struct rqb_dmabuf *dma_buf;
15189 	struct fc_frame_header *fc_hdr;
15190 	struct lpfc_nvmet_tgtport *tgtp;
15191 	uint32_t status, rq_id;
15192 	unsigned long iflags;
15193 	uint32_t fctl, idx;
15194 
15195 	if ((phba->nvmet_support == 0) ||
15196 	    (phba->sli4_hba.nvmet_cqset == NULL))
15197 		return workposted;
15198 
15199 	idx = cq->queue_id - phba->sli4_hba.nvmet_cqset[0]->queue_id;
15200 	hrq = phba->sli4_hba.nvmet_mrq_hdr[idx];
15201 	drq = phba->sli4_hba.nvmet_mrq_data[idx];
15202 
15203 	/* sanity check on queue memory */
15204 	if (unlikely(!hrq) || unlikely(!drq))
15205 		return workposted;
15206 
15207 	if (bf_get(lpfc_cqe_code, rcqe) == CQE_CODE_RECEIVE_V1)
15208 		rq_id = bf_get(lpfc_rcqe_rq_id_v1, rcqe);
15209 	else
15210 		rq_id = bf_get(lpfc_rcqe_rq_id, rcqe);
15211 
15212 	if ((phba->nvmet_support == 0) ||
15213 	    (rq_id != hrq->queue_id))
15214 		return workposted;
15215 
15216 	status = bf_get(lpfc_rcqe_status, rcqe);
15217 	switch (status) {
15218 	case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
15219 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15220 				"6126 Receive Frame Truncated!!\n");
15221 		fallthrough;
15222 	case FC_STATUS_RQ_SUCCESS:
15223 		spin_lock_irqsave(&phba->hbalock, iflags);
15224 		lpfc_sli4_rq_release(hrq, drq);
15225 		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15226 		if (!dma_buf) {
15227 			hrq->RQ_no_buf_found++;
15228 			spin_unlock_irqrestore(&phba->hbalock, iflags);
15229 			goto out;
15230 		}
15231 		spin_unlock_irqrestore(&phba->hbalock, iflags);
15232 		hrq->RQ_rcv_buf++;
15233 		hrq->RQ_buf_posted--;
15234 		fc_hdr = (struct fc_frame_header *)dma_buf->hbuf.virt;
15235 
15236 		/* Just some basic sanity checks on FCP Command frame */
15237 		fctl = (fc_hdr->fh_f_ctl[0] << 16 |
15238 			fc_hdr->fh_f_ctl[1] << 8 |
15239 			fc_hdr->fh_f_ctl[2]);
15240 		if (((fctl &
15241 		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) !=
15242 		    (FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT)) ||
15243 		    (fc_hdr->fh_seq_cnt != 0)) /* 0 byte swapped is still 0 */
15244 			goto drop;
15245 
15246 		if (fc_hdr->fh_type == FC_TYPE_FCP) {
15247 			dma_buf->bytes_recv = bf_get(lpfc_rcqe_length, rcqe);
15248 			lpfc_nvmet_unsol_fcp_event(
15249 				phba, idx, dma_buf, cq->isr_timestamp,
15250 				cq->q_flag & HBA_NVMET_CQ_NOTIFY);
15251 			return false;
15252 		}
15253 drop:
15254 		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15255 		break;
15256 	case FC_STATUS_INSUFF_BUF_FRM_DISC:
15257 		if (phba->nvmet_support) {
15258 			tgtp = phba->targetport->private;
15259 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15260 					"6401 RQE Error x%x, posted %d err_cnt "
15261 					"%d: %x %x %x\n",
15262 					status, hrq->RQ_buf_posted,
15263 					hrq->RQ_no_posted_buf,
15264 					atomic_read(&tgtp->rcv_fcp_cmd_in),
15265 					atomic_read(&tgtp->rcv_fcp_cmd_out),
15266 					atomic_read(&tgtp->xmt_fcp_release));
15267 		}
15268 		fallthrough;
15269 
15270 	case FC_STATUS_INSUFF_BUF_NEED_BUF:
15271 		hrq->RQ_no_posted_buf++;
15272 		/* Post more buffers if possible */
15273 		break;
15274 	case FC_STATUS_RQ_DMA_FAILURE:
15275 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15276 				"2575 RQE DMA Error x%x, x%08x x%08x x%08x "
15277 				"x%08x\n",
15278 				status, rcqe->word0, rcqe->word1,
15279 				rcqe->word2, rcqe->word3);
15280 
15281 		/* If IV set, no further recovery */
15282 		if (bf_get(lpfc_rcqe_iv, rcqe))
15283 			break;
15284 
15285 		/* recycle consumed resource */
15286 		spin_lock_irqsave(&phba->hbalock, iflags);
15287 		lpfc_sli4_rq_release(hrq, drq);
15288 		dma_buf = lpfc_sli_rqbuf_get(phba, hrq);
15289 		if (!dma_buf) {
15290 			hrq->RQ_no_buf_found++;
15291 			spin_unlock_irqrestore(&phba->hbalock, iflags);
15292 			break;
15293 		}
15294 		hrq->RQ_rcv_buf++;
15295 		hrq->RQ_buf_posted--;
15296 		spin_unlock_irqrestore(&phba->hbalock, iflags);
15297 		lpfc_rq_buf_free(phba, &dma_buf->hbuf);
15298 		break;
15299 	default:
15300 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15301 				"2576 Unexpected RQE Status x%x, w0-3 x%08x "
15302 				"x%08x x%08x x%08x\n",
15303 				status, rcqe->word0, rcqe->word1,
15304 				rcqe->word2, rcqe->word3);
15305 		break;
15306 	}
15307 out:
15308 	return workposted;
15309 }
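/*
 * Editor's note (illustrative sketch): the sanity check above reassembles
 * the 24-bit FC F_CTL field from its three header bytes before testing the
 * FIRST_SEQ, END_SEQ and SEQ_INIT bits. A standalone rendering:
 */
#include <stdint.h>

static uint32_t ex_fc_fctl(const uint8_t fh_f_ctl[3])
{
	/* F_CTL occupies three consecutive big-endian bytes in the header */
	return ((uint32_t)fh_f_ctl[0] << 16) |
	       ((uint32_t)fh_f_ctl[1] << 8) |
	       fh_f_ctl[2];
}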
15310 
15311 /**
15312  * lpfc_sli4_fp_handle_cqe - Process fast-path work queue completion entry
15313  * @phba: adapter with cq
15314  * @cq: Pointer to the completion queue.
15315  * @cqe: Pointer to fast-path completion queue entry.
15316  *
15317  * This routine processes a fast-path work-queue completion entry from the
15318  * fast-path event queue for FCP command response completion.
15319  *
15320  * Return: true if work posted to worker thread, otherwise false.
15321  **/
15322 static bool
15323 lpfc_sli4_fp_handle_cqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
15324 			 struct lpfc_cqe *cqe)
15325 {
15326 	struct lpfc_wcqe_release wcqe;
15327 	bool workposted = false;
15328 
15329 	/* Copy the work queue CQE and convert endian order if needed */
15330 	lpfc_sli4_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
15331 
15332 	/* Check and process for different type of WCQE and dispatch */
15333 	switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
15334 	case CQE_CODE_COMPL_WQE:
15335 	case CQE_CODE_NVME_ERSP:
15336 		cq->CQ_wq++;
15337 		/* Process the WQ complete event */
15338 		phba->last_completion_time = jiffies;
15339 		if (cq->subtype == LPFC_IO || cq->subtype == LPFC_NVME_LS)
15340 			lpfc_sli4_fp_handle_fcp_wcqe(phba, cq,
15341 				(struct lpfc_wcqe_complete *)&wcqe);
15342 		break;
15343 	case CQE_CODE_RELEASE_WQE:
15344 		cq->CQ_release_wqe++;
15345 		/* Process the WQ release event */
15346 		lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
15347 				(struct lpfc_wcqe_release *)&wcqe);
15348 		break;
15349 	case CQE_CODE_XRI_ABORTED:
15350 		cq->CQ_xri_aborted++;
15351 		/* Process the WQ XRI abort event */
15352 		phba->last_completion_time = jiffies;
15353 		workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
15354 				(struct sli4_wcqe_xri_aborted *)&wcqe);
15355 		break;
15356 	case CQE_CODE_RECEIVE_V1:
15357 	case CQE_CODE_RECEIVE:
15358 		phba->last_completion_time = jiffies;
15359 		if (cq->subtype == LPFC_NVMET) {
15360 			workposted = lpfc_sli4_nvmet_handle_rcqe(
15361 				phba, cq, (struct lpfc_rcqe *)&wcqe);
15362 		}
15363 		break;
15364 	default:
15365 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15366 				"0144 Not a valid CQE code: x%x\n",
15367 				bf_get(lpfc_wcqe_c_code, &wcqe));
15368 		break;
15369 	}
15370 	return workposted;
15371 }
15372 
15373 /**
15374  * __lpfc_sli4_hba_process_cq - Process a fast-path event queue entry
15375  * @cq: Pointer to CQ to be processed
15376  *
15377  * This routine calls the cq processing routine with the handler for
15378  * fast path CQEs.
15379  *
15380  * The CQ routine returns two values: the first is the calling status,
15381  * which indicates whether work was queued to the background discovery
15382  * thread. If true, the routine should wakeup the discovery thread;
15383  * the second is the delay parameter. If non-zero, rather than rearming
15384  * the CQ and yet another interrupt, the CQ handler should be queued so
15385  * that it is processed in a subsequent polling action. The value of
15386  * the delay indicates when to reschedule it.
15387  **/
15388 static void
15389 __lpfc_sli4_hba_process_cq(struct lpfc_queue *cq)
15390 {
15391 	struct lpfc_hba *phba = cq->phba;
15392 	unsigned long delay;
15393 	bool workposted = false;
15394 	int ret;
15395 
15396 	/* process and rearm the CQ */
15397 	workposted |= __lpfc_sli4_process_cq(phba, cq, lpfc_sli4_fp_handle_cqe,
15398 					     &delay);
15399 
15400 	if (delay) {
15401 		if (is_kdump_kernel())
15402 			ret = queue_delayed_work(phba->wq, &cq->sched_irqwork,
15403 						delay);
15404 		else
15405 			ret = queue_delayed_work_on(cq->chann, phba->wq,
15406 						&cq->sched_irqwork, delay);
15407 		if (!ret)
15408 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15409 					"0367 Cannot schedule queue work "
15410 					"for cqid=%d on CPU %d\n",
15411 					cq->queue_id, cq->chann);
15412 	}
15413 
15414 	/* wake up the worker thread if there is work to be done */
15415 	if (workposted)
15416 		lpfc_worker_wake_up(phba);
15417 }
15418 
15419 /**
15420  * lpfc_sli4_hba_process_cq - fast-path work handler when started by
15421  *   interrupt
15422  * @work: pointer to work element
15423  *
15424  * Translates from the work handler and calls the fast-path handler.
15425  **/
15426 static void
15427 lpfc_sli4_hba_process_cq(struct work_struct *work)
15428 {
15429 	struct lpfc_queue *cq = container_of(work, struct lpfc_queue, irqwork);
15430 
15431 	__lpfc_sli4_hba_process_cq(cq);
15432 }
15433 
15434 /**
15435  * lpfc_sli4_hba_handle_eqe - Process a fast-path event queue entry
15436  * @phba: Pointer to HBA context object.
15437  * @eq: Pointer to the queue structure.
15438  * @eqe: Pointer to fast-path event queue entry.
15439  * @poll_mode: poll_mode to execute processing the cq.
15440  *
15441  * This routine processes an event queue entry from the fast-path event
15442  * queue. It checks the MajorCode and MinorCode to determine whether this
15443  * is a completion event on a completion queue; if not, an error is logged
15444  * and the routine returns. Otherwise, it finds the corresponding
15445  * completion queue, processes all the entries on that completion queue,
15446  * rearms the completion queue, and returns.
15447  **/
15448 static void
15449 lpfc_sli4_hba_handle_eqe(struct lpfc_hba *phba, struct lpfc_queue *eq,
15450 			 struct lpfc_eqe *eqe, enum lpfc_poll_mode poll_mode)
15451 {
15452 	struct lpfc_queue *cq = NULL;
15453 	uint32_t qidx = eq->hdwq;
15454 	uint16_t cqid, id;
15455 	int ret;
15456 
15457 	if (unlikely(bf_get_le32(lpfc_eqe_major_code, eqe) != 0)) {
15458 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15459 				"0366 Not a valid completion "
15460 				"event: majorcode=x%x, minorcode=x%x\n",
15461 				bf_get_le32(lpfc_eqe_major_code, eqe),
15462 				bf_get_le32(lpfc_eqe_minor_code, eqe));
15463 		return;
15464 	}
15465 
15466 	/* Get the reference to the corresponding CQ */
15467 	cqid = bf_get_le32(lpfc_eqe_resource_id, eqe);
15468 
15469 	/* Use the fast lookup method first */
15470 	if (cqid <= phba->sli4_hba.cq_max) {
15471 		cq = phba->sli4_hba.cq_lookup[cqid];
15472 		if (cq)
15473 			goto  work_cq;
15474 	}
15475 
15476 	/* Next check for NVMET completion */
15477 	if (phba->cfg_nvmet_mrq && phba->sli4_hba.nvmet_cqset) {
15478 		id = phba->sli4_hba.nvmet_cqset[0]->queue_id;
15479 		if ((cqid >= id) && (cqid < (id + phba->cfg_nvmet_mrq))) {
15480 			/* Process NVMET unsol rcv */
15481 			cq = phba->sli4_hba.nvmet_cqset[cqid - id];
15482 			goto  process_cq;
15483 		}
15484 	}
15485 
15486 	if (phba->sli4_hba.nvmels_cq &&
15487 	    (cqid == phba->sli4_hba.nvmels_cq->queue_id)) {
15488 		/* Process NVME unsol rcv */
15489 		cq = phba->sli4_hba.nvmels_cq;
15490 	}
15491 
15492 	/* Otherwise this is a slow-path event */
15493 	if (cq == NULL) {
15494 		lpfc_sli4_sp_handle_eqe(phba, eqe,
15495 					phba->sli4_hba.hdwq[qidx].hba_eq);
15496 		return;
15497 	}
15498 
15499 process_cq:
15500 	if (unlikely(cqid != cq->queue_id)) {
15501 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15502 				"0368 Miss-matched fast-path completion "
15503 				"queue identifier: eqcqid=%d, fcpcqid=%d\n",
15504 				cqid, cq->queue_id);
15505 		return;
15506 	}
15507 
15508 work_cq:
15509 #if defined(CONFIG_SCSI_LPFC_DEBUG_FS)
15510 	if (phba->ktime_on)
15511 		cq->isr_timestamp = ktime_get_ns();
15512 	else
15513 		cq->isr_timestamp = 0;
15514 #endif
15515 
15516 	switch (poll_mode) {
15517 	case LPFC_THREADED_IRQ:
15518 		__lpfc_sli4_hba_process_cq(cq);
15519 		break;
15520 	case LPFC_QUEUE_WORK:
15521 	default:
15522 		if (is_kdump_kernel())
15523 			ret = queue_work(phba->wq, &cq->irqwork);
15524 		else
15525 			ret = queue_work_on(cq->chann, phba->wq, &cq->irqwork);
15526 		if (!ret)
15527 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
15528 					"0383 Cannot schedule queue work "
15529 					"for CQ eqcqid=%d, cqid=%d on CPU %d\n",
15530 					cqid, cq->queue_id,
15531 					raw_smp_processor_id());
15532 		break;
15533 	}
15534 }
15535 
15536 /**
15537  * lpfc_sli4_dly_hba_process_cq - fast-path work handler when started by timer
15538  * @work: pointer to work element
15539  *
15540  * Translates from the work handler and calls the fast-path handler.
15541  **/
15542 static void
15543 lpfc_sli4_dly_hba_process_cq(struct work_struct *work)
15544 {
15545 	struct lpfc_queue *cq = container_of(to_delayed_work(work),
15546 					struct lpfc_queue, sched_irqwork);
15547 
15548 	__lpfc_sli4_hba_process_cq(cq);
15549 }
15550 
15551 /**
15552  * lpfc_sli4_hba_intr_handler - HBA interrupt handler to SLI-4 device
15553  * @irq: Interrupt number.
15554  * @dev_id: The device context pointer.
15555  *
15556  * This function is directly called from the PCI layer as an interrupt
15557  * service routine when device with SLI-4 interface spec is enabled with
15558  * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
15559  * ring event in the HBA. However, when the device is enabled with either
15560  * MSI or Pin-IRQ interrupt mode, this function is called as part of the
15561  * device-level interrupt handler. When the PCI slot is in error recovery
15562  * or the HBA is undergoing initialization, the interrupt handler will not
15563  * process the interrupt. The SCSI FCP fast-path ring events are handled in
15564  * the interrupt context. This function is called without any lock held.
15565  * It gets the hbalock to access and update SLI data structures. Note that
15566  * the FCP EQs map one-to-one to the FCP CQs, such that the FCP EQ index is
15567  * equal to that of the FCP CQ index.
15568  *
15569  * The link attention and ELS ring attention events are handled
15570  * by the worker thread. The interrupt handler signals the worker thread
15571  * and returns for these events. This function is called without any lock
15572  * held. It gets the hbalock to access and update SLI data structures.
15573  *
15574  * This function returns IRQ_HANDLED when interrupt is handled, IRQ_WAKE_THREAD
15575  * when interrupt is scheduled to be handled from a threaded irq context, or
15576  * else returns IRQ_NONE.
15577  **/
15578 irqreturn_t
15579 lpfc_sli4_hba_intr_handler(int irq, void *dev_id)
15580 {
15581 	struct lpfc_hba *phba;
15582 	struct lpfc_hba_eq_hdl *hba_eq_hdl;
15583 	struct lpfc_queue *fpeq;
15584 	unsigned long iflag;
15585 	int hba_eqidx;
15586 	int ecount = 0;
15587 	struct lpfc_eq_intr_info *eqi;
15588 
15589 	/* Get the driver's phba structure from the dev_id */
15590 	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
15591 	phba = hba_eq_hdl->phba;
15592 	hba_eqidx = hba_eq_hdl->idx;
15593 
15594 	if (unlikely(!phba))
15595 		return IRQ_NONE;
15596 	if (unlikely(!phba->sli4_hba.hdwq))
15597 		return IRQ_NONE;
15598 
15599 	/* Get to the EQ struct associated with this vector */
15600 	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
15601 	if (unlikely(!fpeq))
15602 		return IRQ_NONE;
15603 
15604 	/* Check device state for handling interrupt */
15605 	if (unlikely(lpfc_intr_state_check(phba))) {
15606 		/* Check again for link_state with lock held */
15607 		spin_lock_irqsave(&phba->hbalock, iflag);
15608 		if (phba->link_state < LPFC_LINK_DOWN)
15609 			/* Flush, clear interrupt, and rearm the EQ */
15610 			lpfc_sli4_eqcq_flush(phba, fpeq);
15611 		spin_unlock_irqrestore(&phba->hbalock, iflag);
15612 		return IRQ_NONE;
15613 	}
15614 
15615 	switch (fpeq->poll_mode) {
15616 	case LPFC_THREADED_IRQ:
15617 		/* CGN mgmt is mutually exclusive with irq processing */
15618 		if (phba->cmf_active_mode == LPFC_CFG_OFF)
15619 			return IRQ_WAKE_THREAD;
15620 		fallthrough;
15621 	case LPFC_QUEUE_WORK:
15622 	default:
15623 		eqi = this_cpu_ptr(phba->sli4_hba.eq_info);
15624 		eqi->icnt++;
15625 
15626 		fpeq->last_cpu = raw_smp_processor_id();
15627 
15628 		if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
15629 		    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
15630 		    phba->cfg_auto_imax &&
15631 		    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
15632 		    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
15633 			lpfc_sli4_mod_hba_eq_delay(phba, fpeq,
15634 						   LPFC_MAX_AUTO_EQ_DELAY);
15635 
15636 		/* process and rearm the EQ */
15637 		ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
15638 					      LPFC_QUEUE_WORK);
15639 
15640 		if (unlikely(ecount == 0)) {
15641 			fpeq->EQ_no_entry++;
15642 			if (phba->intr_type == MSIX)
15643 				/* MSI-X vector is dedicated, so a missing EQE is unexpected */
15644 				lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
15645 						"0358 MSI-X interrupt with no EQE\n");
15646 			else
15647 				/* Non MSI-X interrupt may be shared; let another handler claim it */
15648 				return IRQ_NONE;
15649 		}
15650 	}
15651 
15652 	return IRQ_HANDLED;
15653 } /* lpfc_sli4_hba_intr_handler */
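
/*
 * Editorial sketch (assumption; the actual request_irq wiring lives in the
 * driver's setup code, not in this file section): a hard handler that can
 * return IRQ_WAKE_THREAD is typically paired with its threaded counterpart
 * via request_threaded_irq(), roughly:
 *
 *	rc = request_threaded_irq(pci_irq_vector(phba->pcidev, vec),
 *				  lpfc_sli4_hba_intr_handler,
 *				  lpfc_sli4_hba_intr_handler_th, 0,
 *				  "lpfc-eq", &phba->sli4_hba.hba_eq_hdl[vec]);
 *
 * so that returning IRQ_WAKE_THREAD above causes the kernel to invoke the
 * threaded handler (lpfc_sli4_hba_intr_handler_th, defined later in this
 * file) in process context.
 */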
15654 
15655 /**
15656  * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
15657  * @irq: Interrupt number.
15658  * @dev_id: The device context pointer.
15659  *
15660  * This function is the device-level interrupt handler to device with SLI-4
15661  * interface spec, called from the PCI layer when either MSI or Pin-IRQ
15662  * interrupt mode is enabled and there is an event in the HBA which requires
15663  * driver attention. This function invokes the slow-path interrupt attention
15664  * handling function and fast-path interrupt attention handling function in
15665  * turn to process the relevant HBA attention events. This function is called
15666  * without any lock held. It gets the hbalock to access and update SLI data
15667  * structures.
15668  *
15669  * This function returns IRQ_HANDLED when interrupt is handled, else it
15670  * returns IRQ_NONE.
15671  **/
15672 irqreturn_t
15673 lpfc_sli4_intr_handler(int irq, void *dev_id)
15674 {
15675 	struct lpfc_hba  *phba;
15676 	irqreturn_t hba_irq_rc;
15677 	bool hba_handled = false;
15678 	int qidx;
15679 
15680 	/* Get the driver's phba structure from the dev_id */
15681 	phba = (struct lpfc_hba *)dev_id;
15682 
15683 	if (unlikely(!phba))
15684 		return IRQ_NONE;
15685 
15686 	/*
15687 	 * Invoke fast-path host attention interrupt handling as appropriate.
15688 	 */
15689 	for (qidx = 0; qidx < phba->cfg_irq_chann; qidx++) {
15690 		hba_irq_rc = lpfc_sli4_hba_intr_handler(irq,
15691 					&phba->sli4_hba.hba_eq_hdl[qidx]);
15692 		if (hba_irq_rc == IRQ_HANDLED)
15693 			hba_handled |= true;
15694 	}
15695 
15696 	return (hba_handled == true) ? IRQ_HANDLED : IRQ_NONE;
15697 } /* lpfc_sli4_intr_handler */
15698 
15699 void lpfc_sli4_poll_hbtimer(struct timer_list *t)
15700 {
15701 	struct lpfc_hba *phba = timer_container_of(phba, t, cpuhp_poll_timer);
15702 	struct lpfc_queue *eq;
15703 
15704 	rcu_read_lock();
15705 
15706 	list_for_each_entry_rcu(eq, &phba->poll_list, _poll_list)
15707 		lpfc_sli4_poll_eq(eq);
15708 	if (!list_empty(&phba->poll_list))
15709 		mod_timer(&phba->cpuhp_poll_timer,
15710 			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15711 
15712 	rcu_read_unlock();
15713 }
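
/*
 * Editorial sketch (assumption): the heartbeat timer above follows the
 * standard timer_setup()/mod_timer() pattern, presumably initialized
 * elsewhere in the driver along the lines of:
 *
 *	timer_setup(&phba->cpuhp_poll_timer, lpfc_sli4_poll_hbtimer, 0);
 *
 * after which each expiry re-arms the timer (as above) for as long as the
 * poll list is non-empty:
 *
 *	mod_timer(&phba->cpuhp_poll_timer,
 *		  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
 */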
15714 
15715 static inline void lpfc_sli4_add_to_poll_list(struct lpfc_queue *eq)
15716 {
15717 	struct lpfc_hba *phba = eq->phba;
15718 
15719 	/* kickstart slowpath processing if needed */
15720 	if (list_empty(&phba->poll_list))
15721 		mod_timer(&phba->cpuhp_poll_timer,
15722 			  jiffies + msecs_to_jiffies(LPFC_POLL_HB));
15723 
15724 	list_add_rcu(&eq->_poll_list, &phba->poll_list);
15725 	synchronize_rcu();
15726 }
15727 
15728 static inline void lpfc_sli4_remove_from_poll_list(struct lpfc_queue *eq)
15729 {
15730 	struct lpfc_hba *phba = eq->phba;
15731 
15732 	/* Disable slowpath processing for this eq.  Kick start the eq
15733 	 * by RE-ARMING it ASAP
15734 	 */
15735 	list_del_rcu(&eq->_poll_list);
15736 	synchronize_rcu();
15737 
15738 	if (list_empty(&phba->poll_list))
15739 		timer_delete_sync(&phba->cpuhp_poll_timer);
15740 }
15741 
15742 void lpfc_sli4_cleanup_poll_list(struct lpfc_hba *phba)
15743 {
15744 	struct lpfc_queue *eq, *next;
15745 
15746 	list_for_each_entry_safe(eq, next, &phba->poll_list, _poll_list)
15747 		list_del(&eq->_poll_list);
15748 
15749 	INIT_LIST_HEAD(&phba->poll_list);
15750 	synchronize_rcu();
15751 }
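
/*
 * Editorial note on the RCU usage in the poll-list helpers above: writers
 * link/unlink with list_add_rcu()/list_del_rcu() and then call
 * synchronize_rcu(), which waits until every reader currently inside an
 * rcu_read_lock() section (here, the timer callback) has exited. A generic
 * sketch of the writer side, with invented names:
 *
 *	list_del_rcu(&node->list);
 *	synchronize_rcu();
 *	kfree(node);	safe: no reader can still see node
 */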
15752 
15753 static inline void
15754 __lpfc_sli4_switch_eqmode(struct lpfc_queue *eq, uint8_t mode)
15755 {
15756 	if (mode == eq->mode)
15757 		return;
15758 	/*
15759 	 * Currently this function is only called during a hotplug
15760 	 * event and the cpu on which this function is executing
15761 	 * is going offline.  By now the hotplug has instructed
15762 	 * the scheduler to remove this cpu from the cpu active mask.
15763 	 * So we don't need to worry about being put aside by the
15764 	 * scheduler for a high priority process.  Yes, interrupts
15765 	 * could still come in, but they are known to retire ASAP.
15766 	 */
15767 
15768 	/* Disable polling in the fastpath */
15769 	WRITE_ONCE(eq->mode, mode);
15770 	/* flush out the store buffer */
15771 	smp_wmb();
15772 
15773 	/*
15774 	 * Add this eq to the polling list and start polling. For
15775 	 * a grace period both interrupt handler and poller will
15776 	 * try to process the eq _but_ that's fine.  We have a
15777 	 * synchronization mechanism in place (queue_claimed) to
15778 	 * deal with it.  This is just a draining phase for the
15779 	 * interrupt handler (not the eq's) as we have guaranteed
15780 	 * through the barrier that all the CPUs have seen the new
15781 	 * CQ_POLLED state, which effectively disables the REARMING
15782 	 * of the EQ.  The whole idea is that the eq's die off
15783 	 * eventually as we are no longer rearming EQ's.
15784 	 */
15785 	mode ? lpfc_sli4_add_to_poll_list(eq) :
15786 	       lpfc_sli4_remove_from_poll_list(eq);
15787 }
15788 
15789 void lpfc_sli4_start_polling(struct lpfc_queue *eq)
15790 {
15791 	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_POLL);
15792 }
15793 
15794 void lpfc_sli4_stop_polling(struct lpfc_queue *eq)
15795 {
15796 	struct lpfc_hba *phba = eq->phba;
15797 
15798 	__lpfc_sli4_switch_eqmode(eq, LPFC_EQ_INTERRUPT);
15799 
15800 	/* Kick start for the pending io's in h/w.
15801 	 * Once we switch back to interrupt processing on an eq,
15802 	 * the io path completion will only arm eq's when it
15803 	 * receives a completion.  But since the eq's are in a
15804 	 * disarmed state, it doesn't receive a completion.  This
15805 	 * creates a deadlock scenario.
15806 	 */
15807 	phba->sli4_hba.sli4_write_eq_db(phba, eq, 0, LPFC_QUEUE_REARM);
15808 }
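
/*
 * Editorial sketch (assumption): lpfc_sli4_start_polling() and
 * lpfc_sli4_stop_polling() are intended to be driven from CPU hotplug
 * callbacks, so that an EQ whose CPU is going offline keeps being serviced
 * by the timer-based poller. Roughly, with invented names:
 *
 *	static int demo_cpu_offline(unsigned int cpu)
 *	{
 *		struct lpfc_queue *eq = eq_owned_by(cpu);	invented helper
 *
 *		if (eq)
 *			lpfc_sli4_start_polling(eq);
 *		return 0;
 *	}
 */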
15809 
15810 /**
15811  * lpfc_sli4_queue_free - free a queue structure and associated memory
15812  * @queue: The queue structure to free.
15813  *
15814  * This function frees a queue structure and the DMAable memory used for
15815  * the host resident queue. This function must be called after destroying the
15816  * queue on the HBA.
15817  **/
15818 void
15819 lpfc_sli4_queue_free(struct lpfc_queue *queue)
15820 {
15821 	struct lpfc_dmabuf *dmabuf;
15822 
15823 	if (!queue)
15824 		return;
15825 
15826 	if (!list_empty(&queue->wq_list))
15827 		list_del(&queue->wq_list);
15828 
15829 	while (!list_empty(&queue->page_list)) {
15830 		list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
15831 				 list);
15832 		dma_free_coherent(&queue->phba->pcidev->dev, queue->page_size,
15833 				  dmabuf->virt, dmabuf->phys);
15834 		kfree(dmabuf);
15835 	}
15836 	if (queue->rqbp) {
15837 		lpfc_free_rq_buffer(queue->phba, queue);
15838 		kfree(queue->rqbp);
15839 	}
15840 
15841 	if (!list_empty(&queue->cpu_list))
15842 		list_del(&queue->cpu_list);
15843 
15844 	kfree(queue);
15845 	return;
15846 }
15847 
15848 /**
15849  * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
15850  * @phba: The HBA that this queue is being created on.
15851  * @page_size: The size of a queue page
15852  * @entry_size: The size of each queue entry for this queue.
15853  * @entry_count: The number of entries that this queue will handle.
15854  * @cpu: The cpu that will primarily utilize this queue.
15855  *
15856  * This function allocates a queue structure and the DMAable memory used for
15857  * the host resident queue. This function must be called before creating the
15858  * queue on the HBA.
15859  **/
15860 struct lpfc_queue *
15861 lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t page_size,
15862 		      uint32_t entry_size, uint32_t entry_count, int cpu)
15863 {
15864 	struct lpfc_queue *queue;
15865 	struct lpfc_dmabuf *dmabuf;
15866 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
15867 	uint16_t x, pgcnt;
15868 
15869 	if (!phba->sli4_hba.pc_sli4_params.supported)
15870 		hw_page_size = page_size;
15871 
15872 	pgcnt = ALIGN(entry_size * entry_count, hw_page_size) / hw_page_size;
15873 
15874 	/* If needed, adjust the page count to match the max the adapter supports */
15875 	if (pgcnt > phba->sli4_hba.pc_sli4_params.wqpcnt)
15876 		pgcnt = phba->sli4_hba.pc_sli4_params.wqpcnt;
15877 
15878 	queue = kzalloc_node(sizeof(*queue) + (sizeof(void *) * pgcnt),
15879 			     GFP_KERNEL, cpu_to_node(cpu));
15880 	if (!queue)
15881 		return NULL;
15882 
15883 	INIT_LIST_HEAD(&queue->list);
15884 	INIT_LIST_HEAD(&queue->_poll_list);
15885 	INIT_LIST_HEAD(&queue->wq_list);
15886 	INIT_LIST_HEAD(&queue->wqfull_list);
15887 	INIT_LIST_HEAD(&queue->page_list);
15888 	INIT_LIST_HEAD(&queue->child_list);
15889 	INIT_LIST_HEAD(&queue->cpu_list);
15890 
15891 	/* Set queue parameters now.  If the system cannot provide memory
15892 	 * resources, the free routine needs to know what was allocated.
15893 	 */
15894 	queue->page_count = pgcnt;
15895 	queue->q_pgs = (void **)&queue[1];
15896 	queue->entry_cnt_per_pg = hw_page_size / entry_size;
15897 	queue->entry_size = entry_size;
15898 	queue->entry_count = entry_count;
15899 	queue->page_size = hw_page_size;
15900 	queue->phba = phba;
15901 
15902 	for (x = 0; x < queue->page_count; x++) {
15903 		dmabuf = kzalloc_node(sizeof(*dmabuf), GFP_KERNEL,
15904 				      dev_to_node(&phba->pcidev->dev));
15905 		if (!dmabuf)
15906 			goto out_fail;
15907 		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
15908 						  hw_page_size, &dmabuf->phys,
15909 						  GFP_KERNEL);
15910 		if (!dmabuf->virt) {
15911 			kfree(dmabuf);
15912 			goto out_fail;
15913 		}
15914 		dmabuf->buffer_tag = x;
15915 		list_add_tail(&dmabuf->list, &queue->page_list);
15916 		/* use lpfc_sli4_qe to index a particular entry in this page */
15917 		queue->q_pgs[x] = dmabuf->virt;
15918 	}
15919 	INIT_WORK(&queue->irqwork, lpfc_sli4_hba_process_cq);
15920 	INIT_WORK(&queue->spwork, lpfc_sli4_sp_process_cq);
15921 	INIT_DELAYED_WORK(&queue->sched_irqwork, lpfc_sli4_dly_hba_process_cq);
15922 	INIT_DELAYED_WORK(&queue->sched_spwork, lpfc_sli4_dly_sp_process_cq);
15923 
15924 	/* notify_interval will be set during q creation */
15925 
15926 	return queue;
15927 out_fail:
15928 	lpfc_sli4_queue_free(queue);
15929 	return NULL;
15930 }
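
/*
 * Editorial usage sketch (assumption; the real sizing comes from the SLI4
 * parameters read from the port): allocating host memory for an EQ pairs
 * this routine with lpfc_eq_create() below and, on failure, with
 * lpfc_sli4_queue_free():
 *
 *	struct lpfc_queue *eq;
 *	int rc;
 *
 *	eq = lpfc_sli4_queue_alloc(phba, LPFC_DEFAULT_PAGE_SIZE,
 *				   phba->sli4_hba.eq_esize,
 *				   phba->sli4_hba.eq_ecount, cpu);
 *	if (!eq)
 *		return -ENOMEM;
 *	rc = lpfc_eq_create(phba, eq, phba->cfg_fcp_imax);
 *	if (rc)
 *		lpfc_sli4_queue_free(eq);
 */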
15931 
15932 /**
15933  * lpfc_dual_chute_pci_bar_map - Map pci base address register to host memory
15934  * @phba: HBA structure that indicates port to create a queue on.
15935  * @pci_barset: PCI BAR set flag.
15936  *
15937  * This function returns the host memory address to which the specified PCI
15938  * BAR set has been iomapped, if such a mapping has been set up. The returned
15939  * host memory address can be NULL.
15940  */
15941 static void __iomem *
15942 lpfc_dual_chute_pci_bar_map(struct lpfc_hba *phba, uint16_t pci_barset)
15943 {
15944 	if (!phba->pcidev)
15945 		return NULL;
15946 
15947 	switch (pci_barset) {
15948 	case WQ_PCI_BAR_0_AND_1:
15949 		return phba->pci_bar0_memmap_p;
15950 	case WQ_PCI_BAR_2_AND_3:
15951 		return phba->pci_bar2_memmap_p;
15952 	case WQ_PCI_BAR_4_AND_5:
15953 		return phba->pci_bar4_memmap_p;
15954 	default:
15955 		break;
15956 	}
15957 	return NULL;
15958 }
15959 
15960 static __maybe_unused void __iomem *
15961 lpfc_dpp_wc_map(struct lpfc_hba *phba, uint8_t dpp_barset)
15962 {
15963 
15964 	/* DPP region is supposed to cover 64-bit BAR2 */
15965 	if (dpp_barset != WQ_PCI_BAR_4_AND_5) {
15966 		lpfc_log_msg(phba, KERN_WARNING, LOG_INIT,
15967 			     "3273 dpp_barset x%x != WQ_PCI_BAR_4_AND_5\n",
15968 			     dpp_barset);
15969 		return NULL;
15970 	}
15971 
15972 	if (!phba->sli4_hba.dpp_regs_memmap_wc_p) {
15973 		void __iomem *dpp_map;
15974 
15975 		dpp_map = ioremap_wc(phba->pci_bar2_map,
15976 				     pci_resource_len(phba->pcidev,
15977 						      PCI_64BIT_BAR4));
15978 
15979 		if (dpp_map)
15980 			phba->sli4_hba.dpp_regs_memmap_wc_p = dpp_map;
15981 	}
15982 
15983 	return phba->sli4_hba.dpp_regs_memmap_wc_p;
15984 }
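
/*
 * Editorial sketch (assumption): a write-combining mapping such as the one
 * obtained above is typically used for "doorbell push" style regions, where
 * the WQE payload is copied straight to the adapter. For illustration only,
 * with the offset and size names invented:
 *
 *	void __iomem *dpp = lpfc_dpp_wc_map(phba, WQ_PCI_BAR_4_AND_5);
 *
 *	if (dpp)
 *		memcpy_toio(dpp + dpp_offset, wqe, wqe_size);
 */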
15985 
15986 /**
15987  * lpfc_modify_hba_eq_delay - Modify Delay Multiplier on EQs
15988  * @phba: HBA structure that EQs are on.
15989  * @startq: The starting EQ index to modify
15990  * @numq: The number of EQs (consecutive indexes) to modify
15991  * @usdelay: amount of delay
15992  *
15993  * This function revises the EQ delay on 1 or more EQs. The EQ delay
15994  * is set either by writing to a register (if supported by the SLI Port)
15995  * or by mailbox command. The mailbox command allows several EQs to be
15996  * updated at once.
15997  *
15998  * The @phba struct is used to send a mailbox command to HBA. The @startq
15999  * is used to get the starting EQ index to change. The @numq value is
16000  * used to specify how many consecutive EQ indexes, starting at EQ index,
16001  * are to be changed. This function is synchronous and will wait for any
16002  * mailbox commands to finish before returning.
16003  *
16004  * This function does not return a value. If allocating the mailbox command
16005  * buffer fails, or if the mailbox command itself fails, an error is logged
16006  * and the function returns. Note: on a mailbox failure, some EQs may
16007  * already have had their delay multiplier changed.
16008  **/
16009 void
16010 lpfc_modify_hba_eq_delay(struct lpfc_hba *phba, uint32_t startq,
16011 			 uint32_t numq, uint32_t usdelay)
16012 {
16013 	struct lpfc_mbx_modify_eq_delay *eq_delay;
16014 	LPFC_MBOXQ_t *mbox;
16015 	struct lpfc_queue *eq;
16016 	int cnt = 0, rc, length;
16017 	uint32_t shdr_status, shdr_add_status;
16018 	uint32_t dmult;
16019 	int qidx;
16020 	union lpfc_sli4_cfg_shdr *shdr;
16021 
16022 	if (startq >= phba->cfg_irq_chann)
16023 		return;
16024 
16025 	if (usdelay > 0xFFFF) {
16026 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_FCP | LOG_NVME,
16027 				"6429 usdelay %d too large. Scaled down to "
16028 				"0xFFFF.\n", usdelay);
16029 		usdelay = 0xFFFF;
16030 	}
16031 
16032 	/* set values by EQ_DELAY register if supported */
16033 	if (phba->sli.sli_flag & LPFC_SLI_USE_EQDR) {
16034 		for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
16035 			eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
16036 			if (!eq)
16037 				continue;
16038 
16039 			lpfc_sli4_mod_hba_eq_delay(phba, eq, usdelay);
16040 
16041 			if (++cnt >= numq)
16042 				break;
16043 		}
16044 		return;
16045 	}
16046 
16047 	/* Otherwise, set values by mailbox cmd */
16048 
16049 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16050 	if (!mbox) {
16051 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16052 				"6428 Failed allocating mailbox cmd buffer."
16053 				" EQ delay was not set.\n");
16054 		return;
16055 	}
16056 	length = (sizeof(struct lpfc_mbx_modify_eq_delay) -
16057 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16058 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16059 			 LPFC_MBOX_OPCODE_MODIFY_EQ_DELAY,
16060 			 length, LPFC_SLI4_MBX_EMBED);
16061 	eq_delay = &mbox->u.mqe.un.eq_delay;
16062 
16063 	/* Calculate delay multiplier from maximum interrupts per second */
16064 	dmult = (usdelay * LPFC_DMULT_CONST) / LPFC_SEC_TO_USEC;
16065 	if (dmult)
16066 		dmult--;
16067 	if (dmult > LPFC_DMULT_MAX)
16068 		dmult = LPFC_DMULT_MAX;
16069 
16070 	for (qidx = startq; qidx < phba->cfg_irq_chann; qidx++) {
16071 		eq = phba->sli4_hba.hba_eq_hdl[qidx].eq;
16072 		if (!eq)
16073 			continue;
16074 		eq->q_mode = usdelay;
16075 		eq_delay->u.request.eq[cnt].eq_id = eq->queue_id;
16076 		eq_delay->u.request.eq[cnt].phase = 0;
16077 		eq_delay->u.request.eq[cnt].delay_multi = dmult;
16078 
16079 		if (++cnt >= numq)
16080 			break;
16081 	}
16082 	eq_delay->u.request.num_eq = cnt;
16083 
16084 	mbox->vport = phba->pport;
16085 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16086 	mbox->ctx_ndlp = NULL;
16087 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16088 	shdr = (union lpfc_sli4_cfg_shdr *) &eq_delay->header.cfg_shdr;
16089 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16090 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16091 	if (shdr_status || shdr_add_status || rc) {
16092 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16093 				"2512 MODIFY_EQ_DELAY mailbox failed with "
16094 				"status x%x add_status x%x, mbx status x%x\n",
16095 				shdr_status, shdr_add_status, rc);
16096 	}
16097 	mempool_free(mbox, phba->mbox_mem_pool);
16098 	return;
16099 }
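
/*
 * Editorial worked example for the multiplier math above (assuming the
 * usual constants LPFC_DMULT_CONST = 651042, LPFC_SEC_TO_USEC = 1000000 and
 * LPFC_DMULT_MAX = 1023; check lpfc_hw4.h for the authoritative values):
 *
 *	usdelay = 16
 *	dmult   = (16 * 651042) / 1000000 = 10	(integer division)
 *	dmult-- gives 9, which is below LPFC_DMULT_MAX, so 9 is programmed
 *	into each eq_delay entry of the MODIFY_EQ_DELAY request.
 */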
16100 
16101 /**
16102  * lpfc_eq_create - Create an Event Queue on the HBA
16103  * @phba: HBA structure that indicates port to create a queue on.
16104  * @eq: The queue structure to use to create the event queue.
16105  * @imax: The maximum interrupt per second limit.
16106  *
16107  * This function creates an event queue, as detailed in @eq, on a port,
16108  * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
16109  *
16110  * The @phba struct is used to send mailbox command to HBA. The @eq struct
16111  * is used to get the entry count and entry size that are necessary to
16112  * determine the number of pages to allocate and use for this queue. This
16113  * function will send the EQ_CREATE mailbox command to the HBA to setup the
16114  * event queue. This function is synchronous and will wait for the mailbox
16115  * command to finish before continuing.
16116  *
16117  * On success this function will return a zero. If unable to allocate enough
16118  * memory this function will return -ENOMEM. If the queue create mailbox command
16119  * fails this function will return -ENXIO.
16120  **/
16121 int
16122 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
16123 {
16124 	struct lpfc_mbx_eq_create *eq_create;
16125 	LPFC_MBOXQ_t *mbox;
16126 	int rc, length, status = 0;
16127 	struct lpfc_dmabuf *dmabuf;
16128 	uint32_t shdr_status, shdr_add_status;
16129 	union lpfc_sli4_cfg_shdr *shdr;
16130 	uint16_t dmult;
16131 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16132 
16133 	/* sanity check on queue memory */
16134 	if (!eq)
16135 		return -ENODEV;
16136 	if (!phba->sli4_hba.pc_sli4_params.supported)
16137 		hw_page_size = SLI4_PAGE_SIZE;
16138 
16139 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16140 	if (!mbox)
16141 		return -ENOMEM;
16142 	length = (sizeof(struct lpfc_mbx_eq_create) -
16143 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16144 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16145 			 LPFC_MBOX_OPCODE_EQ_CREATE,
16146 			 length, LPFC_SLI4_MBX_EMBED);
16147 	eq_create = &mbox->u.mqe.un.eq_create;
16148 	shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
16149 	bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
16150 	       eq->page_count);
16151 	bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
16152 	       LPFC_EQE_SIZE);
16153 	bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
16154 
16155 	/* Use version 2 of CREATE_EQ if eqav is set */
16156 	if (phba->sli4_hba.pc_sli4_params.eqav) {
16157 		bf_set(lpfc_mbox_hdr_version, &shdr->request,
16158 		       LPFC_Q_CREATE_VERSION_2);
16159 		bf_set(lpfc_eq_context_autovalid, &eq_create->u.request.context,
16160 		       phba->sli4_hba.pc_sli4_params.eqav);
16161 	}
16162 
16163 	/* don't setup delay multiplier using EQ_CREATE */
16164 	dmult = 0;
16165 	bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
16166 	       dmult);
16167 	switch (eq->entry_count) {
16168 	default:
16169 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16170 				"0360 Unsupported EQ count. (%d)\n",
16171 				eq->entry_count);
16172 		if (eq->entry_count < 256) {
16173 			status = -EINVAL;
16174 			goto out;
16175 		}
16176 		fallthrough;	/* otherwise default to smallest count */
16177 	case 256:
16178 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16179 		       LPFC_EQ_CNT_256);
16180 		break;
16181 	case 512:
16182 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16183 		       LPFC_EQ_CNT_512);
16184 		break;
16185 	case 1024:
16186 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16187 		       LPFC_EQ_CNT_1024);
16188 		break;
16189 	case 2048:
16190 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16191 		       LPFC_EQ_CNT_2048);
16192 		break;
16193 	case 4096:
16194 		bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
16195 		       LPFC_EQ_CNT_4096);
16196 		break;
16197 	}
16198 	list_for_each_entry(dmabuf, &eq->page_list, list) {
16199 		memset(dmabuf->virt, 0, hw_page_size);
16200 		eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16201 					putPaddrLow(dmabuf->phys);
16202 		eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16203 					putPaddrHigh(dmabuf->phys);
16204 	}
16205 	mbox->vport = phba->pport;
16206 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
16207 	mbox->ctx_buf = NULL;
16208 	mbox->ctx_ndlp = NULL;
16209 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16210 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16211 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16212 	if (shdr_status || shdr_add_status || rc) {
16213 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16214 				"2500 EQ_CREATE mailbox failed with "
16215 				"status x%x add_status x%x, mbx status x%x\n",
16216 				shdr_status, shdr_add_status, rc);
16217 		status = -ENXIO;
16218 	}
16219 	eq->type = LPFC_EQ;
16220 	eq->subtype = LPFC_NONE;
16221 	eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
16222 	if (eq->queue_id == 0xFFFF)
16223 		status = -ENXIO;
16224 	eq->host_index = 0;
16225 	eq->notify_interval = LPFC_EQ_NOTIFY_INTRVL;
16226 	eq->max_proc_limit = LPFC_EQ_MAX_PROC_LIMIT;
16227 out:
16228 	mempool_free(mbox, phba->mbox_mem_pool);
16229 	return status;
16230 }
16231 
16232 /**
16233  * lpfc_sli4_hba_intr_handler_th - SLI4 HBA threaded interrupt handler
16234  * @irq: Interrupt number.
16235  * @dev_id: The device context pointer.
16236  *
16237  * This routine is a mirror of lpfc_sli4_hba_intr_handler, but executed within
16238  * threaded irq context.
16239  *
16240  * Returns
16241  * IRQ_HANDLED - interrupt is handled
16242  * IRQ_NONE - otherwise
16243  **/
16244 irqreturn_t lpfc_sli4_hba_intr_handler_th(int irq, void *dev_id)
16245 {
16246 	struct lpfc_hba *phba;
16247 	struct lpfc_hba_eq_hdl *hba_eq_hdl;
16248 	struct lpfc_queue *fpeq;
16249 	int ecount = 0;
16250 	int hba_eqidx;
16251 	struct lpfc_eq_intr_info *eqi;
16252 
16253 	/* Get the driver's phba structure from the dev_id */
16254 	hba_eq_hdl = (struct lpfc_hba_eq_hdl *)dev_id;
16255 	phba = hba_eq_hdl->phba;
16256 	hba_eqidx = hba_eq_hdl->idx;
16257 
16258 	if (unlikely(!phba))
16259 		return IRQ_NONE;
16260 	if (unlikely(!phba->sli4_hba.hdwq))
16261 		return IRQ_NONE;
16262 
16263 	/* Get to the EQ struct associated with this vector */
16264 	fpeq = phba->sli4_hba.hba_eq_hdl[hba_eqidx].eq;
16265 	if (unlikely(!fpeq))
16266 		return IRQ_NONE;
16267 
16268 	eqi = per_cpu_ptr(phba->sli4_hba.eq_info, raw_smp_processor_id());
16269 	eqi->icnt++;
16270 
16271 	fpeq->last_cpu = raw_smp_processor_id();
16272 
16273 	if (eqi->icnt > LPFC_EQD_ISR_TRIGGER &&
16274 	    fpeq->q_flag & HBA_EQ_DELAY_CHK &&
16275 	    phba->cfg_auto_imax &&
16276 	    fpeq->q_mode != LPFC_MAX_AUTO_EQ_DELAY &&
16277 	    phba->sli.sli_flag & LPFC_SLI_USE_EQDR)
16278 		lpfc_sli4_mod_hba_eq_delay(phba, fpeq, LPFC_MAX_AUTO_EQ_DELAY);
16279 
16280 	/* process and rearm the EQ */
16281 	ecount = lpfc_sli4_process_eq(phba, fpeq, LPFC_QUEUE_REARM,
16282 				      LPFC_THREADED_IRQ);
16283 
16284 	if (unlikely(ecount == 0)) {
16285 		fpeq->EQ_no_entry++;
16286 		if (phba->intr_type == MSIX)
16287 			/* MSI-X vector is dedicated, so a missing EQE is unexpected */
16288 			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
16289 					"3358 MSI-X interrupt with no EQE\n");
16290 		else
16291 			/* Non MSI-X interrupt may be shared; let another handler claim it */
16292 			return IRQ_NONE;
16293 	}
16294 	return IRQ_HANDLED;
16295 }
16296 
16297 /**
16298  * lpfc_cq_create - Create a Completion Queue on the HBA
16299  * @phba: HBA structure that indicates port to create a queue on.
16300  * @cq: The queue structure to use to create the completion queue.
16301  * @eq: The event queue to bind this completion queue to.
16302  * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16303  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16304  *
16305  * This function creates a completion queue, as detailed in @cq, on a port,
16306  * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
16307  *
16308  * The @phba struct is used to send mailbox command to HBA. The @cq struct
16309  * is used to get the entry count and entry size that are necessary to
16310  * determine the number of pages to allocate and use for this queue. The @eq
16311  * is used to indicate which event queue to bind this completion queue to. This
16312  * function will send the CQ_CREATE mailbox command to the HBA to setup the
16313  * completion queue. This function is synchronous and will wait for the mailbox
16314  * command to finish before continuing.
16315  *
16316  * On success this function will return a zero. If unable to allocate enough
16317  * memory this function will return -ENOMEM. If the queue create mailbox command
16318  * fails this function will return -ENXIO.
16319  **/
16320 int
16321 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
16322 	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
16323 {
16324 	struct lpfc_mbx_cq_create *cq_create;
16325 	struct lpfc_dmabuf *dmabuf;
16326 	LPFC_MBOXQ_t *mbox;
16327 	int rc, length, status = 0;
16328 	uint32_t shdr_status, shdr_add_status;
16329 	union lpfc_sli4_cfg_shdr *shdr;
16330 
16331 	/* sanity check on queue memory */
16332 	if (!cq || !eq)
16333 		return -ENODEV;
16334 
16335 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16336 	if (!mbox)
16337 		return -ENOMEM;
16338 	length = (sizeof(struct lpfc_mbx_cq_create) -
16339 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16340 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16341 			 LPFC_MBOX_OPCODE_CQ_CREATE,
16342 			 length, LPFC_SLI4_MBX_EMBED);
16343 	cq_create = &mbox->u.mqe.un.cq_create;
16344 	shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
16345 	bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
16346 		    cq->page_count);
16347 	bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
16348 	bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
16349 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
16350 	       phba->sli4_hba.pc_sli4_params.cqv);
16351 	if (phba->sli4_hba.pc_sli4_params.cqv == LPFC_Q_CREATE_VERSION_2) {
16352 		bf_set(lpfc_mbx_cq_create_page_size, &cq_create->u.request,
16353 		       (cq->page_size / SLI4_PAGE_SIZE));
16354 		bf_set(lpfc_cq_eq_id_2, &cq_create->u.request.context,
16355 		       eq->queue_id);
16356 		bf_set(lpfc_cq_context_autovalid, &cq_create->u.request.context,
16357 		       phba->sli4_hba.pc_sli4_params.cqav);
16358 	} else {
16359 		bf_set(lpfc_cq_eq_id, &cq_create->u.request.context,
16360 		       eq->queue_id);
16361 	}
16362 	switch (cq->entry_count) {
16363 	case 2048:
16364 	case 4096:
16365 		if (phba->sli4_hba.pc_sli4_params.cqv ==
16366 		    LPFC_Q_CREATE_VERSION_2) {
16367 			cq_create->u.request.context.lpfc_cq_context_count =
16368 				cq->entry_count;
16369 			bf_set(lpfc_cq_context_count,
16370 			       &cq_create->u.request.context,
16371 			       LPFC_CQ_CNT_WORD7);
16372 			break;
16373 		}
16374 		fallthrough;
16375 	default:
16376 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16377 				"0361 Unsupported CQ count: "
16378 				"entry cnt %d sz %d pg cnt %d\n",
16379 				cq->entry_count, cq->entry_size,
16380 				cq->page_count);
16381 		if (cq->entry_count < 256) {
16382 			status = -EINVAL;
16383 			goto out;
16384 		}
16385 		fallthrough;	/* otherwise default to smallest count */
16386 	case 256:
16387 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16388 		       LPFC_CQ_CNT_256);
16389 		break;
16390 	case 512:
16391 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16392 		       LPFC_CQ_CNT_512);
16393 		break;
16394 	case 1024:
16395 		bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
16396 		       LPFC_CQ_CNT_1024);
16397 		break;
16398 	}
16399 	list_for_each_entry(dmabuf, &cq->page_list, list) {
16400 		memset(dmabuf->virt, 0, cq->page_size);
16401 		cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16402 					putPaddrLow(dmabuf->phys);
16403 		cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16404 					putPaddrHigh(dmabuf->phys);
16405 	}
16406 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16407 
16408 	/* The IOCTL status is embedded in the mailbox subheader. */
16409 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16410 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16411 	if (shdr_status || shdr_add_status || rc) {
16412 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16413 				"2501 CQ_CREATE mailbox failed with "
16414 				"status x%x add_status x%x, mbx status x%x\n",
16415 				shdr_status, shdr_add_status, rc);
16416 		status = -ENXIO;
16417 		goto out;
16418 	}
16419 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16420 	if (cq->queue_id == 0xFFFF) {
16421 		status = -ENXIO;
16422 		goto out;
16423 	}
16424 	/* link the cq onto the parent eq child list */
16425 	list_add_tail(&cq->list, &eq->child_list);
16426 	/* Set up completion queue's type and subtype */
16427 	cq->type = type;
16428 	cq->subtype = subtype;
16429 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
16430 	cq->assoc_qid = eq->queue_id;
16431 	cq->assoc_qp = eq;
16432 	cq->host_index = 0;
16433 	cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16434 	cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit, cq->entry_count);
16435 
16436 	if (cq->queue_id > phba->sli4_hba.cq_max)
16437 		phba->sli4_hba.cq_max = cq->queue_id;
16438 out:
16439 	mempool_free(mbox, phba->mbox_mem_pool);
16440 	return status;
16441 }
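
/*
 * Editorial usage sketch (assumption): a completion queue is always created
 * against an existing event queue; the driver's setup path does roughly:
 *
 *	rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq,
 *			    phba->sli4_hba.hdwq[0].hba_eq,
 *			    LPFC_WCQ, LPFC_ELS);
 *	if (rc)
 *		return rc;	cq was not linked onto eq->child_list
 */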
16442 
16443 /**
16444  * lpfc_cq_create_set - Create a set of Completion Queues on the HBA for MRQ
16445  * @phba: HBA structure that indicates port to create a queue on.
16446  * @cqp: The queue structure array to use to create the completion queues.
16447  * @hdwq: The hardware queue array  with the EQ to bind completion queues to.
16448  * @type: Type of queue (EQ, GCQ, MCQ, WCQ, etc).
16449  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
16450  *
16451  * This function creates a set of completion queues, to support MRQ,
16452  * as detailed in @cqp, on a port,
16453  * described by @phba by sending a CREATE_CQ_SET mailbox command to the HBA.
16454  *
16455  * The @phba struct is used to send mailbox command to HBA. The @cqp array
16456  * is used to get the entry count and entry size that are necessary to
16457  * determine the number of pages to allocate and use for each queue. The
16458  * @hdwq array indicates which event queue to bind each completion queue
16459  * to. This function sends the CREATE_CQ_SET mailbox command to setup each
16460  * completion queue. This function is synchronous and will wait for the mailbox
16461  * command to finish before continuing.
16462  *
16463  * On success this function will return a zero. If unable to allocate enough
16464  * memory this function will return -ENOMEM. If the queue create mailbox command
16465  * fails this function will return -ENXIO.
16466  **/
16467 int
16468 lpfc_cq_create_set(struct lpfc_hba *phba, struct lpfc_queue **cqp,
16469 		   struct lpfc_sli4_hdw_queue *hdwq, uint32_t type,
16470 		   uint32_t subtype)
16471 {
16472 	struct lpfc_queue *cq;
16473 	struct lpfc_queue *eq;
16474 	struct lpfc_mbx_cq_create_set *cq_set;
16475 	struct lpfc_dmabuf *dmabuf;
16476 	LPFC_MBOXQ_t *mbox;
16477 	int rc, length, alloclen, status = 0;
16478 	int cnt, idx, numcq, page_idx = 0;
16479 	uint32_t shdr_status, shdr_add_status;
16480 	union lpfc_sli4_cfg_shdr *shdr;
16481 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16482 
16483 	/* sanity check on queue memory */
16484 	numcq = phba->cfg_nvmet_mrq;
16485 	if (!cqp || !hdwq || !numcq)
16486 		return -ENODEV;
16487 
16488 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16489 	if (!mbox)
16490 		return -ENOMEM;
16491 
16492 	length = sizeof(struct lpfc_mbx_cq_create_set);
16493 	length += ((numcq * cqp[0]->page_count) *
16494 		   sizeof(struct dma_address));
16495 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16496 			LPFC_MBOX_OPCODE_FCOE_CQ_CREATE_SET, length,
16497 			LPFC_SLI4_MBX_NEMBED);
16498 	if (alloclen < length) {
16499 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16500 				"3098 Allocated DMA memory size (%d) is "
16501 				"less than the requested DMA memory size "
16502 				"(%d)\n", alloclen, length);
16503 		status = -ENOMEM;
16504 		goto out;
16505 	}
16506 	cq_set = mbox->sge_array->addr[0];
16507 	shdr = (union lpfc_sli4_cfg_shdr *)&cq_set->cfg_shdr;
16508 	bf_set(lpfc_mbox_hdr_version, &shdr->request, 0);
16509 
16510 	for (idx = 0; idx < numcq; idx++) {
16511 		cq = cqp[idx];
16512 		eq = hdwq[idx].hba_eq;
16513 		if (!cq || !eq) {
16514 			status = -ENOMEM;
16515 			goto out;
16516 		}
16517 		if (!phba->sli4_hba.pc_sli4_params.supported)
16518 			hw_page_size = cq->page_size;
16519 
16520 		switch (idx) {
16521 		case 0:
16522 			bf_set(lpfc_mbx_cq_create_set_page_size,
16523 			       &cq_set->u.request,
16524 			       (hw_page_size / SLI4_PAGE_SIZE));
16525 			bf_set(lpfc_mbx_cq_create_set_num_pages,
16526 			       &cq_set->u.request, cq->page_count);
16527 			bf_set(lpfc_mbx_cq_create_set_evt,
16528 			       &cq_set->u.request, 1);
16529 			bf_set(lpfc_mbx_cq_create_set_valid,
16530 			       &cq_set->u.request, 1);
16531 			bf_set(lpfc_mbx_cq_create_set_cqe_size,
16532 			       &cq_set->u.request, 0);
16533 			bf_set(lpfc_mbx_cq_create_set_num_cq,
16534 			       &cq_set->u.request, numcq);
16535 			bf_set(lpfc_mbx_cq_create_set_autovalid,
16536 			       &cq_set->u.request,
16537 			       phba->sli4_hba.pc_sli4_params.cqav);
16538 			switch (cq->entry_count) {
16539 			case 2048:
16540 			case 4096:
16541 				if (phba->sli4_hba.pc_sli4_params.cqv ==
16542 				    LPFC_Q_CREATE_VERSION_2) {
16543 					bf_set(lpfc_mbx_cq_create_set_cqe_cnt_lo,
16544 					       &cq_set->u.request,
16545 					       cq->entry_count);
16546 					bf_set(lpfc_mbx_cq_create_set_cqecnt,
16547 					       &cq_set->u.request,
16548 					       LPFC_CQ_CNT_WORD7);
16549 					break;
16550 				}
16551 				fallthrough;
16552 			default:
16553 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16554 						"3118 Bad CQ count. (%d)\n",
16555 						cq->entry_count);
16556 				if (cq->entry_count < 256) {
16557 					status = -EINVAL;
16558 					goto out;
16559 				}
16560 				fallthrough;	/* otherwise default to smallest */
16561 			case 256:
16562 				bf_set(lpfc_mbx_cq_create_set_cqecnt,
16563 				       &cq_set->u.request, LPFC_CQ_CNT_256);
16564 				break;
16565 			case 512:
16566 				bf_set(lpfc_mbx_cq_create_set_cqecnt,
16567 				       &cq_set->u.request, LPFC_CQ_CNT_512);
16568 				break;
16569 			case 1024:
16570 				bf_set(lpfc_mbx_cq_create_set_cqecnt,
16571 				       &cq_set->u.request, LPFC_CQ_CNT_1024);
16572 				break;
16573 			}
16574 			bf_set(lpfc_mbx_cq_create_set_eq_id0,
16575 			       &cq_set->u.request, eq->queue_id);
16576 			break;
16577 		case 1:
16578 			bf_set(lpfc_mbx_cq_create_set_eq_id1,
16579 			       &cq_set->u.request, eq->queue_id);
16580 			break;
16581 		case 2:
16582 			bf_set(lpfc_mbx_cq_create_set_eq_id2,
16583 			       &cq_set->u.request, eq->queue_id);
16584 			break;
16585 		case 3:
16586 			bf_set(lpfc_mbx_cq_create_set_eq_id3,
16587 			       &cq_set->u.request, eq->queue_id);
16588 			break;
16589 		case 4:
16590 			bf_set(lpfc_mbx_cq_create_set_eq_id4,
16591 			       &cq_set->u.request, eq->queue_id);
16592 			break;
16593 		case 5:
16594 			bf_set(lpfc_mbx_cq_create_set_eq_id5,
16595 			       &cq_set->u.request, eq->queue_id);
16596 			break;
16597 		case 6:
16598 			bf_set(lpfc_mbx_cq_create_set_eq_id6,
16599 			       &cq_set->u.request, eq->queue_id);
16600 			break;
16601 		case 7:
16602 			bf_set(lpfc_mbx_cq_create_set_eq_id7,
16603 			       &cq_set->u.request, eq->queue_id);
16604 			break;
16605 		case 8:
16606 			bf_set(lpfc_mbx_cq_create_set_eq_id8,
16607 			       &cq_set->u.request, eq->queue_id);
16608 			break;
16609 		case 9:
16610 			bf_set(lpfc_mbx_cq_create_set_eq_id9,
16611 			       &cq_set->u.request, eq->queue_id);
16612 			break;
16613 		case 10:
16614 			bf_set(lpfc_mbx_cq_create_set_eq_id10,
16615 			       &cq_set->u.request, eq->queue_id);
16616 			break;
16617 		case 11:
16618 			bf_set(lpfc_mbx_cq_create_set_eq_id11,
16619 			       &cq_set->u.request, eq->queue_id);
16620 			break;
16621 		case 12:
16622 			bf_set(lpfc_mbx_cq_create_set_eq_id12,
16623 			       &cq_set->u.request, eq->queue_id);
16624 			break;
16625 		case 13:
16626 			bf_set(lpfc_mbx_cq_create_set_eq_id13,
16627 			       &cq_set->u.request, eq->queue_id);
16628 			break;
16629 		case 14:
16630 			bf_set(lpfc_mbx_cq_create_set_eq_id14,
16631 			       &cq_set->u.request, eq->queue_id);
16632 			break;
16633 		case 15:
16634 			bf_set(lpfc_mbx_cq_create_set_eq_id15,
16635 			       &cq_set->u.request, eq->queue_id);
16636 			break;
16637 		}
16638 
16639 		/* link the cq onto the parent eq child list */
16640 		list_add_tail(&cq->list, &eq->child_list);
16641 		/* Set up completion queue's type and subtype */
16642 		cq->type = type;
16643 		cq->subtype = subtype;
16644 		cq->assoc_qid = eq->queue_id;
16645 		cq->assoc_qp = eq;
16646 		cq->host_index = 0;
16647 		cq->notify_interval = LPFC_CQ_NOTIFY_INTRVL;
16648 		cq->max_proc_limit = min(phba->cfg_cq_max_proc_limit,
16649 					 cq->entry_count);
16650 		cq->chann = idx;
16651 
16652 		rc = 0;
16653 		list_for_each_entry(dmabuf, &cq->page_list, list) {
16654 			memset(dmabuf->virt, 0, hw_page_size);
16655 			cnt = page_idx + dmabuf->buffer_tag;
16656 			cq_set->u.request.page[cnt].addr_lo =
16657 					putPaddrLow(dmabuf->phys);
16658 			cq_set->u.request.page[cnt].addr_hi =
16659 					putPaddrHigh(dmabuf->phys);
16660 			rc++;
16661 		}
16662 		page_idx += rc;
16663 	}
16664 
16665 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16666 
16667 	/* The IOCTL status is embedded in the mailbox subheader. */
16668 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16669 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16670 	if (shdr_status || shdr_add_status || rc) {
16671 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16672 				"3119 CQ_CREATE_SET mailbox failed with "
16673 				"status x%x add_status x%x, mbx status x%x\n",
16674 				shdr_status, shdr_add_status, rc);
16675 		status = -ENXIO;
16676 		goto out;
16677 	}
16678 	rc = bf_get(lpfc_mbx_cq_create_set_base_id, &cq_set->u.response);
16679 	if (rc == 0xFFFF) {
16680 		status = -ENXIO;
16681 		goto out;
16682 	}
16683 
16684 	for (idx = 0; idx < numcq; idx++) {
16685 		cq = cqp[idx];
16686 		cq->queue_id = rc + idx;
16687 		if (cq->queue_id > phba->sli4_hba.cq_max)
16688 			phba->sli4_hba.cq_max = cq->queue_id;
16689 	}
16690 
16691 out:
16692 	lpfc_sli4_mbox_cmd_free(phba, mbox);
16693 	return status;
16694 }
16695 
16696 /**
16697  * lpfc_mq_create_fb_init - Send MQ_CREATE without async events registration
16698  * @phba: HBA structure that indicates port to create a queue on.
16699  * @mq: The queue structure to use to create the mailbox queue.
16700  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
16701  * @cq: The completion queue to associate with this mq.
16702  *
16703  * This function provides failback (fb) functionality when the
16704  * mq_create_ext fails on older FW generations.  It's purpose is identical
16705  * to mq_create_ext otherwise.
16706  *
16707  * This routine cannot fail as all attributes were previously accessed and
16708  * initialized in mq_create_ext.
16709  **/
16710 static void
16711 lpfc_mq_create_fb_init(struct lpfc_hba *phba, struct lpfc_queue *mq,
16712 		       LPFC_MBOXQ_t *mbox, struct lpfc_queue *cq)
16713 {
16714 	struct lpfc_mbx_mq_create *mq_create;
16715 	struct lpfc_dmabuf *dmabuf;
16716 	int length;
16717 
16718 	length = (sizeof(struct lpfc_mbx_mq_create) -
16719 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16720 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16721 			 LPFC_MBOX_OPCODE_MQ_CREATE,
16722 			 length, LPFC_SLI4_MBX_EMBED);
16723 	mq_create = &mbox->u.mqe.un.mq_create;
16724 	bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
16725 	       mq->page_count);
16726 	bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
16727 	       cq->queue_id);
16728 	bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
16729 	switch (mq->entry_count) {
16730 	case 16:
16731 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16732 		       LPFC_MQ_RING_SIZE_16);
16733 		break;
16734 	case 32:
16735 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16736 		       LPFC_MQ_RING_SIZE_32);
16737 		break;
16738 	case 64:
16739 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16740 		       LPFC_MQ_RING_SIZE_64);
16741 		break;
16742 	case 128:
16743 		bf_set(lpfc_mq_context_ring_size, &mq_create->u.request.context,
16744 		       LPFC_MQ_RING_SIZE_128);
16745 		break;
16746 	}
16747 	list_for_each_entry(dmabuf, &mq->page_list, list) {
16748 		mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
16749 			putPaddrLow(dmabuf->phys);
16750 		mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
16751 			putPaddrHigh(dmabuf->phys);
16752 	}
16753 }
16754 
16755 /**
16756  * lpfc_mq_create - Create a mailbox Queue on the HBA
16757  * @phba: HBA structure that indicates port to create a queue on.
16758  * @mq: The queue structure to use to create the mailbox queue.
16759  * @cq: The completion queue to associate with this mq.
16760  * @subtype: The queue's subtype.
16761  *
16762  * This function creates a mailbox queue, as detailed in @mq, on a port,
16763  * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
16764  *
16765  * The @phba struct is used to send mailbox command to HBA. The @mq struct
16766  * is used to get the entry count and entry size that are necessary to
16767  * determine the number of pages to allocate and use for this queue. This
16768  * function will send the MQ_CREATE mailbox command to the HBA to setup the
16769  * mailbox queue. This function is synchronous and will wait for the mailbox
16770  * command to finish before continuing.
16771  *
16772  * On success this function will return a zero. If unable to allocate enough
16773  * memory this function will return -ENOMEM. If the queue create mailbox command
16774  * fails this function will return -ENXIO.
16775  **/
16776 int32_t
16777 lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
16778 	       struct lpfc_queue *cq, uint32_t subtype)
16779 {
16780 	struct lpfc_mbx_mq_create *mq_create;
16781 	struct lpfc_mbx_mq_create_ext *mq_create_ext;
16782 	struct lpfc_dmabuf *dmabuf;
16783 	LPFC_MBOXQ_t *mbox;
16784 	int rc, length, status = 0;
16785 	uint32_t shdr_status, shdr_add_status;
16786 	union lpfc_sli4_cfg_shdr *shdr;
16787 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16788 
16789 	/* sanity check on queue memory */
16790 	if (!mq || !cq)
16791 		return -ENODEV;
16792 	if (!phba->sli4_hba.pc_sli4_params.supported)
16793 		hw_page_size = SLI4_PAGE_SIZE;
16794 
16795 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16796 	if (!mbox)
16797 		return -ENOMEM;
16798 	length = (sizeof(struct lpfc_mbx_mq_create_ext) -
16799 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16800 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
16801 			 LPFC_MBOX_OPCODE_MQ_CREATE_EXT,
16802 			 length, LPFC_SLI4_MBX_EMBED);
16803 
16804 	mq_create_ext = &mbox->u.mqe.un.mq_create_ext;
16805 	shdr = (union lpfc_sli4_cfg_shdr *) &mq_create_ext->header.cfg_shdr;
16806 	bf_set(lpfc_mbx_mq_create_ext_num_pages,
16807 	       &mq_create_ext->u.request, mq->page_count);
16808 	bf_set(lpfc_mbx_mq_create_ext_async_evt_link,
16809 	       &mq_create_ext->u.request, 1);
16810 	bf_set(lpfc_mbx_mq_create_ext_async_evt_fip,
16811 	       &mq_create_ext->u.request, 1);
16812 	bf_set(lpfc_mbx_mq_create_ext_async_evt_group5,
16813 	       &mq_create_ext->u.request, 1);
16814 	bf_set(lpfc_mbx_mq_create_ext_async_evt_fc,
16815 	       &mq_create_ext->u.request, 1);
16816 	bf_set(lpfc_mbx_mq_create_ext_async_evt_sli,
16817 	       &mq_create_ext->u.request, 1);
16818 	bf_set(lpfc_mq_context_valid, &mq_create_ext->u.request.context, 1);
16819 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
16820 	       phba->sli4_hba.pc_sli4_params.mqv);
16821 	if (phba->sli4_hba.pc_sli4_params.mqv == LPFC_Q_CREATE_VERSION_1)
16822 		bf_set(lpfc_mbx_mq_create_ext_cq_id, &mq_create_ext->u.request,
16823 		       cq->queue_id);
16824 	else
16825 		bf_set(lpfc_mq_context_cq_id, &mq_create_ext->u.request.context,
16826 		       cq->queue_id);
16827 	switch (mq->entry_count) {
16828 	default:
16829 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16830 				"0362 Unsupported MQ count. (%d)\n",
16831 				mq->entry_count);
16832 		if (mq->entry_count < 16) {
16833 			status = -EINVAL;
16834 			goto out;
16835 		}
16836 		fallthrough;	/* otherwise default to smallest count */
16837 	case 16:
16838 		bf_set(lpfc_mq_context_ring_size,
16839 		       &mq_create_ext->u.request.context,
16840 		       LPFC_MQ_RING_SIZE_16);
16841 		break;
16842 	case 32:
16843 		bf_set(lpfc_mq_context_ring_size,
16844 		       &mq_create_ext->u.request.context,
16845 		       LPFC_MQ_RING_SIZE_32);
16846 		break;
16847 	case 64:
16848 		bf_set(lpfc_mq_context_ring_size,
16849 		       &mq_create_ext->u.request.context,
16850 		       LPFC_MQ_RING_SIZE_64);
16851 		break;
16852 	case 128:
16853 		bf_set(lpfc_mq_context_ring_size,
16854 		       &mq_create_ext->u.request.context,
16855 		       LPFC_MQ_RING_SIZE_128);
16856 		break;
16857 	}
16858 	list_for_each_entry(dmabuf, &mq->page_list, list) {
16859 		memset(dmabuf->virt, 0, hw_page_size);
16860 		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_lo =
16861 					putPaddrLow(dmabuf->phys);
16862 		mq_create_ext->u.request.page[dmabuf->buffer_tag].addr_hi =
16863 					putPaddrHigh(dmabuf->phys);
16864 	}
16865 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16866 	mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16867 			      &mq_create_ext->u.response);
16868 	if (rc != MBX_SUCCESS) {
16869 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
16870 				"2795 MQ_CREATE_EXT failed with "
16871 				"status x%x. Failback to MQ_CREATE.\n",
16872 				rc);
16873 		lpfc_mq_create_fb_init(phba, mq, mbox, cq);
16874 		mq_create = &mbox->u.mqe.un.mq_create;
16875 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
16876 		shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
16877 		mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id,
16878 				      &mq_create->u.response);
16879 	}
16880 
16881 	/* The IOCTL status is embedded in the mailbox subheader. */
16882 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
16883 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
16884 	if (shdr_status || shdr_add_status || rc) {
16885 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
16886 				"2502 MQ_CREATE mailbox failed with "
16887 				"status x%x add_status x%x, mbx status x%x\n",
16888 				shdr_status, shdr_add_status, rc);
16889 		status = -ENXIO;
16890 		goto out;
16891 	}
16892 	if (mq->queue_id == 0xFFFF) {
16893 		status = -ENXIO;
16894 		goto out;
16895 	}
16896 	mq->type = LPFC_MQ;
16897 	mq->assoc_qid = cq->queue_id;
16898 	mq->subtype = subtype;
16899 	mq->host_index = 0;
16900 	mq->hba_index = 0;
16901 
16902 	/* link the mq onto the parent cq child list */
16903 	list_add_tail(&mq->list, &cq->child_list);
16904 out:
16905 	mempool_free(mbox, phba->mbox_mem_pool);
16906 	return status;
16907 }
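
/*
 * Editorial note on the fallback pattern above, as a generic sketch with
 * invented names: try the extended command first, then reinitialize the
 * same mailbox for the base command if the port rejects it:
 *
 *	rc = issue_ext_create(mbox);
 *	if (rc != MBX_SUCCESS) {
 *		reinit_for_base_create(mbox);	lpfc_mq_create_fb_init() here
 *		rc = issue_base_create(mbox);	reuse the same mailbox
 *	}
 */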
16908 
16909 /**
16910  * lpfc_wq_create - Create a Work Queue on the HBA
16911  * @phba: HBA structure that indicates port to create a queue on.
16912  * @wq: The queue structure to use to create the work queue.
16913  * @cq: The completion queue to bind this work queue to.
16914  * @subtype: The subtype of the work queue indicating its functionality.
16915  *
16916  * This function creates a work queue, as detailed in @wq, on a port, described
16917  * by @phba by sending a WQ_CREATE mailbox command to the HBA.
16918  *
16919  * The @phba struct is used to send mailbox command to HBA. The @wq struct
16920  * is used to get the entry count and entry size that are necessary to
16921  * determine the number of pages to allocate and use for this queue. The @cq
16922  * is used to indicate which completion queue to bind this work queue to. This
16923  * function will send the WQ_CREATE mailbox command to the HBA to setup the
16924  * work queue. This function is synchronous and will wait for the mailbox
16925  * command to finish before continuing.
16926  *
16927  * On success this function will return a zero. If unable to allocate enough
16928  * memory this function will return -ENOMEM. If the queue create mailbox command
16929  * fails this function will return -ENXIO.
16930  **/
16931 int
16932 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
16933 	       struct lpfc_queue *cq, uint32_t subtype)
16934 {
16935 	struct lpfc_mbx_wq_create *wq_create;
16936 	struct lpfc_dmabuf *dmabuf;
16937 	LPFC_MBOXQ_t *mbox;
16938 	int rc, length, status = 0;
16939 	uint32_t shdr_status, shdr_add_status;
16940 	union lpfc_sli4_cfg_shdr *shdr;
16941 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
16942 	struct dma_address *page;
16943 	void __iomem *bar_memmap_p;
16944 	uint32_t db_offset;
16945 	uint16_t pci_barset;
16946 	uint8_t dpp_barset;
16947 	uint32_t dpp_offset;
16948 	uint8_t wq_create_version;
16949 
16950 	/* sanity check on queue memory */
16951 	if (!wq || !cq)
16952 		return -ENODEV;
16953 	if (!phba->sli4_hba.pc_sli4_params.supported)
16954 		hw_page_size = wq->page_size;
16955 
16956 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
16957 	if (!mbox)
16958 		return -ENOMEM;
16959 	length = (sizeof(struct lpfc_mbx_wq_create) -
16960 		  sizeof(struct lpfc_sli4_cfg_mhdr));
16961 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
16962 			 LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
16963 			 length, LPFC_SLI4_MBX_EMBED);
16964 	wq_create = &mbox->u.mqe.un.wq_create;
16965 	shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
16966 	bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
16967 		    wq->page_count);
16968 	bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
16969 		    cq->queue_id);
16970 
16971 	/* wqv is the earliest version supported, NOT the latest */
16972 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
16973 	       phba->sli4_hba.pc_sli4_params.wqv);
16974 
16975 	if ((phba->sli4_hba.pc_sli4_params.wqsize & LPFC_WQ_SZ128_SUPPORT) ||
16976 	    (wq->page_size > SLI4_PAGE_SIZE))
16977 		wq_create_version = LPFC_Q_CREATE_VERSION_1;
16978 	else
16979 		wq_create_version = LPFC_Q_CREATE_VERSION_0;
16980 
16981 	switch (wq_create_version) {
16982 	case LPFC_Q_CREATE_VERSION_1:
16983 		bf_set(lpfc_mbx_wq_create_wqe_count, &wq_create->u.request_1,
16984 		       wq->entry_count);
16985 		bf_set(lpfc_mbox_hdr_version, &shdr->request,
16986 		       LPFC_Q_CREATE_VERSION_1);
16987 
16988 		switch (wq->entry_size) {
16989 		default:
16990 		case 64:
16991 			bf_set(lpfc_mbx_wq_create_wqe_size,
16992 			       &wq_create->u.request_1,
16993 			       LPFC_WQ_WQE_SIZE_64);
16994 			break;
16995 		case 128:
16996 			bf_set(lpfc_mbx_wq_create_wqe_size,
16997 			       &wq_create->u.request_1,
16998 			       LPFC_WQ_WQE_SIZE_128);
16999 			break;
17000 		}
17001 		/* Request DPP by default */
17002 		bf_set(lpfc_mbx_wq_create_dpp_req, &wq_create->u.request_1, 1);
17003 		bf_set(lpfc_mbx_wq_create_page_size,
17004 		       &wq_create->u.request_1,
17005 		       (wq->page_size / SLI4_PAGE_SIZE));
17006 		page = wq_create->u.request_1.page;
17007 		break;
17008 	default:
17009 		page = wq_create->u.request.page;
17010 		break;
17011 	}
17012 
17013 	list_for_each_entry(dmabuf, &wq->page_list, list) {
17014 		memset(dmabuf->virt, 0, hw_page_size);
17015 		page[dmabuf->buffer_tag].addr_lo = putPaddrLow(dmabuf->phys);
17016 		page[dmabuf->buffer_tag].addr_hi = putPaddrHigh(dmabuf->phys);
17017 	}
17018 
17019 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17020 		bf_set(lpfc_mbx_wq_create_dua, &wq_create->u.request, 1);
17021 
17022 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17023 	/* The IOCTL status is embedded in the mailbox subheader. */
17024 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17025 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17026 	if (shdr_status || shdr_add_status || rc) {
17027 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17028 				"2503 WQ_CREATE mailbox failed with "
17029 				"status x%x add_status x%x, mbx status x%x\n",
17030 				shdr_status, shdr_add_status, rc);
17031 		status = -ENXIO;
17032 		goto out;
17033 	}
17034 
17035 	if (wq_create_version == LPFC_Q_CREATE_VERSION_0)
17036 		wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id,
17037 					&wq_create->u.response);
17038 	else
17039 		wq->queue_id = bf_get(lpfc_mbx_wq_create_v1_q_id,
17040 					&wq_create->u.response_1);
17041 
17042 	if (wq->queue_id == 0xFFFF) {
17043 		status = -ENXIO;
17044 		goto out;
17045 	}
17046 
17047 	wq->db_format = LPFC_DB_LIST_FORMAT;
17048 	if (wq_create_version == LPFC_Q_CREATE_VERSION_0) {
17049 		if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
17050 			wq->db_format = bf_get(lpfc_mbx_wq_create_db_format,
17051 					       &wq_create->u.response);
17052 			if ((wq->db_format != LPFC_DB_LIST_FORMAT) &&
17053 			    (wq->db_format != LPFC_DB_RING_FORMAT)) {
17054 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17055 						"3265 WQ[%d] doorbell format "
17056 						"not supported: x%x\n",
17057 						wq->queue_id, wq->db_format);
17058 				status = -EINVAL;
17059 				goto out;
17060 			}
17061 			pci_barset = bf_get(lpfc_mbx_wq_create_bar_set,
17062 					    &wq_create->u.response);
17063 			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17064 								   pci_barset);
17065 			if (!bar_memmap_p) {
17066 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17067 						"3263 WQ[%d] failed to memmap "
17068 						"pci barset:x%x\n",
17069 						wq->queue_id, pci_barset);
17070 				status = -ENOMEM;
17071 				goto out;
17072 			}
17073 			db_offset = wq_create->u.response.doorbell_offset;
17074 			if ((db_offset != LPFC_ULP0_WQ_DOORBELL) &&
17075 			    (db_offset != LPFC_ULP1_WQ_DOORBELL)) {
17076 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17077 						"3252 WQ[%d] doorbell offset "
17078 						"not supported: x%x\n",
17079 						wq->queue_id, db_offset);
17080 				status = -EINVAL;
17081 				goto out;
17082 			}
17083 			wq->db_regaddr = bar_memmap_p + db_offset;
17084 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17085 					"3264 WQ[%d]: barset:x%x, offset:x%x, "
17086 					"format:x%x\n", wq->queue_id,
17087 					pci_barset, db_offset, wq->db_format);
17088 		} else
17089 			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17090 	} else {
17091 		/* Check if DPP was honored by the firmware */
17092 		wq->dpp_enable = bf_get(lpfc_mbx_wq_create_dpp_rsp,
17093 				    &wq_create->u.response_1);
17094 		if (wq->dpp_enable) {
17095 			pci_barset = bf_get(lpfc_mbx_wq_create_v1_bar_set,
17096 					    &wq_create->u.response_1);
17097 			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17098 								   pci_barset);
17099 			if (!bar_memmap_p) {
17100 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17101 						"3267 WQ[%d] failed to memmap "
17102 						"pci barset:x%x\n",
17103 						wq->queue_id, pci_barset);
17104 				status = -ENOMEM;
17105 				goto out;
17106 			}
17107 			db_offset = wq_create->u.response_1.doorbell_offset;
17108 			wq->db_regaddr = bar_memmap_p + db_offset;
17109 			wq->dpp_id = bf_get(lpfc_mbx_wq_create_dpp_id,
17110 					    &wq_create->u.response_1);
17111 			dpp_barset = bf_get(lpfc_mbx_wq_create_dpp_bar,
17112 					    &wq_create->u.response_1);
17113 			bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba,
17114 								   dpp_barset);
17115 			if (!bar_memmap_p) {
17116 				lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17117 						"3268 WQ[%d] failed to memmap "
17118 						"pci barset:x%x\n",
17119 						wq->queue_id, dpp_barset);
17120 				status = -ENOMEM;
17121 				goto out;
17122 			}
17123 			dpp_offset = wq_create->u.response_1.dpp_offset;
17124 			wq->dpp_regaddr = bar_memmap_p + dpp_offset;
17125 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17126 					"3271 WQ[%d]: barset:x%x, offset:x%x, "
17127 					"dpp_id:x%x dpp_barset:x%x "
17128 					"dpp_offset:x%x\n",
17129 					wq->queue_id, pci_barset, db_offset,
17130 					wq->dpp_id, dpp_barset, dpp_offset);
17131 
17132 #ifdef CONFIG_X86
17133 			/* Enable combined writes for DPP aperture */
17134 			bar_memmap_p = lpfc_dpp_wc_map(phba, dpp_barset);
17135 			if (!bar_memmap_p) {
17136 				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
17137 					"3272 Cannot setup Combined "
17138 					"Write on WQ[%d] - disable DPP\n",
17139 					wq->queue_id);
17140 				phba->cfg_enable_dpp = 0;
17141 			} else {
17142 				wq->dpp_regaddr = bar_memmap_p + dpp_offset;
17143 			}
17144 #else
17145 			phba->cfg_enable_dpp = 0;
17146 #endif
17147 		} else
17148 			wq->db_regaddr = phba->sli4_hba.WQDBregaddr;
17149 	}
17150 	wq->pring = kzalloc(sizeof(*wq->pring), GFP_KERNEL);
17151 	if (!wq->pring) {
17152 		status = -ENOMEM;
17153 		goto out;
17154 	}
17155 	wq->type = LPFC_WQ;
17156 	wq->assoc_qid = cq->queue_id;
17157 	wq->subtype = subtype;
17158 	wq->host_index = 0;
17159 	wq->hba_index = 0;
17160 	wq->notify_interval = LPFC_WQ_NOTIFY_INTRVL;
17161 
17162 	/* link the wq onto the parent cq child list */
17163 	list_add_tail(&wq->list, &cq->child_list);
17164 out:
17165 	mempool_free(mbox, phba->mbox_mem_pool);
17166 	return status;
17167 }
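
/*
 * Editor's note: a minimal, hypothetical caller sketch for the create path
 * above. It assumes @wq and @cq were already allocated (elsewhere the
 * driver uses lpfc_sli4_queue_alloc() for this) and that the CQ has been
 * created first, since WQ_CREATE binds the WQ to cq->queue_id.
 */
static int lpfc_example_setup_io_wq(struct lpfc_hba *phba,
				    struct lpfc_queue *wq,
				    struct lpfc_queue *cq)
{
	int rc;

	rc = lpfc_wq_create(phba, wq, cq, LPFC_IO);
	if (rc)
		/* -ENOMEM: mailbox/pring allocation; -ENXIO: firmware NAK */
		return rc;

	/* On success wq->db_regaddr points at the doorbell to ring. */
	return 0;
}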
17168 
17169 /**
17170  * lpfc_rq_create - Create a Receive Queue on the HBA
17171  * @phba: HBA structure that indicates port to create a queue on.
17172  * @hrq: The queue structure to use to create the header receive queue.
17173  * @drq: The queue structure to use to create the data receive queue.
17174  * @cq: The completion queue to bind these receive queues to.
17175  * @subtype: The subtype of the receive queues indicating their functionality.
17176  *
17177  * This function creates a receive buffer queue pair, as detailed in @hrq and
17178  * @drq, on a port, described by @phba, by sending an RQ_CREATE mailbox command
17179  * to the HBA.
17180  *
17181  * The @phba struct is used to send the mailbox command to the HBA. The @drq
17182  * and @hrq structs are used to get the entry count that is necessary to
17183  * determine the number of pages to use for each queue. The @cq is used to
17184  * indicate which completion queue the buffers posted to these queues are
17185  * bound to. This function will send the RQ_CREATE mailbox command to the HBA
17186  * to set up the receive queue pair. The mailbox command is issued in polled
17187  * mode, so this function waits for it to finish before returning.
17188  *
17189  * On success this function will return a zero. If unable to allocate enough
17190  * memory this function will return -ENOMEM. If the queue create mailbox command
17191  * fails this function will return -ENXIO.
17192  **/
17193 int
17194 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17195 	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
17196 {
17197 	struct lpfc_mbx_rq_create *rq_create;
17198 	struct lpfc_dmabuf *dmabuf;
17199 	LPFC_MBOXQ_t *mbox;
17200 	int rc, length, status = 0;
17201 	uint32_t shdr_status, shdr_add_status;
17202 	union lpfc_sli4_cfg_shdr *shdr;
17203 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17204 	void __iomem *bar_memmap_p;
17205 	uint32_t db_offset;
17206 	uint16_t pci_barset;
17207 
17208 	/* sanity check on queue memory */
17209 	if (!hrq || !drq || !cq)
17210 		return -ENODEV;
17211 	if (!phba->sli4_hba.pc_sli4_params.supported)
17212 		hw_page_size = SLI4_PAGE_SIZE;
17213 
17214 	if (hrq->entry_count != drq->entry_count)
17215 		return -EINVAL;
17216 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17217 	if (!mbox)
17218 		return -ENOMEM;
17219 	length = (sizeof(struct lpfc_mbx_rq_create) -
17220 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17221 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17222 			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17223 			 length, LPFC_SLI4_MBX_EMBED);
17224 	rq_create = &mbox->u.mqe.un.rq_create;
17225 	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17226 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
17227 	       phba->sli4_hba.pc_sli4_params.rqv);
17228 	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17229 		bf_set(lpfc_rq_context_rqe_count_1,
17230 		       &rq_create->u.request.context,
17231 		       hrq->entry_count);
17232 		rq_create->u.request.context.buffer_size = LPFC_HDR_BUF_SIZE;
17233 		bf_set(lpfc_rq_context_rqe_size,
17234 		       &rq_create->u.request.context,
17235 		       LPFC_RQE_SIZE_8);
17236 		bf_set(lpfc_rq_context_page_size,
17237 		       &rq_create->u.request.context,
17238 		       LPFC_RQ_PAGE_SIZE_4096);
17239 	} else {
17240 		switch (hrq->entry_count) {
17241 		default:
17242 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17243 					"2535 Unsupported RQ count. (%d)\n",
17244 					hrq->entry_count);
17245 			if (hrq->entry_count < 512) {
17246 				status = -EINVAL;
17247 				goto out;
17248 			}
17249 			fallthrough;	/* otherwise default to smallest count */
17250 		case 512:
17251 			bf_set(lpfc_rq_context_rqe_count,
17252 			       &rq_create->u.request.context,
17253 			       LPFC_RQ_RING_SIZE_512);
17254 			break;
17255 		case 1024:
17256 			bf_set(lpfc_rq_context_rqe_count,
17257 			       &rq_create->u.request.context,
17258 			       LPFC_RQ_RING_SIZE_1024);
17259 			break;
17260 		case 2048:
17261 			bf_set(lpfc_rq_context_rqe_count,
17262 			       &rq_create->u.request.context,
17263 			       LPFC_RQ_RING_SIZE_2048);
17264 			break;
17265 		case 4096:
17266 			bf_set(lpfc_rq_context_rqe_count,
17267 			       &rq_create->u.request.context,
17268 			       LPFC_RQ_RING_SIZE_4096);
17269 			break;
17270 		}
17271 		bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
17272 		       LPFC_HDR_BUF_SIZE);
17273 	}
17274 	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17275 	       cq->queue_id);
17276 	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17277 	       hrq->page_count);
17278 	list_for_each_entry(dmabuf, &hrq->page_list, list) {
17279 		memset(dmabuf->virt, 0, hw_page_size);
17280 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17281 					putPaddrLow(dmabuf->phys);
17282 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17283 					putPaddrHigh(dmabuf->phys);
17284 	}
17285 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17286 		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17287 
17288 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17289 	/* The IOCTL status is embedded in the mailbox subheader. */
17290 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17291 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17292 	if (shdr_status || shdr_add_status || rc) {
17293 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17294 				"2504 RQ_CREATE mailbox failed with "
17295 				"status x%x add_status x%x, mbx status x%x\n",
17296 				shdr_status, shdr_add_status, rc);
17297 		status = -ENXIO;
17298 		goto out;
17299 	}
17300 	hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17301 	if (hrq->queue_id == 0xFFFF) {
17302 		status = -ENXIO;
17303 		goto out;
17304 	}
17305 
17306 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE) {
17307 		hrq->db_format = bf_get(lpfc_mbx_rq_create_db_format,
17308 					&rq_create->u.response);
17309 		if ((hrq->db_format != LPFC_DB_LIST_FORMAT) &&
17310 		    (hrq->db_format != LPFC_DB_RING_FORMAT)) {
17311 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17312 					"3262 RQ [%d] doorbell format not "
17313 					"supported: x%x\n", hrq->queue_id,
17314 					hrq->db_format);
17315 			status = -EINVAL;
17316 			goto out;
17317 		}
17318 
17319 		pci_barset = bf_get(lpfc_mbx_rq_create_bar_set,
17320 				    &rq_create->u.response);
17321 		bar_memmap_p = lpfc_dual_chute_pci_bar_map(phba, pci_barset);
17322 		if (!bar_memmap_p) {
17323 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17324 					"3269 RQ[%d] failed to memmap pci "
17325 					"barset:x%x\n", hrq->queue_id,
17326 					pci_barset);
17327 			status = -ENOMEM;
17328 			goto out;
17329 		}
17330 
17331 		db_offset = rq_create->u.response.doorbell_offset;
17332 		if ((db_offset != LPFC_ULP0_RQ_DOORBELL) &&
17333 		    (db_offset != LPFC_ULP1_RQ_DOORBELL)) {
17334 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17335 					"3270 RQ[%d] doorbell offset not "
17336 					"supported: x%x\n", hrq->queue_id,
17337 					db_offset);
17338 			status = -EINVAL;
17339 			goto out;
17340 		}
17341 		hrq->db_regaddr = bar_memmap_p + db_offset;
17342 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
17343 				"3266 RQ[qid:%d]: barset:x%x, offset:x%x, "
17344 				"format:x%x\n", hrq->queue_id, pci_barset,
17345 				db_offset, hrq->db_format);
17346 	} else {
17347 		hrq->db_format = LPFC_DB_RING_FORMAT;
17348 		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17349 	}
17350 	hrq->type = LPFC_HRQ;
17351 	hrq->assoc_qid = cq->queue_id;
17352 	hrq->subtype = subtype;
17353 	hrq->host_index = 0;
17354 	hrq->hba_index = 0;
17355 	hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17356 
17357 	/* now create the data queue */
17358 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17359 			 LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
17360 			 length, LPFC_SLI4_MBX_EMBED);
17361 	bf_set(lpfc_mbox_hdr_version, &shdr->request,
17362 	       phba->sli4_hba.pc_sli4_params.rqv);
17363 	if (phba->sli4_hba.pc_sli4_params.rqv == LPFC_Q_CREATE_VERSION_1) {
17364 		bf_set(lpfc_rq_context_rqe_count_1,
17365 		       &rq_create->u.request.context, hrq->entry_count);
17366 		if (subtype == LPFC_NVMET)
17367 			rq_create->u.request.context.buffer_size =
17368 				LPFC_NVMET_DATA_BUF_SIZE;
17369 		else
17370 			rq_create->u.request.context.buffer_size =
17371 				LPFC_DATA_BUF_SIZE;
17372 		bf_set(lpfc_rq_context_rqe_size, &rq_create->u.request.context,
17373 		       LPFC_RQE_SIZE_8);
17374 		bf_set(lpfc_rq_context_page_size, &rq_create->u.request.context,
17375 		       (PAGE_SIZE/SLI4_PAGE_SIZE));
17376 	} else {
17377 		switch (drq->entry_count) {
17378 		default:
17379 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17380 					"2536 Unsupported RQ count. (%d)\n",
17381 					drq->entry_count);
17382 			if (drq->entry_count < 512) {
17383 				status = -EINVAL;
17384 				goto out;
17385 			}
17386 			fallthrough;	/* otherwise default to smallest count */
17387 		case 512:
17388 			bf_set(lpfc_rq_context_rqe_count,
17389 			       &rq_create->u.request.context,
17390 			       LPFC_RQ_RING_SIZE_512);
17391 			break;
17392 		case 1024:
17393 			bf_set(lpfc_rq_context_rqe_count,
17394 			       &rq_create->u.request.context,
17395 			       LPFC_RQ_RING_SIZE_1024);
17396 			break;
17397 		case 2048:
17398 			bf_set(lpfc_rq_context_rqe_count,
17399 			       &rq_create->u.request.context,
17400 			       LPFC_RQ_RING_SIZE_2048);
17401 			break;
17402 		case 4096:
17403 			bf_set(lpfc_rq_context_rqe_count,
17404 			       &rq_create->u.request.context,
17405 			       LPFC_RQ_RING_SIZE_4096);
17406 			break;
17407 		}
17408 		if (subtype == LPFC_NVMET)
17409 			bf_set(lpfc_rq_context_buf_size,
17410 			       &rq_create->u.request.context,
17411 			       LPFC_NVMET_DATA_BUF_SIZE);
17412 		else
17413 			bf_set(lpfc_rq_context_buf_size,
17414 			       &rq_create->u.request.context,
17415 			       LPFC_DATA_BUF_SIZE);
17416 	}
17417 	bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
17418 	       cq->queue_id);
17419 	bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
17420 	       drq->page_count);
17421 	list_for_each_entry(dmabuf, &drq->page_list, list) {
17422 		rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
17423 					putPaddrLow(dmabuf->phys);
17424 		rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
17425 					putPaddrHigh(dmabuf->phys);
17426 	}
17427 	if (phba->sli4_hba.fw_func_mode & LPFC_DUA_MODE)
17428 		bf_set(lpfc_mbx_rq_create_dua, &rq_create->u.request, 1);
17429 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17430 	/* The IOCTL status is embedded in the mailbox subheader. */
17431 	shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
17432 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17433 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17434 	if (shdr_status || shdr_add_status || rc) {
17435 		status = -ENXIO;
17436 		goto out;
17437 	}
17438 	drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17439 	if (drq->queue_id == 0xFFFF) {
17440 		status = -ENXIO;
17441 		goto out;
17442 	}
17443 	drq->type = LPFC_DRQ;
17444 	drq->assoc_qid = cq->queue_id;
17445 	drq->subtype = subtype;
17446 	drq->host_index = 0;
17447 	drq->hba_index = 0;
17448 	drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17449 
17450 	/* link the header and data RQs onto the parent cq child list */
17451 	list_add_tail(&hrq->list, &cq->child_list);
17452 	list_add_tail(&drq->list, &cq->child_list);
17453 
17454 out:
17455 	mempool_free(mbox, phba->mbox_mem_pool);
17456 	return status;
17457 }
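
/*
 * Editor's note: hypothetical sketch of the paired-RQ contract enforced
 * above: the header and data RQs must have identical entry counts and are
 * bound to the same CQ. LPFC_USOL is the subtype the driver uses for the
 * unsolicited ELS receive pair.
 */
static int lpfc_example_setup_els_rqs(struct lpfc_hba *phba,
				      struct lpfc_queue *hrq,
				      struct lpfc_queue *drq,
				      struct lpfc_queue *cq)
{
	/* lpfc_rq_create() itself rejects mismatched counts with -EINVAL */
	if (hrq->entry_count != drq->entry_count)
		return -EINVAL;
	return lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
}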
17458 
17459 /**
17460  * lpfc_mrq_create - Create MRQ Receive Queues on the HBA
17461  * @phba: HBA structure that indicates port to create a queue on.
17462  * @hrqp: The queue structure array to use to create the header receive queues.
17463  * @drqp: The queue structure array to use to create the data receive queues.
17464  * @cqp: The completion queue array to bind these receive queues to.
17465  * @subtype: Functional purpose of the queue (MBOX, IO, ELS, NVMET, etc).
17466  *
17467  * This function creates @phba->cfg_nvmet_mrq receive buffer queue pairs, as
17468  * detailed in @hrqp and @drqp, on a port, described by @phba, by sending a
17469  * single non-embedded RQ_CREATE (version 2) mailbox command to the HBA.
17470  *
17471  * The @phba struct is used to send the mailbox command to the HBA. The @drqp
17472  * and @hrqp arrays are used to get the entry counts that are necessary to
17473  * determine the number of pages to use for each queue. The @cqp array is
17474  * used to indicate which completion queue each receive queue pair is bound
17475  * to. This function will send the RQ_CREATE mailbox command to the HBA to
17476  * set up the receive queues. The mailbox command is issued in polled mode,
17477  * so this function waits for it to finish before returning.
17478  *
17479  * On success this function will return a zero. If unable to allocate enough
17480  * memory this function will return -ENOMEM. If the queue create mailbox command
17481  * fails this function will return -ENXIO.
17482  **/
17483 int
17484 lpfc_mrq_create(struct lpfc_hba *phba, struct lpfc_queue **hrqp,
17485 		struct lpfc_queue **drqp, struct lpfc_queue **cqp,
17486 		uint32_t subtype)
17487 {
17488 	struct lpfc_queue *hrq, *drq, *cq;
17489 	struct lpfc_mbx_rq_create_v2 *rq_create;
17490 	struct lpfc_dmabuf *dmabuf;
17491 	LPFC_MBOXQ_t *mbox;
17492 	int rc, length, alloclen, status = 0;
17493 	int cnt, idx, numrq, page_idx = 0;
17494 	uint32_t shdr_status, shdr_add_status;
17495 	union lpfc_sli4_cfg_shdr *shdr;
17496 	uint32_t hw_page_size = phba->sli4_hba.pc_sli4_params.if_page_sz;
17497 
17498 	numrq = phba->cfg_nvmet_mrq;
17499 	/* sanity check on array memory */
17500 	if (!hrqp || !drqp || !cqp || !numrq)
17501 		return -ENODEV;
17502 	if (!phba->sli4_hba.pc_sli4_params.supported)
17503 		hw_page_size = SLI4_PAGE_SIZE;
17504 
17505 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
17506 	if (!mbox)
17507 		return -ENOMEM;
17508 
17509 	length = sizeof(struct lpfc_mbx_rq_create_v2);
17510 	length += ((2 * numrq * hrqp[0]->page_count) *
17511 		   sizeof(struct dma_address));
17512 
17513 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17514 				    LPFC_MBOX_OPCODE_FCOE_RQ_CREATE, length,
17515 				    LPFC_SLI4_MBX_NEMBED);
17516 	if (alloclen < length) {
17517 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17518 				"3099 Allocated DMA memory size (%d) is "
17519 				"less than the requested DMA memory size "
17520 				"(%d)\n", alloclen, length);
17521 		status = -ENOMEM;
17522 		goto out;
17523 	}
17524 
17527 	rq_create = mbox->sge_array->addr[0];
17528 	shdr = (union lpfc_sli4_cfg_shdr *)&rq_create->cfg_shdr;
17529 
17530 	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_2);
17531 	cnt = 0;
17532 
17533 	for (idx = 0; idx < numrq; idx++) {
17534 		hrq = hrqp[idx];
17535 		drq = drqp[idx];
17536 		cq  = cqp[idx];
17537 
17538 		/* sanity check on queue memory */
17539 		if (!hrq || !drq || !cq) {
17540 			status = -ENODEV;
17541 			goto out;
17542 		}
17543 
17544 		if (hrq->entry_count != drq->entry_count) {
17545 			status = -EINVAL;
17546 			goto out;
17547 		}
17548 
17549 		if (idx == 0) {
17550 			bf_set(lpfc_mbx_rq_create_num_pages,
17551 			       &rq_create->u.request,
17552 			       hrq->page_count);
17553 			bf_set(lpfc_mbx_rq_create_rq_cnt,
17554 			       &rq_create->u.request, (numrq * 2));
17555 			bf_set(lpfc_mbx_rq_create_dnb, &rq_create->u.request,
17556 			       1);
17557 			bf_set(lpfc_rq_context_base_cq,
17558 			       &rq_create->u.request.context,
17559 			       cq->queue_id);
17560 			bf_set(lpfc_rq_context_data_size,
17561 			       &rq_create->u.request.context,
17562 			       LPFC_NVMET_DATA_BUF_SIZE);
17563 			bf_set(lpfc_rq_context_hdr_size,
17564 			       &rq_create->u.request.context,
17565 			       LPFC_HDR_BUF_SIZE);
17566 			bf_set(lpfc_rq_context_rqe_count_1,
17567 			       &rq_create->u.request.context,
17568 			       hrq->entry_count);
17569 			bf_set(lpfc_rq_context_rqe_size,
17570 			       &rq_create->u.request.context,
17571 			       LPFC_RQE_SIZE_8);
17572 			bf_set(lpfc_rq_context_page_size,
17573 			       &rq_create->u.request.context,
17574 			       (PAGE_SIZE/SLI4_PAGE_SIZE));
17575 		}
17576 		rc = 0;
17577 		list_for_each_entry(dmabuf, &hrq->page_list, list) {
17578 			memset(dmabuf->virt, 0, hw_page_size);
17579 			cnt = page_idx + dmabuf->buffer_tag;
17580 			rq_create->u.request.page[cnt].addr_lo =
17581 					putPaddrLow(dmabuf->phys);
17582 			rq_create->u.request.page[cnt].addr_hi =
17583 					putPaddrHigh(dmabuf->phys);
17584 			rc++;
17585 		}
17586 		page_idx += rc;
17587 
17588 		rc = 0;
17589 		list_for_each_entry(dmabuf, &drq->page_list, list) {
17590 			memset(dmabuf->virt, 0, hw_page_size);
17591 			cnt = page_idx + dmabuf->buffer_tag;
17592 			rq_create->u.request.page[cnt].addr_lo =
17593 					putPaddrLow(dmabuf->phys);
17594 			rq_create->u.request.page[cnt].addr_hi =
17595 					putPaddrHigh(dmabuf->phys);
17596 			rc++;
17597 		}
17598 		page_idx += rc;
17599 
17600 		hrq->db_format = LPFC_DB_RING_FORMAT;
17601 		hrq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17602 		hrq->type = LPFC_HRQ;
17603 		hrq->assoc_qid = cq->queue_id;
17604 		hrq->subtype = subtype;
17605 		hrq->host_index = 0;
17606 		hrq->hba_index = 0;
17607 		hrq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17608 
17609 		drq->db_format = LPFC_DB_RING_FORMAT;
17610 		drq->db_regaddr = phba->sli4_hba.RQDBregaddr;
17611 		drq->type = LPFC_DRQ;
17612 		drq->assoc_qid = cq->queue_id;
17613 		drq->subtype = subtype;
17614 		drq->host_index = 0;
17615 		drq->hba_index = 0;
17616 		drq->notify_interval = LPFC_RQ_NOTIFY_INTRVL;
17617 
17618 		list_add_tail(&hrq->list, &cq->child_list);
17619 		list_add_tail(&drq->list, &cq->child_list);
17620 	}
17621 
17622 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
17623 	/* The IOCTL status is embedded in the mailbox subheader. */
17624 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17625 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17626 	if (shdr_status || shdr_add_status || rc) {
17627 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17628 				"3120 RQ_CREATE mailbox failed with "
17629 				"status x%x add_status x%x, mbx status x%x\n",
17630 				shdr_status, shdr_add_status, rc);
17631 		status = -ENXIO;
17632 		goto out;
17633 	}
17634 	rc = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
17635 	if (rc == 0xFFFF) {
17636 		status = -ENXIO;
17637 		goto out;
17638 	}
17639 
17640 	/* Initialize all RQs with associated queue id */
17641 	for (idx = 0; idx < numrq; idx++) {
17642 		hrq = hrqp[idx];
17643 		hrq->queue_id = rc + (2 * idx);
17644 		drq = drqp[idx];
17645 		drq->queue_id = rc + (2 * idx) + 1;
17646 	}
17647 
17648 out:
17649 	lpfc_sli4_mbox_cmd_free(phba, mbox);
17650 	return status;
17651 }
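
/*
 * Editor's note: the v2 RQ_CREATE above returns a single base queue id and
 * the driver derives the rest, header and data RQs interleaved. A
 * stand-alone sketch of that layout; both helpers are hypothetical.
 */
static inline uint16_t lpfc_example_mrq_hrq_id(uint16_t base, int idx)
{
	return base + (2 * idx);	/* header RQs take the even slots */
}

static inline uint16_t lpfc_example_mrq_drq_id(uint16_t base, int idx)
{
	return base + (2 * idx) + 1;	/* data RQ follows its header RQ */
}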
17652 
17653 /**
17654  * lpfc_eq_destroy - Destroy an event Queue on the HBA
17655  * @phba: HBA structure that indicates port to destroy a queue on.
17656  * @eq: The queue structure associated with the queue to destroy.
17657  *
17658  * This function destroys a queue, as detailed in @eq, by sending a mailbox
17659  * command, specific to the type of queue, to the HBA.
17660  *
17661  * The @eq struct is used to get the queue ID of the queue to destroy.
17662  *
17663  * On success this function will return a zero. If the queue destroy mailbox
17664  * command fails this function will return -ENXIO.
17665  **/
17666 int
17667 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
17668 {
17669 	LPFC_MBOXQ_t *mbox;
17670 	int rc, length, status = 0;
17671 	uint32_t shdr_status, shdr_add_status;
17672 	union lpfc_sli4_cfg_shdr *shdr;
17673 
17674 	/* sanity check on queue memory */
17675 	if (!eq)
17676 		return -ENODEV;
17677 
17678 	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
17679 		goto list_remove;
17680 
17681 	mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
17682 	if (!mbox)
17683 		return -ENOMEM;
17684 	length = (sizeof(struct lpfc_mbx_eq_destroy) -
17685 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17686 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17687 			 LPFC_MBOX_OPCODE_EQ_DESTROY,
17688 			 length, LPFC_SLI4_MBX_EMBED);
17689 	bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
17690 	       eq->queue_id);
17691 	mbox->vport = eq->phba->pport;
17692 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17693 
17694 	rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
17695 	/* The IOCTL status is embedded in the mailbox subheader. */
17696 	shdr = (union lpfc_sli4_cfg_shdr *)
17697 		&mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
17698 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17699 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17700 	if (shdr_status || shdr_add_status || rc) {
17701 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17702 				"2505 EQ_DESTROY mailbox failed with "
17703 				"status x%x add_status x%x, mbx status x%x\n",
17704 				shdr_status, shdr_add_status, rc);
17705 		status = -ENXIO;
17706 	}
17707 	mempool_free(mbox, eq->phba->mbox_mem_pool);
17708 
17709 list_remove:
17710 	/* Remove eq from any list */
17711 	list_del_init(&eq->list);
17712 
17713 	return status;
17714 }
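
/*
 * Editor's note: the *_destroy routines in this file all share one shape.
 * A condensed, hypothetical sketch: skip the mailbox when SLI is inactive,
 * otherwise poll a type-specific *_DESTROY command, and always unlink the
 * queue from its parent's child list.
 */
static int lpfc_example_q_destroy(struct lpfc_hba *phba, struct lpfc_queue *q)
{
	int status = 0;

	/* When SLI is inactive the port is gone; skip the mailbox entirely. */
	if (phba->sli.sli_flag & LPFC_SLI_ACTIVE) {
		/* issue the type-specific *_DESTROY mailbox here and map
		 * shdr errors to -ENXIO (elided in this sketch)
		 */
	}

	list_del_init(&q->list);	/* unlink regardless of outcome */
	return status;
}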
17715 
17716 /**
17717  * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
17718  * @phba: HBA structure that indicates port to destroy a queue on.
17719  * @cq: The queue structure associated with the queue to destroy.
17720  *
17721  * This function destroys a queue, as detailed in @cq, by sending a mailbox
17722  * command, specific to the type of queue, to the HBA.
17723  *
17724  * The @cq struct is used to get the queue ID of the queue to destroy.
17725  *
17726  * On success this function will return a zero. If the queue destroy mailbox
17727  * command fails this function will return -ENXIO.
17728  **/
17729 int
17730 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
17731 {
17732 	LPFC_MBOXQ_t *mbox;
17733 	int rc, length, status = 0;
17734 	uint32_t shdr_status, shdr_add_status;
17735 	union lpfc_sli4_cfg_shdr *shdr;
17736 
17737 	/* sanity check on queue memory */
17738 	if (!cq)
17739 		return -ENODEV;
17740 
17741 	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
17742 		goto list_remove;
17743 
17744 	mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
17745 	if (!mbox)
17746 		return -ENOMEM;
17747 	length = (sizeof(struct lpfc_mbx_cq_destroy) -
17748 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17749 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17750 			 LPFC_MBOX_OPCODE_CQ_DESTROY,
17751 			 length, LPFC_SLI4_MBX_EMBED);
17752 	bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
17753 	       cq->queue_id);
17754 	mbox->vport = cq->phba->pport;
17755 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17756 	rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
17757 	/* The IOCTL status is embedded in the mailbox subheader. */
17758 	shdr = (union lpfc_sli4_cfg_shdr *)
17759 		&mbox->u.mqe.un.wq_create.header.cfg_shdr;
17760 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17761 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17762 	if (shdr_status || shdr_add_status || rc) {
17763 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17764 				"2506 CQ_DESTROY mailbox failed with "
17765 				"status x%x add_status x%x, mbx status x%x\n",
17766 				shdr_status, shdr_add_status, rc);
17767 		status = -ENXIO;
17768 	}
17769 	mempool_free(mbox, cq->phba->mbox_mem_pool);
17770 
17771 list_remove:
17772 	/* Remove cq from any list */
17773 	list_del_init(&cq->list);
17774 	return status;
17775 }
17776 
17777 /**
17778  * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
17779  * @phba: HBA structure that indicates port to destroy a queue on.
17780  * @mq: The queue structure associated with the queue to destroy.
17781  *
17782  * This function destroys a queue, as detailed in @mq, by sending a mailbox
17783  * command, specific to the type of queue, to the HBA.
17784  *
17785  * The @mq struct is used to get the queue ID of the queue to destroy.
17786  *
17787  * On success this function will return a zero. If the queue destroy mailbox
17788  * command fails this function will return -ENXIO.
17789  **/
17790 int
17791 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
17792 {
17793 	LPFC_MBOXQ_t *mbox;
17794 	int rc, length, status = 0;
17795 	uint32_t shdr_status, shdr_add_status;
17796 	union lpfc_sli4_cfg_shdr *shdr;
17797 
17798 	/* sanity check on queue memory */
17799 	if (!mq)
17800 		return -ENODEV;
17801 
17802 	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
17803 		goto list_remove;
17804 
17805 	mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
17806 	if (!mbox)
17807 		return -ENOMEM;
17808 	length = (sizeof(struct lpfc_mbx_mq_destroy) -
17809 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17810 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
17811 			 LPFC_MBOX_OPCODE_MQ_DESTROY,
17812 			 length, LPFC_SLI4_MBX_EMBED);
17813 	bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
17814 	       mq->queue_id);
17815 	mbox->vport = mq->phba->pport;
17816 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17817 	rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
17818 	/* The IOCTL status is embedded in the mailbox subheader. */
17819 	shdr = (union lpfc_sli4_cfg_shdr *)
17820 		&mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
17821 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17822 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17823 	if (shdr_status || shdr_add_status || rc) {
17824 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17825 				"2507 MQ_DESTROY mailbox failed with "
17826 				"status x%x add_status x%x, mbx status x%x\n",
17827 				shdr_status, shdr_add_status, rc);
17828 		status = -ENXIO;
17829 	}
17830 	mempool_free(mbox, mq->phba->mbox_mem_pool);
17831 
17832 list_remove:
17833 	/* Remove mq from any list */
17834 	list_del_init(&mq->list);
17835 	return status;
17836 }
17837 
17838 /**
17839  * lpfc_wq_destroy - Destroy a Work Queue on the HBA
17840  * @phba: HBA structure that indicates port to destroy a queue on.
17841  * @wq: The queue structure associated with the queue to destroy.
17842  *
17843  * This function destroys a queue, as detailed in @wq, by sending a mailbox
17844  * command, specific to the type of queue, to the HBA.
17845  *
17846  * The @wq struct is used to get the queue ID of the queue to destroy.
17847  *
17848  * On success this function will return a zero. If the queue destroy mailbox
17849  * command fails this function will return -ENXIO.
17850  **/
17851 int
17852 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
17853 {
17854 	LPFC_MBOXQ_t *mbox;
17855 	int rc, length, status = 0;
17856 	uint32_t shdr_status, shdr_add_status;
17857 	union lpfc_sli4_cfg_shdr *shdr;
17858 
17859 	/* sanity check on queue memory */
17860 	if (!wq)
17861 		return -ENODEV;
17862 
17863 	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
17864 		goto list_remove;
17865 
17866 	mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
17867 	if (!mbox)
17868 		return -ENOMEM;
17869 	length = (sizeof(struct lpfc_mbx_wq_destroy) -
17870 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17871 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17872 			 LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
17873 			 length, LPFC_SLI4_MBX_EMBED);
17874 	bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
17875 	       wq->queue_id);
17876 	mbox->vport = wq->phba->pport;
17877 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17878 	rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
17879 	shdr = (union lpfc_sli4_cfg_shdr *)
17880 		&mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
17881 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17882 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17883 	if (shdr_status || shdr_add_status || rc) {
17884 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17885 				"2508 WQ_DESTROY mailbox failed with "
17886 				"status x%x add_status x%x, mbx status x%x\n",
17887 				shdr_status, shdr_add_status, rc);
17888 		status = -ENXIO;
17889 	}
17890 	mempool_free(mbox, wq->phba->mbox_mem_pool);
17891 
17892 list_remove:
17893 	/* Remove wq from any list */
17894 	list_del_init(&wq->list);
17895 	kfree(wq->pring);
17896 	wq->pring = NULL;
17897 	return status;
17898 }
17899 
17900 /**
17901  * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
17902  * @phba: HBA structure that indicates port to destroy a queue on.
17903  * @hrq: The header receive queue structure to destroy.
17904  * @drq: The data receive queue structure to destroy.
17905  *
17906  * This function destroys a receive queue pair, as detailed in @hrq and @drq,
17907  * by sending RQ_DESTROY mailbox commands, one per queue, to the HBA.
17908  *
17909  * The @hrq and @drq structs are used to get the queue IDs to destroy.
17910  *
17911  * On success this function will return a zero. If the queue destroy mailbox
17912  * command fails this function will return -ENXIO.
17913  **/
17914 int
17915 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
17916 		struct lpfc_queue *drq)
17917 {
17918 	LPFC_MBOXQ_t *mbox;
17919 	int rc, length, status = 0;
17920 	uint32_t shdr_status, shdr_add_status;
17921 	union lpfc_sli4_cfg_shdr *shdr;
17922 
17923 	/* sanity check on queue memory */
17924 	if (!hrq || !drq)
17925 		return -ENODEV;
17926 
17927 	if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))
17928 		goto list_remove;
17929 
17930 	mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
17931 	if (!mbox)
17932 		return -ENOMEM;
17933 	length = (sizeof(struct lpfc_mbx_rq_destroy) -
17934 		  sizeof(struct lpfc_sli4_cfg_mhdr));
17935 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
17936 			 LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
17937 			 length, LPFC_SLI4_MBX_EMBED);
17938 	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17939 	       hrq->queue_id);
17940 	mbox->vport = hrq->phba->pport;
17941 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
17942 	rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
17943 	/* The IOCTL status is embedded in the mailbox subheader. */
17944 	shdr = (union lpfc_sli4_cfg_shdr *)
17945 		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17946 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17947 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17948 	if (shdr_status || shdr_add_status || rc) {
17949 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17950 				"2509 RQ_DESTROY mailbox failed with "
17951 				"status x%x add_status x%x, mbx status x%x\n",
17952 				shdr_status, shdr_add_status, rc);
17953 		mempool_free(mbox, hrq->phba->mbox_mem_pool);
17954 		return -ENXIO;
17955 	}
17956 	bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
17957 	       drq->queue_id);
17958 	rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
17959 	shdr = (union lpfc_sli4_cfg_shdr *)
17960 		&mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
17961 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
17962 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
17963 	if (shdr_status || shdr_add_status || rc) {
17964 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
17965 				"2510 RQ_DESTROY mailbox failed with "
17966 				"status x%x add_status x%x, mbx status x%x\n",
17967 				shdr_status, shdr_add_status, rc);
17968 		status = -ENXIO;
17969 	}
17970 	mempool_free(mbox, hrq->phba->mbox_mem_pool);
17971 
17972 list_remove:
17973 	list_del_init(&hrq->list);
17974 	list_del_init(&drq->list);
17975 	return status;
17976 }
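
/*
 * Editor's note: unlike the other destroy paths, lpfc_rq_destroy() above
 * reuses one mailbox for two RQ_DESTROY commands, header RQ first, then
 * data RQ. A condensed sketch of that reuse (error handling elided;
 * lpfc_example_destroy_rq_pair() is hypothetical):
 */
static void lpfc_example_destroy_rq_pair(struct lpfc_hba *phba,
					 LPFC_MBOXQ_t *mbox,
					 struct lpfc_queue *hrq,
					 struct lpfc_queue *drq)
{
	bf_set(lpfc_mbx_rq_destroy_q_id,
	       &mbox->u.mqe.un.rq_destroy.u.request, hrq->queue_id);
	(void)lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);

	/* same mailbox, new queue id, second poll */
	bf_set(lpfc_mbx_rq_destroy_q_id,
	       &mbox->u.mqe.un.rq_destroy.u.request, drq->queue_id);
	(void)lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
}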
17977 
17978 /**
17979  * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
17980  * @phba: The virtual port for which this call being executed.
17981  * @pdma_phys_addr0: Physical address of the 1st SGL page.
17982  * @pdma_phys_addr1: Physical address of the 2nd SGL page.
17983  * @xritag: the xritag that ties this io to the SGL pages.
17984  *
17985  * This routine will post the sgl pages for the IO that has the xritag
17986  * that is in the iocbq structure. The xritag is assigned during iocbq
17987  * creation and persists for as long as the driver is loaded.
17988  * If the caller has fewer than 256 scatter gather segments to map, then
17989  * pdma_phys_addr1 should be 0.
17990  * If the caller needs to map more than 256 scatter gather segments, then
17991  * pdma_phys_addr1 should be a valid physical address.
17992  * Physical addresses for SGLs must be 64-byte aligned.
17993  * When mapping two SGL pages, the first must have 256 entries and the
17994  * second can have between 1 and 256 entries.
17995  *
17996  * Return codes:
17997  * 	0 - Success
17998  * 	-ENXIO, -ENOMEM - Failure
17999  **/
18000 int
18001 lpfc_sli4_post_sgl(struct lpfc_hba *phba,
18002 		dma_addr_t pdma_phys_addr0,
18003 		dma_addr_t pdma_phys_addr1,
18004 		uint16_t xritag)
18005 {
18006 	struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
18007 	LPFC_MBOXQ_t *mbox;
18008 	int rc;
18009 	uint32_t shdr_status, shdr_add_status;
18010 	uint32_t mbox_tmo;
18011 	union lpfc_sli4_cfg_shdr *shdr;
18012 
18013 	if (xritag == NO_XRI) {
18014 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18015 				"0364 Invalid param: xritag is NO_XRI\n");
18016 		return -EINVAL;
18017 	}
18018 
18019 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18020 	if (!mbox)
18021 		return -ENOMEM;
18022 
18023 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18024 			LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
18025 			sizeof(struct lpfc_mbx_post_sgl_pages) -
18026 			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
18027 
18028 	post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
18029 				&mbox->u.mqe.un.post_sgl_pages;
18030 	bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
18031 	bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
18032 
18033 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo	=
18034 				cpu_to_le32(putPaddrLow(pdma_phys_addr0));
18035 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
18036 				cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
18037 
18038 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo	=
18039 				cpu_to_le32(putPaddrLow(pdma_phys_addr1));
18040 	post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
18041 				cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
18042 	if (!phba->sli4_hba.intr_enable)
18043 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18044 	else {
18045 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18046 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18047 	}
18048 	/* The IOCTL status is embedded in the mailbox subheader. */
18049 	shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
18050 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18051 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18052 	if (!phba->sli4_hba.intr_enable)
18053 		mempool_free(mbox, phba->mbox_mem_pool);
18054 	else if (rc != MBX_TIMEOUT)
18055 		mempool_free(mbox, phba->mbox_mem_pool);
18056 	if (shdr_status || shdr_add_status || rc) {
18057 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18058 				"2511 POST_SGL mailbox failed with "
18059 				"status x%x add_status x%x, mbx status x%x\n",
18060 				shdr_status, shdr_add_status, rc);
		rc = -ENXIO;
18061 	}
18062 	return rc;
18063 }
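
/*
 * Editor's note: illustrative check of the alignment rule stated in the
 * kernel-doc above; SGL pages handed to POST_SGL_PAGES must be 64-byte
 * aligned. Hypothetical helper, not driver code.
 */
static inline bool lpfc_example_sgl_aligned(dma_addr_t pa)
{
	return (pa & 0x3f) == 0;	/* low 6 bits clear: 64-byte aligned */
}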
18064 
18065 /**
18066  * lpfc_sli4_alloc_xri - Get an available xri in the device's range
18067  * @phba: pointer to lpfc hba data structure.
18068  *
18069  * This routine is invoked to allocate the next available xri from the
18070  * driver's xri bitmask, consistent with the SLI-4 interface spec.
18071  * Because the index is logical, the search starts at bit 0 on every
18072  * allocation.
18073  *
18074  * Returns
18075  *	An xri in the range 0 <= xri < max_xri if successful,
18076  *	NO_XRI if no xris are available.
18077  **/
18078 static uint16_t
18079 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
18080 {
18081 	unsigned long xri;
18082 
18083 	/*
18084 	 * Fetch the next logical xri.  Because this index is logical,
18085 	 * the driver starts at 0 each time.
18086 	 */
18087 	spin_lock_irq(&phba->hbalock);
18088 	xri = find_first_zero_bit(phba->sli4_hba.xri_bmask,
18089 				 phba->sli4_hba.max_cfg_param.max_xri);
18090 	if (xri >= phba->sli4_hba.max_cfg_param.max_xri) {
18091 		spin_unlock_irq(&phba->hbalock);
18092 		return NO_XRI;
18093 	} else {
18094 		set_bit(xri, phba->sli4_hba.xri_bmask);
18095 		phba->sli4_hba.max_cfg_param.xri_used++;
18096 	}
18097 	spin_unlock_irq(&phba->hbalock);
18098 	return xri;
18099 }
18100 
18101 /**
18102  * __lpfc_sli4_free_xri - Release an xri for reuse.
18103  * @phba: pointer to lpfc hba data structure.
18104  * @xri: xri to release.
18105  *
18106  * This routine is invoked to release an xri to the pool of
18107  * available xris maintained by the driver.
18108  **/
18109 static void
18110 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
18111 {
18112 	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
18113 		phba->sli4_hba.max_cfg_param.xri_used--;
18114 	}
18115 }
18116 
18117 /**
18118  * lpfc_sli4_free_xri - Release an xri for reuse.
18119  * @phba: pointer to lpfc hba data structure.
18120  * @xri: xri to release.
18121  *
18122  * This routine is invoked to release an xri to the pool of
18123  * available xris maintained by the driver.
18124  **/
18125 void
18126 lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
18127 {
18128 	spin_lock_irq(&phba->hbalock);
18129 	__lpfc_sli4_free_xri(phba, xri);
18130 	spin_unlock_irq(&phba->hbalock);
18131 }
18132 
18133 /**
18134  * lpfc_sli4_next_xritag - Get an xritag for the io
18135  * @phba: Pointer to HBA context object.
18136  *
18137  * This function gets an xritag for the iocb. If there is no unused xritag
18138  * it will return NO_XRI (0xffff).
18139  * The function returns the allocated xritag if successful, else returns
18140  * NO_XRI. NO_XRI is not a valid xritag.
18141  * The caller is not required to hold any lock.
18142  **/
18143 uint16_t
18144 lpfc_sli4_next_xritag(struct lpfc_hba *phba)
18145 {
18146 	uint16_t xri_index;
18147 
18148 	xri_index = lpfc_sli4_alloc_xri(phba);
18149 	if (xri_index == NO_XRI)
18150 		lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
18151 				"2004 Failed to allocate XRI. Last XRITAG is %d"
18152 				" Max XRI is %d, Used XRI is %d\n",
18153 				xri_index,
18154 				phba->sli4_hba.max_cfg_param.max_xri,
18155 				phba->sli4_hba.max_cfg_param.xri_used);
18156 	return xri_index;
18157 }
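
/*
 * Editor's note: hypothetical round trip showing the xri bitmap contract
 * implemented above: lpfc_sli4_next_xritag() hands out logical indexes,
 * lpfc_sli4_free_xri() returns them, and NO_XRI signals exhaustion.
 */
static int lpfc_example_xri_round_trip(struct lpfc_hba *phba)
{
	uint16_t xri = lpfc_sli4_next_xritag(phba);

	if (xri == NO_XRI)
		return -ENOSPC;	/* bitmap exhausted */

	/* ... tie the xri to an io, post its SGL, complete the io ... */

	lpfc_sli4_free_xri(phba, xri);	/* back into the pool for reuse */
	return 0;
}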
18158 
18159 /**
18160  * lpfc_sli4_post_sgl_list - post a block of ELS sgls to the port.
18161  * @phba: pointer to lpfc hba data structure.
18162  * @post_sgl_list: pointer to els sgl entry list.
18163  * @post_cnt: number of els sgl entries on the list.
18164  *
18165  * This routine is invoked to post a block of the driver's sgl pages to the
18166  * HBA using a non-embedded mailbox command. No lock is held. This routine
18167  * is only called when the driver is loading and after all IO has been
18168  * stopped.
18169  **/
18170 static int
18171 lpfc_sli4_post_sgl_list(struct lpfc_hba *phba,
18172 			    struct list_head *post_sgl_list,
18173 			    int post_cnt)
18174 {
18175 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
18176 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18177 	struct sgl_page_pairs *sgl_pg_pairs;
18178 	void *viraddr;
18179 	LPFC_MBOXQ_t *mbox;
18180 	uint32_t reqlen, alloclen, pg_pairs;
18181 	uint32_t mbox_tmo;
18182 	uint16_t xritag_start = 0;
18183 	int rc = 0;
18184 	uint32_t shdr_status, shdr_add_status;
18185 	union lpfc_sli4_cfg_shdr *shdr;
18186 
18187 	reqlen = post_cnt * sizeof(struct sgl_page_pairs) +
18188 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18189 	if (reqlen > SLI4_PAGE_SIZE) {
18190 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18191 				"2559 Block sgl registration required DMA "
18192 				"size (%d) greater than a page\n", reqlen);
18193 		return -ENOMEM;
18194 	}
18195 
18196 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18197 	if (!mbox)
18198 		return -ENOMEM;
18199 
18200 	/* Allocate DMA memory and set up the non-embedded mailbox command */
18201 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18202 			 LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
18203 			 LPFC_SLI4_MBX_NEMBED);
18204 
18205 	if (alloclen < reqlen) {
18206 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18207 				"0285 Allocated DMA memory size (%d) is "
18208 				"less than the requested DMA memory "
18209 				"size (%d)\n", alloclen, reqlen);
18210 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18211 		return -ENOMEM;
18212 	}
18213 	/* Set up the SGL pages in the non-embedded DMA pages */
18214 	viraddr = mbox->sge_array->addr[0];
18215 	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18216 	sgl_pg_pairs = &sgl->sgl_pg_pairs;
18217 
18218 	pg_pairs = 0;
18219 	list_for_each_entry_safe(sglq_entry, sglq_next, post_sgl_list, list) {
18220 		/* Set up the sge entry */
18221 		sgl_pg_pairs->sgl_pg0_addr_lo =
18222 				cpu_to_le32(putPaddrLow(sglq_entry->phys));
18223 		sgl_pg_pairs->sgl_pg0_addr_hi =
18224 				cpu_to_le32(putPaddrHigh(sglq_entry->phys));
18225 		sgl_pg_pairs->sgl_pg1_addr_lo =
18226 				cpu_to_le32(putPaddrLow(0));
18227 		sgl_pg_pairs->sgl_pg1_addr_hi =
18228 				cpu_to_le32(putPaddrHigh(0));
18229 
18230 		/* Keep the first xritag on the list */
18231 		if (pg_pairs == 0)
18232 			xritag_start = sglq_entry->sli4_xritag;
18233 		sgl_pg_pairs++;
18234 		pg_pairs++;
18235 	}
18236 
18237 	/* Complete initialization and perform endian conversion. */
18238 	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18239 	bf_set(lpfc_post_sgl_pages_xricnt, sgl, post_cnt);
18240 	sgl->word0 = cpu_to_le32(sgl->word0);
18241 
18242 	if (!phba->sli4_hba.intr_enable)
18243 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18244 	else {
18245 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18246 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18247 	}
18248 	shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
18249 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18250 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18251 	if (!phba->sli4_hba.intr_enable)
18252 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18253 	else if (rc != MBX_TIMEOUT)
18254 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18255 	if (shdr_status || shdr_add_status || rc) {
18256 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18257 				"2513 POST_SGL_BLOCK mailbox command failed "
18258 				"status x%x add_status x%x mbx status x%x\n",
18259 				shdr_status, shdr_add_status, rc);
18260 		rc = -ENXIO;
18261 	}
18262 	return rc;
18263 }
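
/*
 * Editor's note: the page-size guard above in concrete terms. One
 * non-embedded mailbox page holds the cfg_shdr, one trailing word, and as
 * many 16-byte sgl_page_pairs entries as fit; callers must split larger
 * batches. Hypothetical helper mirroring the reqlen formula above.
 */
static inline uint32_t lpfc_example_max_sgl_pairs(void)
{
	return (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
		sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
}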
18264 
18265 /**
18266  * lpfc_sli4_post_io_sgl_block - post a block of nvme sgl list to firmware
18267  * @phba: pointer to lpfc hba data structure.
18268  * @nblist: pointer to nvme buffer list.
18269  * @count: number of scsi buffers on the list.
18270  *
18271  * This routine is invoked to post a block of @count scsi sgl pages from a
18272  * SCSI buffer list @nblist to the HBA using non-embedded mailbox command.
18273  * No Lock is held.
18274  *
18275  **/
18276 static int
18277 lpfc_sli4_post_io_sgl_block(struct lpfc_hba *phba, struct list_head *nblist,
18278 			    int count)
18279 {
18280 	struct lpfc_io_buf *lpfc_ncmd;
18281 	struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
18282 	struct sgl_page_pairs *sgl_pg_pairs;
18283 	void *viraddr;
18284 	LPFC_MBOXQ_t *mbox;
18285 	uint32_t reqlen, alloclen, pg_pairs;
18286 	uint32_t mbox_tmo;
18287 	uint16_t xritag_start = 0;
18288 	int rc = 0;
18289 	uint32_t shdr_status, shdr_add_status;
18290 	dma_addr_t pdma_phys_bpl1;
18291 	union lpfc_sli4_cfg_shdr *shdr;
18292 
18293 	/* Calculate the requested length of the dma memory */
18294 	reqlen = count * sizeof(struct sgl_page_pairs) +
18295 		 sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
18296 	if (reqlen > SLI4_PAGE_SIZE) {
18297 		lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
18298 				"6118 Block sgl registration required DMA "
18299 				"size (%d) greater than a page\n", reqlen);
18300 		return -ENOMEM;
18301 	}
18302 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
18303 	if (!mbox) {
18304 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18305 				"6119 Failed to allocate mbox cmd memory\n");
18306 		return -ENOMEM;
18307 	}
18308 
18309 	/* Allocate DMA memory and set up the non-embedded mailbox command */
18310 	alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
18311 				    LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
18312 				    reqlen, LPFC_SLI4_MBX_NEMBED);
18313 
18314 	if (alloclen < reqlen) {
18315 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18316 				"6120 Allocated DMA memory size (%d) is "
18317 				"less than the requested DMA memory "
18318 				"size (%d)\n", alloclen, reqlen);
18319 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18320 		return -ENOMEM;
18321 	}
18322 
18323 	/* Get the first SGE entry from the non-embedded DMA memory */
18324 	viraddr = mbox->sge_array->addr[0];
18325 
18326 	/* Set up the SGL pages in the non-embedded DMA pages */
18327 	sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
18328 	sgl_pg_pairs = &sgl->sgl_pg_pairs;
18329 
18330 	pg_pairs = 0;
18331 	list_for_each_entry(lpfc_ncmd, nblist, list) {
18332 		/* Set up the sge entry */
18333 		sgl_pg_pairs->sgl_pg0_addr_lo =
18334 			cpu_to_le32(putPaddrLow(lpfc_ncmd->dma_phys_sgl));
18335 		sgl_pg_pairs->sgl_pg0_addr_hi =
18336 			cpu_to_le32(putPaddrHigh(lpfc_ncmd->dma_phys_sgl));
18337 		if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
18338 			pdma_phys_bpl1 = lpfc_ncmd->dma_phys_sgl +
18339 						SGL_PAGE_SIZE;
18340 		else
18341 			pdma_phys_bpl1 = 0;
18342 		sgl_pg_pairs->sgl_pg1_addr_lo =
18343 			cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
18344 		sgl_pg_pairs->sgl_pg1_addr_hi =
18345 			cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
18346 		/* Keep the first xritag on the list */
18347 		if (pg_pairs == 0)
18348 			xritag_start = lpfc_ncmd->cur_iocbq.sli4_xritag;
18349 		sgl_pg_pairs++;
18350 		pg_pairs++;
18351 	}
18352 	bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
18353 	bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
18354 	/* Perform endian conversion if necessary */
18355 	sgl->word0 = cpu_to_le32(sgl->word0);
18356 
18357 	if (!phba->sli4_hba.intr_enable) {
18358 		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
18359 	} else {
18360 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
18361 		rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
18362 	}
18363 	shdr = (union lpfc_sli4_cfg_shdr *)&sgl->cfg_shdr;
18364 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
18365 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
18366 	if (!phba->sli4_hba.intr_enable)
18367 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18368 	else if (rc != MBX_TIMEOUT)
18369 		lpfc_sli4_mbox_cmd_free(phba, mbox);
18370 	if (shdr_status || shdr_add_status || rc) {
18371 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18372 				"6125 POST_SGL_BLOCK mailbox command failed "
18373 				"status x%x add_status x%x mbx status x%x\n",
18374 				shdr_status, shdr_add_status, rc);
18375 		rc = -ENXIO;
18376 	}
18377 	return rc;
18378 }
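
/*
 * Illustrative sketch (not driver code): the reqlen check above bounds how
 * many sgl page-pairs fit in one non-embedded mailbox page.  Assuming the
 * same layout computed in this routine, a caller could size its blocks as:
 *
 *	max_pairs = (SLI4_PAGE_SIZE - sizeof(union lpfc_sli4_cfg_shdr) -
 *		     sizeof(uint32_t)) / sizeof(struct sgl_page_pairs);
 *
 * Any count above max_pairs would trip the -ENOMEM path above.
 */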
18379 
18380 /**
18381  * lpfc_sli4_post_io_sgl_list - Post blocks of nvme buffer sgls from a list
18382  * @phba: pointer to lpfc hba data structure.
18383  * @post_nblist: pointer to the nvme buffer list.
18384  * @sb_count: number of nvme buffers.
18385  *
18386  * This routine walks a list of nvme buffers that was passed in. It attempts
18387  * to construct blocks of nvme buffer sgls which contains contiguous xris and
18388  * uses the non-embedded SGL block post mailbox commands to post to the port.
18389  * For a single NVME buffer sgl with a non-contiguous xri, if any, it uses the
18390  * embedded SGL post mailbox command for posting. The @post_nblist passed in
18391  * must be a local list, thus no lock is needed when manipulating the list.
18392  *
18393  * Returns: 0 on failure, otherwise the number of successfully posted buffers.
18394  **/
18395 int
18396 lpfc_sli4_post_io_sgl_list(struct lpfc_hba *phba,
18397 			   struct list_head *post_nblist, int sb_count)
18398 {
18399 	struct lpfc_io_buf *lpfc_ncmd, *lpfc_ncmd_next;
18400 	int status, sgl_size;
18401 	int post_cnt = 0, block_cnt = 0, num_posting = 0, num_posted = 0;
18402 	dma_addr_t pdma_phys_sgl1;
18403 	int last_xritag = NO_XRI;
18404 	int cur_xritag;
18405 	LIST_HEAD(prep_nblist);
18406 	LIST_HEAD(blck_nblist);
18407 	LIST_HEAD(nvme_nblist);
18408 
18409 	/* sanity check */
18410 	if (sb_count <= 0)
18411 		return -EINVAL;
18412 
18413 	sgl_size = phba->cfg_sg_dma_buf_size;
18414 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next, post_nblist, list) {
18415 		list_del_init(&lpfc_ncmd->list);
18416 		block_cnt++;
18417 		if ((last_xritag != NO_XRI) &&
18418 		    (lpfc_ncmd->cur_iocbq.sli4_xritag != last_xritag + 1)) {
18419 			/* a hole in xri block, form a sgl posting block */
18420 			list_splice_init(&prep_nblist, &blck_nblist);
18421 			post_cnt = block_cnt - 1;
18422 			/* prepare list for next posting block */
18423 			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18424 			block_cnt = 1;
18425 		} else {
18426 			/* prepare list for next posting block */
18427 			list_add_tail(&lpfc_ncmd->list, &prep_nblist);
18428 			/* enough sgls for non-embed sgl mbox command */
18429 			if (block_cnt == LPFC_NEMBED_MBOX_SGL_CNT) {
18430 				list_splice_init(&prep_nblist, &blck_nblist);
18431 				post_cnt = block_cnt;
18432 				block_cnt = 0;
18433 			}
18434 		}
18435 		num_posting++;
18436 		last_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18437 
18438 		/* end of repost sgl list condition for NVME buffers */
18439 		if (num_posting == sb_count) {
18440 			if (post_cnt == 0) {
18441 				/* last sgl posting block */
18442 				list_splice_init(&prep_nblist, &blck_nblist);
18443 				post_cnt = block_cnt;
18444 			} else if (block_cnt == 1) {
18445 				/* last single sgl with non-contiguous xri */
18446 				if (sgl_size > SGL_PAGE_SIZE)
18447 					pdma_phys_sgl1 =
18448 						lpfc_ncmd->dma_phys_sgl +
18449 						SGL_PAGE_SIZE;
18450 				else
18451 					pdma_phys_sgl1 = 0;
18452 				cur_xritag = lpfc_ncmd->cur_iocbq.sli4_xritag;
18453 				status = lpfc_sli4_post_sgl(
18454 						phba, lpfc_ncmd->dma_phys_sgl,
18455 						pdma_phys_sgl1, cur_xritag);
18456 				if (status) {
18457 					/* Post error.  Buffer unavailable. */
18458 					lpfc_ncmd->flags |=
18459 						LPFC_SBUF_NOT_POSTED;
18460 				} else {
18461 					/* Post success. Buffer available. */
18462 					lpfc_ncmd->flags &=
18463 						~LPFC_SBUF_NOT_POSTED;
18464 					lpfc_ncmd->status = IOSTAT_SUCCESS;
18465 					num_posted++;
18466 				}
18467 				/* Success or failure, put on NVME buffer sgl list */
18468 				list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18469 			}
18470 		}
18471 
18472 		/* continue until a nembed page worth of sgls */
18473 		if (post_cnt == 0)
18474 			continue;
18475 
18476 		/* post block of NVME buffer list sgls */
18477 		status = lpfc_sli4_post_io_sgl_block(phba, &blck_nblist,
18478 						     post_cnt);
18479 
18480 		/* don't reset xritag due to hole in xri block */
18481 		if (block_cnt == 0)
18482 			last_xritag = NO_XRI;
18483 
18484 		/* reset NVME buffer post count for next round of posting */
18485 		post_cnt = 0;
18486 
18487 		/* put NVME buffers with posted sgls on the NVME buffer sgl list */
18488 		while (!list_empty(&blck_nblist)) {
18489 			list_remove_head(&blck_nblist, lpfc_ncmd,
18490 					 struct lpfc_io_buf, list);
18491 			if (status) {
18492 				/* Post error.  Mark buffer unavailable. */
18493 				lpfc_ncmd->flags |= LPFC_SBUF_NOT_POSTED;
18494 			} else {
18495 				/* Post success. Mark buffer available. */
18496 				lpfc_ncmd->flags &= ~LPFC_SBUF_NOT_POSTED;
18497 				lpfc_ncmd->status = IOSTAT_SUCCESS;
18498 				num_posted++;
18499 			}
18500 			list_add_tail(&lpfc_ncmd->list, &nvme_nblist);
18501 		}
18502 	}
18503 	/* Push NVME buffers with sgl posted to the available list */
18504 	lpfc_io_buf_replenish(phba, &nvme_nblist);
18505 
18506 	return num_posted;
18507 }
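
/*
 * Illustrative example (hypothetical xri values): given buffers with xris
 * 10, 11, 12, 20, 21, the loop above splices {10, 11, 12} into blck_nblist
 * when it sees 20 != 12 + 1, posts that block, and then starts a new block
 * with {20, 21}, which is posted when the end of the list is reached.  A
 * trailing block of just one buffer with a non-contiguous xri is posted
 * individually via lpfc_sli4_post_sgl() instead.
 */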
18508 
18509 /**
18510  * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
18511  * @phba: pointer to lpfc_hba struct that the frame was received on
18512  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18513  *
18514  * This function checks the fields in the @fc_hdr to see if the FC frame is a
18515  * valid type of frame that the LPFC driver will handle. This function will
18516  * return a zero if the frame is a valid frame or a non zero value when the
18517  * frame does not pass the check.
18518  **/
18519 static int
18520 lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
18521 {
18523 	struct fc_vft_header *fc_vft_hdr;
18524 	struct fc_app_header *fc_app_hdr;
18525 	uint32_t *header = (uint32_t *) fc_hdr;
18526 
18527 #define FC_RCTL_MDS_DIAGS	0xF4
18528 
18529 	switch (fc_hdr->fh_r_ctl) {
18530 	case FC_RCTL_DD_UNCAT:		/* uncategorized information */
18531 	case FC_RCTL_DD_SOL_DATA:	/* solicited data */
18532 	case FC_RCTL_DD_UNSOL_CTL:	/* unsolicited control */
18533 	case FC_RCTL_DD_SOL_CTL:	/* solicited control or reply */
18534 	case FC_RCTL_DD_UNSOL_DATA:	/* unsolicited data */
18535 	case FC_RCTL_DD_DATA_DESC:	/* data descriptor */
18536 	case FC_RCTL_DD_UNSOL_CMD:	/* unsolicited command */
18537 	case FC_RCTL_DD_CMD_STATUS:	/* command status */
18538 	case FC_RCTL_ELS_REQ:	/* extended link services request */
18539 	case FC_RCTL_ELS_REP:	/* extended link services reply */
18540 	case FC_RCTL_ELS4_REQ:	/* FC-4 ELS request */
18541 	case FC_RCTL_ELS4_REP:	/* FC-4 ELS reply */
18542 	case FC_RCTL_BA_ABTS: 	/* basic link service abort */
18543 	case FC_RCTL_BA_RMC: 	/* remove connection */
18544 	case FC_RCTL_BA_ACC:	/* basic accept */
18545 	case FC_RCTL_BA_RJT:	/* basic reject */
18546 	case FC_RCTL_BA_PRMT:
18547 	case FC_RCTL_ACK_1:	/* acknowledge_1 */
18548 	case FC_RCTL_ACK_0:	/* acknowledge_0 */
18549 	case FC_RCTL_P_RJT:	/* port reject */
18550 	case FC_RCTL_F_RJT:	/* fabric reject */
18551 	case FC_RCTL_P_BSY:	/* port busy */
18552 	case FC_RCTL_F_BSY:	/* fabric busy to data frame */
18553 	case FC_RCTL_F_BSYL:	/* fabric busy to link control frame */
18554 	case FC_RCTL_LCR:	/* link credit reset */
18555 	case FC_RCTL_MDS_DIAGS: /* MDS Diagnostics */
18556 	case FC_RCTL_END:	/* end */
18557 		break;
18558 	case FC_RCTL_VFTH:	/* Virtual Fabric tagging Header */
18559 		fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18560 		fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
18561 		return lpfc_fc_frame_check(phba, fc_hdr);
18562 	case FC_RCTL_BA_NOP:	/* basic link service NOP */
18563 	default:
18564 		goto drop;
18565 	}
18566 
18567 	switch (fc_hdr->fh_type) {
18568 	case FC_TYPE_BLS:
18569 	case FC_TYPE_ELS:
18570 	case FC_TYPE_FCP:
18571 	case FC_TYPE_CT:
18572 	case FC_TYPE_NVME:
18573 		break;
18574 	case FC_TYPE_IP:
18575 	case FC_TYPE_ILS:
18576 	default:
18577 		goto drop;
18578 	}
18579 
18580 	if (unlikely(phba->link_flag == LS_LOOPBACK_MODE &&
18581 				phba->cfg_vmid_app_header)) {
18582 		/* Application header is 16B device header */
18583 		if (fc_hdr->fh_df_ctl & LPFC_FC_16B_DEVICE_HEADER) {
18584 			fc_app_hdr = (struct fc_app_header *) (fc_hdr + 1);
18585 			if (be32_to_cpu(fc_app_hdr->src_app_id) !=
18586 					LOOPBACK_SRC_APPID) {
18587 				lpfc_printf_log(phba, KERN_WARNING,
18588 						LOG_ELS | LOG_LIBDFC,
18589 						"1932 Loopback src app id "
18590 						"not matched, app_id:x%x\n",
18591 						be32_to_cpu(fc_app_hdr->src_app_id));
18592 
18593 				goto drop;
18594 			}
18595 		} else {
18596 			lpfc_printf_log(phba, KERN_WARNING,
18597 					LOG_ELS | LOG_LIBDFC,
18598 					"1933 Loopback df_ctl bit not set, "
18599 					"df_ctl:x%x\n",
18600 					fc_hdr->fh_df_ctl);
18601 
18602 			goto drop;
18603 		}
18604 	}
18605 
18606 	lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
18607 			"2538 Received frame rctl:x%x, type:x%x, "
18608 			"frame Data:%08x %08x %08x %08x %08x %08x %08x\n",
18609 			fc_hdr->fh_r_ctl, fc_hdr->fh_type,
18610 			be32_to_cpu(header[0]), be32_to_cpu(header[1]),
18611 			be32_to_cpu(header[2]), be32_to_cpu(header[3]),
18612 			be32_to_cpu(header[4]), be32_to_cpu(header[5]),
18613 			be32_to_cpu(header[6]));
18614 	return 0;
18615 drop:
18616 	lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
18617 			"2539 Dropped frame rctl:x%x type:x%x\n",
18618 			fc_hdr->fh_r_ctl, fc_hdr->fh_type);
18619 	return 1;
18620 }
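
/*
 * Illustrative sketch (assumed values, not driver code): a minimal header
 * that passes both switch statements above, assuming the port is not in
 * loopback mode with VMID application headers enabled.
 *
 *	struct fc_frame_header hdr = { 0 };
 *
 *	hdr.fh_r_ctl = FC_RCTL_ELS_REQ;	// accepted r_ctl
 *	hdr.fh_type  = FC_TYPE_ELS;	// accepted type
 *	// lpfc_fc_frame_check(phba, &hdr) returns 0 (valid)
 *
 *	hdr.fh_type = FC_TYPE_IP;	// IP is not handled
 *	// lpfc_fc_frame_check(phba, &hdr) returns 1 (dropped)
 */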
18621 
18622 /**
18623  * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
18624  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18625  *
18626  * This function processes the FC header to retrieve the VFI from the VF
18627  * header, if one exists. This function will return the VFI if one exists
18628  * or 0 if no VSAN Header exists.
18629  **/
18630 static uint32_t
18631 lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
18632 {
18633 	struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
18634 
18635 	if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
18636 		return 0;
18637 	return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
18638 }
18639 
18640 /**
18641  * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
18642  * @phba: Pointer to the HBA structure to search for the vport on
18643  * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
18644  * @fcfi: The FC Fabric ID that the frame came from
18645  * @did: Destination ID to match against
18646  *
18647  * This function searches the @phba for a vport that matches the content of the
18648  * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
18649  * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
18650  * returns the matching vport pointer or NULL if unable to match frame to a
18651  * vport.
18652  **/
18653 static struct lpfc_vport *
18654 lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
18655 		       uint16_t fcfi, uint32_t did)
18656 {
18657 	struct lpfc_vport **vports;
18658 	struct lpfc_vport *vport = NULL;
18659 	int i;
18660 
18661 	if (did == Fabric_DID)
18662 		return phba->pport;
18663 	if (test_bit(FC_PT2PT, &phba->pport->fc_flag) &&
18664 	    phba->link_state != LPFC_HBA_READY)
18665 		return phba->pport;
18666 
18667 	vports = lpfc_create_vport_work_array(phba);
18668 	if (vports != NULL) {
18669 		for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
18670 			if (phba->fcf.fcfi == fcfi &&
18671 			    vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
18672 			    vports[i]->fc_myDID == did) {
18673 				vport = vports[i];
18674 				break;
18675 			}
18676 		}
18677 	}
18678 	lpfc_destroy_vport_work_array(phba, vports);
18679 	return vport;
18680 }
18681 
18682 /**
18683  * lpfc_update_rcv_time_stamp - Update vport's rcv seq time stamp
18684  * @vport: The vport to work on.
18685  *
18686  * This function updates the receive sequence time stamp for this vport. The
18687  * receive sequence time stamp indicates the time that the last frame of
18688  * the sequence that has been idle for the longest amount of time was received.
18689  * The driver uses this time stamp to determine if any received sequences have
18690  * timed out.
18691  **/
18692 static void
18693 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
18694 {
18695 	struct lpfc_dmabuf *h_buf;
18696 	struct hbq_dmabuf *dmabuf = NULL;
18697 
18698 	/* get the oldest sequence on the rcv list */
18699 	h_buf = list_get_first(&vport->rcv_buffer_list,
18700 			       struct lpfc_dmabuf, list);
18701 	if (!h_buf)
18702 		return;
18703 	dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18704 	vport->rcv_buffer_time_stamp = dmabuf->time_stamp;
18705 }
18706 
18707 /**
18708  * lpfc_cleanup_rcv_buffers - Cleans up all outstanding receive sequences.
18709  * @vport: The vport that the received sequences were sent to.
18710  *
18711  * This function cleans up all outstanding received sequences. This is called
18712  * by the driver when a link event or user action invalidates all the received
18713  * sequences.
18714  **/
18715 void
18716 lpfc_cleanup_rcv_buffers(struct lpfc_vport *vport)
18717 {
18718 	struct lpfc_dmabuf *h_buf, *hnext;
18719 	struct lpfc_dmabuf *d_buf, *dnext;
18720 	struct hbq_dmabuf *dmabuf = NULL;
18721 
18722 	/* start with the oldest sequence on the rcv list */
18723 	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18724 		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18725 		list_del_init(&dmabuf->hbuf.list);
18726 		list_for_each_entry_safe(d_buf, dnext,
18727 					 &dmabuf->dbuf.list, list) {
18728 			list_del_init(&d_buf->list);
18729 			lpfc_in_buf_free(vport->phba, d_buf);
18730 		}
18731 		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18732 	}
18733 }
18734 
18735 /**
18736  * lpfc_rcv_seq_check_edtov - Cleans up timed out receive sequences.
18737  * @vport: The vport that the received sequences were sent to.
18738  *
18739  * This function determines whether any received sequences have timed out by
18740  * first checking the vport's rcv_buffer_time_stamp. If this time_stamp
18741  * indicates that there is at least one timed out sequence this routine will
18742  * go through the received sequences one at a time from most inactive to most
18743  * active to determine which ones need to be cleaned up. Once it has determined
18744  * that a sequence needs to be cleaned up it will simply free up the resources
18745  * without sending an abort.
18746  **/
18747 void
18748 lpfc_rcv_seq_check_edtov(struct lpfc_vport *vport)
18749 {
18750 	struct lpfc_dmabuf *h_buf, *hnext;
18751 	struct lpfc_dmabuf *d_buf, *dnext;
18752 	struct hbq_dmabuf *dmabuf = NULL;
18753 	unsigned long timeout;
18754 	int abort_count = 0;
18755 
18756 	timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18757 		   vport->rcv_buffer_time_stamp);
18758 	if (list_empty(&vport->rcv_buffer_list) ||
18759 	    time_before(jiffies, timeout))
18760 		return;
18761 	/* start with the oldest sequence on the rcv list */
18762 	list_for_each_entry_safe(h_buf, hnext, &vport->rcv_buffer_list, list) {
18763 		dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18764 		timeout = (msecs_to_jiffies(vport->phba->fc_edtov) +
18765 			   dmabuf->time_stamp);
18766 		if (time_before(jiffies, timeout))
18767 			break;
18768 		abort_count++;
18769 		list_del_init(&dmabuf->hbuf.list);
18770 		list_for_each_entry_safe(d_buf, dnext,
18771 					 &dmabuf->dbuf.list, list) {
18772 			list_del_init(&d_buf->list);
18773 			lpfc_in_buf_free(vport->phba, d_buf);
18774 		}
18775 		lpfc_in_buf_free(vport->phba, &dmabuf->dbuf);
18776 	}
18777 	if (abort_count)
18778 		lpfc_update_rcv_time_stamp(vport);
18779 }
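
/*
 * Timeout arithmetic sketch (assuming a hypothetical fc_edtov of 2000 ms):
 * a sequence whose dmabuf->time_stamp is older than jiffies by more than
 * the E_D_TOV window is freed.
 *
 *	timeout = msecs_to_jiffies(2000) + dmabuf->time_stamp;
 *	if (time_before(jiffies, timeout))
 *		break;	// still within E_D_TOV, keep the sequence
 */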
18780 
18781 /**
18782  * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
18783  * @vport: pointer to a virtual port
18784  * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
18785  *
18786  * This function searches through the existing incomplete sequences that have
18787  * been sent to this @vport. If the frame matches one of the incomplete
18788  * sequences then the dbuf in the @dmabuf is added to the list of frames that
18789  * make up that sequence. If no sequence is found that matches this frame then
18790  * the function will add the hbuf in the @dmabuf to the @vport's rcv_buffer_list.
18791  * This function returns a pointer to the first dmabuf in the sequence list that
18792  * the frame was linked to.
18793  **/
18794 static struct hbq_dmabuf *
18795 lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18796 {
18797 	struct fc_frame_header *new_hdr;
18798 	struct fc_frame_header *temp_hdr;
18799 	struct lpfc_dmabuf *d_buf;
18800 	struct lpfc_dmabuf *h_buf;
18801 	struct hbq_dmabuf *seq_dmabuf = NULL;
18802 	struct hbq_dmabuf *temp_dmabuf = NULL;
18803 	uint8_t	found = 0;
18804 
18805 	INIT_LIST_HEAD(&dmabuf->dbuf.list);
18806 	dmabuf->time_stamp = jiffies;
18807 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18808 
18809 	/* Use the hdr_buf to find the sequence that this frame belongs to */
18810 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18811 		temp_hdr = (struct fc_frame_header *)h_buf->virt;
18812 		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18813 		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18814 		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18815 			continue;
18816 		/* found a pending sequence that matches this frame */
18817 		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18818 		break;
18819 	}
18820 	if (!seq_dmabuf) {
18821 		/*
18822 		 * This indicates first frame received for this sequence.
18823 		 * Queue the buffer on the vport's rcv_buffer_list.
18824 		 */
18825 		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18826 		lpfc_update_rcv_time_stamp(vport);
18827 		return dmabuf;
18828 	}
18829 	temp_hdr = seq_dmabuf->hbuf.virt;
18830 	if (be16_to_cpu(new_hdr->fh_seq_cnt) <
18831 		be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18832 		list_del_init(&seq_dmabuf->hbuf.list);
18833 		list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
18834 		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18835 		lpfc_update_rcv_time_stamp(vport);
18836 		return dmabuf;
18837 	}
18838 	/* move this sequence to the tail to indicate a young sequence */
18839 	list_move_tail(&seq_dmabuf->hbuf.list, &vport->rcv_buffer_list);
18840 	seq_dmabuf->time_stamp = jiffies;
18841 	lpfc_update_rcv_time_stamp(vport);
18842 	if (list_empty(&seq_dmabuf->dbuf.list)) {
18843 		list_add_tail(&dmabuf->dbuf.list, &seq_dmabuf->dbuf.list);
18844 		return seq_dmabuf;
18845 	}
18846 	/* find the correct place in the sequence to insert this frame */
18847 	d_buf = list_entry(seq_dmabuf->dbuf.list.prev, typeof(*d_buf), list);
18848 	while (!found) {
18849 		temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
18850 		temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
18851 		/*
18852 		 * If the frame's sequence count is greater than the frame on
18853 		 * the list then insert the frame right after this frame
18854 		 */
18855 		if (be16_to_cpu(new_hdr->fh_seq_cnt) >
18856 			be16_to_cpu(temp_hdr->fh_seq_cnt)) {
18857 			list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
18858 			found = 1;
18859 			break;
18860 		}
18861 
18862 		if (&d_buf->list == &seq_dmabuf->dbuf.list)
18863 			break;
18864 		d_buf = list_entry(d_buf->list.prev, typeof(*d_buf), list);
18865 	}
18866 
18867 	if (found)
18868 		return seq_dmabuf;
18869 	return NULL;
18870 }
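
/*
 * Matching sketch: frames are grouped into a sequence by the tuple
 * (fh_seq_id, fh_ox_id, fh_s_id), compared exactly as above.  A hypothetical
 * helper with the same semantics:
 *
 *	static bool same_sequence(struct fc_frame_header *a,
 *				  struct fc_frame_header *b)
 *	{
 *		return a->fh_seq_id == b->fh_seq_id &&
 *		       a->fh_ox_id == b->fh_ox_id &&
 *		       !memcmp(&a->fh_s_id, &b->fh_s_id, 3);
 *	}
 */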
18871 
18872 /**
18873  * lpfc_sli4_abort_partial_seq - Abort partially assembled unsol sequence
18874  * @vport: pointer to a virtual port
18875  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18876  *
18877  * This function tries to abort the partially assembled sequence described
18878  * by the information from the basic abort @dmabuf. It checks whether such
18879  * a partially assembled sequence is held by the driver. If so, it shall free
18880  * up all the frames from the partially assembled sequence.
18881  *
18882  * Return
18883  * true  -- if there is matching partially assembled sequence present and all
18884  *          the frames freed with the sequence;
18885  * false -- if there is no matching partially assembled sequence present so
18886  *          nothing got aborted in the lower layer driver
18887  **/
18888 static bool
18889 lpfc_sli4_abort_partial_seq(struct lpfc_vport *vport,
18890 			    struct hbq_dmabuf *dmabuf)
18891 {
18892 	struct fc_frame_header *new_hdr;
18893 	struct fc_frame_header *temp_hdr;
18894 	struct lpfc_dmabuf *d_buf, *n_buf, *h_buf;
18895 	struct hbq_dmabuf *seq_dmabuf = NULL;
18896 
18897 	/* Use the hdr_buf to find the sequence that matches this frame */
18898 	INIT_LIST_HEAD(&dmabuf->dbuf.list);
18899 	INIT_LIST_HEAD(&dmabuf->hbuf.list);
18900 	new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
18901 	list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
18902 		temp_hdr = (struct fc_frame_header *)h_buf->virt;
18903 		if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
18904 		    (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
18905 		    (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
18906 			continue;
18907 		/* found a pending sequence that matches this frame */
18908 		seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
18909 		break;
18910 	}
18911 
18912 	/* Free up all the frames from the partially assembled sequence */
18913 	if (seq_dmabuf) {
18914 		list_for_each_entry_safe(d_buf, n_buf,
18915 					 &seq_dmabuf->dbuf.list, list) {
18916 			list_del_init(&d_buf->list);
18917 			lpfc_in_buf_free(vport->phba, d_buf);
18918 		}
18919 		return true;
18920 	}
18921 	return false;
18922 }
18923 
18924 /**
18925  * lpfc_sli4_abort_ulp_seq - Abort assembled unsol sequence from ulp
18926  * @vport: pointer to a virtual port
18927  * @dmabuf: pointer to a dmabuf that describes the FC sequence
18928  *
18929  * This function tries to abort the assembled sequence at the upper level
18930  * protocol, described by the information from the basic abort @dmabuf. It
18931  * checks to see whether such pending context exists at upper level protocol.
18932  * If so, it shall clean up the pending context.
18933  *
18934  * Return
18935  * true  -- if there is matching pending context of the sequence cleaned
18936  *          at ulp;
18937  * false -- if there is no matching pending context of the sequence present
18938  *          at ulp.
18939  **/
18940 static bool
18941 lpfc_sli4_abort_ulp_seq(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
18942 {
18943 	struct lpfc_hba *phba = vport->phba;
18944 	int handled;
18945 
18946 	/* Accepting abort at ulp with SLI4 only */
18947 	if (phba->sli_rev < LPFC_SLI_REV4)
18948 		return false;
18949 
18950 	/* Register all caring upper level protocols to attend abort */
18951 	handled = lpfc_ct_handle_unsol_abort(phba, dmabuf);
18952 	if (handled)
18953 		return true;
18954 
18955 	return false;
18956 }
18957 
18958 /**
18959  * lpfc_sli4_seq_abort_rsp_cmpl - BLS ABORT RSP seq abort iocb complete handler
18960  * @phba: Pointer to HBA context object.
18961  * @cmd_iocbq: pointer to the command iocbq structure.
18962  * @rsp_iocbq: pointer to the response iocbq structure.
18963  *
18964  * This function handles the sequence abort response iocb command complete
18965  * event. It properly releases the memory allocated to the sequence abort
18966  * accept iocb.
18967  **/
18968 static void
18969 lpfc_sli4_seq_abort_rsp_cmpl(struct lpfc_hba *phba,
18970 			     struct lpfc_iocbq *cmd_iocbq,
18971 			     struct lpfc_iocbq *rsp_iocbq)
18972 {
18973 	if (cmd_iocbq) {
18974 		lpfc_nlp_put(cmd_iocbq->ndlp);
18975 		lpfc_sli_release_iocbq(phba, cmd_iocbq);
18976 	}
18977 
18978 	/* Failure means BLS ABORT RSP did not get delivered to remote node*/
18979 	if (rsp_iocbq && rsp_iocbq->iocb.ulpStatus)
18980 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
18981 			"3154 BLS ABORT RSP failed, data:  x%x/x%x\n",
18982 			get_job_ulpstatus(phba, rsp_iocbq),
18983 			get_job_word4(phba, rsp_iocbq));
18984 }
18985 
18986 /**
18987  * lpfc_sli4_xri_inrange - check xri is in range of xris owned by driver.
18988  * @phba: Pointer to HBA context object.
18989  * @xri: xri id in transaction.
18990  *
18991  * This function validates that the xri maps to the known range of XRIs
18992  * allocated and used by the driver.
18993  **/
18994 uint16_t
18995 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
18996 		      uint16_t xri)
18997 {
18998 	uint16_t i;
18999 
19000 	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
19001 		if (xri == phba->sli4_hba.xri_ids[i])
19002 			return i;
19003 	}
19004 	return NO_XRI;
19005 }
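
/*
 * Usage sketch: callers translate a physical xri from the wire into the
 * driver's logical index, then reason about the logical range, e.g.:
 *
 *	lxri = lpfc_sli4_xri_inrange(phba, xri);
 *	if (lxri == NO_XRI)
 *		return;		// xri not owned by this driver
 *
 * Note the linear scan is O(max_xri); it is only used on slow paths such
 * as unsolicited abort handling.
 */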
19006 
19007 /**
19008  * lpfc_sli4_seq_abort_rsp - bls rsp to sequence abort
19009  * @vport: pointer to a virtual port.
19010  * @fc_hdr: pointer to a FC frame header.
19011  * @aborted: was the partially assembled receive sequence successfully aborted
19012  *
19013  * This function sends a basic response to a previous unsol sequence abort
19014  * event after aborting the sequence handling.
19015  **/
19016 void
19017 lpfc_sli4_seq_abort_rsp(struct lpfc_vport *vport,
19018 			struct fc_frame_header *fc_hdr, bool aborted)
19019 {
19020 	struct lpfc_hba *phba = vport->phba;
19021 	struct lpfc_iocbq *ctiocb = NULL;
19022 	struct lpfc_nodelist *ndlp;
19023 	uint16_t oxid, rxid, xri, lxri;
19024 	uint32_t sid, fctl;
19025 	union lpfc_wqe128 *icmd;
19026 	int rc;
19027 
19028 	if (!lpfc_is_link_up(phba))
19029 		return;
19030 
19031 	sid = sli4_sid_from_fc_hdr(fc_hdr);
19032 	oxid = be16_to_cpu(fc_hdr->fh_ox_id);
19033 	rxid = be16_to_cpu(fc_hdr->fh_rx_id);
19034 
19035 	ndlp = lpfc_findnode_did(vport, sid);
19036 	if (!ndlp) {
19037 		ndlp = lpfc_nlp_init(vport, sid);
19038 		if (!ndlp) {
19039 			lpfc_printf_vlog(vport, KERN_WARNING, LOG_ELS,
19040 					 "1268 Failed to allocate ndlp for "
19041 					 "oxid:x%x SID:x%x\n", oxid, sid);
19042 			return;
19043 		}
19044 		/* Put ndlp onto vport node list */
19045 		lpfc_enqueue_node(vport, ndlp);
19046 	}
19047 
19048 	/* Allocate buffer for rsp iocb */
19049 	ctiocb = lpfc_sli_get_iocbq(phba);
19050 	if (!ctiocb)
19051 		return;
19052 
19053 	icmd = &ctiocb->wqe;
19054 
19055 	/* Extract the F_CTL field from FC_HDR */
19056 	fctl = sli4_fctl_from_fc_hdr(fc_hdr);
19057 
19058 	ctiocb->ndlp = lpfc_nlp_get(ndlp);
19059 	if (!ctiocb->ndlp) {
19060 		lpfc_sli_release_iocbq(phba, ctiocb);
19061 		return;
19062 	}
19063 
19064 	ctiocb->vport = vport;
19065 	ctiocb->cmd_cmpl = lpfc_sli4_seq_abort_rsp_cmpl;
19066 	ctiocb->sli4_lxritag = NO_XRI;
19067 	ctiocb->sli4_xritag = NO_XRI;
19068 	ctiocb->abort_rctl = FC_RCTL_BA_ACC;
19069 
19070 	if (fctl & FC_FC_EX_CTX)
19071 		/* Exchange responder sent the abort so we
19072 		 * own the oxid.
19073 		 */
19074 		xri = oxid;
19075 	else
19076 		xri = rxid;
19077 	lxri = lpfc_sli4_xri_inrange(phba, xri);
19078 	if (lxri != NO_XRI)
19079 		lpfc_set_rrq_active(phba, ndlp, lxri,
19080 			(xri == oxid) ? rxid : oxid, 0);
19081 	/* For BA_ABTS from exchange responder, if the logical xri with
19082 	 * the oxid maps to the FCP XRI range, the port no longer has
19083 	 * that exchange context, send a BLS_RJT. Override the IOCB for
19084 	 * a BA_RJT.
19085 	 */
19086 	if ((fctl & FC_FC_EX_CTX) &&
19087 	    (lxri > lpfc_sli4_get_iocb_cnt(phba))) {
19088 		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
19089 		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
19090 		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
19091 		       FC_BA_RJT_INV_XID);
19092 		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
19093 		       FC_BA_RJT_UNABLE);
19094 	}
19095 
19096 	/* If BA_ABTS failed to abort a partially assembled receive sequence,
19097 	 * the driver no longer has that exchange, send a BLS_RJT. Override
19098 	 * the IOCB for a BA_RJT.
19099 	 */
19100 	if (aborted == false) {
19101 		ctiocb->abort_rctl = FC_RCTL_BA_RJT;
19102 		bf_set(xmit_bls_rsp64_rjt_vspec, &icmd->xmit_bls_rsp, 0);
19103 		bf_set(xmit_bls_rsp64_rjt_expc, &icmd->xmit_bls_rsp,
19104 		       FC_BA_RJT_INV_XID);
19105 		bf_set(xmit_bls_rsp64_rjt_rsnc, &icmd->xmit_bls_rsp,
19106 		       FC_BA_RJT_UNABLE);
19107 	}
19108 
19109 	if (fctl & FC_FC_EX_CTX) {
19110 		/* ABTS sent by responder to CT exchange, construction
19111 		 * of BA_ACC will use OX_ID from ABTS for the XRI_TAG
19112 		 * field and RX_ID from ABTS for RX_ID field.
19113 		 */
19114 		ctiocb->abort_bls = LPFC_ABTS_UNSOL_RSP;
19115 		bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
19116 	} else {
19117 		/* ABTS sent by initiator to CT exchange, construction
19118 		 * of BA_ACC will need to allocate a new XRI for the
19119 		 * XRI_TAG field.
19120 		 */
19121 		ctiocb->abort_bls = LPFC_ABTS_UNSOL_INT;
19122 	}
19123 
19124 	/* OX_ID is invariant regardless of who sent ABTS to the CT exchange */
19125 	bf_set(xmit_bls_rsp64_oxid, &icmd->xmit_bls_rsp, oxid);
19126 	bf_set(xmit_bls_rsp64_rxid, &icmd->xmit_bls_rsp, rxid);
19127 
19128 	/* Use CT=VPI */
19129 	bf_set(wqe_els_did, &icmd->xmit_bls_rsp.wqe_dest,
19130 	       ndlp->nlp_DID);
19131 	bf_set(xmit_bls_rsp64_temprpi, &icmd->xmit_bls_rsp,
19132 	       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
19133 	bf_set(wqe_cmnd, &icmd->generic.wqe_com, CMD_XMIT_BLS_RSP64_CX);
19134 
19135 	/* Xmit CT abts response on exchange <xid> */
19136 	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
19137 			 "1200 Send BLS cmd x%x on oxid x%x Data: x%x\n",
19138 			 ctiocb->abort_rctl, oxid, phba->link_state);
19139 
19140 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
19141 	if (rc == IOCB_ERROR) {
19142 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
19143 				 "2925 Failed to issue CT ABTS RSP x%x on "
19144 				 "xri x%x, Data x%x\n",
19145 				 ctiocb->abort_rctl, oxid,
19146 				 phba->link_state);
19147 		lpfc_nlp_put(ndlp);
19148 		ctiocb->ndlp = NULL;
19149 		lpfc_sli_release_iocbq(phba, ctiocb);
19150 	}
19151 
19152 	/* if only usage of this nodelist is BLS response, release initial ref
19153 	 * to free ndlp when transmit completes
19154 	 */
19155 	if (ndlp->nlp_state == NLP_STE_UNUSED_NODE &&
19156 	    !test_bit(NLP_DROPPED, &ndlp->nlp_flag) &&
19157 	    !(ndlp->fc4_xpt_flags & (NVME_XPT_REGD | SCSI_XPT_REGD))) {
19158 		set_bit(NLP_DROPPED, &ndlp->nlp_flag);
19159 		lpfc_nlp_put(ndlp);
19160 	}
19161 }
19162 
19163 /**
19164  * lpfc_sli4_handle_unsol_abort - Handle sli-4 unsolicited abort event
19165  * @vport: Pointer to the vport on which this sequence was received
19166  * @dmabuf: pointer to a dmabuf that describes the FC sequence
19167  *
19168  * This function handles an SLI-4 unsolicited abort event. If the unsolicited
19169  * receive sequence is only partially assembled by the driver, it shall abort
19170  * the partially assembled frames for the sequence. Otherwise, if the
19171  * unsolicited receive sequence has been completely assembled and passed to
19172  * the Upper Layer Protocol (ULP), it then marks the per-oxid status that the
19173  * unsolicited sequence has been aborted. After that, it will issue a basic
19174  * accept to accept the abort.
19175  **/
19176 static void
19177 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
19178 			     struct hbq_dmabuf *dmabuf)
19179 {
19180 	struct lpfc_hba *phba = vport->phba;
19181 	struct fc_frame_header fc_hdr;
19182 	uint32_t fctl;
19183 	bool aborted;
19184 
19185 	/* Make a copy of fc_hdr before the dmabuf being released */
19186 	memcpy(&fc_hdr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
19187 	fctl = sli4_fctl_from_fc_hdr(&fc_hdr);
19188 
19189 	if (fctl & FC_FC_EX_CTX) {
19190 		/* ABTS by responder to exchange, no cleanup needed */
19191 		aborted = true;
19192 	} else {
19193 		/* ABTS by initiator to exchange, need to do cleanup */
19194 		aborted = lpfc_sli4_abort_partial_seq(vport, dmabuf);
19195 		if (aborted == false)
19196 			aborted = lpfc_sli4_abort_ulp_seq(vport, dmabuf);
19197 	}
19198 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
19199 
19200 	if (phba->nvmet_support) {
19201 		lpfc_nvmet_rcv_unsol_abort(vport, &fc_hdr);
19202 		return;
19203 	}
19204 
19205 	/* Respond with BA_ACC or BA_RJT accordingly */
19206 	lpfc_sli4_seq_abort_rsp(vport, &fc_hdr, aborted);
19207 }
19208 
19209 /**
19210  * lpfc_seq_complete - Indicates if a sequence is complete
19211  * @dmabuf: pointer to a dmabuf that describes the FC sequence
19212  *
19213  * This function checks the sequence, starting with the frame described by
19214  * @dmabuf, to see if all the frames associated with this sequence are present.
19215  * the frames associated with this sequence are linked to the @dmabuf using the
19216  * dbuf list. This function looks for three major things. 1) That the first
19217  * frame has a sequence count of zero. 2) That there is a frame with the last
19218  * frame of sequence bit set. 3) That there are no holes in the sequence count.
19219  * The function will return 1 when the sequence is complete, otherwise it will return 0.
19220  **/
19221 static int
19222 lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
19223 {
19224 	struct fc_frame_header *hdr;
19225 	struct lpfc_dmabuf *d_buf;
19226 	struct hbq_dmabuf *seq_dmabuf;
19227 	uint32_t fctl;
19228 	int seq_count = 0;
19229 
19230 	hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19231 	/* make sure first frame of sequence has a sequence count of zero */
19232 	if (hdr->fh_seq_cnt != seq_count)
19233 		return 0;
19234 	fctl = (hdr->fh_f_ctl[0] << 16 |
19235 		hdr->fh_f_ctl[1] << 8 |
19236 		hdr->fh_f_ctl[2]);
19237 	/* If last frame of sequence we can return success. */
19238 	if (fctl & FC_FC_END_SEQ)
19239 		return 1;
19240 	list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
19241 		seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19242 		hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19243 		/* If there is a hole in the sequence count then fail. */
19244 		if (++seq_count != be16_to_cpu(hdr->fh_seq_cnt))
19245 			return 0;
19246 		fctl = (hdr->fh_f_ctl[0] << 16 |
19247 			hdr->fh_f_ctl[1] << 8 |
19248 			hdr->fh_f_ctl[2]);
19249 		/* If last frame of sequence we can return success. */
19250 		if (fctl & FC_FC_END_SEQ)
19251 			return 1;
19252 	}
19253 	return 0;
19254 }
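
/*
 * The F_CTL field is a 24-bit big-endian value carried in three bytes, so
 * the routine assembles it manually before testing FC_FC_END_SEQ:
 *
 *	fctl = hdr->fh_f_ctl[0] << 16 |		// bits 23..16
 *	       hdr->fh_f_ctl[1] << 8 |		// bits 15..8
 *	       hdr->fh_f_ctl[2];		// bits 7..0
 *
 * For example, f_ctl bytes 0x98 0x00 0x00 yield 0x980000, which has
 * FC_FC_END_SEQ (0x080000) set.
 */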
19255 
19256 /**
19257  * lpfc_prep_seq - Prep sequence for ULP processing
19258  * @vport: Pointer to the vport on which this sequence was received
19259  * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
19260  *
19261  * This function takes a sequence, described by a list of frames, and creates
19262  * a list of iocbq structures to describe the sequence. This iocbq list will be
19263  * used to issue to the generic unsolicited sequence handler. This routine
19264  * returns a pointer to the first iocbq in the list. If the function is unable
19265  * to allocate an iocbq then it throws out the received frames that were not
19266  * able to be described and return a pointer to the first iocbq. If unable to
19267  * allocate any iocbqs (including the first) this function will return NULL.
19268  **/
19269 static struct lpfc_iocbq *
19270 lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
19271 {
19272 	struct hbq_dmabuf *hbq_buf;
19273 	struct lpfc_dmabuf *d_buf, *n_buf;
19274 	struct lpfc_iocbq *first_iocbq, *iocbq;
19275 	struct fc_frame_header *fc_hdr;
19276 	uint32_t sid;
19277 	uint32_t len, tot_len;
19278 
19279 	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19280 	/* remove from receive buffer list */
19281 	list_del_init(&seq_dmabuf->hbuf.list);
19282 	lpfc_update_rcv_time_stamp(vport);
19283 	/* get the Remote Port's SID */
19284 	sid = sli4_sid_from_fc_hdr(fc_hdr);
19285 	tot_len = 0;
19286 	/* Get an iocbq struct to fill in. */
19287 	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
19288 	if (first_iocbq) {
19289 		/* Initialize the first IOCB. */
19290 		first_iocbq->wcqe_cmpl.total_data_placed = 0;
19291 		bf_set(lpfc_wcqe_c_status, &first_iocbq->wcqe_cmpl,
19292 		       IOSTAT_SUCCESS);
19293 		first_iocbq->vport = vport;
19294 
19295 		/* Check FC Header to see what TYPE of frame we are rcv'ing */
19296 		if (sli4_type_from_fc_hdr(fc_hdr) == FC_TYPE_ELS) {
19297 			bf_set(els_rsp64_sid, &first_iocbq->wqe.xmit_els_rsp,
19298 			       sli4_did_from_fc_hdr(fc_hdr));
19299 		}
19300 
19301 		bf_set(wqe_ctxt_tag, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
19302 		       NO_XRI);
19303 		bf_set(wqe_rcvoxid, &first_iocbq->wqe.xmit_els_rsp.wqe_com,
19304 		       be16_to_cpu(fc_hdr->fh_ox_id));
19305 
19306 		/* put the first buffer into the first iocb */
19307 		tot_len = bf_get(lpfc_rcqe_length,
19308 				 &seq_dmabuf->cq_event.cqe.rcqe_cmpl);
19309 
19310 		first_iocbq->cmd_dmabuf = &seq_dmabuf->dbuf;
19311 		first_iocbq->bpl_dmabuf = NULL;
19312 		/* Keep track of the BDE count */
19313 		first_iocbq->wcqe_cmpl.word3 = 1;
19314 
19315 		if (tot_len > LPFC_DATA_BUF_SIZE)
19316 			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize =
19317 				LPFC_DATA_BUF_SIZE;
19318 		else
19319 			first_iocbq->wqe.gen_req.bde.tus.f.bdeSize = tot_len;
19320 
19321 		first_iocbq->wcqe_cmpl.total_data_placed = tot_len;
19322 		bf_set(wqe_els_did, &first_iocbq->wqe.xmit_els_rsp.wqe_dest,
19323 		       sid);
19324 	}
19325 	iocbq = first_iocbq;
19326 	/*
19327 	 * Each IOCBq can have two Buffers assigned, so go through the list
19328 	 * of buffers for this sequence and save two buffers in each IOCBq
19329 	 */
19330 	list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
19331 		if (!iocbq) {
19332 			lpfc_in_buf_free(vport->phba, d_buf);
19333 			continue;
19334 		}
19335 		if (!iocbq->bpl_dmabuf) {
19336 			iocbq->bpl_dmabuf = d_buf;
19337 			iocbq->wcqe_cmpl.word3++;
19338 			/* We need to get the size out of the right CQE */
19339 			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19340 			len = bf_get(lpfc_rcqe_length,
19341 				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
19342 			iocbq->unsol_rcv_len = len;
19343 			iocbq->wcqe_cmpl.total_data_placed += len;
19344 			tot_len += len;
19345 		} else {
19346 			iocbq = lpfc_sli_get_iocbq(vport->phba);
19347 			if (!iocbq) {
19348 				if (first_iocbq) {
19349 					bf_set(lpfc_wcqe_c_status,
19350 					       &first_iocbq->wcqe_cmpl,
19351 					       IOSTAT_SUCCESS);
19352 					first_iocbq->wcqe_cmpl.parameter =
19353 						IOERR_NO_RESOURCES;
19354 				}
19355 				lpfc_in_buf_free(vport->phba, d_buf);
19356 				continue;
19357 			}
19358 			/* We need to get the size out of the right CQE */
19359 			hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
19360 			len = bf_get(lpfc_rcqe_length,
19361 				       &hbq_buf->cq_event.cqe.rcqe_cmpl);
19362 			iocbq->cmd_dmabuf = d_buf;
19363 			iocbq->bpl_dmabuf = NULL;
19364 			iocbq->wcqe_cmpl.word3 = 1;
19365 
19366 			if (len > LPFC_DATA_BUF_SIZE)
19367 				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
19368 					LPFC_DATA_BUF_SIZE;
19369 			else
19370 				iocbq->wqe.xmit_els_rsp.bde.tus.f.bdeSize =
19371 					len;
19372 
19373 			tot_len += len;
19374 			iocbq->wcqe_cmpl.total_data_placed = tot_len;
19375 			bf_set(wqe_els_did, &iocbq->wqe.xmit_els_rsp.wqe_dest,
19376 			       sid);
19377 			list_add_tail(&iocbq->list, &first_iocbq->list);
19378 		}
19379 	}
19380 	/* Free the sequence's header buffer */
19381 	if (!first_iocbq)
19382 		lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
19383 
19384 	return first_iocbq;
19385 }
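
/*
 * Packing sketch: each iocbq carries at most two dmabufs (cmd_dmabuf and
 * bpl_dmabuf), so an N-frame sequence needs roughly N/2 iocbqs chained off
 * first_iocbq->list.  For a hypothetical 5-frame sequence the result is:
 *
 *	iocbq0: frame0 + frame1   (first_iocbq)
 *	iocbq1: frame2 + frame3
 *	iocbq2: frame4
 */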
19386 
19387 static void
19388 lpfc_sli4_send_seq_to_ulp(struct lpfc_vport *vport,
19389 			  struct hbq_dmabuf *seq_dmabuf)
19390 {
19391 	struct fc_frame_header *fc_hdr;
19392 	struct lpfc_iocbq *iocbq, *curr_iocb, *next_iocb;
19393 	struct lpfc_hba *phba = vport->phba;
19394 
19395 	fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
19396 	iocbq = lpfc_prep_seq(vport, seq_dmabuf);
19397 	if (!iocbq) {
19398 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19399 				"2707 Ring %d handler: Failed to allocate "
19400 				"iocb Rctl x%x Type x%x received\n",
19401 				LPFC_ELS_RING,
19402 				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19403 		return;
19404 	}
19405 	if (!lpfc_complete_unsol_iocb(phba,
19406 				      phba->sli4_hba.els_wq->pring,
19407 				      iocbq, fc_hdr->fh_r_ctl,
19408 				      fc_hdr->fh_type)) {
19409 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19410 				"2540 Ring %d handler: unexpected Rctl "
19411 				"x%x Type x%x received\n",
19412 				LPFC_ELS_RING,
19413 				fc_hdr->fh_r_ctl, fc_hdr->fh_type);
19414 		lpfc_in_buf_free(phba, &seq_dmabuf->dbuf);
19415 	}
19416 
19417 	/* Free iocb created in lpfc_prep_seq */
19418 	list_for_each_entry_safe(curr_iocb, next_iocb,
19419 				 &iocbq->list, list) {
19420 		list_del_init(&curr_iocb->list);
19421 		lpfc_sli_release_iocbq(phba, curr_iocb);
19422 	}
19423 	lpfc_sli_release_iocbq(phba, iocbq);
19424 }
19425 
19426 static void
19427 lpfc_sli4_mds_loopback_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
19428 			    struct lpfc_iocbq *rspiocb)
19429 {
19430 	struct lpfc_dmabuf *pcmd = cmdiocb->cmd_dmabuf;
19431 
19432 	if (pcmd && pcmd->virt)
19433 		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19434 	kfree(pcmd);
19435 	lpfc_sli_release_iocbq(phba, cmdiocb);
19436 	lpfc_drain_txq(phba);
19437 }
19438 
19439 static void
19440 lpfc_sli4_handle_mds_loopback(struct lpfc_vport *vport,
19441 			      struct hbq_dmabuf *dmabuf)
19442 {
19443 	struct fc_frame_header *fc_hdr;
19444 	struct lpfc_hba *phba = vport->phba;
19445 	struct lpfc_iocbq *iocbq = NULL;
19446 	union  lpfc_wqe128 *pwqe;
19447 	struct lpfc_dmabuf *pcmd = NULL;
19448 	uint32_t frame_len;
19449 	int rc;
19450 	unsigned long iflags;
19451 
19452 	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19453 	frame_len = bf_get(lpfc_rcqe_length, &dmabuf->cq_event.cqe.rcqe_cmpl);
19454 
19455 	/* Send the received frame back */
19456 	iocbq = lpfc_sli_get_iocbq(phba);
19457 	if (!iocbq) {
19458 		/* Queue cq event and wakeup worker thread to process it */
19459 		spin_lock_irqsave(&phba->hbalock, iflags);
19460 		list_add_tail(&dmabuf->cq_event.list,
19461 			      &phba->sli4_hba.sp_queue_event);
19462 		spin_unlock_irqrestore(&phba->hbalock, iflags);
19463 		set_bit(HBA_SP_QUEUE_EVT, &phba->hba_flag);
19464 		lpfc_worker_wake_up(phba);
19465 		return;
19466 	}
19467 
19468 	/* Allocate buffer for command payload */
19469 	pcmd = kmalloc(sizeof(*pcmd), GFP_KERNEL);
19470 	if (pcmd)
19471 		pcmd->virt = dma_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
19472 					    &pcmd->phys);
19473 	if (!pcmd || !pcmd->virt)
19474 		goto exit;
19475 
19476 	INIT_LIST_HEAD(&pcmd->list);
19477 
19478 	/* copyin the payload */
19479 	memcpy(pcmd->virt, dmabuf->dbuf.virt, frame_len);
19480 
19481 	iocbq->cmd_dmabuf = pcmd;
19482 	iocbq->vport = vport;
19483 	iocbq->cmd_flag &= ~LPFC_FIP_ELS_ID_MASK;
19484 	iocbq->cmd_flag |= LPFC_USE_FCPWQIDX;
19485 	iocbq->num_bdes = 0;
19486 
19487 	pwqe = &iocbq->wqe;
19488 	/* fill in BDE's for command */
19489 	pwqe->gen_req.bde.addrHigh = putPaddrHigh(pcmd->phys);
19490 	pwqe->gen_req.bde.addrLow = putPaddrLow(pcmd->phys);
19491 	pwqe->gen_req.bde.tus.f.bdeSize = frame_len;
19492 	pwqe->gen_req.bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
19493 
19494 	pwqe->send_frame.frame_len = frame_len;
19495 	pwqe->send_frame.fc_hdr_wd0 = be32_to_cpu(*((__be32 *)fc_hdr));
19496 	pwqe->send_frame.fc_hdr_wd1 = be32_to_cpu(*((__be32 *)fc_hdr + 1));
19497 	pwqe->send_frame.fc_hdr_wd2 = be32_to_cpu(*((__be32 *)fc_hdr + 2));
19498 	pwqe->send_frame.fc_hdr_wd3 = be32_to_cpu(*((__be32 *)fc_hdr + 3));
19499 	pwqe->send_frame.fc_hdr_wd4 = be32_to_cpu(*((__be32 *)fc_hdr + 4));
19500 	pwqe->send_frame.fc_hdr_wd5 = be32_to_cpu(*((__be32 *)fc_hdr + 5));
19501 
19502 	pwqe->generic.wqe_com.word7 = 0;
19503 	pwqe->generic.wqe_com.word10 = 0;
19504 
19505 	bf_set(wqe_cmnd, &pwqe->generic.wqe_com, CMD_SEND_FRAME);
19506 	bf_set(wqe_sof, &pwqe->generic.wqe_com, 0x2E); /* SOF byte */
19507 	bf_set(wqe_eof, &pwqe->generic.wqe_com, 0x41); /* EOF byte */
19508 	bf_set(wqe_lenloc, &pwqe->generic.wqe_com, 1);
19509 	bf_set(wqe_xbl, &pwqe->generic.wqe_com, 1);
19510 	bf_set(wqe_dbde, &pwqe->generic.wqe_com, 1);
19511 	bf_set(wqe_xc, &pwqe->generic.wqe_com, 1);
19512 	bf_set(wqe_cmd_type, &pwqe->generic.wqe_com, 0xA);
19513 	bf_set(wqe_cqid, &pwqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
19514 	bf_set(wqe_xri_tag, &pwqe->generic.wqe_com, iocbq->sli4_xritag);
19515 	bf_set(wqe_reqtag, &pwqe->generic.wqe_com, iocbq->iotag);
19516 	bf_set(wqe_class, &pwqe->generic.wqe_com, CLASS3);
19517 	pwqe->generic.wqe_com.abort_tag = iocbq->iotag;
19518 
19519 	iocbq->cmd_cmpl = lpfc_sli4_mds_loopback_cmpl;
19520 
19521 	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocbq, 0);
19522 	if (rc == IOCB_ERROR)
19523 		goto exit;
19524 
19525 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
19526 	return;
19527 
19528 exit:
19529 	lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
19530 			"2023 Unable to process MDS loopback frame\n");
19531 	if (pcmd && pcmd->virt)
19532 		dma_pool_free(phba->lpfc_drb_pool, pcmd->virt, pcmd->phys);
19533 	kfree(pcmd);
19534 	if (iocbq)
19535 		lpfc_sli_release_iocbq(phba, iocbq);
19536 	lpfc_in_buf_free(phba, &dmabuf->dbuf);
19537 }
19538 
19539 /**
19540  * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
19541  * @phba: Pointer to HBA context object.
19542  * @dmabuf: Pointer to a dmabuf that describes the FC sequence.
19543  *
19544  * This function is called with no lock held. This function processes all
19545  * the received buffers and gives them to the upper layers when a received
19546  * buffer indicates that it is the final frame in the sequence. The interrupt
19547  * service routine processes received buffers at interrupt context.
19548  * The worker thread calls lpfc_sli4_handle_received_buffer, which will call the
19549  * appropriate receive function when the final frame in a sequence is received.
19550  **/
19551 void
19552 lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba,
19553 				 struct hbq_dmabuf *dmabuf)
19554 {
19555 	struct hbq_dmabuf *seq_dmabuf;
19556 	struct fc_frame_header *fc_hdr;
19557 	struct lpfc_vport *vport;
19558 	uint32_t fcfi;
19559 	uint32_t did;
19560 
19561 	/* Process each received buffer */
19562 	fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
19563 
19564 	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS ||
19565 	    fc_hdr->fh_r_ctl == FC_RCTL_DD_UNSOL_DATA) {
19566 		vport = phba->pport;
19567 		/* Handle MDS Loopback frames */
19568 		if  (!test_bit(FC_UNLOADING, &phba->pport->load_flag))
19569 			lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19570 		else
19571 			lpfc_in_buf_free(phba, &dmabuf->dbuf);
19572 		return;
19573 	}
19574 
19575 	/* check to see if this a valid type of frame */
19576 	if (lpfc_fc_frame_check(phba, fc_hdr)) {
19577 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
19578 		return;
19579 	}
19580 
19581 	if ((bf_get(lpfc_cqe_code,
19582 		    &dmabuf->cq_event.cqe.rcqe_cmpl) == CQE_CODE_RECEIVE_V1))
19583 		fcfi = bf_get(lpfc_rcqe_fcf_id_v1,
19584 			      &dmabuf->cq_event.cqe.rcqe_cmpl);
19585 	else
19586 		fcfi = bf_get(lpfc_rcqe_fcf_id,
19587 			      &dmabuf->cq_event.cqe.rcqe_cmpl);
19588 
19589 	if (fc_hdr->fh_r_ctl == FC_RCTL_MDS_DIAGS && fc_hdr->fh_type == 0xFF) {
19590 		vport = phba->pport;
19591 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
19592 				"2023 MDS Loopback %d bytes\n",
19593 				bf_get(lpfc_rcqe_length,
19594 				       &dmabuf->cq_event.cqe.rcqe_cmpl));
19595 		/* Handle MDS Loopback frames */
19596 		lpfc_sli4_handle_mds_loopback(vport, dmabuf);
19597 		return;
19598 	}
19599 
19600 	/* d_id this frame is directed to */
19601 	did = sli4_did_from_fc_hdr(fc_hdr);
19602 
19603 	vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi, did);
19604 	if (!vport) {
19605 		/* throw out the frame */
19606 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
19607 		return;
19608 	}
19609 
19610 	/* vport is registered unless we rcv a FLOGI directed to Fabric_DID */
19611 	if (!(vport->vpi_state & LPFC_VPI_REGISTERED) &&
19612 		(did != Fabric_DID)) {
19613 		/*
19614 		 * Throw out the frame if we are not pt2pt.
19615 		 * The pt2pt protocol allows for discovery frames
19616 		 * to be received without a registered VPI.
19617 		 */
19618 		if (!test_bit(FC_PT2PT, &vport->fc_flag) ||
19619 		    phba->link_state == LPFC_HBA_READY) {
19620 			lpfc_in_buf_free(phba, &dmabuf->dbuf);
19621 			return;
19622 		}
19623 	}
19624 
19625 	/* Handle the basic abort sequence (BA_ABTS) event */
19626 	if (fc_hdr->fh_r_ctl == FC_RCTL_BA_ABTS) {
19627 		lpfc_sli4_handle_unsol_abort(vport, dmabuf);
19628 		return;
19629 	}
19630 
19631 	/* Link this frame */
19632 	seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
19633 	if (!seq_dmabuf) {
19634 		/* unable to add frame to vport - throw it out */
19635 		lpfc_in_buf_free(phba, &dmabuf->dbuf);
19636 		return;
19637 	}
19638 	/* If not last frame in sequence continue processing frames. */
19639 	if (!lpfc_seq_complete(seq_dmabuf))
19640 		return;
19641 
19642 	/* Send the complete sequence to the upper layer protocol */
19643 	lpfc_sli4_send_seq_to_ulp(vport, seq_dmabuf);
19644 }
19645 
19646 /**
19647  * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
19648  * @phba: pointer to lpfc hba data structure.
19649  *
19650  * This routine is invoked to post rpi header templates to the
19651  * HBA consistent with the SLI-4 interface spec.  This routine
19652  * posts a SLI4_PAGE_SIZE memory region to the port to hold up to
19653  * SLI4_PAGE_SIZE / 64 rpi context headers.
19654  *
19655  * This routine does not require any locks.  Its usage is expected
19656  * to be driver load or reset recovery, when driver execution is
19657  * sequential.
19658  *
19659  * Return codes
19660  * 	0 - successful
19661  *      -EIO - The mailbox failed to complete successfully.
19662  * 	When this error occurs, the driver is not guaranteed
19663  *	to have any rpi regions posted to the device and
19664  *	must either attempt to repost the regions or take a
19665  *	fatal error.
19666  **/
19667 int
19668 lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
19669 {
19670 	struct lpfc_rpi_hdr *rpi_page;
19671 	uint32_t rc = 0;
19672 	uint16_t lrpi = 0;
19673 
19674 	/* SLI4 ports that support extents do not require RPI headers. */
19675 	if (!phba->sli4_hba.rpi_hdrs_in_use)
19676 		goto exit;
19677 	if (phba->sli4_hba.extents_in_use)
19678 		return -EIO;
19679 
19680 	list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
19681 		/*
19682 		 * Assign the rpi headers a physical rpi only if the driver
19683 		 * has not initialized those resources.  A port reset only
19684 		 * needs the headers posted.
19685 		 */
19686 		if (bf_get(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags) !=
19687 		    LPFC_RPI_RSRC_RDY)
19688 			rpi_page->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19689 
19690 		rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
19691 		if (rc != MBX_SUCCESS) {
19692 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19693 					"2008 Error %d posting all rpi "
19694 					"headers\n", rc);
19695 			rc = -EIO;
19696 			break;
19697 		}
19698 	}
19699 
19700  exit:
19701 	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags,
19702 	       LPFC_RPI_RSRC_RDY);
19703 	return rc;
19704 }
19705 
19706 /**
19707  * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
19708  * @phba: pointer to lpfc hba data structure.
19709  * @rpi_page:  pointer to the rpi memory region.
19710  *
19711  * This routine is invoked to post a single rpi header to the
19712  * HBA consistent with the SLI-4 interface spec.  This memory region
19713  * maps up to 64 rpi context regions.
19714  *
19715  * Return codes
19716  * 	0 - successful
19717  * 	-ENOMEM - No available memory
19718  *      -EIO - The mailbox failed to complete successfully.
19719  **/
19720 int
19721 lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
19722 {
19723 	LPFC_MBOXQ_t *mboxq;
19724 	struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
19725 	uint32_t rc = 0;
19726 	uint32_t shdr_status, shdr_add_status;
19727 	union lpfc_sli4_cfg_shdr *shdr;
19728 
19729 	/* SLI4 ports that support extents do not require RPI headers. */
19730 	if (!phba->sli4_hba.rpi_hdrs_in_use)
19731 		return rc;
19732 	if (phba->sli4_hba.extents_in_use)
19733 		return -EIO;
19734 
19735 	/* The port is notified of the header region via a mailbox command. */
19736 	mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19737 	if (!mboxq) {
19738 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19739 				"2001 Unable to allocate memory for issuing "
19740 				"SLI_CONFIG_SPECIAL mailbox command\n");
19741 		return -ENOMEM;
19742 	}
19743 
19744 	/* Post all rpi memory regions to the port. */
19745 	hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
19746 	lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
19747 			 LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
19748 			 sizeof(struct lpfc_mbx_post_hdr_tmpl) -
19749 			 sizeof(struct lpfc_sli4_cfg_mhdr),
19750 			 LPFC_SLI4_MBX_EMBED);
19751 
19752 
19753 	/* Post the physical rpi to the port for this rpi header. */
19754 	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
19755 	       rpi_page->start_rpi);
19756 	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
19757 	       hdr_tmpl, rpi_page->page_count);
19758 
19759 	hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
19760 	hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
19761 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
19762 	shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
19763 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
19764 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
19765 	mempool_free(mboxq, phba->mbox_mem_pool);
19766 	if (shdr_status || shdr_add_status || rc) {
19767 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19768 				"2514 POST_RPI_HDR mailbox failed with "
19769 				"status x%x add_status x%x, mbx status x%x\n",
19770 				shdr_status, shdr_add_status, rc);
19771 		rc = -ENXIO;
19772 	} else {
19773 		/*
19774 		 * The next_rpi stores the next logical module-64 rpi value used
19775 		 * to post physical rpis in subsequent rpi postings.
19776 		 */
19777 		spin_lock_irq(&phba->hbalock);
19778 		phba->sli4_hba.next_rpi = rpi_page->next_rpi;
19779 		spin_unlock_irq(&phba->hbalock);
19780 	}
19781 	return rc;
19782 }
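
/*
 * Mailbox layout sketch: each POST_HDR_TEMPLATE command describes one
 * header page by physical rpi offset, page count, and DMA address, e.g.
 * for a page covering physical rpis 64..127:
 *
 *	bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl, 64);
 *	bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt, hdr_tmpl, 1);
 *
 * The values shown are illustrative; real offsets come from
 * rpi_page->start_rpi as above.
 */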
19783 
19784 /**
19785  * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
19786  * @phba: pointer to lpfc hba data structure.
19787  *
19788  * This routine is invoked to allocate an available rpi from the driver's
19789  * rpi bitmask.  If the driver is running low on rpi resources, it will
19790  * also attempt to allocate and post another rpi header page so that
19791  * subsequent allocations do not exhaust the device's rpi range.
19792  *
19793  * Returns
19794  * 	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
19795  * 	LPFC_RPI_ALLOC_ERROR if no rpis are available.
19796  **/
19797 int
19798 lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
19799 {
19800 	unsigned long rpi;
19801 	uint16_t max_rpi, rpi_limit;
19802 	uint16_t rpi_remaining, lrpi = 0;
19803 	struct lpfc_rpi_hdr *rpi_hdr;
19804 	unsigned long iflag;
19805 
19806 	/*
19807 	 * Fetch the next logical rpi.  Because this index is logical,
19808 	 * the driver starts at 0 each time.
19809 	 */
19810 	spin_lock_irqsave(&phba->hbalock, iflag);
19811 	max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
19812 	rpi_limit = phba->sli4_hba.next_rpi;
19813 
19814 	rpi = find_first_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit);
19815 	if (rpi >= rpi_limit)
19816 		rpi = LPFC_RPI_ALLOC_ERROR;
19817 	else {
19818 		set_bit(rpi, phba->sli4_hba.rpi_bmask);
19819 		phba->sli4_hba.max_cfg_param.rpi_used++;
19820 		phba->sli4_hba.rpi_count++;
19821 	}
19822 	lpfc_printf_log(phba, KERN_INFO,
19823 			LOG_NODE | LOG_DISCOVERY,
19824 			"0001 Allocated rpi:x%x max:x%x lim:x%x\n",
19825 			(int) rpi, max_rpi, rpi_limit);
19826 
19827 	/*
19828 	 * Don't try to allocate more rpi header regions if the device limit
19829 	 * has been exhausted.
19830 	 */
19831 	if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
19832 	    (phba->sli4_hba.rpi_count >= max_rpi)) {
19833 		spin_unlock_irqrestore(&phba->hbalock, iflag);
19834 		return rpi;
19835 	}
19836 
19837 	/*
19838 	 * RPI header postings are not required for SLI4 ports capable of
19839 	 * extents.
19840 	 */
19841 	if (!phba->sli4_hba.rpi_hdrs_in_use) {
19842 		spin_unlock_irqrestore(&phba->hbalock, iflag);
19843 		return rpi;
19844 	}
19845 
19846 	/*
19847 	 * If the driver is running low on rpi resources, allocate another
19848 	 * page now.  Note that the next_rpi value is used because
19849 	 * it represents how many are actually in use, whereas max_rpi notes
19850 	 * the maximum number supported by the device.
19851 	 */
19852 	rpi_remaining = phba->sli4_hba.next_rpi - phba->sli4_hba.rpi_count;
19853 	spin_unlock_irqrestore(&phba->hbalock, iflag);
19854 	if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
19855 		rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
19856 		if (!rpi_hdr) {
19857 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19858 					"2002 Error Could not grow rpi "
19859 					"count\n");
19860 		} else {
19861 			lrpi = rpi_hdr->start_rpi;
19862 			rpi_hdr->start_rpi = phba->sli4_hba.rpi_ids[lrpi];
19863 			lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
19864 		}
19865 	}
19866 
19867 	return rpi;
19868 }
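
/*
 * A minimal usage sketch (not driver code): callers must check the
 * sentinel before using the returned rpi and release it when done:
 *
 *	int rpi = lpfc_sli4_alloc_rpi(phba);
 *
 *	if (rpi == LPFC_RPI_ALLOC_ERROR)
 *		... no rpi available, fail the registration ...
 *	else
 *		... use the rpi, then lpfc_sli4_free_rpi(phba, rpi) ...
 */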
19869 
19870 /**
19871  * __lpfc_sli4_free_rpi - Release an rpi for reuse.
19872  * @phba: pointer to lpfc hba data structure.
19873  * @rpi: rpi to free
19874  *
19875  * This routine is invoked to release an rpi to the pool of
19876  * available rpis maintained by the driver.
19877  **/
19878 static void
19879 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19880 {
19881 	/*
19882 	 * if the rpi value indicates a prior unreg has already
19883 	 * been done, skip the unreg.
19884 	 */
19885 	if (rpi == LPFC_RPI_ALLOC_ERROR)
19886 		return;
19887 
19888 	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
19889 		phba->sli4_hba.rpi_count--;
19890 		phba->sli4_hba.max_cfg_param.rpi_used--;
19891 	} else {
19892 		lpfc_printf_log(phba, KERN_INFO,
19893 				LOG_NODE | LOG_DISCOVERY,
19894 				"2016 rpi %x not inuse\n",
19895 				rpi);
19896 	}
19897 }
19898 
19899 /**
19900  * lpfc_sli4_free_rpi - Release an rpi for reuse.
19901  * @phba: pointer to lpfc hba data structure.
19902  * @rpi: rpi to free
19903  *
19904  * This routine is invoked to release an rpi to the pool of
19905  * available rpis maintained by the driver.
19906  **/
19907 void
19908 lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
19909 {
19910 	spin_lock_irq(&phba->hbalock);
19911 	__lpfc_sli4_free_rpi(phba, rpi);
19912 	spin_unlock_irq(&phba->hbalock);
19913 }
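
/*
 * A minimal sketch of the locking contract (not driver code):
 * __lpfc_sli4_free_rpi() is for callers that already hold hbalock;
 * everyone else uses the wrapper:
 *
 *	spin_lock_irq(&phba->hbalock);
 *	__lpfc_sli4_free_rpi(phba, rpi);	caller holds the lock
 *	spin_unlock_irq(&phba->hbalock);
 *
 *	lpfc_sli4_free_rpi(phba, rpi);		wrapper takes the lock
 */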
19914 
19915 /**
19916  * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
19917  * @phba: pointer to lpfc hba data structure.
19918  *
19919  * This routine is invoked to free the memory regions that provide
19920  * rpis via a bitmask: the rpi bitmask and the rpi id array.
19921  **/
19922 void
19923 lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
19924 {
19925 	kfree(phba->sli4_hba.rpi_bmask);
19926 	kfree(phba->sli4_hba.rpi_ids);
19927 	bf_set(lpfc_rpi_rsrc_rdy, &phba->sli4_hba.sli4_flags, 0);
19928 }
19929 
19930 /**
19931  * lpfc_sli4_resume_rpi - Resume traffic relative to an RPI
19932  * @ndlp: pointer to lpfc nodelist data structure.
19933  * @cmpl: completion call-back.
19934  * @iocbq: data to load as mbox ctx_u information
19935  *
19936  * Return codes
19937  *	0 - successful
19938  *	-ENOMEM - No available memory
19939  *	-EIO - The mailbox failed to complete successfully.
19940  **/
19941 int
19942 lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp,
19943 		     void (*cmpl)(struct lpfc_hba *, LPFC_MBOXQ_t *),
19944 		     struct lpfc_iocbq *iocbq)
19945 {
19946 	LPFC_MBOXQ_t *mboxq;
19947 	struct lpfc_hba *phba = ndlp->phba;
19948 	int rc;
19949 
19950 	/* The port is notified of the header region via a mailbox command. */
19951 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
19952 	if (!mboxq)
19953 		return -ENOMEM;
19954 
19955 	/* If cmpl is assigned, then this nlp_get pairs with
19956 	 * lpfc_mbx_cmpl_resume_rpi.
19957 	 *
19958 	 * Else, when cmpl is NULL, this nlp_get pairs with
19959 	 * lpfc_sli_def_mbox_cmpl.
19960 	 */
19961 	if (!lpfc_nlp_get(ndlp)) {
19962 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19963 				"2122 %s: Failed to get nlp ref\n",
19964 				__func__);
19965 		mempool_free(mboxq, phba->mbox_mem_pool);
19966 		return -EIO;
19967 	}
19968 
19969 	lpfc_resume_rpi(mboxq, ndlp);
19970 	if (cmpl) {
19971 		mboxq->mbox_cmpl = cmpl;
19972 		mboxq->ctx_u.save_iocb = iocbq;
19973 	} else
19974 		mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
19975 	mboxq->ctx_ndlp = ndlp;
19976 	mboxq->vport = ndlp->vport;
19977 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
19978 	if (rc == MBX_NOT_FINISHED) {
19979 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
19980 				"2010 Resume RPI Mailbox failed "
19981 				"status %d, mbxStatus x%x\n", rc,
19982 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
19983 		lpfc_nlp_put(ndlp);
19984 		mempool_free(mboxq, phba->mbox_mem_pool);
19985 		return -EIO;
19986 	}
19987 	return 0;
19988 }
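
/*
 * A minimal usage sketch (not driver code): traffic on a node's rpi can
 * be resumed fire-and-forget, or with a caller-supplied completion; the
 * callback and saved iocb names below are hypothetical:
 *
 *	lpfc_sli4_resume_rpi(ndlp, NULL, NULL);
 *
 *	lpfc_sli4_resume_rpi(ndlp, my_resume_cmpl, saved_iocbq);
 */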
19989 
19990 /**
19991  * lpfc_sli4_init_vpi - Initialize a vpi with the port
19992  * @vport: Pointer to the vport for which the vpi is being initialized
19993  *
19994  * This routine is invoked to activate a vpi with the port.
19995  *
19996  * Returns:
19997  *    0 success
19998  *    -Evalue otherwise
19999  **/
20000 int
20001 lpfc_sli4_init_vpi(struct lpfc_vport *vport)
20002 {
20003 	LPFC_MBOXQ_t *mboxq;
20004 	int rc = 0;
20005 	int retval = MBX_SUCCESS;
20006 	uint32_t mbox_tmo;
20007 	struct lpfc_hba *phba = vport->phba;
20008 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20009 	if (!mboxq)
20010 		return -ENOMEM;
20011 	lpfc_init_vpi(phba, mboxq, vport->vpi);
20012 	mbox_tmo = lpfc_mbox_tmo_val(phba, mboxq);
20013 	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
20014 	if (rc != MBX_SUCCESS) {
20015 		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
20016 				"2022 INIT VPI Mailbox failed "
20017 				"status %d, mbxStatus x%x\n", rc,
20018 				bf_get(lpfc_mqe_status, &mboxq->u.mqe));
20019 		retval = -EIO;
20020 	}
20021 	if (rc != MBX_TIMEOUT)
20022 		mempool_free(mboxq, vport->phba->mbox_mem_pool);
20023 
20024 	return retval;
20025 }
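
/*
 * A minimal usage sketch (not driver code): a vport bring-up path would
 * activate its vpi before continuing with discovery:
 *
 *	if (lpfc_sli4_init_vpi(vport))
 *		... INIT_VPI failed, abort the vport bring-up ...
 */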
20026 
20027 /**
20028  * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
20029  * @phba: pointer to lpfc hba data structure.
20030  * @mboxq: Pointer to mailbox object.
20031  *
20032  * This routine is the completion handler for the ADD_FCF mailbox command.
20033  * It checks the status embedded in the mailbox subheader, logs any failure
20034  * other than STATUS_FCF_IN_USE, and frees the nonembedded mailbox resources.
20035  **/
20036 static void
20037 lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
20038 {
20039 	void *virt_addr;
20040 	union lpfc_sli4_cfg_shdr *shdr;
20041 	uint32_t shdr_status, shdr_add_status;
20042 
20043 	virt_addr = mboxq->sge_array->addr[0];
20044 	/* The IOCTL status is embedded in the mailbox subheader. */
20045 	shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
20046 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
20047 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
20048 
20049 	if ((shdr_status || shdr_add_status) &&
20050 		(shdr_status != STATUS_FCF_IN_USE))
20051 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20052 			"2558 ADD_FCF_RECORD mailbox failed with "
20053 			"status x%x add_status x%x\n",
20054 			shdr_status, shdr_add_status);
20055 
20056 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
20057 }
20058 
20059 /**
20060  * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
20061  * @phba: pointer to lpfc hba data structure.
20062  * @fcf_record:  pointer to the initialized fcf record to add.
20063  *
20064  * This routine is invoked to manually add a single FCF record. The caller
20065  * must pass a completely initialized FCF_Record.  This routine takes
20066  * care of the nonembedded mailbox operations.
20067  **/
20068 int
20069 lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
20070 {
20071 	int rc = 0;
20072 	LPFC_MBOXQ_t *mboxq;
20073 	uint8_t *bytep;
20074 	void *virt_addr;
20075 	struct lpfc_mbx_sge sge;
20076 	uint32_t alloc_len, req_len;
20077 	uint32_t fcfindex;
20078 
20079 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20080 	if (!mboxq) {
20081 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20082 			"2009 Failed to allocate mbox for ADD_FCF cmd\n");
20083 		return -ENOMEM;
20084 	}
20085 
20086 	req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
20087 		  sizeof(uint32_t);
20088 
20089 	/* Allocate DMA memory and set up the non-embedded mailbox command */
20090 	alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
20091 				     LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
20092 				     req_len, LPFC_SLI4_MBX_NEMBED);
20093 	if (alloc_len < req_len) {
20094 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20095 			"2523 Allocated DMA memory size (x%x) is "
20096 			"less than the requested DMA memory "
20097 			"size (x%x)\n", alloc_len, req_len);
20098 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
20099 		return -ENOMEM;
20100 	}
20101 
20102 	/*
20103 	 * Get the first SGE entry from the non-embedded DMA memory.  This
20104 	 * routine only uses a single SGE.
20105 	 */
20106 	lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
20107 	virt_addr = mboxq->sge_array->addr[0];
20108 	/*
20109 	 * Configure the FCF record for FCFI 0.  This is the driver's
20110 	 * hardcoded default and gets used in non-FIP mode.
20111 	 */
20112 	fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
20113 	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
20114 	lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
20115 
20116 	/*
20117 	 * Copy the fcf_index and the FCF Record Data. The data starts after
20118 	 * the FCoE header plus word10. The data copy needs to be endian
20119 	 * correct.
20120 	 */
20121 	bytep += sizeof(uint32_t);
20122 	lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
20123 	mboxq->vport = phba->pport;
20124 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
20125 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20126 	if (rc == MBX_NOT_FINISHED) {
20127 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20128 			"2515 ADD_FCF_RECORD mailbox failed with "
20129 			"status 0x%x\n", rc);
20130 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
20131 		rc = -EIO;
20132 	} else
20133 		rc = 0;
20134 
20135 	return rc;
20136 }
20137 
20138 /**
20139  * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
20140  * @phba: pointer to lpfc hba data structure.
20141  * @fcf_record:  pointer to the fcf record to write the default data.
20142  * @fcf_index: FCF table entry index.
20143  *
20144  * This routine is invoked to build the driver's default FCF record.  The
20145  * values used are hardcoded.  This routine handles memory initialization.
20146  *
20147  **/
20148 void
20149 lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
20150 				struct fcf_record *fcf_record,
20151 				uint16_t fcf_index)
20152 {
20153 	memset(fcf_record, 0, sizeof(struct fcf_record));
20154 	fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
20155 	fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
20156 	fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
20157 	bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
20158 	bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
20159 	bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
20160 	bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
20161 	bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
20162 	bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
20163 	bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
20164 	bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
20165 	bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
20166 	bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
20167 	bf_set(lpfc_fcf_record_fcf_avail, fcf_record, 1);
20168 	bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
20169 	bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
20170 		LPFC_FCF_FPMA | LPFC_FCF_SPMA);
20171 	/* Set the VLAN bit map */
20172 	if (phba->valid_vlan) {
20173 		fcf_record->vlan_bitmap[phba->vlan_id / 8]
20174 			= 1 << (phba->vlan_id % 8);
20175 	}
20176 }
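
/*
 * A minimal usage sketch (not driver code): the default record is built
 * and then handed to lpfc_sli4_add_fcf_record(), here for FCF index 0:
 *
 *	struct fcf_record fcf_record;
 *
 *	lpfc_sli4_build_dflt_fcf_record(phba, &fcf_record, 0);
 *	if (lpfc_sli4_add_fcf_record(phba, &fcf_record))
 *		... the ADD_FCF mailbox could not be issued ...
 */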
20177 
20178 /**
20179  * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
20180  * @phba: pointer to lpfc hba data structure.
20181  * @fcf_index: FCF table entry offset.
20182  *
20183  * This routine is invoked to scan the entire FCF table by reading FCF
20184  * record and processing it one at a time starting from the @fcf_index
20185  * for initial FCF discovery or fast FCF failover rediscovery.
20186  *
20187  * Return 0 if the mailbox command is submitted successfully, non-zero
20188  * otherwise.
20189  **/
20190 int
20191 lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20192 {
20193 	int rc = 0, error;
20194 	LPFC_MBOXQ_t *mboxq;
20195 
20196 	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
20197 	phba->fcoe_cvl_eventtag_attn = phba->fcoe_cvl_eventtag;
20198 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20199 	if (!mboxq) {
20200 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20201 				"2000 Failed to allocate mbox for "
20202 				"READ_FCF cmd\n");
20203 		error = -ENOMEM;
20204 		goto fail_fcf_scan;
20205 	}
20206 	/* Construct the read FCF record mailbox command */
20207 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20208 	if (rc) {
20209 		error = -EINVAL;
20210 		goto fail_fcf_scan;
20211 	}
20212 	/* Issue the mailbox command asynchronously */
20213 	mboxq->vport = phba->pport;
20214 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
20215 
20216 	set_bit(FCF_TS_INPROG, &phba->hba_flag);
20217 
20218 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20219 	if (rc == MBX_NOT_FINISHED)
20220 		error = -EIO;
20221 	else {
20222 		/* Reset eligible FCF count for new scan */
20223 		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
20224 			phba->fcf.eligible_fcf_cnt = 0;
20225 		error = 0;
20226 	}
20227 fail_fcf_scan:
20228 	if (error) {
20229 		if (mboxq)
20230 			lpfc_sli4_mbox_cmd_free(phba, mboxq);
20231 		/* FCF scan failed, clear FCF_TS_INPROG flag */
20232 		clear_bit(FCF_TS_INPROG, &phba->hba_flag);
20233 	}
20234 	return error;
20235 }
20236 
20237 /**
20238  * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
20239  * @phba: pointer to lpfc hba data structure.
20240  * @fcf_index: FCF table entry offset.
20241  *
20242  * This routine is invoked to read an FCF record indicated by @fcf_index
20243  * and to use it for FLOGI roundrobin FCF failover.
20244  *
20245  * Return 0 if the mailbox command is submitted successfully, non-zero
20246  * otherwise.
20247  **/
20248 int
20249 lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20250 {
20251 	int rc = 0, error;
20252 	LPFC_MBOXQ_t *mboxq;
20253 
20254 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20255 	if (!mboxq) {
20256 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20257 				"2763 Failed to allocate mbox for "
20258 				"READ_FCF cmd\n");
20259 		error = -ENOMEM;
20260 		goto fail_fcf_read;
20261 	}
20262 	/* Construct the read FCF record mailbox command */
20263 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20264 	if (rc) {
20265 		error = -EINVAL;
20266 		goto fail_fcf_read;
20267 	}
20268 	/* Issue the mailbox command asynchronously */
20269 	mboxq->vport = phba->pport;
20270 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
20271 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20272 	if (rc == MBX_NOT_FINISHED)
20273 		error = -EIO;
20274 	else
20275 		error = 0;
20276 
20277 fail_fcf_read:
20278 	if (error && mboxq)
20279 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
20280 	return error;
20281 }
20282 
20283 /**
20284  * lpfc_sli4_read_fcf_rec - Read hba fcf record for update eligible fcf bmask.
20285  * @phba: pointer to lpfc hba data structure.
20286  * @fcf_index: FCF table entry offset.
20287  *
20288  * This routine is invoked to read an FCF record indicated by @fcf_index to
20289  * determine whether it's eligible for FLOGI roundrobin failover list.
20290  *
20291  * Return 0 if the mailbox command is submitted successfully, non-zero
20292  * otherwise.
20293  **/
20294 int
20295 lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
20296 {
20297 	int rc = 0, error;
20298 	LPFC_MBOXQ_t *mboxq;
20299 
20300 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20301 	if (!mboxq) {
20302 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
20303 				"2758 Failed to allocate mbox for "
20304 				"READ_FCF cmd\n");
20305 		error = -ENOMEM;
20306 		goto fail_fcf_read;
20307 	}
20308 	/* Construct the read FCF record mailbox command */
20309 	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
20310 	if (rc) {
20311 		error = -EINVAL;
20312 		goto fail_fcf_read;
20313 	}
20314 	/* Issue the mailbox command asynchronously */
20315 	mboxq->vport = phba->pport;
20316 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
20317 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
20318 	if (rc == MBX_NOT_FINISHED)
20319 		error = -EIO;
20320 	else
20321 		error = 0;
20322 
20323 fail_fcf_read:
20324 	if (error && mboxq)
20325 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
20326 	return error;
20327 }
20328 
20329 /**
20330  * lpfc_check_next_fcf_pri_level - Repopulate rr_bmask from the next level
20331  * @phba: pointer to the lpfc_hba struct for this port.
20332  * This routine is called from the lpfc_sli4_fcf_rr_next_index_get
20333  * routine when the rr_bmask is empty. The FCF indices are put into the
20334  * rr_bmask based on their priority level, starting from the highest
20335  * priority down to the lowest. The most likely FCF candidate will be in
20336  * the highest priority group. When this routine is called it searches the
20337  * fcf_pri list for the next lowest priority group and repopulates the
20338  * rr_bmask with only those fcf indexes.
20339  * Returns:
20340  * 1=success 0=failure
20341  **/
20342 static int
20343 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
20344 {
20345 	uint16_t next_fcf_pri;
20346 	uint16_t last_index;
20347 	struct lpfc_fcf_pri *fcf_pri;
20348 	int rc;
20349 	int ret = 0;
20350 
20351 	last_index = find_first_bit(phba->fcf.fcf_rr_bmask,
20352 			LPFC_SLI4_FCF_TBL_INDX_MAX);
20353 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20354 			"3060 Last IDX %d\n", last_index);
20355 
20356 	/* Verify the priority list has 2 or more entries */
20357 	spin_lock_irq(&phba->hbalock);
20358 	if (list_empty(&phba->fcf.fcf_pri_list) ||
20359 	    list_is_singular(&phba->fcf.fcf_pri_list)) {
20360 		spin_unlock_irq(&phba->hbalock);
20361 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20362 			"3061 Last IDX %d\n", last_index);
20363 		return 0; /* Empty rr list */
20364 	}
20365 	spin_unlock_irq(&phba->hbalock);
20366 
20367 	next_fcf_pri = 0;
20368 	/*
20369 	 * Clear the rr_bmask and set all of the bits that are at this
20370 	 * priority.
20371 	 */
20372 	memset(phba->fcf.fcf_rr_bmask, 0,
20373 			sizeof(*phba->fcf.fcf_rr_bmask));
20374 	spin_lock_irq(&phba->hbalock);
20375 	list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20376 		if (fcf_pri->fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)
20377 			continue;
20378 		/*
20379 		 * The first priority that has not failed FLOGI
20380 		 * will be the highest.
20381 		 */
20382 		if (!next_fcf_pri)
20383 			next_fcf_pri = fcf_pri->fcf_rec.priority;
20384 		spin_unlock_irq(&phba->hbalock);
20385 		if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20386 			rc = lpfc_sli4_fcf_rr_index_set(phba,
20387 						fcf_pri->fcf_rec.fcf_index);
20388 			if (rc)
20389 				return 0;
20390 		}
20391 		spin_lock_irq(&phba->hbalock);
20392 	}
20393 	/*
20394 	 * If next_fcf_pri was not set above and the list is not empty, then
20395 	 * FLOGI has failed on all of them. So clear the FLOGI-failed flags
20396 	 * and start over at the beginning.
20397 	 */
20398 	if (!next_fcf_pri && !list_empty(&phba->fcf.fcf_pri_list)) {
20399 		list_for_each_entry(fcf_pri, &phba->fcf.fcf_pri_list, list) {
20400 			fcf_pri->fcf_rec.flag &= ~LPFC_FCF_FLOGI_FAILED;
20401 			/*
20402 			 * The first priority that has not failed FLOGI
20403 			 * will be the highest.
20404 			 */
20405 			if (!next_fcf_pri)
20406 				next_fcf_pri = fcf_pri->fcf_rec.priority;
20407 			spin_unlock_irq(&phba->hbalock);
20408 			if (fcf_pri->fcf_rec.priority == next_fcf_pri) {
20409 				rc = lpfc_sli4_fcf_rr_index_set(phba,
20410 						fcf_pri->fcf_rec.fcf_index);
20411 				if (rc)
20412 					return 0;
20413 			}
20414 			spin_lock_irq(&phba->hbalock);
20415 		}
20416 	} else
20417 		ret = 1;
20418 	spin_unlock_irq(&phba->hbalock);
20419 
20420 	return ret;
20421 }
20422 /**
20423  * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
20424  * @phba: pointer to lpfc hba data structure.
20425  *
20426  * This routine is used to get the next eligible FCF record index in a
20427  * roundrobin fashion. If the next eligible FCF record index equals the
20428  * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
20429  * shall be returned, otherwise, the next eligible FCF record's index
20430  * shall be returned.
20431  **/
20432 uint16_t
20433 lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
20434 {
20435 	uint16_t next;
20436 
20437 	do {
20438 		for_each_set_bit_wrap(next, phba->fcf.fcf_rr_bmask,
20439 				LPFC_SLI4_FCF_TBL_INDX_MAX, phba->fcf.current_rec.fcf_indx) {
20440 			if (next == phba->fcf.current_rec.fcf_indx)
20441 				continue;
20442 
20443 			if (!(phba->fcf.fcf_pri[next].fcf_rec.flag & LPFC_FCF_FLOGI_FAILED)) {
20444 				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20445 					"2845 Get next roundrobin failover FCF (x%x)\n", next);
20446 				return next;
20447 			}
20448 
20449 			if (list_is_singular(&phba->fcf.fcf_pri_list))
20450 				return LPFC_FCOE_FCF_NEXT_NONE;
20451 		}
20452 
20453 		/*
20454 		 * If the next fcf index is not found, check if there are
20455 		 * lower priority level fcf's in the fcf_priority list.
20456 		 * Set up the rr_bmask with all of the available fcf bits
20457 		 * at that level and continue the selection process.
20458 		 */
20459 	} while (lpfc_check_next_fcf_pri_level(phba));
20460 
20461 	lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
20462 			"2844 No roundrobin failover FCF available\n");
20463 
20464 	return LPFC_FCOE_FCF_NEXT_NONE;
20465 }
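
/*
 * A minimal usage sketch (not driver code): a failover path keeps asking
 * for the next eligible FCF until the round robin reports none left:
 *
 *	uint16_t fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
 *
 *	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE)
 *		... no eligible FCF remains, stop the failover ...
 *	else
 *		lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
 */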
20466 
20467 /**
20468  * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
20469  * @phba: pointer to lpfc hba data structure.
20470  * @fcf_index: index into the FCF table to 'set'
20471  *
20472  * This routine sets the FCF record index in to the eligible bmask for
20473  * roundrobin failover search. It checks to make sure that the index
20474  * does not go beyond the range of the driver allocated bmask dimension
20475  * before setting the bit.
20476  *
20477  * Returns 0 if the index bit is successfully set, otherwise it returns
20478  * -EINVAL.
20479  **/
20480 int
20481 lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
20482 {
20483 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20484 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20485 				"2610 FCF (x%x) reached driver's book "
20486 				"keeping dimension:x%x\n",
20487 				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20488 		return -EINVAL;
20489 	}
20490 	/* Set the eligible FCF record index bmask */
20491 	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20492 
20493 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20494 			"2790 Set FCF (x%x) to roundrobin FCF failover "
20495 			"bmask\n", fcf_index);
20496 
20497 	return 0;
20498 }
20499 
20500 /**
20501  * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
20502  * @phba: pointer to lpfc hba data structure.
20503  * @fcf_index: index into the FCF table to 'clear'
20504  *
20505  * This routine clears the FCF record index from the eligible bmask for
20506  * roundrobin failover search. It checks to make sure that the index
20507  * does not go beyond the range of the driver allocated bmask dimension
20508  * before clearing the bit.
20509  **/
20510 void
20511 lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
20512 {
20513 	struct lpfc_fcf_pri *fcf_pri, *fcf_pri_next;
20514 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
20515 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20516 				"2762 FCF (x%x) reached driver's book "
20517 				"keeping dimension:x%x\n",
20518 				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
20519 		return;
20520 	}
20521 	/* Clear the eligible FCF record index bmask */
20522 	spin_lock_irq(&phba->hbalock);
20523 	list_for_each_entry_safe(fcf_pri, fcf_pri_next, &phba->fcf.fcf_pri_list,
20524 				 list) {
20525 		if (fcf_pri->fcf_rec.fcf_index == fcf_index) {
20526 			list_del_init(&fcf_pri->list);
20527 			break;
20528 		}
20529 	}
20530 	spin_unlock_irq(&phba->hbalock);
20531 	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
20532 
20533 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20534 			"2791 Clear FCF (x%x) from roundrobin failover "
20535 			"bmask\n", fcf_index);
20536 }
20537 
20538 /**
20539  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
20540  * @phba: pointer to lpfc hba data structure.
20541  * @mbox: An allocated pointer to type LPFC_MBOXQ_t
20542  *
20543  * This routine is the completion routine for the rediscover FCF table mailbox
20544  * command. If the mailbox command returned failure, it will try to stop the
20545  * FCF rediscover wait timer.
20546  **/
20547 static void
20548 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
20549 {
20550 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20551 	uint32_t shdr_status, shdr_add_status;
20552 
20553 	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20554 
20555 	shdr_status = bf_get(lpfc_mbox_hdr_status,
20556 			     &redisc_fcf->header.cfg_shdr.response);
20557 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
20558 			     &redisc_fcf->header.cfg_shdr.response);
20559 	if (shdr_status || shdr_add_status) {
20560 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
20561 				"2746 Requesting for FCF rediscovery failed "
20562 				"status x%x add_status x%x\n",
20563 				shdr_status, shdr_add_status);
20564 		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
20565 			spin_lock_irq(&phba->hbalock);
20566 			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
20567 			spin_unlock_irq(&phba->hbalock);
20568 			/*
20569 			 * CVL event triggered FCF rediscover request failed,
20570 			 * last resort to re-try current registered FCF entry.
20571 			 */
20572 			lpfc_retry_pport_discovery(phba);
20573 		} else {
20574 			spin_lock_irq(&phba->hbalock);
20575 			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
20576 			spin_unlock_irq(&phba->hbalock);
20577 			/*
20578 			 * DEAD FCF event triggered FCF rediscover request
20579 			 * failed, last resort to fail over as a link down
20580 			 * to FCF registration.
20581 			 */
20582 			lpfc_sli4_fcf_dead_failthrough(phba);
20583 		}
20584 	} else {
20585 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
20586 				"2775 Start FCF rediscover quiescent timer\n");
20587 		/*
20588 		 * Start the FCF rediscovery wait timer for the pending FCF
20589 		 * before rescanning the FCF record table.
20590 		 */
20591 		lpfc_fcf_redisc_wait_start_timer(phba);
20592 	}
20593 
20594 	mempool_free(mbox, phba->mbox_mem_pool);
20595 }
20596 
20597 /**
20598  * lpfc_sli4_redisc_fcf_table - Request to rediscover entire FCF table by port.
20599  * @phba: pointer to lpfc hba data structure.
20600  *
20601  * This routine is invoked to request for rediscovery of the entire FCF table
20602  * by the port.
20603  **/
20604 int
20605 lpfc_sli4_redisc_fcf_table(struct lpfc_hba *phba)
20606 {
20607 	LPFC_MBOXQ_t *mbox;
20608 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
20609 	int rc, length;
20610 
20611 	/* Cancel retry delay timers to all vports before FCF rediscover */
20612 	lpfc_cancel_all_vport_retry_delay_timer(phba);
20613 
20614 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20615 	if (!mbox) {
20616 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20617 				"2745 Failed to allocate mbox for "
20618 				"requesting FCF rediscover.\n");
20619 		return -ENOMEM;
20620 	}
20621 
20622 	length = (sizeof(struct lpfc_mbx_redisc_fcf_tbl) -
20623 		  sizeof(struct lpfc_sli4_cfg_mhdr));
20624 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
20625 			 LPFC_MBOX_OPCODE_FCOE_REDISCOVER_FCF,
20626 			 length, LPFC_SLI4_MBX_EMBED);
20627 
20628 	redisc_fcf = &mbox->u.mqe.un.redisc_fcf_tbl;
20629 	/* Set count to 0 for invalidating the entire FCF database */
20630 	bf_set(lpfc_mbx_redisc_fcf_count, redisc_fcf, 0);
20631 
20632 	/* Issue the mailbox command asynchronously */
20633 	mbox->vport = phba->pport;
20634 	mbox->mbox_cmpl = lpfc_mbx_cmpl_redisc_fcf_table;
20635 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
20636 
20637 	if (rc == MBX_NOT_FINISHED) {
20638 		mempool_free(mbox, phba->mbox_mem_pool);
20639 		return -EIO;
20640 	}
20641 	return 0;
20642 }
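
/*
 * A sketch of the rediscovery flow implied by the routines above (not
 * driver code): the request invalidates the entire FCF database (count
 * 0), its completion arms the quiescent wait timer, and a fresh scan is
 * later started from the first table entry:
 *
 *	if (!lpfc_sli4_redisc_fcf_table(phba))
 *		... lpfc_mbx_cmpl_redisc_fcf_table() runs, timer fires ...
 *	lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 */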
20643 
20644 /**
20645  * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
20646  * @phba: pointer to lpfc hba data structure.
20647  *
20648  * This function is the failover routine as a last resort to the FCF DEAD
20649  * event when driver failed to perform fast FCF failover.
20650  **/
20651 void
20652 lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
20653 {
20654 	uint32_t link_state;
20655 
20656 	/*
20657 	 * Last resort as FCF DEAD event failover will treat this as
20658 	 * a link down, but save the link state because we don't want
20659 	 * it to be changed to Link Down unless it is already down.
20660 	 */
20661 	link_state = phba->link_state;
20662 	lpfc_linkdown(phba);
20663 	phba->link_state = link_state;
20664 
20665 	/* Unregister FCF if no devices connected to it */
20666 	lpfc_unregister_unused_fcf(phba);
20667 }
20668 
20669 /**
20670  * lpfc_sli_get_config_region23 - Get sli3 port region 23 data.
20671  * @phba: pointer to lpfc hba data structure.
20672  * @rgn23_data: pointer to configure region 23 data.
20673  *
20674  * This function gets SLI3 port configuration region 23 data through memory dump
20675  * mailbox command. When it successfully retrieves data, the size of the data
20676  * will be returned, otherwise, 0 will be returned.
20677  **/
20678 static uint32_t
20679 lpfc_sli_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20680 {
20681 	LPFC_MBOXQ_t *pmb = NULL;
20682 	MAILBOX_t *mb;
20683 	uint32_t offset = 0;
20684 	int rc;
20685 
20686 	if (!rgn23_data)
20687 		return 0;
20688 
20689 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20690 	if (!pmb) {
20691 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20692 				"2600 failed to allocate mailbox memory\n");
20693 		return 0;
20694 	}
20695 	mb = &pmb->u.mb;
20696 
20697 	do {
20698 		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
20699 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
20700 
20701 		if (rc != MBX_SUCCESS) {
20702 			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
20703 					"2601 failed to read config "
20704 					"region 23, rc 0x%x Status 0x%x\n",
20705 					rc, mb->mbxStatus);
20706 			mb->un.varDmp.word_cnt = 0;
20707 		}
20708 		/*
20709 		 * The dump may return a zero word count when finished or on a
20710 		 * mailbox error; either way we are done.
20711 		 */
20712 		if (mb->un.varDmp.word_cnt == 0)
20713 			break;
20714 
20715 		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
20716 			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
20717 
20718 		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
20719 				       rgn23_data + offset,
20720 				       mb->un.varDmp.word_cnt);
20721 		offset += mb->un.varDmp.word_cnt;
20722 	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
20723 
20724 	mempool_free(pmb, phba->mbox_mem_pool);
20725 	return offset;
20726 }
20727 
20728 /**
20729  * lpfc_sli4_get_config_region23 - Get sli4 port region 23 data.
20730  * @phba: pointer to lpfc hba data structure.
20731  * @rgn23_data: pointer to configure region 23 data.
20732  *
20733  * This function gets SLI4 port configure region 23 data through memory dump
20734  * mailbox command. When it successfully retrieves data, the size of the data
20735  * will be returned, otherwise, 0 will be returned.
20736  **/
20737 static uint32_t
20738 lpfc_sli4_get_config_region23(struct lpfc_hba *phba, char *rgn23_data)
20739 {
20740 	LPFC_MBOXQ_t *mboxq = NULL;
20741 	struct lpfc_dmabuf *mp = NULL;
20742 	struct lpfc_mqe *mqe;
20743 	uint32_t data_length = 0;
20744 	int rc;
20745 
20746 	if (!rgn23_data)
20747 		return 0;
20748 
20749 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20750 	if (!mboxq) {
20751 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20752 				"3105 failed to allocate mailbox memory\n");
20753 		return 0;
20754 	}
20755 
20756 	if (lpfc_sli4_dump_cfg_rg23(phba, mboxq))
20757 		goto out;
20758 	mqe = &mboxq->u.mqe;
20759 	mp = mboxq->ctx_buf;
20760 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
20761 	if (rc)
20762 		goto out;
20763 	data_length = mqe->un.mb_words[5];
20764 	if (data_length == 0)
20765 		goto out;
20766 	if (data_length > DMP_RGN23_SIZE) {
20767 		data_length = 0;
20768 		goto out;
20769 	}
20770 	lpfc_sli_pcimem_bcopy((char *)mp->virt, rgn23_data, data_length);
20771 out:
20772 	lpfc_mbox_rsrc_cleanup(phba, mboxq, MBOX_THD_UNLOCKED);
20773 	return data_length;
20774 }
20775 
20776 /**
20777  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
20778  * @phba: pointer to lpfc hba data structure.
20779  *
20780  * This function reads region 23 and parses the TLVs for port status to
20781  * decide if the user disabled the port. If the TLV indicates the
20782  * port is disabled, the hba_flag is set accordingly.
20783  **/
20784 void
20785 lpfc_sli_read_link_ste(struct lpfc_hba *phba)
20786 {
20787 	uint8_t *rgn23_data = NULL;
20788 	uint32_t if_type, data_size, sub_tlv_len, tlv_offset;
20789 	uint32_t offset = 0;
20790 
20791 	/* Get adapter Region 23 data */
20792 	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
20793 	if (!rgn23_data)
20794 		goto out;
20795 
20796 	if (phba->sli_rev < LPFC_SLI_REV4)
20797 		data_size = lpfc_sli_get_config_region23(phba, rgn23_data);
20798 	else {
20799 		if_type = bf_get(lpfc_sli_intf_if_type,
20800 				 &phba->sli4_hba.sli_intf);
20801 		if (if_type == LPFC_SLI_INTF_IF_TYPE_0)
20802 			goto out;
20803 		data_size = lpfc_sli4_get_config_region23(phba, rgn23_data);
20804 	}
20805 
20806 	if (!data_size)
20807 		goto out;
20808 
20809 	/* Check the region signature first */
20810 	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
20811 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20812 			"2619 Config region 23 has bad signature\n");
20813 		goto out;
20814 	}
20815 	offset += 4;
20816 
20817 	/* Check the data structure version */
20818 	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
20819 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
20820 			"2620 Config region 23 has bad version\n");
20821 		goto out;
20822 	}
20823 	offset += 4;
20824 
20825 	/* Parse TLV entries in the region */
20826 	while (offset < data_size) {
20827 		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
20828 			break;
20829 		/*
20830 		 * If the TLV is not driver specific TLV or driver id is
20831 		 * not linux driver id, skip the record.
20832 		 */
20833 		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
20834 		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
20835 		    (rgn23_data[offset + 3] != 0)) {
20836 			offset += rgn23_data[offset + 1] * 4 + 4;
20837 			continue;
20838 		}
20839 
20840 		/* Driver found a driver specific TLV in the config region */
20841 		sub_tlv_len = rgn23_data[offset + 1] * 4;
20842 		offset += 4;
20843 		tlv_offset = 0;
20844 
20845 		/*
20846 		 * Search for configured port state sub-TLV.
20847 		 */
20848 		while ((offset < data_size) &&
20849 			(tlv_offset < sub_tlv_len)) {
20850 			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
20851 				offset += 4;
20852 				tlv_offset += 4;
20853 				break;
20854 			}
20855 			if (rgn23_data[offset] != PORT_STE_TYPE) {
20856 				offset += rgn23_data[offset + 1] * 4 + 4;
20857 				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
20858 				continue;
20859 			}
20860 
20861 			/* This HBA contains PORT_STE configured */
20862 			if (!rgn23_data[offset + 2])
20863 				set_bit(LINK_DISABLED, &phba->hba_flag);
20864 
20865 			goto out;
20866 		}
20867 	}
20868 
20869 out:
20870 	kfree(rgn23_data);
20871 	return;
20872 }
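
/*
 * A layout sketch inferred from the parsing above (not authoritative):
 * region 23 begins with a 4-byte signature and a 4-byte version word,
 * followed by TLV records of the form
 *
 *	byte 0:   record type (DRIVER_SPECIFIC_TYPE, PORT_STE_TYPE, or
 *		  LPFC_REGION23_LAST_REC to terminate the region)
 *	byte 1:   record length in words, so a record occupies
 *		  rgn23_data[offset + 1] * 4 + 4 bytes in total
 *	byte 2..: record data; for the PORT_STE sub-TLV, a zero in
 *		  byte 2 marks the link as disabled
 */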
20873 
20874 /**
20875  * lpfc_log_fw_write_cmpl - logs firmware write completion status
20876  * @phba: pointer to lpfc hba data structure
20877  * @shdr_status: wr_object rsp's status field
20878  * @shdr_add_status: wr_object rsp's add_status field
20879  * @shdr_add_status_2: wr_object rsp's add_status_2 field
20880  * @shdr_change_status: wr_object rsp's change_status field
20881  * @shdr_csf: wr_object rsp's csf bit
20882  *
20883  * This routine is intended to be called after a firmware write completes.
20884  * It will log the next action items to be performed by the user to
20885  * instantiate the newly downloaded firmware or the reason for incompatibility.
20886  **/
20887 static void
20888 lpfc_log_fw_write_cmpl(struct lpfc_hba *phba, u32 shdr_status,
20889 		       u32 shdr_add_status, u32 shdr_add_status_2,
20890 		       u32 shdr_change_status, u32 shdr_csf)
20891 {
20892 	lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
20893 			"4198 %s: flash_id x%02x, asic_rev x%02x, "
20894 			"status x%02x, add_status x%02x, add_status_2 x%02x, "
20895 			"change_status x%02x, csf %01x\n", __func__,
20896 			phba->sli4_hba.flash_id, phba->sli4_hba.asic_rev,
20897 			shdr_status, shdr_add_status, shdr_add_status_2,
20898 			shdr_change_status, shdr_csf);
20899 
20900 	if (shdr_add_status == LPFC_ADD_STATUS_INCOMPAT_OBJ) {
20901 		switch (shdr_add_status_2) {
20902 		case LPFC_ADD_STATUS_2_INCOMPAT_FLASH:
20903 			lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20904 				     "4199 Firmware write failed: "
20905 				     "image incompatible with flash x%02x\n",
20906 				     phba->sli4_hba.flash_id);
20907 			break;
20908 		case LPFC_ADD_STATUS_2_INCORRECT_ASIC:
20909 			lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20910 				     "4200 Firmware write failed: "
20911 				     "image incompatible with ASIC "
20912 				     "architecture x%02x\n",
20913 				     phba->sli4_hba.asic_rev);
20914 			break;
20915 		default:
20916 			lpfc_log_msg(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
20917 				     "4210 Firmware write failed: "
20918 				     "add_status_2 x%02x\n",
20919 				     shdr_add_status_2);
20920 			break;
20921 		}
20922 	} else if (!shdr_status && !shdr_add_status) {
20923 		if (shdr_change_status == LPFC_CHANGE_STATUS_FW_RESET ||
20924 		    shdr_change_status == LPFC_CHANGE_STATUS_PORT_MIGRATION) {
20925 			if (shdr_csf)
20926 				shdr_change_status =
20927 						   LPFC_CHANGE_STATUS_PCI_RESET;
20928 		}
20929 
20930 		switch (shdr_change_status) {
20931 		case (LPFC_CHANGE_STATUS_PHYS_DEV_RESET):
20932 			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20933 				     "3198 Firmware write complete: System "
20934 				     "reboot required to instantiate\n");
20935 			break;
20936 		case (LPFC_CHANGE_STATUS_FW_RESET):
20937 			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20938 				     "3199 Firmware write complete: "
20939 				     "Firmware reset required to "
20940 				     "instantiate\n");
20941 			break;
20942 		case (LPFC_CHANGE_STATUS_PORT_MIGRATION):
20943 			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20944 				     "3200 Firmware write complete: Port "
20945 				     "Migration or PCI Reset required to "
20946 				     "instantiate\n");
20947 			break;
20948 		case (LPFC_CHANGE_STATUS_PCI_RESET):
20949 			lpfc_log_msg(phba, KERN_NOTICE, LOG_MBOX | LOG_SLI,
20950 				     "3201 Firmware write complete: PCI "
20951 				     "Reset required to instantiate\n");
20952 			break;
20953 		default:
20954 			break;
20955 		}
20956 	}
20957 }
20958 
20959 /**
20960  * lpfc_wr_object - write an object to the firmware
20961  * @phba: HBA structure that indicates port to create a queue on.
20962  * @dmabuf_list: list of dmabufs to write to the port.
20963  * @size: the total byte value of the objects to write to the port.
20964  * @offset: the current offset to be used to start the transfer.
20965  *
20966  * This routine will create a wr_object mailbox command to send to the port.
20967  * The mailbox command will be constructed using the dma buffers described in
20968  * @dmabuf_list to create a list of BDEs. This routine will fill in as many
20969  * BDEs as the embedded mailbox can support. The @offset variable will be
20970  * used to indicate the starting offset of the transfer and will also return
20971  * the offset after the write object mailbox has completed. @size is used to
20972  * determine the end of the object and whether the eof bit should be set.
20973  *
20974  * Return 0 if successful and offset will contain the new offset to use
20975  * for the next write.
20976  * Return negative value for error cases.
20977  **/
20978 int
20979 lpfc_wr_object(struct lpfc_hba *phba, struct list_head *dmabuf_list,
20980 	       uint32_t size, uint32_t *offset)
20981 {
20982 	struct lpfc_mbx_wr_object *wr_object;
20983 	LPFC_MBOXQ_t *mbox;
20984 	int rc = 0, i = 0;
20985 	int mbox_status = 0;
20986 	uint32_t shdr_status, shdr_add_status, shdr_add_status_2;
20987 	uint32_t shdr_change_status = 0, shdr_csf = 0;
20988 	uint32_t mbox_tmo;
20989 	struct lpfc_dmabuf *dmabuf;
20990 	uint32_t written = 0;
20991 	bool check_change_status = false;
20992 
20993 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
20994 	if (!mbox)
20995 		return -ENOMEM;
20996 
20997 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
20998 			LPFC_MBOX_OPCODE_WRITE_OBJECT,
20999 			sizeof(struct lpfc_mbx_wr_object) -
21000 			sizeof(struct lpfc_sli4_cfg_mhdr), LPFC_SLI4_MBX_EMBED);
21001 
21002 	wr_object = (struct lpfc_mbx_wr_object *)&mbox->u.mqe.un.wr_object;
21003 	wr_object->u.request.write_offset = *offset;
21004 	sprintf((uint8_t *)wr_object->u.request.object_name, "/");
21005 	wr_object->u.request.object_name[0] =
21006 		cpu_to_le32(wr_object->u.request.object_name[0]);
21007 	bf_set(lpfc_wr_object_eof, &wr_object->u.request, 0);
21008 	list_for_each_entry(dmabuf, dmabuf_list, list) {
21009 		if (i >= LPFC_MBX_WR_CONFIG_MAX_BDE || written >= size)
21010 			break;
21011 		wr_object->u.request.bde[i].addrLow = putPaddrLow(dmabuf->phys);
21012 		wr_object->u.request.bde[i].addrHigh =
21013 			putPaddrHigh(dmabuf->phys);
21014 		if (written + SLI4_PAGE_SIZE >= size) {
21015 			wr_object->u.request.bde[i].tus.f.bdeSize =
21016 				(size - written);
21017 			written += (size - written);
21018 			bf_set(lpfc_wr_object_eof, &wr_object->u.request, 1);
21019 			bf_set(lpfc_wr_object_eas, &wr_object->u.request, 1);
21020 			check_change_status = true;
21021 		} else {
21022 			wr_object->u.request.bde[i].tus.f.bdeSize =
21023 				SLI4_PAGE_SIZE;
21024 			written += SLI4_PAGE_SIZE;
21025 		}
21026 		i++;
21027 	}
21028 	wr_object->u.request.bde_count = i;
21029 	bf_set(lpfc_wr_object_write_length, &wr_object->u.request, written);
21030 	if (!phba->sli4_hba.intr_enable)
21031 		mbox_status = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
21032 	else {
21033 		mbox_tmo = lpfc_mbox_tmo_val(phba, mbox);
21034 		mbox_status = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
21035 	}
21036 
21037 	/* The mbox status needs to be maintained to detect MBOX_TIMEOUT. */
21038 	rc = mbox_status;
21039 
21040 	/* The IOCTL status is embedded in the mailbox subheader. */
21041 	shdr_status = bf_get(lpfc_mbox_hdr_status,
21042 			     &wr_object->header.cfg_shdr.response);
21043 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
21044 				 &wr_object->header.cfg_shdr.response);
21045 	shdr_add_status_2 = bf_get(lpfc_mbox_hdr_add_status_2,
21046 				   &wr_object->header.cfg_shdr.response);
21047 	if (check_change_status) {
21048 		shdr_change_status = bf_get(lpfc_wr_object_change_status,
21049 					    &wr_object->u.response);
21050 		shdr_csf = bf_get(lpfc_wr_object_csf,
21051 				  &wr_object->u.response);
21052 	}
21053 
21054 	if (shdr_status || shdr_add_status || shdr_add_status_2 || rc) {
21055 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21056 				"3025 Write Object mailbox failed with "
21057 				"status x%x add_status x%x, add_status_2 x%x, "
21058 				"mbx status x%x\n",
21059 				shdr_status, shdr_add_status, shdr_add_status_2,
21060 				rc);
21061 		rc = -ENXIO;
21062 		*offset = shdr_add_status;
21063 	} else {
21064 		*offset += wr_object->u.response.actual_write_length;
21065 	}
21066 
21067 	if (rc || check_change_status)
21068 		lpfc_log_fw_write_cmpl(phba, shdr_status, shdr_add_status,
21069 				       shdr_add_status_2, shdr_change_status,
21070 				       shdr_csf);
21071 
21072 	if (!phba->sli4_hba.intr_enable)
21073 		mempool_free(mbox, phba->mbox_mem_pool);
21074 	else if (mbox_status != MBX_TIMEOUT)
21075 		mempool_free(mbox, phba->mbox_mem_pool);
21076 
21077 	return rc;
21078 }
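
/*
 * A minimal download-loop sketch (not driver code): lpfc_wr_object()
 * advances *offset on success, so a caller iterates until the whole
 * image is written; fw_size and the chunk refill step are hypothetical:
 *
 *	uint32_t offset = 0;
 *
 *	while (offset < fw_size) {
 *		... refill dmabuf_list with the next piece of the image ...
 *		if (lpfc_wr_object(phba, &dmabuf_list, fw_size, &offset))
 *			break;	a negative status was already logged
 *	}
 */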
21079 
21080 /**
21081  * lpfc_cleanup_pending_mbox - Free up vport discovery mailbox commands.
21082  * @vport: pointer to vport data structure.
21083  *
21084  * This function iterates through the mailboxq and cleans up all REG_LOGIN
21085  * and REG_VPI mailbox commands associated with the vport. This function
21086  * is called when the driver wants to restart discovery of the vport due to
21087  * a Clear Virtual Link event.
21088  **/
21089 void
21090 lpfc_cleanup_pending_mbox(struct lpfc_vport *vport)
21091 {
21092 	struct lpfc_hba *phba = vport->phba;
21093 	LPFC_MBOXQ_t *mb, *nextmb;
21094 	struct lpfc_nodelist *ndlp;
21095 	struct lpfc_nodelist *act_mbx_ndlp = NULL;
21096 	LIST_HEAD(mbox_cmd_list);
21097 	uint8_t restart_loop;
21098 
21099 	/* Clean up internally queued mailbox commands with the vport */
21100 	spin_lock_irq(&phba->hbalock);
21101 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
21102 		if (mb->vport != vport)
21103 			continue;
21104 
21105 		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21106 			(mb->u.mb.mbxCommand != MBX_REG_VPI))
21107 			continue;
21108 
21109 		list_move_tail(&mb->list, &mbox_cmd_list);
21110 	}
21111 	/* Clean up active mailbox command with the vport */
21112 	mb = phba->sli.mbox_active;
21113 	if (mb && (mb->vport == vport)) {
21114 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) ||
21115 			(mb->u.mb.mbxCommand == MBX_REG_VPI))
21116 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21117 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21118 			act_mbx_ndlp = mb->ctx_ndlp;
21119 
21120 			/* This reference is local to this routine.  The
21121 			 * reference is removed at routine exit.
21122 			 */
21123 			act_mbx_ndlp = lpfc_nlp_get(act_mbx_ndlp);
21124 
21125 			/* Unregister the RPI when mailbox complete */
21126 			mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21127 		}
21128 	}
21129 	/* Cleanup any mailbox completions which are not yet processed */
21130 	do {
21131 		restart_loop = 0;
21132 		list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
21133 			/*
21134 			 * If this mailbox is already processed or it is
21135 			 * for another vport, ignore it.
21136 			 */
21137 			if ((mb->vport != vport) ||
21138 				(mb->mbox_flag & LPFC_MBX_IMED_UNREG))
21139 				continue;
21140 
21141 			if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) &&
21142 				(mb->u.mb.mbxCommand != MBX_REG_VPI))
21143 				continue;
21144 
21145 			mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
21146 			if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21147 				ndlp = mb->ctx_ndlp;
21148 				/* Unregister the RPI when mailbox complete */
21149 				mb->mbox_flag |= LPFC_MBX_IMED_UNREG;
21150 				restart_loop = 1;
21151 				clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag);
21152 				break;
21153 			}
21154 		}
21155 	} while (restart_loop);
21156 
21157 	spin_unlock_irq(&phba->hbalock);
21158 
21159 	/* Release the cleaned-up mailbox commands */
21160 	while (!list_empty(&mbox_cmd_list)) {
21161 		list_remove_head(&mbox_cmd_list, mb, LPFC_MBOXQ_t, list);
21162 		if (mb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
21163 			ndlp = mb->ctx_ndlp;
21164 			mb->ctx_ndlp = NULL;
21165 			if (ndlp) {
21166 				clear_bit(NLP_IGNR_REG_CMPL, &ndlp->nlp_flag);
21167 				lpfc_nlp_put(ndlp);
21168 			}
21169 		}
21170 		lpfc_mbox_rsrc_cleanup(phba, mb, MBOX_THD_UNLOCKED);
21171 	}
21172 
21173 	/* Release the ndlp with the cleaned-up active mailbox command */
21174 	if (act_mbx_ndlp) {
21175 		clear_bit(NLP_IGNR_REG_CMPL, &act_mbx_ndlp->nlp_flag);
21176 		lpfc_nlp_put(act_mbx_ndlp);
21177 	}
21178 }
21179 
21180 /**
21181  * lpfc_drain_txq - Drain the txq
21182  * @phba: Pointer to HBA context object.
21183  *
21184  * This function attempts to submit IOCBs on the txq
21185  * to the adapter.  For SLI4 adapters, the txq contains
21186  * ELS IOCBs that have been deferred because there
21187  * are no available SGLs.  This congestion can occur with large
21188  * vport counts during node discovery.
21189  **/
21190 
21191 uint32_t
21192 lpfc_drain_txq(struct lpfc_hba *phba)
21193 {
21194 	LIST_HEAD(completions);
21195 	struct lpfc_sli_ring *pring;
21196 	struct lpfc_iocbq *piocbq = NULL;
21197 	unsigned long iflags = 0;
21198 	char *fail_msg = NULL;
21199 	uint32_t txq_cnt = 0;
21200 	struct lpfc_queue *wq;
21201 	int ret = 0;
21202 
21203 	if (phba->link_flag & LS_MDS_LOOPBACK) {
21204 		/* MDS WQEs are posted only to the first WQ */
21205 		wq = phba->sli4_hba.hdwq[0].io_wq;
21206 		if (unlikely(!wq))
21207 			return 0;
21208 		pring = wq->pring;
21209 	} else {
21210 		wq = phba->sli4_hba.els_wq;
21211 		if (unlikely(!wq))
21212 			return 0;
21213 		pring = lpfc_phba_elsring(phba);
21214 	}
21215 
21216 	if (unlikely(!pring) || list_empty(&pring->txq))
21217 		return 0;
21218 
21219 	spin_lock_irqsave(&pring->ring_lock, iflags);
21220 	list_for_each_entry(piocbq, &pring->txq, list) {
21221 		txq_cnt++;
21222 	}
21223 
21224 	if (txq_cnt > pring->txq_max)
21225 		pring->txq_max = txq_cnt;
21226 
21227 	spin_unlock_irqrestore(&pring->ring_lock, iflags);
21228 
21229 	while (!list_empty(&pring->txq)) {
21230 		spin_lock_irqsave(&pring->ring_lock, iflags);
21231 
21232 		piocbq = lpfc_sli_ringtx_get(phba, pring);
21233 		if (!piocbq) {
21234 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21235 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21236 				"2823 txq empty and txq_cnt is %d\n",
21237 				txq_cnt);
21238 			break;
21239 		}
21240 		txq_cnt--;
21241 
21242 		ret = __lpfc_sli_issue_iocb(phba, pring->ringno, piocbq, 0);
21243 
21244 		if (ret && ret != IOCB_BUSY) {
21245 			fail_msg = " - Cannot send IO ";
21246 			piocbq->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21247 		}
21248 		if (fail_msg) {
21249 			piocbq->cmd_flag |= LPFC_DRIVER_ABORTED;
21250 			/* Failed means we can't issue and need to cancel */
21251 			lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
21252 					"2822 IOCB failed %s iotag 0x%x "
21253 					"xri 0x%x %d flg x%x\n",
21254 					fail_msg, piocbq->iotag,
21255 					piocbq->sli4_xritag, ret,
21256 					piocbq->cmd_flag);
21257 			list_add_tail(&piocbq->list, &completions);
21258 			fail_msg = NULL;
21259 		}
21260 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21261 		if (txq_cnt == 0 || ret == IOCB_BUSY)
21262 			break;
21263 	}
21264 	/* Cancel all the IOCBs that cannot be issued */
21265 	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
21266 			      IOERR_SLI_ABORTED);
21267 
21268 	return txq_cnt;
21269 }
21270 
21271 /**
21272  * lpfc_wqe_bpl2sgl - Convert the bpl/bde to a sgl.
21273  * @phba: Pointer to HBA context object.
21274  * @pwqeq: Pointer to command WQE.
21275  * @sglq: Pointer to the scatter gather queue object.
21276  *
21277  * This routine converts the bpl or bde that is in the WQE
21278  * to a sgl list for the sli4 hardware. The physical address
21279  * of the bpl/bde is converted back to a virtual address.
21280  * If the WQE contains a BPL then the list of BDEs is
21281  * converted to sli4_sges. If the WQE contains a single
21282  * BDE then it is converted to a single sli4_sge.
21283  * The WQE is still in cpu endianness so the contents of
21284  * the bpl can be used without byte swapping.
21285  *
21286  * Returns valid XRI = Success, NO_XRI = Failure.
21287  */
21288 static uint16_t
21289 lpfc_wqe_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *pwqeq,
21290 		 struct lpfc_sglq *sglq)
21291 {
21292 	uint16_t xritag = NO_XRI;
21293 	struct ulp_bde64 *bpl = NULL;
21294 	struct ulp_bde64 bde;
21295 	struct sli4_sge *sgl  = NULL;
21296 	struct lpfc_dmabuf *dmabuf;
21297 	union lpfc_wqe128 *wqe;
21298 	int numBdes = 0;
21299 	int i = 0;
21300 	uint32_t offset = 0; /* accumulated offset in the sg request list */
21301 	int inbound = 0; /* number of sg reply entries inbound from firmware */
21302 	uint32_t cmd;
21303 
21304 	if (!pwqeq || !sglq)
21305 		return xritag;
21306 
21307 	sgl  = (struct sli4_sge *)sglq->sgl;
21308 	wqe = &pwqeq->wqe;
21309 	pwqeq->iocb.ulpIoTag = pwqeq->iotag;
21310 
21311 	cmd = bf_get(wqe_cmnd, &wqe->generic.wqe_com);
21312 	if (cmd == CMD_XMIT_BLS_RSP64_WQE)
21313 		return sglq->sli4_xritag;
21314 	numBdes = pwqeq->num_bdes;
21315 	if (numBdes) {
21316 		/* The addrHigh and addrLow fields within the WQE
21317 		 * have not been byteswapped yet so there is no
21318 		 * need to swap them back.
21319 		 */
21320 		if (pwqeq->bpl_dmabuf)
21321 			dmabuf = pwqeq->bpl_dmabuf;
21322 		else
21323 			return xritag;
21324 
21325 		bpl  = (struct ulp_bde64 *)dmabuf->virt;
21326 		if (!bpl)
21327 			return xritag;
21328 
21329 		for (i = 0; i < numBdes; i++) {
21330 			/* Should already be byte swapped. */
21331 			sgl->addr_hi = bpl->addrHigh;
21332 			sgl->addr_lo = bpl->addrLow;
21333 
21334 			sgl->word2 = le32_to_cpu(sgl->word2);
21335 			if ((i+1) == numBdes)
21336 				bf_set(lpfc_sli4_sge_last, sgl, 1);
21337 			else
21338 				bf_set(lpfc_sli4_sge_last, sgl, 0);
21339 			/* swap the size field back to the cpu so we
21340 			 * can assign it to the sgl.
21341 			 */
21342 			bde.tus.w = le32_to_cpu(bpl->tus.w);
21343 			sgl->sge_len = cpu_to_le32(bde.tus.f.bdeSize);
21344 			/* The offsets in the sgl need to be accumulated
21345 			 * separately for the request and reply lists.
21346 			 * The request is always first, the reply follows.
21347 			 */
21348 			switch (cmd) {
21349 			case CMD_GEN_REQUEST64_WQE:
21350 				/* add up the reply sg entries */
21351 				if (bpl->tus.f.bdeFlags == BUFF_TYPE_BDE_64I)
21352 					inbound++;
21353 				/* first inbound? reset the offset */
21354 				if (inbound == 1)
21355 					offset = 0;
21356 				bf_set(lpfc_sli4_sge_offset, sgl, offset);
21357 				bf_set(lpfc_sli4_sge_type, sgl,
21358 					LPFC_SGE_TYPE_DATA);
21359 				offset += bde.tus.f.bdeSize;
21360 				break;
21361 			case CMD_FCP_TRSP64_WQE:
21362 				bf_set(lpfc_sli4_sge_offset, sgl, 0);
21363 				bf_set(lpfc_sli4_sge_type, sgl,
21364 					LPFC_SGE_TYPE_DATA);
21365 				break;
21366 			case CMD_FCP_TSEND64_WQE:
21367 			case CMD_FCP_TRECEIVE64_WQE:
21368 				bf_set(lpfc_sli4_sge_type, sgl,
21369 					bpl->tus.f.bdeFlags);
21370 				if (i < 3)
21371 					offset = 0;
21372 				else
21373 					offset += bde.tus.f.bdeSize;
21374 				bf_set(lpfc_sli4_sge_offset, sgl, offset);
21375 				break;
21376 			}
21377 			sgl->word2 = cpu_to_le32(sgl->word2);
21378 			bpl++;
21379 			sgl++;
21380 		}
21381 	} else if (wqe->gen_req.bde.tus.f.bdeFlags == BUFF_TYPE_BDE_64) {
21382 		/* The addrHigh and addrLow fields of the BDE have not
21383 		 * been byteswapped yet so they need to be swapped
21384 		 * before putting them in the sgl.
21385 		 */
21386 		sgl->addr_hi = cpu_to_le32(wqe->gen_req.bde.addrHigh);
21387 		sgl->addr_lo = cpu_to_le32(wqe->gen_req.bde.addrLow);
21388 		sgl->word2 = le32_to_cpu(sgl->word2);
21389 		bf_set(lpfc_sli4_sge_last, sgl, 1);
21390 		sgl->word2 = cpu_to_le32(sgl->word2);
21391 		sgl->sge_len = cpu_to_le32(wqe->gen_req.bde.tus.f.bdeSize);
21392 	}
21393 	return sglq->sli4_xritag;
21394 }
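/*
 * Worked example of the CMD_GEN_REQUEST64_WQE offset handling above
 * (numbers are illustrative, not from a real trace): for a BPL carrying
 * two 1024-byte outbound BDEs followed by two 512-byte inbound
 * (BUFF_TYPE_BDE_64I) BDEs, the request SGEs get offsets 0 and 1024;
 * the first inbound BDE resets the accumulator, so the reply SGEs get
 * offsets 0 and 512.
 */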
21395 
21396 /**
21397  * lpfc_sli4_issue_wqe - Issue an SLI4 Work Queue Entry (WQE)
21398  * @phba: Pointer to HBA context object.
21399  * @qp: Pointer to HDW queue.
21400  * @pwqe: Pointer to command WQE.
21401  **/
21402 int
21403 lpfc_sli4_issue_wqe(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21404 		    struct lpfc_iocbq *pwqe)
21405 {
21406 	union lpfc_wqe128 *wqe = &pwqe->wqe;
21407 	struct lpfc_async_xchg_ctx *ctxp;
21408 	struct lpfc_queue *wq;
21409 	struct lpfc_sglq *sglq;
21410 	struct lpfc_sli_ring *pring;
21411 	unsigned long iflags;
21412 	int ret = 0;
21413 
21414 	/* NVME_LS and NVME_LS ABTS requests. */
21415 	if (pwqe->cmd_flag & LPFC_IO_NVME_LS) {
21416 		pring =  phba->sli4_hba.nvmels_wq->pring;
21417 		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21418 					  qp, wq_access);
21419 		sglq = __lpfc_sli_get_els_sglq(phba, pwqe);
21420 		if (!sglq) {
21421 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21422 			return WQE_BUSY;
21423 		}
21424 		pwqe->sli4_lxritag = sglq->sli4_lxritag;
21425 		pwqe->sli4_xritag = sglq->sli4_xritag;
21426 		if (lpfc_wqe_bpl2sgl(phba, pwqe, sglq) == NO_XRI) {
21427 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21428 			return WQE_ERROR;
21429 		}
21430 		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21431 		       pwqe->sli4_xritag);
21432 		ret = lpfc_sli4_wq_put(phba->sli4_hba.nvmels_wq, wqe);
21433 		if (ret) {
21434 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21435 			return ret;
21436 		}
21437 
21438 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21439 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21440 
21441 		lpfc_sli4_poll_eq(qp->hba_eq);
21442 		return 0;
21443 	}
21444 
21445 	/* NVME_FCREQ, NVME_ABTS, FCP and CMF requests */
21446 	if (pwqe->cmd_flag & (LPFC_IO_NVME | LPFC_IO_FCP | LPFC_IO_CMF)) {
21447 		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
21448 		wq = qp->io_wq;
21449 		pring = wq->pring;
21450 
21451 		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21452 
21453 		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21454 					  qp, wq_access);
21455 		ret = lpfc_sli4_wq_put(wq, wqe);
21456 		if (ret) {
21457 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21458 			return ret;
21459 		}
21460 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21461 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21462 
21463 		lpfc_sli4_poll_eq(qp->hba_eq);
21464 		return 0;
21465 	}
21466 
21467 	/* NVMET requests */
21468 	if (pwqe->cmd_flag & LPFC_IO_NVMET) {
21469 		/* Get the IO distribution (hba_wqidx) for WQ assignment. */
21470 		wq = qp->io_wq;
21471 		pring = wq->pring;
21472 
21473 		ctxp = pwqe->context_un.axchg;
21474 		sglq = ctxp->ctxbuf->sglq;
21475 		if (pwqe->sli4_xritag ==  NO_XRI) {
21476 			pwqe->sli4_lxritag = sglq->sli4_lxritag;
21477 			pwqe->sli4_xritag = sglq->sli4_xritag;
21478 		}
21479 		bf_set(wqe_xri_tag, &pwqe->wqe.xmit_bls_rsp.wqe_com,
21480 		       pwqe->sli4_xritag);
21481 		bf_set(wqe_cqid, &wqe->generic.wqe_com, qp->io_cq_map);
21482 
21483 		lpfc_qp_spin_lock_irqsave(&pring->ring_lock, iflags,
21484 					  qp, wq_access);
21485 		ret = lpfc_sli4_wq_put(wq, wqe);
21486 		if (ret) {
21487 			spin_unlock_irqrestore(&pring->ring_lock, iflags);
21488 			return ret;
21489 		}
21490 		lpfc_sli_ringtxcmpl_put(phba, pring, pwqe);
21491 		spin_unlock_irqrestore(&pring->ring_lock, iflags);
21492 
21493 		lpfc_sli4_poll_eq(qp->hba_eq);
21494 		return 0;
21495 	}
21496 	return WQE_ERROR;
21497 }
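/*
 * Usage sketch (illustrative only, not a real call site in this file):
 * a caller that has fully built the WQE inside an lpfc_iocbq picks a
 * hardware queue and lets lpfc_sli4_issue_wqe() route the request by
 * cmd_flag.  The hdwq index 0 below is an arbitrary assumption.
 *
 *	struct lpfc_sli4_hdw_queue *qp = &phba->sli4_hba.hdwq[0];
 *	int rc;
 *
 *	pwqe->cmd_flag |= LPFC_IO_NVME;
 *	rc = lpfc_sli4_issue_wqe(phba, qp, pwqe);
 *	if (rc == WQE_BUSY)
 *		(defer and retry: no resource was available)
 *	else if (rc)
 *		(hard failure: WQE_ERROR or a WQ put error)
 */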
21498 
21499 /**
21500  * lpfc_sli4_issue_abort_iotag - SLI-4 WQE init & issue for the Abort
21501  * @phba: Pointer to HBA context object.
21502  * @cmdiocb: Pointer to driver command iocb object.
21503  * @cmpl: completion function.
21504  *
21505  * Fill the appropriate fields for the abort WQE and call the
21506  * internal routine lpfc_sli4_issue_wqe to send the WQE.
21507  * This function is called with the hbalock held and no ring_lock held.
21508  *
21509  * RETURNS 0 - SUCCESS
21510  **/
21511 
21512 int
21513 lpfc_sli4_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
21514 			    void *cmpl)
21515 {
21516 	struct lpfc_vport *vport = cmdiocb->vport;
21517 	struct lpfc_iocbq *abtsiocb = NULL;
21518 	union lpfc_wqe128 *abtswqe;
21519 	struct lpfc_io_buf *lpfc_cmd;
21520 	int retval = IOCB_ERROR;
21521 	u16 xritag = cmdiocb->sli4_xritag;
21522 
21523 	/*
21524 	 * The SCSI command cannot be in the txq; it is in flight because
21525 	 * pCmd is still pointing at the SCSI command we have to abort. There
21526 	 * is no need to search the txcmplq. Just send an abort to the FW.
21527 	 */
21528 
21529 	abtsiocb = __lpfc_sli_get_iocbq(phba);
21530 	if (!abtsiocb)
21531 		return WQE_NORESOURCE;
21532 
21533 	/* Indicate the IO is being aborted by the driver. */
21534 	cmdiocb->cmd_flag |= LPFC_DRIVER_ABORTED;
21535 
21536 	abtswqe = &abtsiocb->wqe;
21537 	memset(abtswqe, 0, sizeof(*abtswqe));
21538 
21539 	if (!lpfc_is_link_up(phba) || (phba->link_flag & LS_EXTERNAL_LOOPBACK))
21540 		bf_set(abort_cmd_ia, &abtswqe->abort_cmd, 1);
21541 	bf_set(abort_cmd_criteria, &abtswqe->abort_cmd, T_XRI_TAG);
21542 	abtswqe->abort_cmd.rsrvd5 = 0;
21543 	abtswqe->abort_cmd.wqe_com.abort_tag = xritag;
21544 	bf_set(wqe_reqtag, &abtswqe->abort_cmd.wqe_com, abtsiocb->iotag);
21545 	bf_set(wqe_cmnd, &abtswqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
21546 	bf_set(wqe_xri_tag, &abtswqe->generic.wqe_com, 0);
21547 	bf_set(wqe_qosd, &abtswqe->abort_cmd.wqe_com, 1);
21548 	bf_set(wqe_lenloc, &abtswqe->abort_cmd.wqe_com, LPFC_WQE_LENLOC_NONE);
21549 	bf_set(wqe_cmd_type, &abtswqe->abort_cmd.wqe_com, OTHER_COMMAND);
21550 
21551 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
21552 	abtsiocb->hba_wqidx = cmdiocb->hba_wqidx;
21553 	abtsiocb->cmd_flag |= LPFC_USE_FCPWQIDX;
21554 	if (cmdiocb->cmd_flag & LPFC_IO_FCP)
21555 		abtsiocb->cmd_flag |= LPFC_IO_FCP;
21556 	if (cmdiocb->cmd_flag & LPFC_IO_NVME)
21557 		abtsiocb->cmd_flag |= LPFC_IO_NVME;
21558 	if (cmdiocb->cmd_flag & LPFC_IO_FOF)
21559 		abtsiocb->cmd_flag |= LPFC_IO_FOF;
21560 	abtsiocb->vport = vport;
21561 	abtsiocb->cmd_cmpl = cmpl;
21562 
21563 	lpfc_cmd = container_of(cmdiocb, struct lpfc_io_buf, cur_iocbq);
21564 	retval = lpfc_sli4_issue_wqe(phba, lpfc_cmd->hdwq, abtsiocb);
21565 
21566 	lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
21567 			 "0359 Abort xri x%x, original iotag x%x, "
21568 			 "abort cmd iotag x%x retval x%x\n",
21569 			 xritag, cmdiocb->iotag, abtsiocb->iotag, retval);
21570 
21571 	if (retval) {
21572 		cmdiocb->cmd_flag &= ~LPFC_DRIVER_ABORTED;
21573 		__lpfc_sli_release_iocbq(phba, abtsiocb);
21574 	}
21575 
21576 	return retval;
21577 }
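/*
 * Usage sketch (hypothetical): abort an in-flight FCP IO, supplying the
 * completion routine to run when the ABTS finishes.  The handler name
 * below is an assumption for illustration, not a routine in this driver.
 *
 *	int rc;
 *
 *	rc = lpfc_sli4_issue_abort_iotag(phba, &lpfc_cmd->cur_iocbq,
 *					 my_abts_cmpl);
 *	if (rc)
 *		(abort was not issued; LPFC_DRIVER_ABORTED has been cleared)
 */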
21578 
21579 #ifdef LPFC_MXP_STAT
21580 /**
21581  * lpfc_snapshot_mxp - Snapshot pbl, pvt and busy count
21582  * @phba: pointer to lpfc hba data structure.
21583  * @hwqid: index of the HWQ the snapshot belongs to.
21584  *
21585  * The purpose of this routine is to take a snapshot of pbl, pvt and busy count
21586  * 15 seconds after a test case starts running.
21587  *
21588  * The user should call lpfc_debugfs_multixripools_write before running a test
21589  * case to clear stat_snapshot_taken. Then the user starts the test case. While
21590  * the test case is running, stat_snapshot_taken is incremented by 1 each time
21591  * this routine is called from the heartbeat timer. When stat_snapshot_taken is
21592  * equal to LPFC_MXP_SNAPSHOT_TAKEN, a snapshot is taken.
21593  **/
21594 void lpfc_snapshot_mxp(struct lpfc_hba *phba, u32 hwqid)
21595 {
21596 	struct lpfc_sli4_hdw_queue *qp;
21597 	struct lpfc_multixri_pool *multixri_pool;
21598 	struct lpfc_pvt_pool *pvt_pool;
21599 	struct lpfc_pbl_pool *pbl_pool;
21600 	u32 txcmplq_cnt;
21601 
21602 	qp = &phba->sli4_hba.hdwq[hwqid];
21603 	multixri_pool = qp->p_multixri_pool;
21604 	if (!multixri_pool)
21605 		return;
21606 
21607 	if (multixri_pool->stat_snapshot_taken == LPFC_MXP_SNAPSHOT_TAKEN) {
21608 		pvt_pool = &qp->p_multixri_pool->pvt_pool;
21609 		pbl_pool = &qp->p_multixri_pool->pbl_pool;
21610 		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21611 
21612 		multixri_pool->stat_pbl_count = pbl_pool->count;
21613 		multixri_pool->stat_pvt_count = pvt_pool->count;
21614 		multixri_pool->stat_busy_count = txcmplq_cnt;
21615 	}
21616 
21617 	multixri_pool->stat_snapshot_taken++;
21618 }
21619 #endif
21620 
21621 /**
21622  * lpfc_adjust_pvt_pool_count - Adjust private pool count
21623  * @phba: pointer to lpfc hba data structure.
21624  * @hwqid: index of the HWQ whose private pool is adjusted.
21625  *
21626  * This routine moves some XRIs from private to public pool when private pool
21627  * is not busy.
21628  **/
21629 void lpfc_adjust_pvt_pool_count(struct lpfc_hba *phba, u32 hwqid)
21630 {
21631 	struct lpfc_multixri_pool *multixri_pool;
21632 	u32 io_req_count;
21633 	u32 prev_io_req_count;
21634 
21635 	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21636 	if (!multixri_pool)
21637 		return;
21638 	io_req_count = multixri_pool->io_req_count;
21639 	prev_io_req_count = multixri_pool->prev_io_req_count;
21640 
21641 	if (prev_io_req_count != io_req_count) {
21642 		/* Private pool is busy */
21643 		multixri_pool->prev_io_req_count = io_req_count;
21644 	} else {
21645 		/* Private pool is not busy.
21646 		 * Move XRIs from private to public pool.
21647 		 */
21648 		lpfc_move_xri_pvt_to_pbl(phba, hwqid);
21649 	}
21650 }
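/*
 * Example of the idleness test above (illustrative numbers): io_req_count
 * is bumped on every allocation from the multixri pools, so if two
 * consecutive heartbeat samples both read 1500, no IO was requested in
 * the interval and the free private XRIs are handed back to the public
 * pool via lpfc_move_xri_pvt_to_pbl().
 */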
21651 
21652 /**
21653  * lpfc_adjust_high_watermark - Adjust high watermark
21654  * @phba: pointer to lpfc hba data structure.
21655  * @hwqid: index of the HWQ whose watermark is adjusted.
21656  *
21657  * This routine sets the high watermark to the number of outstanding XRIs,
21658  * clamped so the new value stays between xri_limit/2 and xri_limit.
21659  **/
21660 void lpfc_adjust_high_watermark(struct lpfc_hba *phba, u32 hwqid)
21661 {
21662 	u32 new_watermark;
21663 	u32 watermark_max;
21664 	u32 watermark_min;
21665 	u32 xri_limit;
21666 	u32 txcmplq_cnt;
21667 	u32 abts_io_bufs;
21668 	struct lpfc_multixri_pool *multixri_pool;
21669 	struct lpfc_sli4_hdw_queue *qp;
21670 
21671 	qp = &phba->sli4_hba.hdwq[hwqid];
21672 	multixri_pool = qp->p_multixri_pool;
21673 	if (!multixri_pool)
21674 		return;
21675 	xri_limit = multixri_pool->xri_limit;
21676 
21677 	watermark_max = xri_limit;
21678 	watermark_min = xri_limit / 2;
21679 
21680 	txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21681 	abts_io_bufs = qp->abts_scsi_io_bufs;
21682 	abts_io_bufs += qp->abts_nvme_io_bufs;
21683 
21684 	new_watermark = txcmplq_cnt + abts_io_bufs;
21685 	new_watermark = min(watermark_max, new_watermark);
21686 	new_watermark = max(watermark_min, new_watermark);
21687 	multixri_pool->pvt_pool.high_watermark = new_watermark;
21688 
21689 #ifdef LPFC_MXP_STAT
21690 	multixri_pool->stat_max_hwm = max(multixri_pool->stat_max_hwm,
21691 					  new_watermark);
21692 #endif
21693 }
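/*
 * Worked example of the clamp above (illustrative): with xri_limit = 512
 * the watermark is confined to [256, 512].  If txcmplq_cnt + abts_io_bufs
 * is 100, high_watermark becomes 256 (raised to the minimum); if it is
 * 600, high_watermark becomes 512 (capped at the maximum).
 */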
21694 
21695 /**
21696  * lpfc_move_xri_pvt_to_pbl - Move some XRIs from private to public pool
21697  * @phba: pointer to lpfc hba data structure.
21698  * @hwqid: index of the HWQ whose private pool is drained.
21699  *
21700  * This routine is called from the heartbeat timer when pvt_pool is idle.
21701  * All free XRIs are moved from private to public pool on hwqid with 2 steps.
21702  * The first step moves (all - low_watermark) amount of XRIs.
21703  * The second step moves the rest of XRIs.
21704  **/
21705 void lpfc_move_xri_pvt_to_pbl(struct lpfc_hba *phba, u32 hwqid)
21706 {
21707 	struct lpfc_pbl_pool *pbl_pool;
21708 	struct lpfc_pvt_pool *pvt_pool;
21709 	struct lpfc_sli4_hdw_queue *qp;
21710 	struct lpfc_io_buf *lpfc_ncmd;
21711 	struct lpfc_io_buf *lpfc_ncmd_next;
21712 	unsigned long iflag;
21713 	struct list_head tmp_list;
21714 	u32 tmp_count;
21715 
21716 	qp = &phba->sli4_hba.hdwq[hwqid];
21717 	pbl_pool = &qp->p_multixri_pool->pbl_pool;
21718 	pvt_pool = &qp->p_multixri_pool->pvt_pool;
21719 	tmp_count = 0;
21720 
21721 	lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag, qp, mv_to_pub_pool);
21722 	lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_from_pvt_pool);
21723 
21724 	if (pvt_pool->count > pvt_pool->low_watermark) {
21725 		/* Step 1: move (all - low_watermark) from pvt_pool
21726 		 * to pbl_pool
21727 		 */
21728 
21729 		/* Move low watermark of bufs from pvt_pool to tmp_list */
21730 		INIT_LIST_HEAD(&tmp_list);
21731 		list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
21732 					 &pvt_pool->list, list) {
21733 			list_move_tail(&lpfc_ncmd->list, &tmp_list);
21734 			tmp_count++;
21735 			if (tmp_count >= pvt_pool->low_watermark)
21736 				break;
21737 		}
21738 
21739 		/* Move all bufs from pvt_pool to pbl_pool */
21740 		list_splice_init(&pvt_pool->list, &pbl_pool->list);
21741 
21742 		/* Move all bufs from tmp_list to pvt_pool */
21743 		list_splice(&tmp_list, &pvt_pool->list);
21744 
21745 		pbl_pool->count += (pvt_pool->count - tmp_count);
21746 		pvt_pool->count = tmp_count;
21747 	} else {
21748 		/* Step 2: move the rest from pvt_pool to pbl_pool */
21749 		list_splice_init(&pvt_pool->list, &pbl_pool->list);
21750 		pbl_pool->count += pvt_pool->count;
21751 		pvt_pool->count = 0;
21752 	}
21753 
21754 	spin_unlock(&pvt_pool->lock);
21755 	spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21756 }
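/*
 * Worked example of the two steps above (illustrative): with
 * pvt_pool->count = 100 and low_watermark = 20, the first idle heartbeat
 * pass keeps 20 bufs in pvt_pool and moves 80 to pbl_pool (step 1).  If
 * the pool is still idle on the next pass, 20 is no longer above the
 * watermark, so step 2 drains the remaining 20 bufs.
 */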
21757 
21758 /**
21759  * _lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21760  * @phba: pointer to lpfc hba data structure
21761  * @qp: pointer to HDW queue
21762  * @pbl_pool: specified public free XRI pool
21763  * @pvt_pool: specified private free XRI pool
21764  * @count: number of XRIs to move
21765  *
21766  * This routine tries to move some free common bufs from the specified pbl_pool
21767  * to the specified pvt_pool. It may move fewer than count XRIs if there are
21768  * not enough in the public pool.
21769  *
21770  * Return:
21771  *   true - if XRIs are successfully moved from the specified pbl_pool to the
21772  *          specified pvt_pool
21773  *   false - if the specified pbl_pool is empty or locked by someone else
21774  **/
21775 static bool
21776 _lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, struct lpfc_sli4_hdw_queue *qp,
21777 			  struct lpfc_pbl_pool *pbl_pool,
21778 			  struct lpfc_pvt_pool *pvt_pool, u32 count)
21779 {
21780 	struct lpfc_io_buf *lpfc_ncmd;
21781 	struct lpfc_io_buf *lpfc_ncmd_next;
21782 	unsigned long iflag;
21783 	int ret;
21784 
21785 	ret = spin_trylock_irqsave(&pbl_pool->lock, iflag);
21786 	if (ret) {
21787 		if (pbl_pool->count) {
21788 			/* Move a batch of XRIs from public to private pool */
21789 			lpfc_qp_spin_lock(&pvt_pool->lock, qp, mv_to_pvt_pool);
21790 			list_for_each_entry_safe(lpfc_ncmd,
21791 						 lpfc_ncmd_next,
21792 						 &pbl_pool->list,
21793 						 list) {
21794 				list_move_tail(&lpfc_ncmd->list,
21795 					       &pvt_pool->list);
21796 				pvt_pool->count++;
21797 				pbl_pool->count--;
21798 				count--;
21799 				if (count == 0)
21800 					break;
21801 			}
21802 
21803 			spin_unlock(&pvt_pool->lock);
21804 			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21805 			return true;
21806 		}
21807 		spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21808 	}
21809 
21810 	return false;
21811 }
21812 
21813 /**
21814  * lpfc_move_xri_pbl_to_pvt - Move some XRIs from public to private pool
21815  * @phba: pointer to lpfc hba data structure.
21816  * @hwqid: index of the HWQ whose private pool is replenished.
21817  * @count: number of XRIs to move
21818  *
21819  * This routine tries to find some free common bufs in one of the public pools
21820  * using a round-robin method. The search checks the local hwqid first, then
21821  * resumes at the HWQ recorded last time (rrb_next_hwqid). Once a non-empty
21822  * public pool is found, a batch of free common bufs is moved to the private
21823  * pool on hwqid. It may move fewer than count XRIs if the public pool runs short.
21824  **/
21825 void lpfc_move_xri_pbl_to_pvt(struct lpfc_hba *phba, u32 hwqid, u32 count)
21826 {
21827 	struct lpfc_multixri_pool *multixri_pool;
21828 	struct lpfc_multixri_pool *next_multixri_pool;
21829 	struct lpfc_pvt_pool *pvt_pool;
21830 	struct lpfc_pbl_pool *pbl_pool;
21831 	struct lpfc_sli4_hdw_queue *qp;
21832 	u32 next_hwqid;
21833 	u32 hwq_count;
21834 	int ret;
21835 
21836 	qp = &phba->sli4_hba.hdwq[hwqid];
21837 	multixri_pool = qp->p_multixri_pool;
21838 	pvt_pool = &multixri_pool->pvt_pool;
21839 	pbl_pool = &multixri_pool->pbl_pool;
21840 
21841 	/* Check if local pbl_pool is available */
21842 	ret = _lpfc_move_xri_pbl_to_pvt(phba, qp, pbl_pool, pvt_pool, count);
21843 	if (ret) {
21844 #ifdef LPFC_MXP_STAT
21845 		multixri_pool->local_pbl_hit_count++;
21846 #endif
21847 		return;
21848 	}
21849 
21850 	hwq_count = phba->cfg_hdw_queue;
21851 
21852 	/* Get the next hwqid which was found last time */
21853 	next_hwqid = multixri_pool->rrb_next_hwqid;
21854 
21855 	do {
21856 		/* Go to next hwq */
21857 		next_hwqid = (next_hwqid + 1) % hwq_count;
21858 
21859 		next_multixri_pool =
21860 			phba->sli4_hba.hdwq[next_hwqid].p_multixri_pool;
21861 		pbl_pool = &next_multixri_pool->pbl_pool;
21862 
21863 		/* Check if the public free xri pool is available */
21864 		ret = _lpfc_move_xri_pbl_to_pvt(
21865 			phba, qp, pbl_pool, pvt_pool, count);
21866 
21867 		/* Exit while-loop on success or when all hwqids have been checked */
21868 	} while (!ret && next_hwqid != multixri_pool->rrb_next_hwqid);
21869 
21870 	/* Starting point for the next time */
21871 	multixri_pool->rrb_next_hwqid = next_hwqid;
21872 
21873 	if (!ret) {
21874 		/* stats: all public pools are empty */
21875 		multixri_pool->pbl_empty_count++;
21876 	}
21877 
21878 #ifdef LPFC_MXP_STAT
21879 	if (ret) {
21880 		if (next_hwqid == hwqid)
21881 			multixri_pool->local_pbl_hit_count++;
21882 		else
21883 			multixri_pool->other_pbl_hit_count++;
21884 	}
21885 #endif
21886 }
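/*
 * Example of the round-robin walk above (illustrative): with
 * cfg_hdw_queue = 4, hwqid = 1 and rrb_next_hwqid = 1, a miss on the
 * local pbl_pool probes HWQs 2, 3, 0 and finally 1, stopping at the
 * first pool that yields bufs; rrb_next_hwqid then records where the
 * next search should resume.
 */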
21887 
21888 /**
21889  * lpfc_keep_pvt_pool_above_lowwm - Keep pvt_pool above low watermark
21890  * @phba: pointer to lpfc hba data structure.
21891  * @hwqid: index of the HWQ whose private pool is checked.
21892  *
21893  * This routine gets a batch of XRIs from the pbl_pool if the pvt_pool count
21894  * drops below the low watermark.
21895  **/
21896 void lpfc_keep_pvt_pool_above_lowwm(struct lpfc_hba *phba, u32 hwqid)
21897 {
21898 	struct lpfc_multixri_pool *multixri_pool;
21899 	struct lpfc_pvt_pool *pvt_pool;
21900 
21901 	multixri_pool = phba->sli4_hba.hdwq[hwqid].p_multixri_pool;
21902 	pvt_pool = &multixri_pool->pvt_pool;
21903 
21904 	if (pvt_pool->count < pvt_pool->low_watermark)
21905 		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
21906 }
21907 
21908 /**
21909  * lpfc_release_io_buf - Return one IO buf back to free pool
21910  * @phba: pointer to lpfc hba data structure.
21911  * @lpfc_ncmd: IO buf to be returned.
21912  * @qp: pointer to the HWQ the buf belongs to.
21913  *
21914  * This routine returns one IO buf back to the free pool. If this is an urgent
21915  * IO, the IO buf is returned to the expedite pool. If cfg_xri_rebalancing==1,
21916  * the IO buf is returned to the pbl_pool or pvt_pool based on watermark and
21917  * xri_limit. If cfg_xri_rebalancing==0, the IO buf is returned to
21918  * lpfc_io_buf_list_put.
21919  **/
21920 void lpfc_release_io_buf(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_ncmd,
21921 			 struct lpfc_sli4_hdw_queue *qp)
21922 {
21923 	unsigned long iflag;
21924 	struct lpfc_pbl_pool *pbl_pool;
21925 	struct lpfc_pvt_pool *pvt_pool;
21926 	struct lpfc_epd_pool *epd_pool;
21927 	u32 txcmplq_cnt;
21928 	u32 xri_owned;
21929 	u32 xri_limit;
21930 	u32 abts_io_bufs;
21931 
21932 	/* MUST zero fields if buffer is reused by another protocol */
21933 	lpfc_ncmd->nvmeCmd = NULL;
21934 	lpfc_ncmd->cur_iocbq.cmd_cmpl = NULL;
21935 
21936 	if (phba->cfg_xpsgl && !phba->nvmet_support &&
21937 	    !list_empty(&lpfc_ncmd->dma_sgl_xtra_list))
21938 		lpfc_put_sgl_per_hdwq(phba, lpfc_ncmd);
21939 
21940 	if (!list_empty(&lpfc_ncmd->dma_cmd_rsp_list))
21941 		lpfc_put_cmd_rsp_buf_per_hdwq(phba, lpfc_ncmd);
21942 
21943 	if (phba->cfg_xri_rebalancing) {
21944 		if (lpfc_ncmd->expedite) {
21945 			/* Return to expedite pool */
21946 			epd_pool = &phba->epd_pool;
21947 			spin_lock_irqsave(&epd_pool->lock, iflag);
21948 			list_add_tail(&lpfc_ncmd->list, &epd_pool->list);
21949 			epd_pool->count++;
21950 			spin_unlock_irqrestore(&epd_pool->lock, iflag);
21951 			return;
21952 		}
21953 
21954 		/* Avoid invalid access if an IO sneaks in and is being rejected
21955 		 * just _after_ xri pools are destroyed in lpfc_offline.
21956 		 * Nothing much can be done at this point.
21957 		 */
21958 		if (!qp->p_multixri_pool)
21959 			return;
21960 
21961 		pbl_pool = &qp->p_multixri_pool->pbl_pool;
21962 		pvt_pool = &qp->p_multixri_pool->pvt_pool;
21963 
21964 		txcmplq_cnt = qp->io_wq->pring->txcmplq_cnt;
21965 		abts_io_bufs = qp->abts_scsi_io_bufs;
21966 		abts_io_bufs += qp->abts_nvme_io_bufs;
21967 
21968 		xri_owned = pvt_pool->count + txcmplq_cnt + abts_io_bufs;
21969 		xri_limit = qp->p_multixri_pool->xri_limit;
21970 
21971 #ifdef LPFC_MXP_STAT
21972 		if (xri_owned <= xri_limit)
21973 			qp->p_multixri_pool->below_limit_count++;
21974 		else
21975 			qp->p_multixri_pool->above_limit_count++;
21976 #endif
21977 
21978 		/* XRI goes to either public or private free xri pool
21979 		 *     based on watermark and xri_limit
21980 		 */
21981 		if ((pvt_pool->count < pvt_pool->low_watermark) ||
21982 		    (xri_owned < xri_limit &&
21983 		     pvt_pool->count < pvt_pool->high_watermark)) {
21984 			lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag,
21985 						  qp, free_pvt_pool);
21986 			list_add_tail(&lpfc_ncmd->list,
21987 				      &pvt_pool->list);
21988 			pvt_pool->count++;
21989 			spin_unlock_irqrestore(&pvt_pool->lock, iflag);
21990 		} else {
21991 			lpfc_qp_spin_lock_irqsave(&pbl_pool->lock, iflag,
21992 						  qp, free_pub_pool);
21993 			list_add_tail(&lpfc_ncmd->list,
21994 				      &pbl_pool->list);
21995 			pbl_pool->count++;
21996 			spin_unlock_irqrestore(&pbl_pool->lock, iflag);
21997 		}
21998 	} else {
21999 		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_put_lock, iflag,
22000 					  qp, free_xri);
22001 		list_add_tail(&lpfc_ncmd->list,
22002 			      &qp->lpfc_io_buf_list_put);
22003 		qp->put_io_bufs++;
22004 		spin_unlock_irqrestore(&qp->io_buf_list_put_lock,
22005 				       iflag);
22006 	}
22007 }
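/*
 * Release-path summary, restating the routing above for quick reference
 * (cfg_xri_rebalancing set unless noted):
 *
 *	expedite IO                           -> epd_pool
 *	pvt count below low watermark         -> pvt_pool
 *	owned < limit and pvt < high wm       -> pvt_pool
 *	otherwise                             -> pbl_pool
 *	cfg_xri_rebalancing == 0              -> lpfc_io_buf_list_put
 */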
22008 
22009 /**
22010  * lpfc_get_io_buf_from_private_pool - Get one free IO buf from private pool
22011  * @phba: pointer to lpfc hba data structure.
22012  * @qp: pointer to HDW queue
22013  * @pvt_pool: pointer to private pool data structure.
22014  * @ndlp: pointer to lpfc nodelist data structure.
22015  *
22016  * This routine tries to get one free IO buf from private pool.
22017  *
22018  * Return:
22019  *   pointer to one free IO buf - if private pool is not empty
22020  *   NULL - if private pool is empty
22021  **/
22022 static struct lpfc_io_buf *
22023 lpfc_get_io_buf_from_private_pool(struct lpfc_hba *phba,
22024 				  struct lpfc_sli4_hdw_queue *qp,
22025 				  struct lpfc_pvt_pool *pvt_pool,
22026 				  struct lpfc_nodelist *ndlp)
22027 {
22028 	struct lpfc_io_buf *lpfc_ncmd;
22029 	struct lpfc_io_buf *lpfc_ncmd_next;
22030 	unsigned long iflag;
22031 
22032 	lpfc_qp_spin_lock_irqsave(&pvt_pool->lock, iflag, qp, alloc_pvt_pool);
22033 	list_for_each_entry_safe(lpfc_ncmd, lpfc_ncmd_next,
22034 				 &pvt_pool->list, list) {
22035 		if (lpfc_test_rrq_active(
22036 			phba, ndlp, lpfc_ncmd->cur_iocbq.sli4_lxritag))
22037 			continue;
22038 		list_del(&lpfc_ncmd->list);
22039 		pvt_pool->count--;
22040 		spin_unlock_irqrestore(&pvt_pool->lock, iflag);
22041 		return lpfc_ncmd;
22042 	}
22043 	spin_unlock_irqrestore(&pvt_pool->lock, iflag);
22044 
22045 	return NULL;
22046 }
22047 
22048 /**
22049  * lpfc_get_io_buf_from_expedite_pool - Get one free IO buf from expedite pool
22050  * @phba: pointer to lpfc hba data structure.
22051  *
22052  * This routine tries to get one free IO buf from expedite pool.
22053  *
22054  * Return:
22055  *   pointer to one free IO buf - if expedite pool is not empty
22056  *   NULL - if expedite pool is empty
22057  **/
22058 static struct lpfc_io_buf *
22059 lpfc_get_io_buf_from_expedite_pool(struct lpfc_hba *phba)
22060 {
22061 	struct lpfc_io_buf *lpfc_ncmd = NULL, *iter;
22062 	struct lpfc_io_buf *lpfc_ncmd_next;
22063 	unsigned long iflag;
22064 	struct lpfc_epd_pool *epd_pool;
22065 
22066 	epd_pool = &phba->epd_pool;
22067 
22068 	spin_lock_irqsave(&epd_pool->lock, iflag);
22069 	if (epd_pool->count > 0) {
22070 		list_for_each_entry_safe(iter, lpfc_ncmd_next,
22071 					 &epd_pool->list, list) {
22072 			list_del(&iter->list);
22073 			epd_pool->count--;
22074 			lpfc_ncmd = iter;
22075 			break;
22076 		}
22077 	}
22078 	spin_unlock_irqrestore(&epd_pool->lock, iflag);
22079 
22080 	return lpfc_ncmd;
22081 }
22082 
22083 /**
22084  * lpfc_get_io_buf_from_multixri_pools - Get one free IO buf
22085  * @phba: pointer to lpfc hba data structure.
22086  * @ndlp: pointer to lpfc nodelist data structure.
22087  * @hwqid: index of the HWQ to allocate from
22088  * @expedite: 1 means this request is urgent.
22089  *
22090  * This routine will do the following actions and then return a pointer to
22091  * one free IO buf.
22092  *
22093  * 1. If the private free xri pool is empty, move some XRIs from the public
22094  *    to the private pool.
22095  * 2. Get one XRI from private free xri pool.
22096  * 3. If we fail to get one from pvt_pool and this is an expedite request,
22097  *    get one free xri from expedite pool.
22098  *
22099  * Note: ndlp is only used on SCSI side for RRQ testing.
22100  *       The caller should pass NULL for ndlp on NVME side.
22101  *
22102  * Return:
22103  *   pointer to one free IO buf - if one is available
22104  *   NULL - if no free IO buf is available
22105  **/
22106 static struct lpfc_io_buf *
22107 lpfc_get_io_buf_from_multixri_pools(struct lpfc_hba *phba,
22108 				    struct lpfc_nodelist *ndlp,
22109 				    int hwqid, int expedite)
22110 {
22111 	struct lpfc_sli4_hdw_queue *qp;
22112 	struct lpfc_multixri_pool *multixri_pool;
22113 	struct lpfc_pvt_pool *pvt_pool;
22114 	struct lpfc_io_buf *lpfc_ncmd;
22115 
22116 	qp = &phba->sli4_hba.hdwq[hwqid];
22117 	lpfc_ncmd = NULL;
22118 	if (!qp) {
22119 		lpfc_printf_log(phba, KERN_INFO,
22120 				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22121 				"5556 NULL qp for hwqid  x%x\n", hwqid);
22122 		return lpfc_ncmd;
22123 	}
22124 	multixri_pool = qp->p_multixri_pool;
22125 	if (!multixri_pool) {
22126 		lpfc_printf_log(phba, KERN_INFO,
22127 				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22128 				"5557 NULL multixri for hwqid  x%x\n", hwqid);
22129 		return lpfc_ncmd;
22130 	}
22131 	pvt_pool = &multixri_pool->pvt_pool;
22132 	if (!pvt_pool) {
22133 		lpfc_printf_log(phba, KERN_INFO,
22134 				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22135 				"5558 NULL pvt_pool for hwqid  x%x\n", hwqid);
22136 		return lpfc_ncmd;
22137 	}
22138 	multixri_pool->io_req_count++;
22139 
22140 	/* If pvt_pool is empty, move some XRIs from public to private pool */
22141 	if (pvt_pool->count == 0)
22142 		lpfc_move_xri_pbl_to_pvt(phba, hwqid, XRI_BATCH);
22143 
22144 	/* Get one XRI from private free xri pool */
22145 	lpfc_ncmd = lpfc_get_io_buf_from_private_pool(phba, qp, pvt_pool, ndlp);
22146 
22147 	if (lpfc_ncmd) {
22148 		lpfc_ncmd->hdwq = qp;
22149 		lpfc_ncmd->hdwq_no = hwqid;
22150 	} else if (expedite) {
22151 		/* If we fail to get one from pvt_pool and this is an expedite
22152 		 * request, get one free xri from expedite pool.
22153 		 */
22154 		lpfc_ncmd = lpfc_get_io_buf_from_expedite_pool(phba);
22155 	}
22156 
22157 	return lpfc_ncmd;
22158 }
22159 
22160 static inline struct lpfc_io_buf *
22161 lpfc_io_buf(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp, int idx)
22162 {
22163 	struct lpfc_sli4_hdw_queue *qp;
22164 	struct lpfc_io_buf *lpfc_cmd, *lpfc_cmd_next;
22165 
22166 	qp = &phba->sli4_hba.hdwq[idx];
22167 	list_for_each_entry_safe(lpfc_cmd, lpfc_cmd_next,
22168 				 &qp->lpfc_io_buf_list_get, list) {
22169 		if (lpfc_test_rrq_active(phba, ndlp,
22170 					 lpfc_cmd->cur_iocbq.sli4_lxritag))
22171 			continue;
22172 
22173 		if (lpfc_cmd->flags & LPFC_SBUF_NOT_POSTED)
22174 			continue;
22175 
22176 		list_del_init(&lpfc_cmd->list);
22177 		qp->get_io_bufs--;
22178 		lpfc_cmd->hdwq = qp;
22179 		lpfc_cmd->hdwq_no = idx;
22180 		return lpfc_cmd;
22181 	}
22182 	return NULL;
22183 }
22184 
22185 /**
22186  * lpfc_get_io_buf - Get one IO buffer from free pool
22187  * @phba: The HBA for which this call is being executed.
22188  * @ndlp: pointer to lpfc nodelist data structure.
22189  * @hwqid: index of the HWQ to allocate from
22190  * @expedite: 1 means this request is urgent.
22191  *
22192  * This routine gets one IO buffer from the free pool. If cfg_xri_rebalancing==1,
22193  * it removes an IO buffer from the multiXRI pools. If cfg_xri_rebalancing==0, it
22194  * removes an IO buffer from the head of the @hdwq io_buf_list for the caller.
22195  *
22196  * Note: ndlp is only used on SCSI side for RRQ testing.
22197  *       The caller should pass NULL for ndlp on NVME side.
22198  *
22199  * Return codes:
22200  *   NULL - Error
22201  *   Pointer to lpfc_io_buf - Success
22202  **/
22203 struct lpfc_io_buf *lpfc_get_io_buf(struct lpfc_hba *phba,
22204 				    struct lpfc_nodelist *ndlp,
22205 				    u32 hwqid, int expedite)
22206 {
22207 	struct lpfc_sli4_hdw_queue *qp;
22208 	unsigned long iflag;
22209 	struct lpfc_io_buf *lpfc_cmd;
22210 
22211 	qp = &phba->sli4_hba.hdwq[hwqid];
22212 	lpfc_cmd = NULL;
22213 	if (!qp) {
22214 		lpfc_printf_log(phba, KERN_WARNING,
22215 				LOG_SLI | LOG_NVME_ABTS | LOG_FCP,
22216 				"5555 NULL qp for hwqid  x%x\n", hwqid);
22217 		return lpfc_cmd;
22218 	}
22219 
22220 	if (phba->cfg_xri_rebalancing)
22221 		lpfc_cmd = lpfc_get_io_buf_from_multixri_pools(
22222 			phba, ndlp, hwqid, expedite);
22223 	else {
22224 		lpfc_qp_spin_lock_irqsave(&qp->io_buf_list_get_lock, iflag,
22225 					  qp, alloc_xri_get);
22226 		if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT || expedite)
22227 			lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22228 		if (!lpfc_cmd) {
22229 			lpfc_qp_spin_lock(&qp->io_buf_list_put_lock,
22230 					  qp, alloc_xri_put);
22231 			list_splice(&qp->lpfc_io_buf_list_put,
22232 				    &qp->lpfc_io_buf_list_get);
22233 			qp->get_io_bufs += qp->put_io_bufs;
22234 			INIT_LIST_HEAD(&qp->lpfc_io_buf_list_put);
22235 			qp->put_io_bufs = 0;
22236 			spin_unlock(&qp->io_buf_list_put_lock);
22237 			if (qp->get_io_bufs > LPFC_NVME_EXPEDITE_XRICNT ||
22238 			    expedite)
22239 				lpfc_cmd = lpfc_io_buf(phba, ndlp, hwqid);
22240 		}
22241 		spin_unlock_irqrestore(&qp->io_buf_list_get_lock, iflag);
22242 	}
22243 
22244 	return lpfc_cmd;
22245 }
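/*
 * Usage sketch (hypothetical caller): allocate an IO buf for a SCSI
 * command on a given hardware queue; NVME callers would pass a NULL
 * ndlp, as noted above.  The busy-return policy shown is an assumption.
 *
 *	struct lpfc_io_buf *iobuf;
 *
 *	iobuf = lpfc_get_io_buf(phba, ndlp, hwqid, 0);
 *	if (!iobuf)
 *		return SCSI_MLQUEUE_HOST_BUSY;
 */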
22246 
22247 /**
22248  * lpfc_read_object - Retrieve object data from HBA
22249  * @phba: The HBA for which this call is being executed.
22250  * @rdobject: Pathname of object data we want to read.
22251  * @datap: Pointer to where data will be copied to.
22252  * @datasz: size of data area
22253  *
22254  * This routine is limited to object sizes of LPFC_BPL_SIZE (1024) or less.
22255  * The data will be truncated if datasz is not large enough.
22256  * Version 1 is not supported with Embedded mbox cmd, so we must use version 0.
22257  * Returns the actual bytes read from the object.
22258  *
22259  * This routine is hard coded to use a poll completion.  Unlike other
22260  * sli4_config mailboxes, it uses lpfc_mbuf memory which is not
22261  * cleaned up in lpfc_sli4_cmd_mbox_free.  If this routine is modified
22262  * to use interrupt-based completions, code is needed to fully cleanup
22263  * the memory.
22264  */
22265 int
22266 lpfc_read_object(struct lpfc_hba *phba, char *rdobject, uint32_t *datap,
22267 		 uint32_t datasz)
22268 {
22269 	struct lpfc_mbx_read_object *read_object;
22270 	LPFC_MBOXQ_t *mbox;
22271 	int rc, length, eof, j, byte_cnt = 0;
22272 	uint32_t shdr_status, shdr_add_status;
22273 	union lpfc_sli4_cfg_shdr *shdr;
22274 	struct lpfc_dmabuf *pcmd;
22275 	u32 rd_object_name[LPFC_MBX_OBJECT_NAME_LEN_DW] = {0};
22276 
22277 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
22278 	if (!mbox)
22279 		return -ENOMEM;
22280 	length = (sizeof(struct lpfc_mbx_read_object) -
22281 		  sizeof(struct lpfc_sli4_cfg_mhdr));
22282 	lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
22283 			 LPFC_MBOX_OPCODE_READ_OBJECT,
22284 			 length, LPFC_SLI4_MBX_EMBED);
22285 	read_object = &mbox->u.mqe.un.read_object;
22286 	shdr = (union lpfc_sli4_cfg_shdr *)&read_object->header.cfg_shdr;
22287 
22288 	bf_set(lpfc_mbox_hdr_version, &shdr->request, LPFC_Q_CREATE_VERSION_0);
22289 	bf_set(lpfc_mbx_rd_object_rlen, &read_object->u.request, datasz);
22290 	read_object->u.request.rd_object_offset = 0;
22291 	read_object->u.request.rd_object_cnt = 1;
22292 
22293 	memset((void *)read_object->u.request.rd_object_name, 0,
22294 	       LPFC_OBJ_NAME_SZ);
22295 	scnprintf((char *)rd_object_name, sizeof(rd_object_name), "%s", rdobject);
22296 	for (j = 0; j < strlen(rdobject); j++)
22297 		read_object->u.request.rd_object_name[j] =
22298 			cpu_to_le32(rd_object_name[j]);
22299 
22300 	pcmd = kmalloc_obj(*pcmd);
22301 	if (pcmd)
22302 		pcmd->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &pcmd->phys);
22303 	if (!pcmd || !pcmd->virt) {
22304 		kfree(pcmd);
22305 		mempool_free(mbox, phba->mbox_mem_pool);
22306 		return -ENOMEM;
22307 	}
22308 	memset((void *)pcmd->virt, 0, LPFC_BPL_SIZE);
22309 	read_object->u.request.rd_object_hbuf[0].pa_lo =
22310 		putPaddrLow(pcmd->phys);
22311 	read_object->u.request.rd_object_hbuf[0].pa_hi =
22312 		putPaddrHigh(pcmd->phys);
22313 	read_object->u.request.rd_object_hbuf[0].length = LPFC_BPL_SIZE;
22314 
22315 	mbox->vport = phba->pport;
22316 	mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
22317 	mbox->ctx_ndlp = NULL;
22318 
22319 	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
22320 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
22321 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
22322 
22323 	if (shdr_status == STATUS_FAILED &&
22324 	    shdr_add_status == ADD_STATUS_INVALID_OBJECT_NAME) {
22325 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22326 				"4674 No port cfg file in FW.\n");
22327 		byte_cnt = -ENOENT;
22328 	} else if (shdr_status || shdr_add_status || rc) {
22329 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT | LOG_CGN_MGMT,
22330 				"2625 READ_OBJECT mailbox failed with "
22331 				"status x%x add_status x%x, mbx status x%x\n",
22332 				shdr_status, shdr_add_status, rc);
22333 		byte_cnt = -ENXIO;
22334 	} else {
22335 		/* Success */
22336 		length = read_object->u.response.rd_object_actual_rlen;
22337 		eof = bf_get(lpfc_mbx_rd_object_eof, &read_object->u.response);
22338 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT | LOG_CGN_MGMT,
22339 				"2626 READ_OBJECT Success len %d:%d, EOF %d\n",
22340 				length, datasz, eof);
22341 
22342 		/* Detect the port config file exists but is empty */
22343 		if (!length && eof) {
22344 			byte_cnt = 0;
22345 			goto exit;
22346 		}
22347 
22348 		byte_cnt = length;
22349 		lpfc_sli_pcimem_bcopy(pcmd->virt, datap, byte_cnt);
22350 	}
22351 
22352  exit:
22353 	/* This is an embedded SLI4 mailbox with an external buffer allocated.
22354 	 * Free the pcmd and then cleanup with the correct routine.
22355 	 */
22356 	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
22357 	kfree(pcmd);
22358 	lpfc_sli4_mbox_cmd_free(phba, mbox);
22359 	return byte_cnt;
22360 }
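/*
 * Usage sketch (illustrative): read a small object from the adapter into
 * a local buffer.  The object pathname below is an assumption for the
 * example, not necessarily an object the firmware provides.
 *
 *	u32 data[LPFC_BPL_SIZE / sizeof(u32)];
 *	int len;
 *
 *	len = lpfc_read_object(phba, "/cfg/example.cfg", data, sizeof(data));
 *	if (len == -ENOENT)
 *		(object does not exist in FW)
 *	else if (len < 0)
 *		(mailbox or memory failure)
 *	else
 *		(len bytes, at most datasz, are now in data)
 */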
22361 
22362 /**
22363  * lpfc_get_sgl_per_hdwq - Get one SGL chunk from hdwq's pool
22364  * @phba: The HBA for which this call is being executed.
22365  * @lpfc_buf: IO buf structure to append the SGL chunk
22366  *
22367  * This routine gets one SGL chunk buffer from hdwq's SGL chunk pool,
22368  * and will allocate an SGL chunk if the pool is empty.
22369  *
22370  * Return codes:
22371  *   NULL - Error
22372  *   Pointer to sli4_hybrid_sgl - Success
22373  **/
22374 struct sli4_hybrid_sgl *
22375 lpfc_get_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22376 {
22377 	struct sli4_hybrid_sgl *list_entry = NULL;
22378 	struct sli4_hybrid_sgl *tmp = NULL;
22379 	struct sli4_hybrid_sgl *allocated_sgl = NULL;
22380 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22381 	struct list_head *buf_list = &hdwq->sgl_list;
22382 	unsigned long iflags;
22383 
22384 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22385 
22386 	if (likely(!list_empty(buf_list))) {
22387 		/* break off 1 chunk from the sgl_list */
22388 		list_for_each_entry_safe(list_entry, tmp,
22389 					 buf_list, list_node) {
22390 			list_move_tail(&list_entry->list_node,
22391 				       &lpfc_buf->dma_sgl_xtra_list);
22392 			break;
22393 		}
22394 	} else {
22395 		/* allocate more */
22396 		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22397 		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22398 				   cpu_to_node(hdwq->io_wq->chann));
22399 		if (!tmp) {
22400 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22401 					"8353 error kmalloc memory for HDWQ "
22402 					"%d %s\n",
22403 					lpfc_buf->hdwq_no, __func__);
22404 			return NULL;
22405 		}
22406 
22407 		tmp->dma_sgl = dma_pool_alloc(phba->lpfc_sg_dma_buf_pool,
22408 					      GFP_ATOMIC, &tmp->dma_phys_sgl);
22409 		if (!tmp->dma_sgl) {
22410 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22411 					"8354 error pool_alloc memory for HDWQ "
22412 					"%d %s\n",
22413 					lpfc_buf->hdwq_no, __func__);
22414 			kfree(tmp);
22415 			return NULL;
22416 		}
22417 
22418 		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22419 		list_add_tail(&tmp->list_node, &lpfc_buf->dma_sgl_xtra_list);
22420 	}
22421 
22422 	allocated_sgl = list_last_entry(&lpfc_buf->dma_sgl_xtra_list,
22423 					struct sli4_hybrid_sgl,
22424 					list_node);
22425 
22426 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22427 
22428 	return allocated_sgl;
22429 }
22430 
22431 /**
22432  * lpfc_put_sgl_per_hdwq - Put one SGL chunk into hdwq pool
22433  * @phba: The HBA for which this call is being executed.
22434  * @lpfc_buf: IO buf structure with the SGL chunk
22435  *
22436  * This routine puts one SGL chunk buffer into hdwq's SGL chunk pool.
22437  *
22438  * Return codes:
22439  *   0 - Success
22440  *   -EINVAL - Error
22441  **/
22442 int
22443 lpfc_put_sgl_per_hdwq(struct lpfc_hba *phba, struct lpfc_io_buf *lpfc_buf)
22444 {
22445 	int rc = 0;
22446 	struct sli4_hybrid_sgl *list_entry = NULL;
22447 	struct sli4_hybrid_sgl *tmp = NULL;
22448 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22449 	struct list_head *buf_list = &hdwq->sgl_list;
22450 	unsigned long iflags;
22451 
22452 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22453 
22454 	if (likely(!list_empty(&lpfc_buf->dma_sgl_xtra_list))) {
22455 		list_for_each_entry_safe(list_entry, tmp,
22456 					 &lpfc_buf->dma_sgl_xtra_list,
22457 					 list_node) {
22458 			list_move_tail(&list_entry->list_node,
22459 				       buf_list);
22460 		}
22461 	} else {
22462 		rc = -EINVAL;
22463 	}
22464 
22465 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22466 	return rc;
22467 }
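/*
 * Pairing sketch (illustrative): a get from the hdwq SGL chunk pool is
 * balanced by a put once the extra SGL chunk is no longer needed.
 *
 *	struct sli4_hybrid_sgl *sgl;
 *
 *	sgl = lpfc_get_sgl_per_hdwq(phba, lpfc_buf);
 *	if (!sgl)
 *		(fail the IO: no memory for an extra SGL chunk)
 *	(chain sgl->dma_sgl / sgl->dma_phys_sgl into the IO's SGL)
 *	lpfc_put_sgl_per_hdwq(phba, lpfc_buf);
 */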
22468 
22469 /**
22470  * lpfc_free_sgl_per_hdwq - Free all SGL chunks of hdwq pool
22471  * @phba: phba object
22472  * @hdwq: hdwq to cleanup sgl buff resources on
22473  *
22474  * This routine frees all SGL chunks of hdwq SGL chunk pool.
22475  *
22476  * Return codes:
22477  *   None
22478  **/
22479 void
22480 lpfc_free_sgl_per_hdwq(struct lpfc_hba *phba,
22481 		       struct lpfc_sli4_hdw_queue *hdwq)
22482 {
22483 	struct list_head *buf_list = &hdwq->sgl_list;
22484 	struct sli4_hybrid_sgl *list_entry = NULL;
22485 	struct sli4_hybrid_sgl *tmp = NULL;
22486 	unsigned long iflags;
22487 
22488 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22489 
22490 	/* Free sgl pool */
22491 	list_for_each_entry_safe(list_entry, tmp,
22492 				 buf_list, list_node) {
22493 		list_del(&list_entry->list_node);
22494 		dma_pool_free(phba->lpfc_sg_dma_buf_pool,
22495 			      list_entry->dma_sgl,
22496 			      list_entry->dma_phys_sgl);
22497 		kfree(list_entry);
22498 	}
22499 
22500 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22501 }
22502 
22503 /**
22504  * lpfc_get_cmd_rsp_buf_per_hdwq - Get one CMD/RSP buffer from hdwq
22505  * @phba: The HBA for which this call is being executed.
22506  * @lpfc_buf: IO buf structure to attach the CMD/RSP buffer
22507  *
22508  * This routine gets one CMD/RSP buffer from hdwq's CMD/RSP pool,
22509  * and will allocate a CMD/RSP buffer if the pool is empty.
22510  *
22511  * Return codes:
22512  *   NULL - Error
22513  *   Pointer to fcp_cmd_rsp_buf - Success
22514  **/
22515 struct fcp_cmd_rsp_buf *
22516 lpfc_get_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22517 			      struct lpfc_io_buf *lpfc_buf)
22518 {
22519 	struct fcp_cmd_rsp_buf *list_entry = NULL;
22520 	struct fcp_cmd_rsp_buf *tmp = NULL;
22521 	struct fcp_cmd_rsp_buf *allocated_buf = NULL;
22522 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22523 	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22524 	unsigned long iflags;
22525 
22526 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22527 
22528 	if (likely(!list_empty(buf_list))) {
22529 		/* break off 1 chunk from the list */
22530 		list_for_each_entry_safe(list_entry, tmp,
22531 					 buf_list,
22532 					 list_node) {
22533 			list_move_tail(&list_entry->list_node,
22534 				       &lpfc_buf->dma_cmd_rsp_list);
22535 			break;
22536 		}
22537 	} else {
22538 		/* allocate more */
22539 		spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22540 		tmp = kmalloc_node(sizeof(*tmp), GFP_ATOMIC,
22541 				   cpu_to_node(hdwq->io_wq->chann));
22542 		if (!tmp) {
22543 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22544 					"8355 error kmalloc memory for HDWQ "
22545 					"%d %s\n",
22546 					lpfc_buf->hdwq_no, __func__);
22547 			return NULL;
22548 		}
22549 
22550 		tmp->fcp_cmnd = dma_pool_zalloc(phba->lpfc_cmd_rsp_buf_pool,
22551 						GFP_ATOMIC,
22552 						&tmp->fcp_cmd_rsp_dma_handle);
22553 
22554 		if (!tmp->fcp_cmnd) {
22555 			lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
22556 					"8356 error pool_alloc memory for HDWQ "
22557 					"%d %s\n",
22558 					lpfc_buf->hdwq_no, __func__);
22559 			kfree(tmp);
22560 			return NULL;
22561 		}
22562 
22563 		tmp->fcp_rsp = (struct fcp_rsp *)((uint8_t *)tmp->fcp_cmnd +
22564 				sizeof(struct fcp_cmnd32));
22565 
22566 		spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22567 		list_add_tail(&tmp->list_node, &lpfc_buf->dma_cmd_rsp_list);
22568 	}
22569 
22570 	allocated_buf = list_last_entry(&lpfc_buf->dma_cmd_rsp_list,
22571 					struct fcp_cmd_rsp_buf,
22572 					list_node);
22573 
22574 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22575 
22576 	return allocated_buf;
22577 }
22578 
22579 /**
22580  * lpfc_put_cmd_rsp_buf_per_hdwq - Put one CMD/RSP buffer into hdwq pool
22581  * @phba: The HBA for which this call is being executed.
22582  * @lpfc_buf: IO buf structure with the CMD/RSP buf
22583  *
22584  * This routine puts one CMD/RSP buffer into executing CPU's CMD/RSP pool.
22585  *
22586  * Return codes:
22587  *   0 - Success
22588  *   -EINVAL - Error
22589  **/
22590 int
22591 lpfc_put_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22592 			      struct lpfc_io_buf *lpfc_buf)
22593 {
22594 	int rc = 0;
22595 	struct fcp_cmd_rsp_buf *list_entry = NULL;
22596 	struct fcp_cmd_rsp_buf *tmp = NULL;
22597 	struct lpfc_sli4_hdw_queue *hdwq = lpfc_buf->hdwq;
22598 	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22599 	unsigned long iflags;
22600 
22601 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22602 
22603 	if (likely(!list_empty(&lpfc_buf->dma_cmd_rsp_list))) {
22604 		list_for_each_entry_safe(list_entry, tmp,
22605 					 &lpfc_buf->dma_cmd_rsp_list,
22606 					 list_node) {
22607 			list_move_tail(&list_entry->list_node,
22608 				       buf_list);
22609 		}
22610 	} else {
22611 		rc = -EINVAL;
22612 	}
22613 
22614 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22615 	return rc;
22616 }
22617 
22618 /**
22619  * lpfc_free_cmd_rsp_buf_per_hdwq - Free all CMD/RSP chunks of hdwq pool
22620  * @phba: phba object
22621  * @hdwq: hdwq to cleanup cmd rsp buff resources on
22622  *
22623  * This routine frees all CMD/RSP buffers of hdwq's CMD/RSP buf pool.
22624  *
22625  * Return codes:
22626  *   None
22627  **/
22628 void
22629 lpfc_free_cmd_rsp_buf_per_hdwq(struct lpfc_hba *phba,
22630 			       struct lpfc_sli4_hdw_queue *hdwq)
22631 {
22632 	struct list_head *buf_list = &hdwq->cmd_rsp_buf_list;
22633 	struct fcp_cmd_rsp_buf *list_entry = NULL;
22634 	struct fcp_cmd_rsp_buf *tmp = NULL;
22635 	unsigned long iflags;
22636 
22637 	spin_lock_irqsave(&hdwq->hdwq_lock, iflags);
22638 
22639 	/* Free cmd_rsp buf pool */
22640 	list_for_each_entry_safe(list_entry, tmp,
22641 				 buf_list,
22642 				 list_node) {
22643 		list_del(&list_entry->list_node);
22644 		dma_pool_free(phba->lpfc_cmd_rsp_buf_pool,
22645 			      list_entry->fcp_cmnd,
22646 			      list_entry->fcp_cmd_rsp_dma_handle);
22647 		kfree(list_entry);
22648 	}
22649 
22650 	spin_unlock_irqrestore(&hdwq->hdwq_lock, iflags);
22651 }
22652 
22653 /**
22654  * lpfc_sli_prep_wqe - Prepare WQE for the command to be posted
22655  * @phba: phba object
22656  * @job: job entry of the command to be posted.
22657  *
22658  * Fill the common fields of the wqe for each of the command.
22659  *
22660  * Return codes:
22661  *	None
22662  **/
22663 void
22664 lpfc_sli_prep_wqe(struct lpfc_hba *phba, struct lpfc_iocbq *job)
22665 {
22666 	u8 cmnd;
22667 	u32 *pcmd;
22668 	u32 if_type = 0;
22669 	u32 abort_tag;
22670 	bool fip;
22671 	struct lpfc_nodelist *ndlp = NULL;
22672 	union lpfc_wqe128 *wqe = &job->wqe;
22673 	u8 command_type = ELS_COMMAND_NON_FIP;
22674 
22675 	fip = test_bit(HBA_FIP_SUPPORT, &phba->hba_flag);
22676 	/* The fcp commands will set command type */
22677 	if (job->cmd_flag &  LPFC_IO_FCP)
22678 		command_type = FCP_COMMAND;
22679 	else if (fip && (job->cmd_flag & LPFC_FIP_ELS_ID_MASK))
22680 		command_type = ELS_COMMAND_FIP;
22681 	else
22682 		command_type = ELS_COMMAND_NON_FIP;
22683 
22684 	abort_tag = job->iotag;
22685 	cmnd = bf_get(wqe_cmnd, &wqe->els_req.wqe_com);
22686 
22687 	switch (cmnd) {
22688 	case CMD_ELS_REQUEST64_WQE:
22689 		ndlp = job->ndlp;
22690 
22691 		if_type = bf_get(lpfc_sli_intf_if_type,
22692 				 &phba->sli4_hba.sli_intf);
22693 		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22694 			pcmd = (u32 *)job->cmd_dmabuf->virt;
22695 			if (pcmd && (*pcmd == ELS_CMD_FLOGI ||
22696 				     *pcmd == ELS_CMD_SCR ||
22697 				     *pcmd == ELS_CMD_RDF ||
22698 				     *pcmd == ELS_CMD_EDC ||
22699 				     *pcmd == ELS_CMD_RSCN_XMT ||
22700 				     *pcmd == ELS_CMD_FDISC ||
22701 				     *pcmd == ELS_CMD_LOGO ||
22702 				     *pcmd == ELS_CMD_QFPA ||
22703 				     *pcmd == ELS_CMD_UVEM ||
22704 				     *pcmd == ELS_CMD_PLOGI)) {
22705 				bf_set(els_req64_sp, &wqe->els_req, 1);
22706 				bf_set(els_req64_sid, &wqe->els_req,
22707 				       job->vport->fc_myDID);
22708 
22709 				if ((*pcmd == ELS_CMD_FLOGI) &&
22710 				    !(phba->fc_topology ==
22711 				      LPFC_TOPOLOGY_LOOP))
22712 					bf_set(els_req64_sid, &wqe->els_req, 0);
22713 
22714 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 1);
22715 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22716 				       phba->vpi_ids[job->vport->vpi]);
22717 			} else if (pcmd) {
22718 				bf_set(wqe_ct, &wqe->els_req.wqe_com, 0);
22719 				bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
22720 				       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22721 			}
22722 		}
22723 
22724 		bf_set(wqe_temp_rpi, &wqe->els_req.wqe_com,
22725 		       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22726 
22727 		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
22728 		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
22729 		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
22730 		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22731 		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
22732 		break;
22733 	case CMD_XMIT_ELS_RSP64_WQE:
22734 		ndlp = job->ndlp;
22735 
22736 		/* word4 */
22737 		wqe->xmit_els_rsp.word4 = 0;
22738 
22739 		if_type = bf_get(lpfc_sli_intf_if_type,
22740 				 &phba->sli4_hba.sli_intf);
22741 		if (if_type >= LPFC_SLI_INTF_IF_TYPE_2) {
22742 			if (test_bit(FC_PT2PT, &job->vport->fc_flag)) {
22743 				bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22744 				bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22745 				       job->vport->fc_myDID);
22746 				if (job->vport->fc_myDID == Fabric_DID) {
22747 					bf_set(wqe_els_did,
22748 					       &wqe->xmit_els_rsp.wqe_dest, 0);
22749 				}
22750 			}
22751 		}
22752 
22753 		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
22754 		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
22755 		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
22756 		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
22757 		       LPFC_WQE_LENLOC_WORD3);
22758 		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
22759 
22760 		if (phba->fc_topology == LPFC_TOPOLOGY_LOOP) {
22761 			bf_set(els_rsp64_sp, &wqe->xmit_els_rsp, 1);
22762 			bf_set(els_rsp64_sid, &wqe->xmit_els_rsp,
22763 			       job->vport->fc_myDID);
22764 			bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com, 1);
22765 		}
22766 
22767 		if (phba->sli_rev == LPFC_SLI_REV4) {
22768 			bf_set(wqe_rsp_temp_rpi, &wqe->xmit_els_rsp,
22769 			       phba->sli4_hba.rpi_ids[ndlp->nlp_rpi]);
22770 
22771 			if (bf_get(wqe_ct, &wqe->xmit_els_rsp.wqe_com))
22772 				bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
22773 				       phba->vpi_ids[job->vport->vpi]);
22774 		}
22775 		command_type = OTHER_COMMAND;
22776 		break;
22777 	case CMD_GEN_REQUEST64_WQE:
22778 		/* Word 10 */
22779 		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
22780 		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
22781 		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
22782 		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
22783 		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
22784 		command_type = OTHER_COMMAND;
22785 		break;
22786 	case CMD_XMIT_SEQUENCE64_WQE:
22787 		if (phba->link_flag & LS_LOOPBACK_MODE)
22788 			bf_set(wqe_xo, &wqe->xmit_sequence.wge_ctl, 1);
22789 
22790 		wqe->xmit_sequence.rsvd3 = 0;
22791 		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
22792 		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
22793 		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
22794 		       LPFC_WQE_IOD_WRITE);
22795 		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
22796 		       LPFC_WQE_LENLOC_WORD12);
22797 		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
22798 		command_type = OTHER_COMMAND;
22799 		break;
22800 	case CMD_XMIT_BLS_RSP64_WQE:
22801 		bf_set(xmit_bls_rsp64_seqcnthi, &wqe->xmit_bls_rsp, 0xffff);
22802 		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
22803 		bf_set(wqe_ct, &wqe->xmit_bls_rsp.wqe_com, 1);
22804 		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
22805 		       phba->vpi_ids[phba->pport->vpi]);
22806 		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
22807 		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
22808 		       LPFC_WQE_LENLOC_NONE);
22809 		/* Overwrite the pre-set command type with OTHER_COMMAND */
22810 		command_type = OTHER_COMMAND;
22811 		break;
22812 	case CMD_FCP_ICMND64_WQE:	/* task mgmt commands */
22813 	case CMD_ABORT_XRI_WQE:		/* abort iotag */
22814 	case CMD_SEND_FRAME:		/* mds loopback */
22815 		/* cases already formatted for sli4 wqe - no chgs necessary */
22816 		return;
22817 	default:
22818 		dump_stack();
22819 		lpfc_printf_log(phba, KERN_ERR, LOG_TRACE_EVENT,
22820 				"6207 Invalid command 0x%x\n",
22821 				cmnd);
22822 		break;
22823 	}
22824 
22825 	wqe->generic.wqe_com.abort_tag = abort_tag;
22826 	bf_set(wqe_reqtag, &wqe->generic.wqe_com, job->iotag);
22827 	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
22828 	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
22829 }
22830
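/*
 * Flow sketch (illustrative): callers typically set the command opcode
 * and command-specific words first, then let lpfc_sli_prep_wqe() fill
 * the common words (abort/request tags, command type, default CQ ID)
 * before handing the job to lpfc_sli4_issue_wqe().
 *
 *	bf_set(wqe_cmnd, &job->wqe.gen_req.wqe_com, CMD_GEN_REQUEST64_WQE);
 *	(fill the remaining command-specific words)
 *	lpfc_sli_prep_wqe(phba, job);
 *	rc = lpfc_sli4_issue_wqe(phba, qp, job);
 */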